diff --git a/builtin/providers/README b/builtin/providers/README new file mode 100644 index 000000000..00ffa7145 --- /dev/null +++ b/builtin/providers/README @@ -0,0 +1 @@ +providers moved to github.com/terraform-providers diff --git a/builtin/providers/alicloud/common.go b/builtin/providers/alicloud/common.go deleted file mode 100644 index e9bb1a9f8..000000000 --- a/builtin/providers/alicloud/common.go +++ /dev/null @@ -1,93 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" -) - -type InstanceNetWork string - -const ( - ClassicNet = InstanceNetWork("classic") - VpcNet = InstanceNetWork("vpc") -) - -// timeout for common product, ecs e.g. -const defaultTimeout = 120 - -// timeout for long time progerss product, rds e.g. -const defaultLongTimeout = 1000 - -func getRegion(d *schema.ResourceData, meta interface{}) common.Region { - return meta.(*AliyunClient).Region -} - -func notFoundError(err error) bool { - if e, ok := err.(*common.Error); ok && - (e.StatusCode == 404 || e.ErrorResponse.Message == "Not found" || e.Code == InstanceNotfound) { - return true - } - - return false -} - -// Protocol represents network protocol -type Protocol string - -// Constants of protocol definition -const ( - Http = Protocol("http") - Https = Protocol("https") - Tcp = Protocol("tcp") - Udp = Protocol("udp") -) - -// ValidProtocols network protocol list -var ValidProtocols = []Protocol{Http, Https, Tcp, Udp} - -// simple array value check method, support string type only -func isProtocolValid(value string) bool { - res := false - for _, v := range ValidProtocols { - if string(v) == value { - res = true - } - } - return res -} - -var DefaultBusinessInfo = ecs.BusinessInfo{ - Pack: "terraform", -} - -// default region for all resource -const DEFAULT_REGION = "cn-beijing" - -// default security ip for db -const DEFAULT_DB_SECURITY_IP = "127.0.0.1" - -// we the count of 
create instance is only one -const DEFAULT_INSTANCE_COUNT = 1 - -// symbol of multiIZ -const MULTI_IZ_SYMBOL = "MAZ" - -// default connect port of db -const DB_DEFAULT_CONNECT_PORT = "3306" - -const COMMA_SEPARATED = "," - -const COLON_SEPARATED = ":" - -const LOCAL_HOST_IP = "127.0.0.1" - -// Takes the result of flatmap.Expand for an array of strings -// and returns a []string -func expandStringList(configured []interface{}) []string { - vs := make([]string, 0, len(configured)) - for _, v := range configured { - vs = append(vs, v.(string)) - } - return vs -} diff --git a/builtin/providers/alicloud/config.go b/builtin/providers/alicloud/config.go deleted file mode 100644 index f84c7e02a..000000000 --- a/builtin/providers/alicloud/config.go +++ /dev/null @@ -1,138 +0,0 @@ -package alicloud - -import ( - "fmt" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/denverdino/aliyungo/ess" - "github.com/denverdino/aliyungo/rds" - "github.com/denverdino/aliyungo/slb" -) - -// Config of aliyun -type Config struct { - AccessKey string - SecretKey string - Region common.Region -} - -// AliyunClient of aliyun -type AliyunClient struct { - Region common.Region - ecsconn *ecs.Client - essconn *ess.Client - rdsconn *rds.Client - // use new version - ecsNewconn *ecs.Client - vpcconn *ecs.Client - slbconn *slb.Client -} - -// Client for AliyunClient -func (c *Config) Client() (*AliyunClient, error) { - err := c.loadAndValidate() - if err != nil { - return nil, err - } - - ecsconn, err := c.ecsConn() - if err != nil { - return nil, err - } - - ecsNewconn, err := c.ecsConn() - if err != nil { - return nil, err - } - ecsNewconn.SetVersion(EcsApiVersion20160314) - - rdsconn, err := c.rdsConn() - if err != nil { - return nil, err - } - - slbconn, err := c.slbConn() - if err != nil { - return nil, err - } - - vpcconn, err := c.vpcConn() - if err != nil { - return nil, err - } - - essconn, err := c.essConn() - if err != nil { - return nil, err 
- } - - return &AliyunClient{ - Region: c.Region, - ecsconn: ecsconn, - ecsNewconn: ecsNewconn, - vpcconn: vpcconn, - slbconn: slbconn, - rdsconn: rdsconn, - essconn: essconn, - }, nil -} - -const BusinessInfoKey = "Terraform" - -func (c *Config) loadAndValidate() error { - err := c.validateRegion() - if err != nil { - return err - } - - return nil -} - -func (c *Config) validateRegion() error { - - for _, valid := range common.ValidRegions { - if c.Region == valid { - return nil - } - } - - return fmt.Errorf("Not a valid region: %s", c.Region) -} - -func (c *Config) ecsConn() (*ecs.Client, error) { - client := ecs.NewECSClient(c.AccessKey, c.SecretKey, c.Region) - client.SetBusinessInfo(BusinessInfoKey) - - _, err := client.DescribeRegions() - - if err != nil { - return nil, err - } - - return client, nil -} - -func (c *Config) rdsConn() (*rds.Client, error) { - client := rds.NewRDSClient(c.AccessKey, c.SecretKey, c.Region) - client.SetBusinessInfo(BusinessInfoKey) - return client, nil -} - -func (c *Config) slbConn() (*slb.Client, error) { - client := slb.NewSLBClient(c.AccessKey, c.SecretKey, c.Region) - client.SetBusinessInfo(BusinessInfoKey) - return client, nil -} - -func (c *Config) vpcConn() (*ecs.Client, error) { - client := ecs.NewVPCClient(c.AccessKey, c.SecretKey, c.Region) - client.SetBusinessInfo(BusinessInfoKey) - return client, nil - -} -func (c *Config) essConn() (*ess.Client, error) { - client := ess.NewESSClient(c.AccessKey, c.SecretKey, c.Region) - client.SetBusinessInfo(BusinessInfoKey) - return client, nil -} diff --git a/builtin/providers/alicloud/data_source_alicloud_common.go b/builtin/providers/alicloud/data_source_alicloud_common.go deleted file mode 100644 index 1da432cfc..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_common.go +++ /dev/null @@ -1,18 +0,0 @@ -package alicloud - -import ( - "bytes" - "fmt" - "github.com/hashicorp/terraform/helper/hashcode" -) - -// Generates a hash for the set hash function used by the 
ID -func dataResourceIdHash(ids []string) string { - var buf bytes.Buffer - - for _, id := range ids { - buf.WriteString(fmt.Sprintf("%s-", id)) - } - - return fmt.Sprintf("%d", hashcode.String(buf.String())) -} diff --git a/builtin/providers/alicloud/data_source_alicloud_images.go b/builtin/providers/alicloud/data_source_alicloud_images.go deleted file mode 100644 index d9a873782..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_images.go +++ /dev/null @@ -1,337 +0,0 @@ -package alicloud - -import ( - "fmt" - "log" - "regexp" - "sort" - "time" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAlicloudImages() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAlicloudImagesRead, - - Schema: map[string]*schema.Schema{ - "name_regex": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateNameRegex, - }, - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "owners": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateImageOwners, - }, - // Computed values. 
- "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "image_id": { - Type: schema.TypeString, - Computed: true, - }, - "architecture": { - Type: schema.TypeString, - Computed: true, - }, - "creation_time": { - Type: schema.TypeString, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - "image_owner_alias": { - Type: schema.TypeString, - Computed: true, - }, - "os_type": { - Type: schema.TypeString, - Computed: true, - }, - "os_name": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "platform": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - }, - // Complex computed values - "disk_device_mappings": { - Type: schema.TypeList, - Computed: true, - //Set: imageDiskDeviceMappingHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "product_code": { - Type: schema.TypeString, - Computed: true, - }, - "is_self_shared": { - Type: schema.TypeString, - Computed: true, - }, - "is_subscribed": { - Type: schema.TypeBool, - Computed: true, - }, - "is_copied": { - Type: schema.TypeBool, - Computed: true, - }, - "is_support_io_optimized": { - Type: schema.TypeBool, - Computed: true, - }, - "image_version": { - Type: schema.TypeString, - Computed: true, - }, - "progress": { - Type: schema.TypeString, - Computed: true, - }, - "usage": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - }, - }, - }, - } -} - -// 
dataSourceAlicloudImagesDescriptionRead performs the Alicloud Image lookup. -func dataSourceAlicloudImagesRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - nameRegex, nameRegexOk := d.GetOk("name_regex") - owners, ownersOk := d.GetOk("owners") - mostRecent, mostRecentOk := d.GetOk("most_recent") - - if nameRegexOk == false && ownersOk == false && mostRecentOk == false { - return fmt.Errorf("One of name_regex, owners or most_recent must be assigned") - } - - params := &ecs.DescribeImagesArgs{ - RegionId: getRegion(d, meta), - } - - if ownersOk { - params.ImageOwnerAlias = ecs.ImageOwnerAlias(owners.(string)) - } - - var allImages []ecs.ImageType - - for { - images, paginationResult, err := conn.DescribeImages(params) - if err != nil { - break - } - - allImages = append(allImages, images...) - - pagination := paginationResult.NextPage() - if pagination == nil { - break - } - - params.Pagination = *pagination - } - - var filteredImages []ecs.ImageType - if nameRegexOk { - r := regexp.MustCompile(nameRegex.(string)) - for _, image := range allImages { - // Check for a very rare case where the response would include no - // image name. No name means nothing to attempt a match against, - // therefore we are skipping such image. - if image.ImageName == "" { - log.Printf("[WARN] Unable to find Image name to match against "+ - "for image ID %q, nothing to do.", - image.ImageId) - continue - } - if r.MatchString(image.ImageName) { - filteredImages = append(filteredImages, image) - } - } - } else { - filteredImages = allImages[:] - } - - var images []ecs.ImageType - if len(filteredImages) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - log.Printf("[DEBUG] alicloud_image - multiple results found and `most_recent` is set to: %t", mostRecent.(bool)) - if len(filteredImages) > 1 && mostRecent.(bool) { - // Query returned single result. 
- images = append(images, mostRecentImage(filteredImages)) - } else { - images = filteredImages - } - - log.Printf("[DEBUG] alicloud_image - Images found: %#v", images) - return imagesDescriptionAttributes(d, images, meta) -} - -// populate the numerous fields that the image description returns. -func imagesDescriptionAttributes(d *schema.ResourceData, images []ecs.ImageType, meta interface{}) error { - var ids []string - var s []map[string]interface{} - for _, image := range images { - mapping := map[string]interface{}{ - "id": image.ImageId, - "architecture": image.Architecture, - "creation_time": image.CreationTime.String(), - "description": image.Description, - "image_id": image.ImageId, - "image_owner_alias": image.ImageOwnerAlias, - "os_name": image.OSName, - "os_type": image.OSType, - "name": image.ImageName, - "platform": image.Platform, - "status": image.Status, - "state": image.Status, - "size": image.Size, - "is_self_shared": image.IsSelfShared, - "is_subscribed": image.IsSubscribed, - "is_copied": image.IsCopied, - "is_support_io_optimized": image.IsSupportIoOptimized, - "image_version": image.ImageVersion, - "progress": image.Progress, - "usage": image.Usage, - "product_code": image.ProductCode, - - // Complex types get their own functions - "disk_device_mappings": imageDiskDeviceMappings(image.DiskDeviceMappings.DiskDeviceMapping), - "tags": imageTagsMappings(d, image.ImageId, meta), - } - - log.Printf("[DEBUG] alicloud_image - adding image mapping: %v", mapping) - ids = append(ids, image.ImageId) - s = append(s, mapping) - } - - d.SetId(dataResourceIdHash(ids)) - if err := d.Set("images", s); err != nil { - return err - } - return nil -} - -//Find most recent image -type imageSort []ecs.ImageType - -func (a imageSort) Len() int { - return len(a) -} -func (a imageSort) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} -func (a imageSort) Less(i, j int) bool { - itime, _ := time.Parse(time.RFC3339, a[i].CreationTime.String()) - jtime, _ := 
time.Parse(time.RFC3339, a[j].CreationTime.String()) - return itime.Unix() < jtime.Unix() -} - -// Returns the most recent Image out of a slice of images. -func mostRecentImage(images []ecs.ImageType) ecs.ImageType { - sortedImages := images - sort.Sort(imageSort(sortedImages)) - return sortedImages[len(sortedImages)-1] -} - -// Returns a set of disk device mappings. -func imageDiskDeviceMappings(m []ecs.DiskDeviceMapping) []map[string]interface{} { - var s []map[string]interface{} - - for _, v := range m { - mapping := map[string]interface{}{ - "device": v.Device, - "size": v.Size, - "snapshot_id": v.SnapshotId, - } - - log.Printf("[DEBUG] alicloud_image - adding disk device mapping: %v", mapping) - s = append(s, mapping) - } - - return s -} - -//Returns a mapping of image tags -func imageTagsMappings(d *schema.ResourceData, imageId string, meta interface{}) map[string]string { - client := meta.(*AliyunClient) - conn := client.ecsconn - - tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{ - RegionId: getRegion(d, meta), - ResourceType: ecs.TagResourceImage, - ResourceId: imageId, - }) - - if err != nil { - log.Printf("[ERROR] DescribeTags for image got error: %#v", err) - return nil - } - - log.Printf("[DEBUG] DescribeTags for image : %v", tags) - return tagsToMap(tags) -} diff --git a/builtin/providers/alicloud/data_source_alicloud_images_test.go b/builtin/providers/alicloud/data_source_alicloud_images_test.go deleted file mode 100644 index 9c6e225e4..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_images_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package alicloud - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAlicloudImagesDataSource_images(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudImagesDataSourceImagesConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_images.multi_image"), - - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.#", "2"), - - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.architecture", "x86_64"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.disk_device_mappings.#", "0"), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.creation_time", regexp.MustCompile("^20[0-9]{2}-")), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.image_id", regexp.MustCompile("^centos_6\\w{1,5}[64]{1}.")), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.image_owner_alias", "system"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.os_type", "linux"), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.name", regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[64]{1}.")), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.progress", "100%"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.state", "Available"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.status", "Available"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.usage", "instance"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.tags.%", "0"), - - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.architecture", "i386"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.disk_device_mappings.#", "0"), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.creation_time", regexp.MustCompile("^20[0-9]{2}-")), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.image_id", 
regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[32]{1}.")), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.image_owner_alias", "system"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.os_type", "linux"), - resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.name", regexp.MustCompile("^centos_6\\w{1,5}[32]{1}.")), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.progress", "100%"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.state", "Available"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.status", "Available"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.usage", "instance"), - resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccAlicloudImagesDataSource_owners(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudImagesDataSourceOwnersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_images.owners_filtered_image"), - ), - }, - }, - }) -} - -func TestAccAlicloudImagesDataSource_ownersEmpty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_images.empty_owners_filtered_image"), - resource.TestCheckResourceAttr("data.alicloud_images.empty_owners_filtered_image", "most_recent", "true"), - ), - }, - }, - }) -} - -func TestAccAlicloudImagesDataSource_nameRegexFilter(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudImagesDataSourceNameRegexConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"), - resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^centos_")), - ), - }, - }, - }) -} - -func TestAccAlicloudImagesDataSource_imageNotInFirstPage(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"), - resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^ubuntu_14")), - ), - }, - }, - }) -} - -// Instance store test - using centos images -const testAccCheckAlicloudImagesDataSourceImagesConfig = ` -data "alicloud_images" "multi_image" { - owners = "system" - name_regex = "^centos_6" -} -` - -// Testing owner parameter -const testAccCheckAlicloudImagesDataSourceOwnersConfig = ` -data "alicloud_images" "owners_filtered_image" { - most_recent = true - owners = "system" -} -` - -const testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig = ` -data "alicloud_images" "empty_owners_filtered_image" { - most_recent = true - owners = "" -} -` - -// Testing name_regex parameter -const testAccCheckAlicloudImagesDataSourceNameRegexConfig = ` -data "alicloud_images" "name_regex_filtered_image" { - most_recent = true - owners = "system" - name_regex = "^centos_6\\w{1,5}[64]{1}.*" -} -` - -// Testing image not in first page response -const testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig = ` -data "alicloud_images" "name_regex_filtered_image" { - 
most_recent = true - owners = "system" - name_regex = "^ubuntu_14.*_64" -} -` diff --git a/builtin/providers/alicloud/data_source_alicloud_instance_types.go b/builtin/providers/alicloud/data_source_alicloud_instance_types.go deleted file mode 100644 index 87d6bfc43..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_instance_types.go +++ /dev/null @@ -1,127 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" - "log" -) - -func dataSourceAlicloudInstanceTypes() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAlicloudInstanceTypesRead, - - Schema: map[string]*schema.Schema{ - "instance_type_family": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "cpu_core_count": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "memory_size": { - Type: schema.TypeFloat, - Optional: true, - ForceNew: true, - }, - // Computed values. - "instance_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_core_count": { - Type: schema.TypeInt, - Computed: true, - }, - "memory_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "family": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceAlicloudInstanceTypesRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - cpu, _ := d.Get("cpu_core_count").(int) - mem, _ := d.Get("memory_size").(float64) - - args, err := buildAliyunAlicloudInstanceTypesArgs(d, meta) - - if err != nil { - return err - } - - resp, err := conn.DescribeInstanceTypesNew(args) - if err != nil { - return err - } - - var instanceTypes []ecs.InstanceTypeItemType - for _, types := range resp { - if cpu > 0 && types.CpuCoreCount != cpu { - continue - } - - if mem > 0 && types.MemorySize != mem { - continue 
- } - instanceTypes = append(instanceTypes, types) - } - - if len(instanceTypes) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - log.Printf("[DEBUG] alicloud_instance_type - Types found: %#v", instanceTypes) - return instanceTypesDescriptionAttributes(d, instanceTypes) -} - -func instanceTypesDescriptionAttributes(d *schema.ResourceData, types []ecs.InstanceTypeItemType) error { - var ids []string - var s []map[string]interface{} - for _, t := range types { - mapping := map[string]interface{}{ - "id": t.InstanceTypeId, - "cpu_core_count": t.CpuCoreCount, - "memory_size": t.MemorySize, - "family": t.InstanceTypeFamily, - } - - log.Printf("[DEBUG] alicloud_instance_type - adding type mapping: %v", mapping) - ids = append(ids, t.InstanceTypeId) - s = append(s, mapping) - } - - d.SetId(dataResourceIdHash(ids)) - if err := d.Set("instance_types", s); err != nil { - return err - } - return nil -} - -func buildAliyunAlicloudInstanceTypesArgs(d *schema.ResourceData, meta interface{}) (*ecs.DescribeInstanceTypesArgs, error) { - args := &ecs.DescribeInstanceTypesArgs{} - - if v := d.Get("instance_type_family").(string); v != "" { - args.InstanceTypeFamily = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/data_source_alicloud_instance_types_test.go b/builtin/providers/alicloud/data_source_alicloud_instance_types_test.go deleted file mode 100644 index 335da3fbd..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_instance_types_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package alicloud - -import ( - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccAlicloudInstanceTypesDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"), - - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"), - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"), - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.id", "ecs.s3.large"), - ), - }, - - resource.TestStep{ - Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"), - - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.#", "1"), - - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"), - resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"), - ), - }, - }, - }) -} - -const testAccCheckAlicloudInstanceTypesDataSourceBasicConfig = ` -data "alicloud_instance_types" "4c8g" { - cpu_core_count = 4 - memory_size = 8 -} -` - -const testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate = ` -data "alicloud_instance_types" "4c8g" { - instance_type_family= "ecs.s3" - cpu_core_count = 4 - memory_size = 8 -} -` diff --git a/builtin/providers/alicloud/data_source_alicloud_regions.go b/builtin/providers/alicloud/data_source_alicloud_regions.go deleted file mode 100644 index 8bdee26b6..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_regions.go +++ /dev/null @@ -1,114 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" - "log" -) - -func dataSourceAlicloudRegions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAlicloudRegionsRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - Computed: true, - }, - - "current": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - //Computed value - "regions": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "region_id": { - Type: schema.TypeString, - Computed: true, - }, - "local_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceAlicloudRegionsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - currentRegion := getRegion(d, meta) - - resp, err := conn.DescribeRegions() - if err != nil { - return err - } - if resp == nil || len(resp) == 0 { - return fmt.Errorf("no matching regions found") - } - name, nameOk := d.GetOk("name") - current := d.Get("current").(bool) - var filterRegions []ecs.RegionType - for _, region := range resp { - if current { - if nameOk && common.Region(name.(string)) != currentRegion { - return fmt.Errorf("name doesn't match current region: %#v, please input again.", currentRegion) - } - if region.RegionId == currentRegion { - filterRegions = append(filterRegions, region) - break - } - continue - } - if nameOk { - if common.Region(name.(string)) == region.RegionId { - filterRegions = append(filterRegions, region) - break - } - continue - } - filterRegions = append(filterRegions, region) - } - if len(filterRegions) < 1 { - return fmt.Errorf("Your query region returned no results. 
Please change your search criteria and try again.") - } - - return regionsDescriptionAttributes(d, filterRegions) -} - -func regionsDescriptionAttributes(d *schema.ResourceData, regions []ecs.RegionType) error { - var ids []string - var s []map[string]interface{} - for _, region := range regions { - mapping := map[string]interface{}{ - "id": region.RegionId, - "region_id": region.RegionId, - "local_name": region.LocalName, - } - - log.Printf("[DEBUG] alicloud_regions - adding region mapping: %v", mapping) - ids = append(ids, string(region.RegionId)) - s = append(s, mapping) - } - - d.SetId(dataResourceIdHash(ids)) - if err := d.Set("regions", s); err != nil { - return err - } - return nil -} diff --git a/builtin/providers/alicloud/data_source_alicloud_regions_test.go b/builtin/providers/alicloud/data_source_alicloud_regions_test.go deleted file mode 100644 index 9dafaba1e..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_regions_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package alicloud - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAlicloudRegionsDataSource_regions(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudRegionsDataSourceRegionsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_regions.region"), - - resource.TestCheckResourceAttr("data.alicloud_regions.region", "name", "cn-beijing"), - resource.TestCheckResourceAttr("data.alicloud_regions.region", "current", "true"), - - resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.#", "1"), - - resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.id", "cn-beijing"), - resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.region_id", "cn-beijing"), - 
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.local_name", "华北 2"), - ), - }, - }, - }) -} - -func TestAccAlicloudRegionsDataSource_name(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudRegionsDataSourceNameConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_regions.name_filtered_region"), - resource.TestCheckResourceAttr("data.alicloud_regions.name_filtered_region", "name", "cn-hangzhou")), - }, - }, - }) -} - -func TestAccAlicloudRegionsDataSource_current(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudRegionsDataSourceCurrentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_regions.current_filtered_region"), - resource.TestCheckResourceAttr("data.alicloud_regions.current_filtered_region", "current", "true"), - ), - }, - }, - }) -} - -func TestAccAlicloudRegionsDataSource_empty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudRegionsDataSourceEmptyConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_regions.empty_params_region"), - - resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.id", "cn-shenzhen"), - resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.region_id", "cn-shenzhen"), - resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.local_name", "华南 1"), - ), - }, - }, - }) -} - -// Instance store test - using centos regions -const 
testAccCheckAlicloudRegionsDataSourceRegionsConfig = ` -data "alicloud_regions" "region" { - name = "cn-beijing" - current = true -} -` - -// Testing name parameter -const testAccCheckAlicloudRegionsDataSourceNameConfig = ` -data "alicloud_regions" "name_filtered_region" { - name = "cn-hangzhou" -} -` - -// Testing current parameter -const testAccCheckAlicloudRegionsDataSourceCurrentConfig = ` -data "alicloud_regions" "current_filtered_region" { - current = true -} -` - -// Testing empty parmas -const testAccCheckAlicloudRegionsDataSourceEmptyConfig = ` -data "alicloud_regions" "empty_params_region" { -} -` diff --git a/builtin/providers/alicloud/data_source_alicloud_zones.go b/builtin/providers/alicloud/data_source_alicloud_zones.go deleted file mode 100644 index 689e98a16..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_zones.go +++ /dev/null @@ -1,137 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" - "log" - "reflect" -) - -func dataSourceAlicloudZones() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAlicloudZonesRead, - - Schema: map[string]*schema.Schema{ - "available_instance_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "available_resource_creation": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "available_disk_category": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - // Computed values. 
- "zones": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "local_name": { - Type: schema.TypeString, - Computed: true, - }, - "available_instance_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "available_resource_creation": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "available_disk_categories": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func dataSourceAlicloudZonesRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - insType, _ := d.Get("available_instance_type").(string) - resType, _ := d.Get("available_resource_creation").(string) - diskType, _ := d.Get("available_disk_category").(string) - - resp, err := conn.DescribeZones(getRegion(d, meta)) - if err != nil { - return err - } - - var zoneTypes []ecs.ZoneType - for _, types := range resp { - if insType != "" && !constraints(types.AvailableInstanceTypes.InstanceTypes, insType) { - continue - } - - if resType != "" && !constraints(types.AvailableResourceCreation.ResourceTypes, resType) { - continue - } - - if diskType != "" && !constraints(types.AvailableDiskCategories.DiskCategories, diskType) { - continue - } - zoneTypes = append(zoneTypes, types) - } - - if len(zoneTypes) < 1 { - return fmt.Errorf("Your query returned no results. 
Please change your search criteria and try again.") - } - - log.Printf("[DEBUG] alicloud_zones - Zones found: %#v", zoneTypes) - return zonesDescriptionAttributes(d, zoneTypes) -} - -// check array constraints str -func constraints(arr interface{}, v string) bool { - arrs := reflect.ValueOf(arr) - len := arrs.Len() - for i := 0; i < len; i++ { - if arrs.Index(i).String() == v { - return true - } - } - return false -} - -func zonesDescriptionAttributes(d *schema.ResourceData, types []ecs.ZoneType) error { - var ids []string - var s []map[string]interface{} - for _, t := range types { - mapping := map[string]interface{}{ - "id": t.ZoneId, - "local_name": t.LocalName, - "available_instance_types": t.AvailableInstanceTypes.InstanceTypes, - "available_resource_creation": t.AvailableResourceCreation.ResourceTypes, - "available_disk_categories": t.AvailableDiskCategories.DiskCategories, - } - - log.Printf("[DEBUG] alicloud_zones - adding zone mapping: %v", mapping) - ids = append(ids, t.ZoneId) - s = append(s, mapping) - } - - d.SetId(dataResourceIdHash(ids)) - if err := d.Set("zones", s); err != nil { - return err - } - return nil -} diff --git a/builtin/providers/alicloud/data_source_alicloud_zones_test.go b/builtin/providers/alicloud/data_source_alicloud_zones_test.go deleted file mode 100644 index 4757f495c..000000000 --- a/builtin/providers/alicloud/data_source_alicloud_zones_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "strconv" - "testing" -) - -func TestAccAlicloudZonesDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudZonesDataSourceBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"), - ), - }, - }, - }) -} - 
-func TestAccAlicloudZonesDataSource_filter(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudZonesDataSourceFilter, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"), - testCheckZoneLength("data.alicloud_zones.foo"), - ), - }, - - resource.TestStep{ - Config: testAccCheckAlicloudZonesDataSourceFilterIoOptimized, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"), - testCheckZoneLength("data.alicloud_zones.foo"), - ), - }, - }, - }) -} - -func TestAccAlicloudZonesDataSource_unitRegion(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAlicloudZonesDataSource_unitRegion, - Check: resource.ComposeTestCheckFunc( - testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"), - ), - }, - }, - }) -} - -// the zone length changed occasionally -// check by range to avoid test case failure -func testCheckZoneLength(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - is := rs.Primary - if is == nil { - return fmt.Errorf("No primary instance: %s", name) - } - - i, err := strconv.Atoi(is.Attributes["zones.#"]) - - if err != nil { - return fmt.Errorf("convert zone length err: %#v", err) - } - - if i <= 0 { - return fmt.Errorf("zone length expected greater than 0 got err: %d", i) - } - - return nil - } -} - -const testAccCheckAlicloudZonesDataSourceBasicConfig = ` -data "alicloud_zones" "foo" { -} -` - -const testAccCheckAlicloudZonesDataSourceFilter = ` -data "alicloud_zones" "foo" { - available_instance_type= "ecs.c2.xlarge" - available_resource_creation= 
"VSwitch" - available_disk_category= "cloud_efficiency" -} -` - -const testAccCheckAlicloudZonesDataSourceFilterIoOptimized = ` -data "alicloud_zones" "foo" { - available_instance_type= "ecs.c2.xlarge" - available_resource_creation= "IoOptimized" - available_disk_category= "cloud" -} -` - -const testAccCheckAlicloudZonesDataSource_unitRegion = ` -provider "alicloud" { - alias = "northeast" - region = "ap-northeast-1" -} - -data "alicloud_zones" "foo" { - provider = "alicloud.northeast" - available_resource_creation= "VSwitch" -} -` diff --git a/builtin/providers/alicloud/errors.go b/builtin/providers/alicloud/errors.go deleted file mode 100644 index c159d5c65..000000000 --- a/builtin/providers/alicloud/errors.go +++ /dev/null @@ -1,52 +0,0 @@ -package alicloud - -import "github.com/denverdino/aliyungo/common" - -const ( - // common - Notfound = "Not found" - // ecs - InstanceNotfound = "Instance.Notfound" - // disk - DiskIncorrectStatus = "IncorrectDiskStatus" - DiskCreatingSnapshot = "DiskCreatingSnapshot" - InstanceLockedForSecurity = "InstanceLockedForSecurity" - SystemDiskNotFound = "SystemDiskNotFound" - // eip - EipIncorrectStatus = "IncorrectEipStatus" - InstanceIncorrectStatus = "IncorrectInstanceStatus" - HaVipIncorrectStatus = "IncorrectHaVipStatus" - // slb - LoadBalancerNotFound = "InvalidLoadBalancerId.NotFound" - - // security_group - InvalidInstanceIdAlreadyExists = "InvalidInstanceId.AlreadyExists" - InvalidSecurityGroupIdNotFound = "InvalidSecurityGroupId.NotFound" - SgDependencyViolation = "DependencyViolation" - - //Nat gateway - NatGatewayInvalidRegionId = "Invalid.RegionId" - DependencyViolationBandwidthPackages = "DependencyViolation.BandwidthPackages" - NotFindSnatEntryBySnatId = "NotFindSnatEntryBySnatId" - NotFindForwardEntryByForwardId = "NotFindForwardEntryByForwardId" - - // vswitch - VswitcInvalidRegionId = "InvalidRegionId.NotFound" - - // ess - InvalidScalingGroupIdNotFound = "InvalidScalingGroupId.NotFound" - 
IncorrectScalingConfigurationLifecycleState = "IncorrectScalingConfigurationLifecycleState" - - //unknown Error - UnknownError = "UnknownError" -) - -func GetNotFoundErrorFromString(str string) error { - return &common.Error{ - ErrorResponse: common.ErrorResponse{ - Code: InstanceNotfound, - Message: str, - }, - StatusCode: -1, - } -} diff --git a/builtin/providers/alicloud/extension_ecs.go b/builtin/providers/alicloud/extension_ecs.go deleted file mode 100644 index df21138bf..000000000 --- a/builtin/providers/alicloud/extension_ecs.go +++ /dev/null @@ -1,37 +0,0 @@ -package alicloud - -type GroupRuleDirection string - -const ( - GroupRuleIngress = GroupRuleDirection("ingress") - GroupRuleEgress = GroupRuleDirection("egress") -) - -type GroupRuleIpProtocol string - -const ( - GroupRuleTcp = GroupRuleIpProtocol("tcp") - GroupRuleUdp = GroupRuleIpProtocol("udp") - GroupRuleIcmp = GroupRuleIpProtocol("icmp") - GroupRuleGre = GroupRuleIpProtocol("gre") - GroupRuleAll = GroupRuleIpProtocol("all") -) - -type GroupRuleNicType string - -const ( - GroupRuleInternet = GroupRuleNicType("internet") - GroupRuleIntranet = GroupRuleNicType("intranet") -) - -type GroupRulePolicy string - -const ( - GroupRulePolicyAccept = GroupRulePolicy("accept") - GroupRulePolicyDrop = GroupRulePolicy("drop") -) - -const ( - EcsApiVersion20160314 = "2016-03-14" - EcsApiVersion20140526 = "2014-05-26" -) diff --git a/builtin/providers/alicloud/extension_slb.go b/builtin/providers/alicloud/extension_slb.go deleted file mode 100644 index 2c4cf787b..000000000 --- a/builtin/providers/alicloud/extension_slb.go +++ /dev/null @@ -1,164 +0,0 @@ -package alicloud - -import ( - "fmt" - "strings" - - "github.com/denverdino/aliyungo/slb" -) - -type Listener struct { - slb.HTTPListenerType - - InstancePort int - LoadBalancerPort int - Protocol string - //tcp & udp - PersistenceTimeout int - - //https - SSLCertificateId string - - //tcp - HealthCheckType slb.HealthCheckType - - //api interface: http & https is 
HealthCheckTimeout, tcp & udp is HealthCheckConnectTimeout - HealthCheckConnectTimeout int -} - -type ListenerErr struct { - ErrType string - Err error -} - -func (e *ListenerErr) Error() string { - return e.ErrType + " " + e.Err.Error() - -} - -const ( - HealthCheckErrType = "healthCheckErrType" - StickySessionErrType = "stickySessionErrType" - CookieTimeOutErrType = "cookieTimeoutErrType" - CookieErrType = "cookieErrType" -) - -// Takes the result of flatmap.Expand for an array of listeners and -// returns ELB API compatible objects -func expandListeners(configured []interface{}) ([]*Listener, error) { - listeners := make([]*Listener, 0, len(configured)) - - // Loop over our configured listeners and create - // an array of aws-sdk-go compatabile objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - ip := data["instance_port"].(int) - lp := data["lb_port"].(int) - l := &Listener{ - InstancePort: ip, - LoadBalancerPort: lp, - Protocol: data["lb_protocol"].(string), - } - - l.Bandwidth = data["bandwidth"].(int) - - if v, ok := data["scheduler"]; ok { - l.Scheduler = slb.SchedulerType(v.(string)) - } - - if v, ok := data["ssl_certificate_id"]; ok { - l.SSLCertificateId = v.(string) - } - - if v, ok := data["sticky_session"]; ok { - l.StickySession = slb.FlagType(v.(string)) - } - - if v, ok := data["sticky_session_type"]; ok { - l.StickySessionType = slb.StickySessionType(v.(string)) - } - - if v, ok := data["cookie_timeout"]; ok { - l.CookieTimeout = v.(int) - } - - if v, ok := data["cookie"]; ok { - l.Cookie = v.(string) - } - - if v, ok := data["persistence_timeout"]; ok { - l.PersistenceTimeout = v.(int) - } - - if v, ok := data["health_check"]; ok { - l.HealthCheck = slb.FlagType(v.(string)) - } - - if v, ok := data["health_check_type"]; ok { - l.HealthCheckType = slb.HealthCheckType(v.(string)) - } - - if v, ok := data["health_check_domain"]; ok { - l.HealthCheckDomain = v.(string) - } - - if v, ok := data["health_check_uri"]; 
ok { - l.HealthCheckURI = v.(string) - } - - if v, ok := data["health_check_connect_port"]; ok { - l.HealthCheckConnectPort = v.(int) - } - - if v, ok := data["healthy_threshold"]; ok { - l.HealthyThreshold = v.(int) - } - - if v, ok := data["unhealthy_threshold"]; ok { - l.UnhealthyThreshold = v.(int) - } - - if v, ok := data["health_check_timeout"]; ok { - l.HealthCheckTimeout = v.(int) - } - - if v, ok := data["health_check_interval"]; ok { - l.HealthCheckInterval = v.(int) - } - - if v, ok := data["health_check_http_code"]; ok { - l.HealthCheckHttpCode = slb.HealthCheckHttpCodeType(v.(string)) - } - - var valid bool - if l.SSLCertificateId != "" { - // validate the protocol is correct - for _, p := range []string{"https", "ssl"} { - if strings.ToLower(l.Protocol) == p { - valid = true - } - } - } else { - valid = true - } - - if valid { - listeners = append(listeners, l) - } else { - return nil, fmt.Errorf("[ERR] SLB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'") - } - } - - return listeners, nil -} - -func expandBackendServers(list []interface{}) []slb.BackendServerType { - result := make([]slb.BackendServerType, 0, len(list)) - for _, i := range list { - if i.(string) != "" { - result = append(result, slb.BackendServerType{ServerId: i.(string), Weight: 100}) - } - } - return result -} diff --git a/builtin/providers/alicloud/extension_tags.go b/builtin/providers/alicloud/extension_tags.go deleted file mode 100644 index 5b86ebab7..000000000 --- a/builtin/providers/alicloud/extension_tags.go +++ /dev/null @@ -1,43 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" -) - -type Tag struct { - Key string - Value string -} - -type AddTagsArgs struct { - ResourceId string - ResourceType ecs.TagResourceType //image, instance, snapshot or disk - RegionId common.Region - Tag []Tag -} - -type RemoveTagsArgs struct { - ResourceId string - ResourceType ecs.TagResourceType 
//image, instance, snapshot or disk - RegionId common.Region - Tag []Tag -} - -func AddTags(client *ecs.Client, args *AddTagsArgs) error { - response := ecs.AddTagsResponse{} - err := client.Invoke("AddTags", args, &response) - if err != nil { - return err - } - return err -} - -func RemoveTags(client *ecs.Client, args *RemoveTagsArgs) error { - response := ecs.RemoveTagsResponse{} - err := client.Invoke("RemoveTags", args, &response) - if err != nil { - return err - } - return err -} diff --git a/builtin/providers/alicloud/provider.go b/builtin/providers/alicloud/provider.go deleted file mode 100644 index d7f92da64..000000000 --- a/builtin/providers/alicloud/provider.go +++ /dev/null @@ -1,112 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/common" - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "os" -) - -// Provider returns a schema.Provider for alicloud -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "access_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", nil), - Description: descriptions["access_key"], - }, - "secret_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", nil), - Description: descriptions["secret_key"], - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", DEFAULT_REGION), - Description: descriptions["region"], - }, - }, - DataSourcesMap: map[string]*schema.Resource{ - - "alicloud_images": dataSourceAlicloudImages(), - "alicloud_regions": dataSourceAlicloudRegions(), - "alicloud_zones": dataSourceAlicloudZones(), - "alicloud_instance_types": dataSourceAlicloudInstanceTypes(), - }, - ResourcesMap: map[string]*schema.Resource{ - 
"alicloud_instance": resourceAliyunInstance(), - "alicloud_disk": resourceAliyunDisk(), - "alicloud_disk_attachment": resourceAliyunDiskAttachment(), - "alicloud_security_group": resourceAliyunSecurityGroup(), - "alicloud_security_group_rule": resourceAliyunSecurityGroupRule(), - "alicloud_db_instance": resourceAlicloudDBInstance(), - "alicloud_ess_scaling_group": resourceAlicloudEssScalingGroup(), - "alicloud_ess_scaling_configuration": resourceAlicloudEssScalingConfiguration(), - "alicloud_ess_scaling_rule": resourceAlicloudEssScalingRule(), - "alicloud_ess_schedule": resourceAlicloudEssSchedule(), - "alicloud_vpc": resourceAliyunVpc(), - "alicloud_nat_gateway": resourceAliyunNatGateway(), - //both subnet and vswith exists,cause compatible old version, and compatible aws habit. - "alicloud_subnet": resourceAliyunSubnet(), - "alicloud_vswitch": resourceAliyunSubnet(), - "alicloud_route_entry": resourceAliyunRouteEntry(), - "alicloud_snat_entry": resourceAliyunSnatEntry(), - "alicloud_forward_entry": resourceAliyunForwardEntry(), - "alicloud_eip": resourceAliyunEip(), - "alicloud_eip_association": resourceAliyunEipAssociation(), - "alicloud_slb": resourceAliyunSlb(), - "alicloud_slb_attachment": resourceAliyunSlbAttachment(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - accesskey, ok := d.GetOk("access_key") - if !ok { - accesskey = os.Getenv("ALICLOUD_ACCESS_KEY") - } - secretkey, ok := d.GetOk("secret_key") - if !ok { - secretkey = os.Getenv("ALICLOUD_SECRET_KEY") - } - region, ok := d.GetOk("region") - if !ok { - region = os.Getenv("ALICLOUD_REGION") - if region == "" { - region = DEFAULT_REGION - } - } - - config := Config{ - AccessKey: accesskey.(string), - SecretKey: secretkey.(string), - Region: common.Region(region.(string)), - } - - client, err := config.Client() - if err != nil { - return nil, err - } - - return client, nil -} - -// This is a global MutexKV for use within this 
plugin. -var alicloudMutexKV = mutexkv.NewMutexKV() - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "access_key": "Access key of alicloud", - "secret_key": "Secret key of alicloud", - "region": "Region of alicloud", - } -} diff --git a/builtin/providers/alicloud/provider_test.go b/builtin/providers/alicloud/provider_test.go deleted file mode 100644 index f0f5e9bec..000000000 --- a/builtin/providers/alicloud/provider_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package alicloud - -import ( - "log" - "os" - "testing" - - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "alicloud": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" { - t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests") - } - if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" { - t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests") - } - if v := os.Getenv("ALICLOUD_REGION"); v == "" { - log.Println("[INFO] Test: Using cn-beijing as test region") - os.Setenv("ALICLOUD_REGION", "cn-beijing") - } -} - -func testAccCheckAlicloudDataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("data source ID not set") - } - return nil - } -} 
diff --git a/builtin/providers/alicloud/resource_alicloud_db_instance.go b/builtin/providers/alicloud/resource_alicloud_db_instance.go deleted file mode 100644 index 062b5d0e1..000000000 --- a/builtin/providers/alicloud/resource_alicloud_db_instance.go +++ /dev/null @@ -1,550 +0,0 @@ -package alicloud - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/rds" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "log" - "strconv" - "strings" - "time" -) - -func resourceAlicloudDBInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAlicloudDBInstanceCreate, - Read: resourceAlicloudDBInstanceRead, - Update: resourceAlicloudDBInstanceUpdate, - Delete: resourceAlicloudDBInstanceDelete, - - Schema: map[string]*schema.Schema{ - "engine": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{"MySQL", "SQLServer", "PostgreSQL", "PPAS"}), - ForceNew: true, - Required: true, - }, - "engine_version": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{"5.5", "5.6", "5.7", "2008r2", "2012", "9.4", "9.3"}), - ForceNew: true, - Required: true, - }, - "db_instance_class": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "db_instance_storage": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "instance_charge_type": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{string(rds.Postpaid), string(rds.Prepaid)}), - Optional: true, - ForceNew: true, - Default: rds.Postpaid, - }, - "period": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateAllowedIntValue([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 24, 36}), - Optional: true, - ForceNew: true, - Default: 1, - }, - - "zone_id": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - Computed: true, - }, - "multi_az": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "db_instance_net_type": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{string(common.Internet), string(common.Intranet)}), - Optional: true, - }, - "allocate_public_connection": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "instance_network_type": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{string(common.VPC), string(common.Classic)}), - Optional: true, - Computed: true, - }, - "vswitch_id": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "master_user_name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "master_user_password": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - }, - - "preferred_backup_period": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - // terraform does not support ValidateFunc of TypeList attr - // ValidateFunc: validateAllowedStringValue([]string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}), - Optional: true, - }, - "preferred_backup_time": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue(rds.BACKUP_TIME), - Optional: true, - }, - "backup_retention_period": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateIntegerInRange(7, 730), - Optional: true, - }, - - "security_ips": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Optional: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "connections": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"connection_string": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Computed: true, - }, - - "db_mappings": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "db_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "character_set_name": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue(rds.CHARACTER_SET_NAME), - Required: true, - }, - "db_description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Optional: true, - Set: resourceAlicloudDatabaseHash, - }, - }, - } -} - -func resourceAlicloudDatabaseHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["db_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["character_set_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["db_description"].(string))) - - return hashcode.String(buf.String()) -} - -func resourceAlicloudDBInstanceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.rdsconn - - args, err := buildDBCreateOrderArgs(d, meta) - if err != nil { - return err - } - - resp, err := conn.CreateOrder(args) - - if err != nil { - return fmt.Errorf("Error creating Alicloud db instance: %#v", err) - } - - instanceId := resp.DBInstanceId - if instanceId == "" { - return fmt.Errorf("Error get Alicloud db instance id") - } - - d.SetId(instanceId) - d.Set("instance_charge_type", d.Get("instance_charge_type")) - d.Set("period", d.Get("period")) - d.Set("period_type", d.Get("period_type")) - - // wait instance status change from Creating to running - if err := conn.WaitForInstance(d.Id(), rds.Running, defaultLongTimeout); err != nil { - return 
fmt.Errorf("WaitForInstance %s got error: %#v", rds.Running, err) - } - - if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil { - return err - } - - masterUserName := d.Get("master_user_name").(string) - masterUserPwd := d.Get("master_user_password").(string) - if masterUserName != "" && masterUserPwd != "" { - if err := client.CreateAccountByInfo(d.Id(), masterUserName, masterUserPwd); err != nil { - return fmt.Errorf("Create db account %s error: %v", masterUserName, err) - } - } - - if d.Get("allocate_public_connection").(bool) { - if err := client.AllocateDBPublicConnection(d.Id(), DB_DEFAULT_CONNECT_PORT); err != nil { - return fmt.Errorf("Allocate public connection error: %v", err) - } - } - - return resourceAlicloudDBInstanceUpdate(d, meta) -} - -func modifySecurityIps(id string, ips interface{}, meta interface{}) error { - client := meta.(*AliyunClient) - ipList := expandStringList(ips.([]interface{})) - - ipstr := strings.Join(ipList[:], COMMA_SEPARATED) - // default disable connect from outside - if ipstr == "" { - ipstr = LOCAL_HOST_IP - } - - if err := client.ModifyDBSecurityIps(id, ipstr); err != nil { - return fmt.Errorf("Error modify security ips %s: %#v", ipstr, err) - } - return nil -} - -func resourceAlicloudDBInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.rdsconn - d.Partial(true) - - if d.HasChange("db_mappings") { - o, n := d.GetChange("db_mappings") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - var allDbs []string - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - if len(remove) > 0 && len(add) > 0 { - return fmt.Errorf("Failure modify database, we neither support create and delete database simultaneous nor modify database attributes.") - } - - if len(remove) > 0 { - for _, db := range remove { - dbm, _ := db.(map[string]interface{}) - if err := conn.DeleteDatabase(d.Id(), dbm["db_name"].(string)); err != nil { - return 
fmt.Errorf("Failure delete database %s: %#v", dbm["db_name"].(string), err) - } - } - } - - if len(add) > 0 { - for _, db := range add { - dbm, _ := db.(map[string]interface{}) - dbName := dbm["db_name"].(string) - allDbs = append(allDbs, dbName) - - if err := client.CreateDatabaseByInfo(d.Id(), dbName, dbm["character_set_name"].(string), dbm["db_description"].(string)); err != nil { - return fmt.Errorf("Failure create database %s: %#v", dbName, err) - } - - } - } - - if err := conn.WaitForAllDatabase(d.Id(), allDbs, rds.Running, 600); err != nil { - return fmt.Errorf("Failure create database %#v", err) - } - - if user := d.Get("master_user_name").(string); user != "" { - for _, dbName := range allDbs { - if err := client.GrantDBPrivilege2Account(d.Id(), user, dbName); err != nil { - return fmt.Errorf("Failed to grant database %s readwrite privilege to account %s: %#v", dbName, user, err) - } - } - } - - d.SetPartial("db_mappings") - } - - if d.HasChange("preferred_backup_period") || d.HasChange("preferred_backup_time") || d.HasChange("backup_retention_period") { - period := d.Get("preferred_backup_period").([]interface{}) - periodList := expandStringList(period) - time := d.Get("preferred_backup_time").(string) - retention := d.Get("backup_retention_period").(int) - - if time == "" || retention == 0 || len(periodList) < 1 { - return fmt.Errorf("Both backup_time, backup_period and retention_period are required to set backup policy.") - } - - ps := strings.Join(periodList[:], COMMA_SEPARATED) - - if err := client.ConfigDBBackup(d.Id(), time, ps, retention); err != nil { - return fmt.Errorf("Error set backup policy: %#v", err) - } - d.SetPartial("preferred_backup_period") - d.SetPartial("preferred_backup_time") - d.SetPartial("backup_retention_period") - } - - if d.HasChange("security_ips") { - if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil { - return err - } - d.SetPartial("security_ips") - } - - if d.HasChange("db_instance_class") || 
d.HasChange("db_instance_storage") { - co, cn := d.GetChange("db_instance_class") - so, sn := d.GetChange("db_instance_storage") - classOld := co.(string) - classNew := cn.(string) - storageOld := so.(int) - storageNew := sn.(int) - - // update except the first time, because we will do it in create function - if classOld != "" && storageOld != 0 { - chargeType := d.Get("instance_charge_type").(string) - if chargeType == string(rds.Prepaid) { - return fmt.Errorf("Prepaid db instance does not support modify db_instance_class or db_instance_storage") - } - - if err := client.ModifyDBClassStorage(d.Id(), classNew, strconv.Itoa(storageNew)); err != nil { - return fmt.Errorf("Error modify db instance class or storage error: %#v", err) - } - } - } - - d.Partial(false) - return resourceAlicloudDBInstanceRead(d, meta) -} - -func resourceAlicloudDBInstanceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.rdsconn - - instance, err := client.DescribeDBInstanceById(d.Id()) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe DB InstanceAttribute: %#v", err) - } - - args := rds.DescribeDatabasesArgs{ - DBInstanceId: d.Id(), - } - - resp, err := conn.DescribeDatabases(&args) - if err != nil { - return err - } - if resp.Databases.Database == nil { - d.SetId("") - return nil - } - - d.Set("db_mappings", flattenDatabaseMappings(resp.Databases.Database)) - - argn := rds.DescribeDBInstanceNetInfoArgs{ - DBInstanceId: d.Id(), - } - - resn, err := conn.DescribeDBInstanceNetInfo(&argn) - if err != nil { - return err - } - d.Set("connections", flattenDBConnections(resn.DBInstanceNetInfos.DBInstanceNetInfo)) - - ips, err := client.GetSecurityIps(d.Id()) - if err != nil { - log.Printf("Describe DB security ips error: %#v", err) - } - d.Set("security_ips", ips) - - d.Set("engine", instance.Engine) - d.Set("engine_version", instance.EngineVersion) - d.Set("db_instance_class", 
instance.DBInstanceClass) - d.Set("port", instance.Port) - d.Set("db_instance_storage", instance.DBInstanceStorage) - d.Set("zone_id", instance.ZoneId) - d.Set("db_instance_net_type", instance.DBInstanceNetType) - d.Set("instance_network_type", instance.InstanceNetworkType) - - return nil -} - -func resourceAlicloudDBInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).rdsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DeleteInstance(d.Id()) - - if err != nil { - return resource.RetryableError(fmt.Errorf("DB Instance in use - trying again while it is deleted.")) - } - - args := &rds.DescribeDBInstancesArgs{ - DBInstanceId: d.Id(), - } - resp, err := conn.DescribeDBInstanceAttribute(args) - if err != nil { - return resource.NonRetryableError(err) - } else if len(resp.Items.DBInstanceAttribute) < 1 { - return nil - } - - return resource.RetryableError(fmt.Errorf("DB in use - trying again while it is deleted.")) - }) -} - -func buildDBCreateOrderArgs(d *schema.ResourceData, meta interface{}) (*rds.CreateOrderArgs, error) { - client := meta.(*AliyunClient) - args := &rds.CreateOrderArgs{ - RegionId: getRegion(d, meta), - // we does not expose this param to user, - // because create prepaid instance progress will be stopped when set auto_pay to false, - // then could not get instance info, cause timeout error - AutoPay: "true", - EngineVersion: d.Get("engine_version").(string), - Engine: rds.Engine(d.Get("engine").(string)), - DBInstanceStorage: d.Get("db_instance_storage").(int), - DBInstanceClass: d.Get("db_instance_class").(string), - Quantity: DEFAULT_INSTANCE_COUNT, - Resource: rds.DefaultResource, - } - - bussStr, err := json.Marshal(DefaultBusinessInfo) - if err != nil { - return nil, fmt.Errorf("Failed to translate bussiness info %#v from json to string", DefaultBusinessInfo) - } - - args.BusinessInfo = string(bussStr) - - zoneId := d.Get("zone_id").(string) - args.ZoneId = 
zoneId - - multiAZ := d.Get("multi_az").(bool) - if multiAZ { - if zoneId != "" { - return nil, fmt.Errorf("You cannot set the ZoneId parameter when the MultiAZ parameter is set to true") - } - izs, err := client.DescribeMultiIZByRegion() - if err != nil { - return nil, fmt.Errorf("Get multiAZ id error") - } - - if len(izs) < 1 { - return nil, fmt.Errorf("Current region does not support MultiAZ.") - } - - args.ZoneId = izs[0] - } - - vswitchId := d.Get("vswitch_id").(string) - - networkType := d.Get("instance_network_type").(string) - args.InstanceNetworkType = common.NetworkType(networkType) - - if vswitchId != "" { - args.VSwitchId = vswitchId - - // check InstanceNetworkType with vswitchId - if networkType == string(common.Classic) { - return nil, fmt.Errorf("When fill vswitchId, you shold set instance_network_type to VPC") - } else if networkType == "" { - args.InstanceNetworkType = common.VPC - } - - // get vpcId - vpcId, err := client.GetVpcIdByVSwitchId(vswitchId) - - if err != nil { - return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId) - } - // fill vpcId by vswitchId - args.VPCId = vpcId - - // check vswitchId in zone - vsw, err := client.QueryVswitchById(vpcId, vswitchId) - if err != nil { - return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId) - } - - if zoneId == "" { - args.ZoneId = vsw.ZoneId - } else if vsw.ZoneId != zoneId { - return nil, fmt.Errorf("VswitchId %s is not belong to the zone %s", vswitchId, zoneId) - } - } - - if v := d.Get("db_instance_net_type").(string); v != "" { - args.DBInstanceNetType = common.NetType(v) - } - - chargeType := d.Get("instance_charge_type").(string) - if chargeType != "" { - args.PayType = rds.DBPayType(chargeType) - } else { - args.PayType = rds.Postpaid - } - - // if charge type is postpaid, the commodity code must set to bards - if chargeType == string(rds.Postpaid) { - args.CommodityCode = rds.Bards - } else { - args.CommodityCode = rds.Rds - } - - period 
:= d.Get("period").(int) - args.UsedTime, args.TimeType = TransformPeriod2Time(period, chargeType) - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_db_instance_test.go b/builtin/providers/alicloud/resource_alicloud_db_instance_test.go deleted file mode 100644 index 498cc3ae1..000000000 --- a/builtin/providers/alicloud/resource_alicloud_db_instance_test.go +++ /dev/null @@ -1,765 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "strings" - "testing" -) - -func TestAccAlicloudDBInstance_basic(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "port", - "3306"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_storage", - "10"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "instance_network_type", - "Classic"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_net_type", - "Intranet"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "engine_version", - "5.6"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "engine", - "MySQL"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_vpc(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - 
IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "port", - "3306"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_storage", - "10"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "instance_network_type", - "VPC"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_net_type", - "Intranet"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "engine_version", - "5.6"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "engine", - "MySQL"), - ), - }, - }, - }) - -} - -func TestC2CAlicloudDBInstance_prepaid_order(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_prepaid_order, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "port", - "3306"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_storage", - "10"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "instance_network_type", - "VPC"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "db_instance_net_type", - "Intranet"), - resource.TestCheckResourceAttr( - "alicloud_db_instance.foo", - "engine_version", - "5.6"), - resource.TestCheckResourceAttr( - 
"alicloud_db_instance.foo", - "engine", - "MySQL"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_multiIZ(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_multiIZ, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - testAccCheckDBInstanceMultiIZ(&instance), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_database(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_database, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"), - ), - }, - - resource.TestStep{ - Config: testAccDBInstance_database_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "3"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_account(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccDBInstance_grantDatabasePrivilege2Account, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"), - testAccCheckAccountHasPrivilege2Database("alicloud_db_instance.foo", "tester", "foo", "ReadWrite"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_allocatePublicConnection(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_allocatePublicConnection, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "connections.#", "2"), - testAccCheckHasPublicConnection("alicloud_db_instance.foo"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_backupPolicy(t *testing.T) { - var policies []map[string]interface{} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_backup, - Check: resource.ComposeTestCheckFunc( - testAccCheckBackupPolicyExists( - "alicloud_db_instance.foo", policies), - testAccCheckKeyValueInMaps(policies, "backup policy", "preferred_backup_period", "Wednesday,Thursday"), - testAccCheckKeyValueInMaps(policies, "backup policy", "preferred_backup_time", "00:00Z-01:00Z"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_securityIps(t *testing.T) { - var ips []map[string]interface{} - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_securityIps, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityIpExists( - "alicloud_db_instance.foo", ips), - testAccCheckKeyValueInMaps(ips, "security ip", "security_ips", "127.0.0.1"), - ), - }, - - resource.TestStep{ - Config: testAccDBInstance_securityIpsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityIpExists( - "alicloud_db_instance.foo", ips), - testAccCheckKeyValueInMaps(ips, "security ip", "security_ips", "10.168.1.12,100.69.7.112"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDBInstance_upgradeClass(t *testing.T) { - var instance rds.DBInstanceAttribute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_db_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDBInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBInstance_class, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.t1.small"), - ), - }, - - resource.TestStep{ - Config: testAccDBInstance_classUpgrade, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists( - "alicloud_db_instance.foo", &instance), - resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.s1.small"), - ), - }, - }, - }) - -} - -func testAccCheckSecurityIpExists(n string, ips []map[string]interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if 
rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } - - conn := testAccProvider.Meta().(*AliyunClient).rdsconn - args := rds.DescribeDBInstanceIPsArgs{ - DBInstanceId: rs.Primary.ID, - } - - resp, err := conn.DescribeDBInstanceIPs(&args) - log.Printf("[DEBUG] check instance %s security ip %#v", rs.Primary.ID, resp) - - if err != nil { - return err - } - - p := resp.Items.DBInstanceIPArray - - if len(p) < 1 { - return fmt.Errorf("DB security ip not found") - } - - ips = flattenDBSecurityIPs(p) - return nil - } -} - -func testAccCheckDBInstanceMultiIZ(i *rds.DBInstanceAttribute) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.Contains(i.ZoneId, MULTI_IZ_SYMBOL) { - return fmt.Errorf("Current region does not support multiIZ.") - } - return nil - } -} - -func testAccCheckAccountHasPrivilege2Database(n, accountName, dbName, privilege string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB instance ID is set") - } - - conn := testAccProvider.Meta().(*AliyunClient).rdsconn - if err := conn.WaitForAccountPrivilege(rs.Primary.ID, accountName, dbName, rds.AccountPrivilege(privilege), 50); err != nil { - return fmt.Errorf("Failed to grant database %s privilege to account %s: %v", dbName, accountName, err) - } - return nil - } -} - -func testAccCheckHasPublicConnection(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB instance ID is set") - } - - conn := testAccProvider.Meta().(*AliyunClient).rdsconn - if err := conn.WaitForPublicConnection(rs.Primary.ID, 50); err != nil { - return fmt.Errorf("Failed to allocate public connection: %v", err) - } - return nil - } -} - -func 
testAccCheckDBInstanceExists(n string, d *rds.DBInstanceAttribute) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - attr, err := client.DescribeDBInstanceById(rs.Primary.ID) - log.Printf("[DEBUG] check instance %s attribute %#v", rs.Primary.ID, attr) - - if err != nil { - return err - } - - if attr == nil { - return fmt.Errorf("DB Instance not found") - } - - *d = *attr - return nil - } -} - -func testAccCheckBackupPolicyExists(n string, ps []map[string]interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Backup policy not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } - - conn := testAccProvider.Meta().(*AliyunClient).rdsconn - - args := rds.DescribeBackupPolicyArgs{ - DBInstanceId: rs.Primary.ID, - } - resp, err := conn.DescribeBackupPolicy(&args) - log.Printf("[DEBUG] check instance %s backup policy %#v", rs.Primary.ID, resp) - - if err != nil { - return err - } - - var bs []rds.BackupPolicy - bs = append(bs, resp.BackupPolicy) - ps = flattenDBBackup(bs) - - return nil - } -} - -func testAccCheckKeyValueInMaps(ps []map[string]interface{}, propName, key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, policy := range ps { - if policy[key].(string) != value { - return fmt.Errorf("DB %s attribute '%s' expected %#v, got %#v", propName, key, value, policy[key]) - } - } - return nil - } -} - -func testAccCheckDBInstanceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_db_instance" { - continue - } - - ins, err := 
client.DescribeDBInstanceById(rs.Primary.ID) - - if ins != nil { - return fmt.Errorf("Error DB Instance still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InstanceNotfound { - continue - } - return err - } - } - - return nil -} - -const testAccDBInstanceConfig = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" -} -` - -const testAccDBInstance_vpc = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - vswitch_id = "${alicloud_vswitch.foo.id}" -} -` -const testAccDBInstance_multiIZ = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - db_instance_net_type = "Intranet" - multi_az = true -} -` - -const testAccDBInstance_prepaid_order = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Prepaid" - db_instance_net_type = "Intranet" -} -` - -const testAccDBInstance_database = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - 
db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - db_mappings = [ - { - "db_name" = "foo" - "character_set_name" = "utf8" - "db_description" = "tf" - },{ - "db_name" = "bar" - "character_set_name" = "utf8" - "db_description" = "tf" - }] -} -` -const testAccDBInstance_database_update = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - db_mappings = [ - { - "db_name" = "foo" - "character_set_name" = "utf8" - "db_description" = "tf" - },{ - "db_name" = "bar" - "character_set_name" = "utf8" - "db_description" = "tf" - },{ - "db_name" = "zzz" - "character_set_name" = "utf8" - "db_description" = "tf" - }] -} -` - -const testAccDBInstance_grantDatabasePrivilege2Account = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - master_user_name = "tester" - master_user_password = "Test12345" - - db_mappings = [ - { - "db_name" = "foo" - "character_set_name" = "utf8" - "db_description" = "tf" - },{ - "db_name" = "bar" - "character_set_name" = "utf8" - "db_description" = "tf" - }] -} -` - -const testAccDBInstance_allocatePublicConnection = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - master_user_name = "tester" - master_user_password = "Test12345" - - allocate_public_connection = true -} -` - -const testAccDBInstance_backup = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = 
"10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - preferred_backup_period = ["Wednesday","Thursday"] - preferred_backup_time = "00:00Z-01:00Z" - backup_retention_period = 9 -} -` - -const testAccDBInstance_securityIps = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" -} -` -const testAccDBInstance_securityIpsConfig = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - instance_charge_type = "Postpaid" - db_instance_net_type = "Intranet" - - security_ips = ["10.168.1.12", "100.69.7.112"] -} -` - -const testAccDBInstance_class = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.t1.small" - db_instance_storage = "10" - db_instance_net_type = "Intranet" -} -` -const testAccDBInstance_classUpgrade = ` -resource "alicloud_db_instance" "foo" { - engine = "MySQL" - engine_version = "5.6" - db_instance_class = "rds.mysql.s1.small" - db_instance_storage = "10" - db_instance_net_type = "Intranet" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_disk.go b/builtin/providers/alicloud/resource_alicloud_disk.go deleted file mode 100644 index b31c37d93..000000000 --- a/builtin/providers/alicloud/resource_alicloud_disk.go +++ /dev/null @@ -1,247 +0,0 @@ -package alicloud - -import ( - "fmt" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "log" - "time" -) - -func resourceAliyunDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunDiskCreate, - Read: resourceAliyunDiskRead, - Update: resourceAliyunDiskUpdate, - Delete: 
resourceAliyunDiskDelete, - - Schema: map[string]*schema.Schema{ - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateDiskName, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateDiskDescription, - }, - - "category": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateDiskCategory, - Default: "cloud", - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAliyunDiskCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - conn := client.ecsconn - - availabilityZone, err := client.DescribeZone(d.Get("availability_zone").(string)) - if err != nil { - return err - } - - args := &ecs.CreateDiskArgs{ - RegionId: getRegion(d, meta), - ZoneId: availabilityZone.ZoneId, - } - - if v, ok := d.GetOk("category"); ok && v.(string) != "" { - category := ecs.DiskCategory(v.(string)) - if err := client.DiskAvailable(availabilityZone, category); err != nil { - return err - } - args.DiskCategory = category - } - - if v, ok := d.GetOk("size"); ok { - size := v.(int) - if args.DiskCategory == ecs.DiskCategoryCloud && (size < 5 || size > 2000) { - return fmt.Errorf("the size of cloud disk must between 5 to 2000") - } - - if (args.DiskCategory == ecs.DiskCategoryCloudEfficiency || - args.DiskCategory == ecs.DiskCategoryCloudSSD) && (size < 20 || size > 32768) { - return fmt.Errorf("the size of %s disk must between 20 to 32768", args.DiskCategory) - } - args.Size = size - - d.Set("size", args.Size) - } - - if v, ok := d.GetOk("snapshot_id"); ok && v.(string) 
!= "" { - args.SnapshotId = v.(string) - } - - if args.Size <= 0 && args.SnapshotId == "" { - return fmt.Errorf("One of size or snapshot_id is required when specifying an ECS disk.") - } - - if v, ok := d.GetOk("name"); ok && v.(string) != "" { - args.DiskName = v.(string) - } - - if v, ok := d.GetOk("description"); ok && v.(string) != "" { - args.Description = v.(string) - } - - diskID, err := conn.CreateDisk(args) - if err != nil { - return fmt.Errorf("CreateDisk got a error: %#v", err) - } - - d.SetId(diskID) - - return resourceAliyunDiskUpdate(d, meta) -} - -func resourceAliyunDiskRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{ - RegionId: getRegion(d, meta), - DiskIds: []string{d.Id()}, - }) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error DescribeDiskAttribute: %#v", err) - } - - log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks) - - if disks == nil || len(disks) <= 0 { - return fmt.Errorf("No disks found.") - } - - disk := disks[0] - d.Set("availability_zone", disk.ZoneId) - d.Set("category", disk.Category) - d.Set("size", disk.Size) - d.Set("status", disk.Status) - d.Set("name", disk.DiskName) - d.Set("description", disk.Description) - d.Set("snapshot_id", disk.SourceSnapshotId) - - tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{ - RegionId: getRegion(d, meta), - ResourceType: ecs.TagResourceDisk, - ResourceId: d.Id(), - }) - - if err != nil { - log.Printf("[DEBUG] DescribeTags for disk got error: %#v", err) - } - - d.Set("tags", tagsToMap(tags)) - - return nil -} - -func resourceAliyunDiskUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.ecsconn - - d.Partial(true) - - if err := setTags(client, ecs.TagResourceDisk, d); err != nil { - log.Printf("[DEBUG] Set tags for instance got error: %#v", err) - return 
fmt.Errorf("Set tags for instance got error: %#v", err) - } else { - d.SetPartial("tags") - } - attributeUpdate := false - args := &ecs.ModifyDiskAttributeArgs{ - DiskId: d.Id(), - } - - if d.HasChange("name") { - d.SetPartial("name") - val := d.Get("name").(string) - args.DiskName = val - - attributeUpdate = true - } - - if d.HasChange("description") { - d.SetPartial("description") - val := d.Get("description").(string) - args.Description = val - - attributeUpdate = true - } - if attributeUpdate { - if err := conn.ModifyDiskAttribute(args); err != nil { - return err - } - } - - d.Partial(false) - - return resourceAliyunDiskRead(d, meta) -} - -func resourceAliyunDiskDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DeleteDisk(d.Id()) - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == DiskCreatingSnapshot { - return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted.")) - } - } - - disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{ - RegionId: getRegion(d, meta), - DiskIds: []string{d.Id()}, - }) - - if descErr != nil { - log.Printf("[ERROR] Delete disk is failed.") - return resource.NonRetryableError(descErr) - } - if disks == nil || len(disks) < 1 { - return nil - } - - return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted.")) - }) -} diff --git a/builtin/providers/alicloud/resource_alicloud_disk_attachment.go b/builtin/providers/alicloud/resource_alicloud_disk_attachment.go deleted file mode 100644 index 48a2c2c5c..000000000 --- a/builtin/providers/alicloud/resource_alicloud_disk_attachment.go +++ /dev/null @@ -1,176 +0,0 @@ -package alicloud - -import ( - "fmt" - "strings" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "log" - "time" -) - -func resourceAliyunDiskAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunDiskAttachmentCreate, - Read: resourceAliyunDiskAttachmentRead, - Delete: resourceAliyunDiskAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "disk_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "device_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceAliyunDiskAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - - err := diskAttachment(d, meta) - if err != nil { - return err - } - - d.SetId(d.Get("disk_id").(string) + ":" + d.Get("instance_id").(string)) - - return resourceAliyunDiskAttachmentRead(d, meta) -} - -func resourceAliyunDiskAttachmentRead(d *schema.ResourceData, meta interface{}) error { - diskId, instanceId, err := getDiskIDAndInstanceID(d, meta) - if err != nil { - return err - } - - conn := meta.(*AliyunClient).ecsconn - disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{ - RegionId: getRegion(d, meta), - InstanceId: instanceId, - DiskIds: []string{diskId}, - }) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error DescribeDiskAttribute: %#v", err) - } - - log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks) - if disks == nil || len(disks) <= 0 { - return fmt.Errorf("No Disks Found.") - } - - disk := disks[0] - d.Set("instance_id", disk.InstanceId) - d.Set("disk_id", disk.DiskId) - d.Set("device_name", disk.Device) - - return nil -} - -func resourceAliyunDiskAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - diskID, instanceID, err := 
getDiskIDAndInstanceID(d, meta) - if err != nil { - return err - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DetachDisk(instanceID, diskID) - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceLockedForSecurity { - return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it detaches")) - } - } - - disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{ - RegionId: getRegion(d, meta), - DiskIds: []string{diskID}, - }) - - if descErr != nil { - log.Printf("[ERROR] Disk %s is not detached.", diskID) - return resource.NonRetryableError(err) - } - - for _, disk := range disks { - if disk.Status != ecs.DiskStatusAvailable { - return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted.")) - } - } - return nil - }) -} - -func getDiskIDAndInstanceID(d *schema.ResourceData, meta interface{}) (string, string, error) { - parts := strings.Split(d.Id(), ":") - - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid resource id") - } - return parts[0], parts[1], nil -} -func diskAttachment(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - diskID := d.Get("disk_id").(string) - instanceID := d.Get("instance_id").(string) - - deviceName := d.Get("device_name").(string) - - args := &ecs.AttachDiskArgs{ - InstanceId: instanceID, - DiskId: diskID, - Device: deviceName, - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.AttachDisk(args) - log.Printf("error : %s", err) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceIncorrectStatus { - return resource.RetryableError(fmt.Errorf("Disk or Instance status is incorrect - trying again while it attaches")) - } - return resource.NonRetryableError(err) - } - - disks, _, descErr := 
conn.DescribeDisks(&ecs.DescribeDisksArgs{ - RegionId: getRegion(d, meta), - InstanceId: instanceID, - DiskIds: []string{diskID}, - }) - - if descErr != nil { - log.Printf("[ERROR] Disk %s is not attached.", diskID) - return resource.NonRetryableError(err) - } - - if disks == nil || len(disks) <= 0 { - return resource.RetryableError(fmt.Errorf("Disk in attaching - trying again while it is attached.")) - } - - return nil - - }) -} diff --git a/builtin/providers/alicloud/resource_alicloud_disk_attachment_test.go b/builtin/providers/alicloud/resource_alicloud_disk_attachment_test.go deleted file mode 100644 index 00239f5c5..000000000 --- a/builtin/providers/alicloud/resource_alicloud_disk_attachment_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "time" -) - -func TestAccAlicloudDiskAttachment(t *testing.T) { - var i ecs.InstanceAttributesType - var v ecs.DiskItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_disk_attachment.disk-att", - Providers: testAccProviders, - CheckDestroy: testAccCheckDiskAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDiskAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.instance", &i), - testAccCheckDiskExists( - "alicloud_disk.disk", &v), - testAccCheckDiskAttachmentExists( - "alicloud_disk_attachment.disk-att", &i, &v), - resource.TestCheckResourceAttr( - "alicloud_disk_attachment.disk-att", - "device_name", - "/dev/xvdb"), - ), - }, - }, - }) - -} - -func testAccCheckDiskAttachmentExists(n string, instance *ecs.InstanceAttributesType, disk *ecs.DiskItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { 
- return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Disk ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - - request := &ecs.DescribeDisksArgs{ - RegionId: client.Region, - DiskIds: []string{rs.Primary.Attributes["disk_id"]}, - } - - return resource.Retry(3*time.Minute, func() *resource.RetryError { - response, _, err := conn.DescribeDisks(request) - if response != nil { - for _, d := range response { - if d.Status != ecs.DiskStatusInUse { - return resource.RetryableError(fmt.Errorf("Disk is in attaching - trying again while it attaches")) - } else if d.InstanceId == instance.InstanceId { - // pass - *disk = d - return nil - } - } - } - if err != nil { - return resource.NonRetryableError(err) - } - - return resource.NonRetryableError(fmt.Errorf("Error finding instance/disk")) - }) - } -} - -func testAccCheckDiskAttachmentDestroy(s *terraform.State) error { - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_disk_attachment" { - continue - } - // Try to find the Disk - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - - request := &ecs.DescribeDisksArgs{ - RegionId: client.Region, - DiskIds: []string{rs.Primary.ID}, - } - - response, _, err := conn.DescribeDisks(request) - - for _, disk := range response { - if disk.Status != ecs.DiskStatusAvailable { - return fmt.Errorf("Error ECS Disk Attachment still exist") - } - } - - if err != nil { - // Verify the error is what we want - return err - } - } - - return nil -} - -const testAccDiskAttachmentConfig = ` -resource "alicloud_disk" "disk" { - availability_zone = "cn-beijing-a" - size = "50" - - tags { - Name = "TerraformTest-disk" - } -} - -resource "alicloud_instance" "instance" { - image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" - instance_type = "ecs.s1.small" - availability_zone = "cn-beijing-a" - security_groups = ["${alicloud_security_group.group.id}"] - 
instance_name = "hello" - internet_charge_type = "PayByBandwidth" - io_optimized = "none" - - tags { - Name = "TerraformTest-instance" - } -} - -resource "alicloud_disk_attachment" "disk-att" { - disk_id = "${alicloud_disk.disk.id}" - instance_id = "${alicloud_instance.instance.id}" - device_name = "/dev/xvdb" -} - -resource "alicloud_security_group" "group" { - name = "terraform-test-group" - description = "New security group" -} - -` diff --git a/builtin/providers/alicloud/resource_alicloud_disk_test.go b/builtin/providers/alicloud/resource_alicloud_disk_test.go deleted file mode 100644 index b8d73a662..000000000 --- a/builtin/providers/alicloud/resource_alicloud_disk_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" -) - -func TestAccAlicloudDisk_basic(t *testing.T) { - var v ecs.DiskItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_disk.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDiskConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDiskExists( - "alicloud_disk.foo", &v), - resource.TestCheckResourceAttr( - "alicloud_disk.foo", - "category", - "cloud_efficiency"), - resource.TestCheckResourceAttr( - "alicloud_disk.foo", - "size", - "30"), - ), - }, - }, - }) - -} - -func TestAccAlicloudDisk_withTags(t *testing.T) { - var v ecs.DiskItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - //module name - IDRefreshName: "alicloud_disk.bar", - - Providers: testAccProviders, - CheckDestroy: testAccCheckDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDiskConfigWithTags, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckDiskExists("alicloud_disk.bar", &v), - resource.TestCheckResourceAttr( - "alicloud_disk.bar", - "tags.Name", - "TerraformTest"), - ), - }, - }, - }) -} - -func testAccCheckDiskExists(n string, disk *ecs.DiskItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Disk ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - - request := &ecs.DescribeDisksArgs{ - RegionId: client.Region, - DiskIds: []string{rs.Primary.ID}, - } - - response, _, err := conn.DescribeDisks(request) - log.Printf("[WARN] disk ids %#v", rs.Primary.ID) - - if err == nil { - if response != nil && len(response) > 0 { - *disk = response[0] - return nil - } - } - return fmt.Errorf("Error finding ECS Disk %#v", rs.Primary.ID) - } -} - -func testAccCheckDiskDestroy(s *terraform.State) error { - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_disk" { - continue - } - - // Try to find the Disk - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - - request := &ecs.DescribeDisksArgs{ - RegionId: client.Region, - DiskIds: []string{rs.Primary.ID}, - } - - response, _, err := conn.DescribeDisks(request) - - if response != nil && len(response) > 0 { - return fmt.Errorf("Error ECS Disk still exist") - } - - if err != nil { - // Verify the error is what we want - return err - } - } - - return nil -} - -const testAccDiskConfig = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" -} - -resource "alicloud_disk" "foo" { - # cn-beijing - availability_zone = "${data.alicloud_zones.default.zones.0.id}" - name = "New-disk" - description = "Hello ecs disk." 
- category = "cloud_efficiency" - size = "30" -} -` -const testAccDiskConfigWithTags = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" -} - -resource "alicloud_disk" "bar" { - # cn-beijing - availability_zone = "${data.alicloud_zones.default.zones.0.id}" - category = "cloud_efficiency" - size = "20" - tags { - Name = "TerraformTest" - } -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_eip.go b/builtin/providers/alicloud/resource_alicloud_eip.go deleted file mode 100644 index f1c9621a8..000000000 --- a/builtin/providers/alicloud/resource_alicloud_eip.go +++ /dev/null @@ -1,157 +0,0 @@ -package alicloud - -import ( - "strconv" - - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "time" -) - -func resourceAliyunEip() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEipCreate, - Read: resourceAliyunEipRead, - Update: resourceAliyunEipUpdate, - Delete: resourceAliyunEipDelete, - - Schema: map[string]*schema.Schema{ - "bandwidth": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - "internet_charge_type": &schema.Schema{ - Type: schema.TypeString, - Default: "PayByBandwidth", - Optional: true, - ForceNew: true, - ValidateFunc: validateInternetChargeType, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "instance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAliyunEipCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - args, err := buildAliyunEipArgs(d, meta) - if err != nil { - return err - } - - _, allocationID, err := conn.AllocateEipAddress(args) - if err != nil { - return err - } - - 
d.SetId(allocationID) - - return resourceAliyunEipRead(d, meta) -} - -func resourceAliyunEipRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - eip, err := client.DescribeEipAddress(d.Id()) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe Eip Attribute: %#v", err) - } - - if eip.InstanceId != "" { - d.Set("instance", eip.InstanceId) - } else { - d.Set("instance", "") - return nil - } - - bandwidth, _ := strconv.Atoi(eip.Bandwidth) - d.Set("bandwidth", bandwidth) - d.Set("internet_charge_type", eip.InternetChargeType) - d.Set("ip_address", eip.IpAddress) - d.Set("status", eip.Status) - - return nil -} - -func resourceAliyunEipUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - d.Partial(true) - - if d.HasChange("bandwidth") { - err := conn.ModifyEipAddressAttribute(d.Id(), d.Get("bandwidth").(int)) - if err != nil { - return err - } - - d.SetPartial("bandwidth") - } - - d.Partial(false) - - return nil -} - -func resourceAliyunEipDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.ReleaseEipAddress(d.Id()) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == EipIncorrectStatus { - return resource.RetryableError(fmt.Errorf("EIP in use - trying again while it is deleted.")) - } - } - - args := &ecs.DescribeEipAddressesArgs{ - RegionId: getRegion(d, meta), - AllocationId: d.Id(), - } - - eips, _, descErr := conn.DescribeEipAddresses(args) - if descErr != nil { - return resource.NonRetryableError(descErr) - } else if eips == nil || len(eips) < 1 { - return nil - } - return resource.RetryableError(fmt.Errorf("EIP in use - trying again while it is deleted.")) - }) -} - -func buildAliyunEipArgs(d *schema.ResourceData, meta interface{}) (*ecs.AllocateEipAddressArgs, error) 
{ - - args := &ecs.AllocateEipAddressArgs{ - RegionId: getRegion(d, meta), - Bandwidth: d.Get("bandwidth").(int), - InternetChargeType: common.InternetChargeType(d.Get("internet_charge_type").(string)), - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_eip_association.go b/builtin/providers/alicloud/resource_alicloud_eip_association.go deleted file mode 100644 index 5f492b40b..000000000 --- a/builtin/providers/alicloud/resource_alicloud_eip_association.go +++ /dev/null @@ -1,131 +0,0 @@ -package alicloud - -import ( - "fmt" - "strings" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "time" -) - -func resourceAliyunEipAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEipAssociationCreate, - Read: resourceAliyunEipAssociationRead, - Delete: resourceAliyunEipAssociationDelete, - - Schema: map[string]*schema.Schema{ - "allocation_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceAliyunEipAssociationCreate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - allocationId := d.Get("allocation_id").(string) - instanceId := d.Get("instance_id").(string) - - if err := conn.AssociateEipAddress(allocationId, instanceId); err != nil { - return err - } - - d.SetId(allocationId + ":" + instanceId) - - return resourceAliyunEipAssociationRead(d, meta) -} - -func resourceAliyunEipAssociationRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - allocationId, instanceId, err := getAllocationIdAndInstanceId(d, meta) - if err != nil { - return err - } - - eip, err := 
client.DescribeEipAddress(allocationId) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe Eip Attribute: %#v", err) - } - - if eip.InstanceId != instanceId { - d.SetId("") - return nil - } - - d.Set("instance_id", eip.InstanceId) - d.Set("allocation_id", allocationId) - return nil -} - -func resourceAliyunEipAssociationDelete(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - allocationId, instanceId, err := getAllocationIdAndInstanceId(d, meta) - if err != nil { - return err - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.UnassociateEipAddress(allocationId, instanceId) - - if err != nil { - e, _ := err.(*common.Error) - errCode := e.ErrorResponse.Code - if errCode == InstanceIncorrectStatus || errCode == HaVipIncorrectStatus { - return resource.RetryableError(fmt.Errorf("Eip in use - trying again while make it unassociated.")) - } - } - - args := &ecs.DescribeEipAddressesArgs{ - RegionId: getRegion(d, meta), - AllocationId: allocationId, - } - - eips, _, descErr := conn.DescribeEipAddresses(args) - - if descErr != nil { - return resource.NonRetryableError(descErr) - } else if eips == nil || len(eips) < 1 { - return nil - } - for _, eip := range eips { - if eip.Status != ecs.EipStatusAvailable { - return resource.RetryableError(fmt.Errorf("Eip in use - trying again while make it unassociated.")) - } - } - - return nil - }) -} - -func getAllocationIdAndInstanceId(d *schema.ResourceData, meta interface{}) (string, string, error) { - parts := strings.Split(d.Id(), ":") - - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid resource id") - } - return parts[0], parts[1], nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_eip_association_test.go b/builtin/providers/alicloud/resource_alicloud_eip_association_test.go deleted file mode 100644 index 37c79f005..000000000 --- 
a/builtin/providers/alicloud/resource_alicloud_eip_association_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "time" -) - -func TestAccAlicloudEIPAssociation(t *testing.T) { - var asso ecs.EipAddressSetType - var inst ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_eip_association.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEIPAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEIPAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.instance", &inst), - testAccCheckEIPExists( - "alicloud_eip.eip", &asso), - testAccCheckEIPAssociationExists( - "alicloud_eip_association.foo", &inst, &asso), - ), - }, - }, - }) - -} - -func testAccCheckEIPAssociationExists(n string, instance *ecs.InstanceAttributesType, eip *ecs.EipAddressSetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP Association ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - return resource.Retry(3*time.Minute, func() *resource.RetryError { - d, err := client.DescribeEipAddress(rs.Primary.Attributes["allocation_id"]) - - if err != nil { - return resource.NonRetryableError(err) - } - - if d != nil { - if d.Status != ecs.EipStatusInUse { - return resource.RetryableError(fmt.Errorf("Eip is in associating - trying again while it associates")) - } else if d.InstanceId == instance.InstanceId { - *eip = *d - return nil - } - } - - return resource.NonRetryableError(fmt.Errorf("EIP Association not found")) - }) - } -} - 
-func testAccCheckEIPAssociationDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_eip_association" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP Association ID is set") - } - - // Try to find the EIP - eips, _, err := client.ecsconn.DescribeEipAddresses(&ecs.DescribeEipAddressesArgs{ - RegionId: client.Region, - AllocationId: rs.Primary.Attributes["allocation_id"], - }) - - for _, eip := range eips { - if eip.Status != ecs.EipStatusAvailable { - return fmt.Errorf("Error EIP Association still exist") - } - } - - // Verify the error is what we want - if err != nil { - return err - } - } - - return nil -} - -const testAccEIPAssociationConfig = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "main" { - cidr_block = "10.1.0.0/21" -} - -resource "alicloud_vswitch" "main" { - vpc_id = "${alicloud_vpc.main.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" - depends_on = [ - "alicloud_vpc.main"] -} - -resource "alicloud_instance" "instance" { - # cn-beijing - vswitch_id = "${alicloud_vswitch.main.id}" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.group.id}"] - instance_name = "test_foo" - - tags { - Name = "TerraformTest-instance" - } -} - -resource "alicloud_eip" "eip" { -} - -resource "alicloud_eip_association" "foo" { - allocation_id = "${alicloud_eip.eip.id}" - instance_id = "${alicloud_instance.instance.id}" -} - -resource "alicloud_security_group" "group" { - name = "terraform-test-group" - description = "New security group" - vpc_id = "${alicloud_vpc.main.id}" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_eip_test.go 
b/builtin/providers/alicloud/resource_alicloud_eip_test.go deleted file mode 100644 index 560f426ba..000000000 --- a/builtin/providers/alicloud/resource_alicloud_eip_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" -) - -func TestAccAlicloudEIP_basic(t *testing.T) { - var eip ecs.EipAddressSetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_eip.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEIPConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEIPExists( - "alicloud_eip.foo", &eip), - testAccCheckEIPAttributes(&eip), - ), - }, - resource.TestStep{ - Config: testAccEIPConfigTwo, - Check: resource.ComposeTestCheckFunc( - testAccCheckEIPExists( - "alicloud_eip.foo", &eip), - testAccCheckEIPAttributes(&eip), - resource.TestCheckResourceAttr( - "alicloud_eip.foo", - "bandwidth", - "10"), - ), - }, - }, - }) - -} - -func testAccCheckEIPExists(n string, eip *ecs.EipAddressSetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - d, err := client.DescribeEipAddress(rs.Primary.ID) - - log.Printf("[WARN] eip id %#v", rs.Primary.ID) - - if err != nil { - return err - } - - if d == nil || d.IpAddress == "" { - return fmt.Errorf("EIP not found") - } - - *eip = *d - return nil - } -} - -func testAccCheckEIPAttributes(eip *ecs.EipAddressSetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - if eip.IpAddress == "" { - return fmt.Errorf("Empty 
Ip address") - } - - return nil - } -} - -func testAccCheckEIPDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_eip" { - continue - } - - // Try to find the EIP - conn := client.ecsconn - - args := &ecs.DescribeEipAddressesArgs{ - RegionId: client.Region, - AllocationId: rs.Primary.ID, - } - d, _, err := conn.DescribeEipAddresses(args) - - if d != nil && len(d) > 0 { - return fmt.Errorf("Error EIP still exist") - } - - // Verify the error is what we want - if err != nil { - return err - } - } - - return nil -} - -const testAccEIPConfig = ` -resource "alicloud_eip" "foo" { -} -` - -const testAccEIPConfigTwo = ` -resource "alicloud_eip" "foo" { - bandwidth = "10" - internet_charge_type = "PayByBandwidth" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go deleted file mode 100644 index 3a8d94380..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration.go +++ /dev/null @@ -1,320 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAlicloudEssScalingConfiguration() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEssScalingConfigurationCreate, - Read: resourceAliyunEssScalingConfigurationRead, - Update: resourceAliyunEssScalingConfigurationUpdate, - Delete: resourceAliyunEssScalingConfigurationDelete, - - Schema: map[string]*schema.Schema{ - "active": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "enable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "scaling_group_id": &schema.Schema{ - 
Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "instance_type": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "io_optimized": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateIoOptimized, - }, - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "scaling_configuration_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "internet_charge_type": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - ValidateFunc: validateInternetChargeType, - }, - "internet_max_bandwidth_in": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - "internet_max_bandwidth_out": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validateInternetMaxBandWidthOut, - }, - "system_disk_category": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validateAllowedStringValue([]string{ - string(ecs.DiskCategoryCloud), - string(ecs.DiskCategoryCloudSSD), - string(ecs.DiskCategoryCloudEfficiency), - string(ecs.DiskCategoryEphemeralSSD), - }), - }, - "data_disk": &schema.Schema{ - Optional: true, - ForceNew: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "category": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "instance_ids": &schema.Schema{ - Type: schema.TypeList, - Elem: 
&schema.Schema{Type: schema.TypeString}, - Optional: true, - MaxItems: 20, - }, - }, - } -} - -func resourceAliyunEssScalingConfigurationCreate(d *schema.ResourceData, meta interface{}) error { - - args, err := buildAlicloudEssScalingConfigurationArgs(d, meta) - if err != nil { - return err - } - - essconn := meta.(*AliyunClient).essconn - - scaling, err := essconn.CreateScalingConfiguration(args) - if err != nil { - return err - } - - d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + scaling.ScalingConfigurationId) - - return resourceAliyunEssScalingConfigurationUpdate(d, meta) -} - -func resourceAliyunEssScalingConfigurationUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - if d.HasChange("active") { - active := d.Get("active").(bool) - if !active { - return fmt.Errorf("Please active the scaling configuration directly.") - } - ids := strings.Split(d.Id(), COLON_SEPARATED) - err := client.ActiveScalingConfigurationById(ids[0], ids[1]) - - if err != nil { - return fmt.Errorf("Active scaling configuration %s err: %#v", ids[1], err) - } - } - - if err := enableEssScalingConfiguration(d, meta); err != nil { - return err - } - - return resourceAliyunEssScalingConfigurationRead(d, meta) -} - -func enableEssScalingConfiguration(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - ids := strings.Split(d.Id(), COLON_SEPARATED) - - if d.HasChange("enable") { - d.SetPartial("enable") - enable := d.Get("enable").(bool) - if !enable { - err := client.DisableScalingConfigurationById(ids[0]) - - if err != nil { - return fmt.Errorf("Disable scaling group %s err: %#v", ids[0], err) - } - } - - instance_ids := []string{} - if d.HasChange("instance_ids") { - d.SetPartial("instance_ids") - instances := d.Get("instance_ids").([]interface{}) - instance_ids = expandStringList(instances) - } - err := client.EnableScalingConfigurationById(ids[0], ids[1], instance_ids) - - if err != nil { - return 
fmt.Errorf("Enable scaling configuration %s err: %#v", ids[1], err) - } - } - return nil -} - -func resourceAliyunEssScalingConfigurationRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - ids := strings.Split(d.Id(), COLON_SEPARATED) - c, err := client.DescribeScalingConfigurationById(ids[0], ids[1]) - if err != nil { - if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe ESS scaling configuration Attribute: %#v", err) - } - - d.Set("scaling_group_id", c.ScalingGroupId) - d.Set("active", c.LifecycleState == ess.Active) - d.Set("image_id", c.ImageId) - d.Set("instance_type", c.InstanceType) - d.Set("io_optimized", c.IoOptimized) - d.Set("security_group_id", c.SecurityGroupId) - d.Set("scaling_configuration_name", c.ScalingConfigurationName) - d.Set("internet_charge_type", c.InternetChargeType) - d.Set("internet_max_bandwidth_in", c.InternetMaxBandwidthIn) - d.Set("internet_max_bandwidth_out", c.InternetMaxBandwidthOut) - d.Set("system_disk_category", c.SystemDiskCategory) - d.Set("data_disk", flattenDataDiskMappings(c.DataDisks.DataDisk)) - - return nil -} - -func resourceAliyunEssScalingConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - ids := strings.Split(d.Id(), COLON_SEPARATED) - err := client.DeleteScalingConfigurationById(ids[0], ids[1]) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == IncorrectScalingConfigurationLifecycleState { - return resource.NonRetryableError( - fmt.Errorf("Scaling configuration is active - please active another one and trying again.")) - } - if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound { - return resource.RetryableError( - fmt.Errorf("Scaling configuration in use - trying again while it is deleted.")) - } - } - - _, err = 
client.DescribeScalingConfigurationById(ids[0], ids[1]) - if err != nil { - if notFoundError(err) { - return nil - } - return resource.NonRetryableError(err) - } - - return resource.RetryableError( - fmt.Errorf("Scaling configuration in use - trying again while it is deleted.")) - }) -} - -func buildAlicloudEssScalingConfigurationArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingConfigurationArgs, error) { - args := &ess.CreateScalingConfigurationArgs{ - ScalingGroupId: d.Get("scaling_group_id").(string), - ImageId: d.Get("image_id").(string), - InstanceType: d.Get("instance_type").(string), - IoOptimized: ecs.IoOptimized(d.Get("io_optimized").(string)), - SecurityGroupId: d.Get("security_group_id").(string), - } - - if v := d.Get("scaling_configuration_name").(string); v != "" { - args.ScalingConfigurationName = v - } - - if v := d.Get("internet_charge_type").(string); v != "" { - args.InternetChargeType = common.InternetChargeType(v) - } - - if v := d.Get("internet_max_bandwidth_in").(int); v != 0 { - args.InternetMaxBandwidthIn = v - } - - if v := d.Get("internet_max_bandwidth_out").(int); v != 0 { - args.InternetMaxBandwidthOut = v - } - - if v := d.Get("system_disk_category").(string); v != "" { - args.SystemDisk_Category = common.UnderlineString(v) - } - - dds, ok := d.GetOk("data_disk") - if ok { - disks := dds.([]interface{}) - diskTypes := []ess.DataDiskType{} - - for _, e := range disks { - pack := e.(map[string]interface{}) - disk := ess.DataDiskType{ - Size: pack["size"].(int), - Category: pack["category"].(string), - SnapshotId: pack["snapshot_id"].(string), - Device: pack["device"].(string), - } - if v := pack["size"].(int); v != 0 { - disk.Size = v - } - if v := pack["category"].(string); v != "" { - disk.Category = v - } - if v := pack["snapshot_id"].(string); v != "" { - disk.SnapshotId = v - } - if v := pack["device"].(string); v != "" { - disk.Device = v - } - diskTypes = append(diskTypes, disk) - } - args.DataDisk = diskTypes - } 
- - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go deleted file mode 100644 index 4a2269b38..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalingconfiguration_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "regexp" - "strings" - "testing" -) - -func TestAccAlicloudEssScalingConfiguration_basic(t *testing.T) { - var sc ess.ScalingConfigurationItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_configuration.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingConfigurationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingConfigurationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "image_id", - regexp.MustCompile("^centos_6")), - ), - }, - }, - }) -} - -func TestAccAlicloudEssScalingConfiguration_multiConfig(t *testing.T) { - var sc ess.ScalingConfigurationItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_configuration.bar", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingConfigurationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingConfiguration_multiConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.bar", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "active", - "false"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "image_id", - regexp.MustCompile("^centos_6")), - ), - }, - }, - }) -} - -func SkipTestAccAlicloudEssScalingConfiguration_active(t *testing.T) { - var sc ess.ScalingConfigurationItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_configuration.bar", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingConfigurationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingConfiguration_active, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.bar", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "active", - "true"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "image_id", - regexp.MustCompile("^centos_6")), - ), - }, - - resource.TestStep{ - Config: testAccEssScalingConfiguration_inActive, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.bar", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "active", - "false"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.bar", - "image_id", - 
regexp.MustCompile("^centos_6")), - ), - }, - }, - }) -} - -func SkipTestAccAlicloudEssScalingConfiguration_enable(t *testing.T) { - var sc ess.ScalingConfigurationItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_configuration.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingConfigurationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingConfiguration_enable, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "enable", - "true"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "image_id", - regexp.MustCompile("^centos_6")), - ), - }, - - resource.TestStep{ - Config: testAccEssScalingConfiguration_disable, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingConfigurationExists( - "alicloud_ess_scaling_configuration.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "enable", - "false"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "instance_type", - "ecs.s2.large"), - resource.TestMatchResourceAttr( - "alicloud_ess_scaling_configuration.foo", - "image_id", - regexp.MustCompile("^centos_6")), - ), - }, - }, - }) -} - -func testAccCheckEssScalingConfigurationExists(n string, d *ess.ScalingConfigurationItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ESS Scaling Configuration ID is set") - } - - client := 
testAccProvider.Meta().(*AliyunClient) - ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) - attr, err := client.DescribeScalingConfigurationById(ids[0], ids[1]) - log.Printf("[DEBUG] check scaling configuration %s attribute %#v", rs.Primary.ID, attr) - - if err != nil { - return err - } - - if attr == nil { - return fmt.Errorf("Scaling Configuration not found") - } - - *d = *attr - return nil - } -} - -func testAccCheckEssScalingConfigurationDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_ess_scaling_configuration" { - continue - } - ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) - ins, err := client.DescribeScalingConfigurationById(ids[0], ids[1]) - - if ins != nil { - return fmt.Errorf("Error ESS scaling configuration still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InstanceNotfound { - continue - } - return err - } - } - - return nil -} - -const testAccEssScalingConfigurationConfig = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized 
= "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` - -const testAccEssScalingConfiguration_multiConfig = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} - -resource "alicloud_ess_scaling_configuration" "bar" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` - -const testAccEssScalingConfiguration_active = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" 
- removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - active = true - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` - -const testAccEssScalingConfiguration_inActive = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - active = false - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` - -const testAccEssScalingConfiguration_enable = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name 
= "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - enable = true - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` - -const testAccEssScalingConfiguration_disable = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - enable = false - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go deleted file mode 100644 index 89f4154db..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup.go +++ /dev/null @@ -1,209 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAlicloudEssScalingGroup() 
*schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEssScalingGroupCreate, - Read: resourceAliyunEssScalingGroupRead, - Update: resourceAliyunEssScalingGroupUpdate, - Delete: resourceAliyunEssScalingGroupDelete, - - Schema: map[string]*schema.Schema{ - "min_size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(0, 100), - }, - "max_size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(0, 100), - }, - "scaling_group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "default_cooldown": &schema.Schema{ - Type: schema.TypeInt, - Default: 300, - Optional: true, - ValidateFunc: validateIntegerInRange(0, 86400), - }, - "vswitch_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "removal_policies": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - MaxItems: 2, - }, - "db_instance_ids": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - MaxItems: 3, - }, - "loadbalancer_ids": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - }, - } -} - -func resourceAliyunEssScalingGroupCreate(d *schema.ResourceData, meta interface{}) error { - - args, err := buildAlicloudEssScalingGroupArgs(d, meta) - if err != nil { - return err - } - - essconn := meta.(*AliyunClient).essconn - - scaling, err := essconn.CreateScalingGroup(args) - if err != nil { - return err - } - - d.SetId(scaling.ScalingGroupId) - - return resourceAliyunEssScalingGroupUpdate(d, meta) -} - -func resourceAliyunEssScalingGroupRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - - scaling, err := client.DescribeScalingGroupById(d.Id()) - if err != nil { - if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { - d.SetId("") - 
return nil - } - return fmt.Errorf("Error Describe ESS scaling group Attribute: %#v", err) - } - - d.Set("min_size", scaling.MinSize) - d.Set("max_size", scaling.MaxSize) - d.Set("scaling_group_name", scaling.ScalingGroupName) - d.Set("default_cooldown", scaling.DefaultCooldown) - d.Set("removal_policies", scaling.RemovalPolicies) - d.Set("db_instance_ids", scaling.DBInstanceIds) - d.Set("loadbalancer_ids", scaling.LoadBalancerId) - - return nil -} - -func resourceAliyunEssScalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).essconn - args := &ess.ModifyScalingGroupArgs{ - ScalingGroupId: d.Id(), - } - - if d.HasChange("scaling_group_name") { - args.ScalingGroupName = d.Get("scaling_group_name").(string) - } - - if d.HasChange("min_size") { - args.MinSize = d.Get("min_size").(int) - } - - if d.HasChange("max_size") { - args.MaxSize = d.Get("max_size").(int) - } - - if d.HasChange("default_cooldown") { - args.DefaultCooldown = d.Get("default_cooldown").(int) - } - - if d.HasChange("removal_policies") { - policyStrings := d.Get("removal_policies").([]interface{}) - args.RemovalPolicy = expandStringList(policyStrings) - } - - if _, err := conn.ModifyScalingGroup(args); err != nil { - return err - } - - return resourceAliyunEssScalingGroupRead(d, meta) -} - -func resourceAliyunEssScalingGroupDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - return resource.Retry(2*time.Minute, func() *resource.RetryError { - err := client.DeleteScalingGroupById(d.Id()) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code != InvalidScalingGroupIdNotFound { - return resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted.")) - } - } - - _, err = client.DescribeScalingGroupById(d.Id()) - if err != nil { - if notFoundError(err) { - return nil - } - return resource.NonRetryableError(err) - } - - return 
resource.RetryableError(fmt.Errorf("Scaling group in use - trying again while it is deleted.")) - }) -} - -func buildAlicloudEssScalingGroupArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingGroupArgs, error) { - client := meta.(*AliyunClient) - args := &ess.CreateScalingGroupArgs{ - RegionId: getRegion(d, meta), - MinSize: d.Get("min_size").(int), - MaxSize: d.Get("max_size").(int), - DefaultCooldown: d.Get("default_cooldown").(int), - } - - if v := d.Get("scaling_group_name").(string); v != "" { - args.ScalingGroupName = v - } - - if v := d.Get("vswitch_id").(string); v != "" { - args.VSwitchId = v - - // get vpcId - vpcId, err := client.GetVpcIdByVSwitchId(v) - - if err != nil { - return nil, fmt.Errorf("VswitchId %s is not valid of current region", v) - } - // fill vpcId by vswitchId - args.VpcId = vpcId - - } - - dbs, ok := d.GetOk("db_instance_ids") - if ok { - dbsStrings := dbs.([]interface{}) - args.DBInstanceId = expandStringList(dbsStrings) - } - - lbs, ok := d.GetOk("loadbalancer_ids") - if ok { - lbsStrings := lbs.([]interface{}) - args.LoadBalancerId = strings.Join(expandStringList(lbsStrings), COMMA_SEPARATED) - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go deleted file mode 100644 index e707035b1..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalinggroup_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "testing" -) - -func TestAccAlicloudEssScalingGroup_basic(t *testing.T) { - var sg ess.ScalingGroupItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_group.foo", - - 
Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingGroupExists( - "alicloud_ess_scaling_group.foo", &sg), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "min_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "max_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "scaling_group_name", - "foo"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "removal_policies.#", - "2", - ), - ), - }, - }, - }) - -} - -func TestAccAlicloudEssScalingGroup_update(t *testing.T) { - var sg ess.ScalingGroupItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_group.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingGroupExists( - "alicloud_ess_scaling_group.foo", &sg), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "min_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "max_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "scaling_group_name", - "foo"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "removal_policies.#", - "2", - ), - ), - }, - - resource.TestStep{ - Config: testAccEssScalingGroup_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingGroupExists( - "alicloud_ess_scaling_group.foo", &sg), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "min_size", - "2"), - resource.TestCheckResourceAttr( - 
"alicloud_ess_scaling_group.foo", - "max_size", - "2"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "scaling_group_name", - "update"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "removal_policies.#", - "1", - ), - ), - }, - }, - }) - -} - -func SkipTestAccAlicloudEssScalingGroup_vpc(t *testing.T) { - var sg ess.ScalingGroupItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_group.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingGroup_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingGroupExists( - "alicloud_ess_scaling_group.foo", &sg), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "min_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "max_size", - "1"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "scaling_group_name", - "foo"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_group.foo", - "removal_policies.#", - "2", - ), - ), - }, - }, - }) - -} - -func testAccCheckEssScalingGroupExists(n string, d *ess.ScalingGroupItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ESS Scaling Group ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - attr, err := client.DescribeScalingGroupById(rs.Primary.ID) - log.Printf("[DEBUG] check scaling group %s attribute %#v", rs.Primary.ID, attr) - - if err != nil { - return err - } - - if attr == nil { - return fmt.Errorf("Scaling Group not found") - } - - *d = *attr - return nil - } -} - -func testAccCheckEssScalingGroupDestroy(s *terraform.State) error 
{ - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_ess_scaling_group" { - continue - } - - ins, err := client.DescribeScalingGroupById(rs.Primary.ID) - - if ins != nil { - return fmt.Errorf("Error ESS scaling group still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InstanceNotfound { - continue - } - return err - } - } - - return nil -} - -const testAccEssScalingGroupConfig = ` -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} -` - -const testAccEssScalingGroup = ` -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - removal_policies = ["OldestInstance", "NewestInstance"] -} -` - -const testAccEssScalingGroup_update = ` -resource "alicloud_ess_scaling_group" "foo" { - min_size = 2 - max_size = 2 - scaling_group_name = "update" - removal_policies = ["OldestInstance"] -} -` -const testAccEssScalingGroup_vpc = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_ess_scaling_group" "foo" { - min_size = 1 - max_size = 1 - scaling_group_name = "foo" - default_cooldown = 20 - vswitch_id = "${alicloud_vswitch.foo.id}" - 
removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.foo.id}" - enable = true - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - internet_charge_type = "PayByTraffic" - internet_max_bandwidth_out = 10 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go deleted file mode 100644 index bfa1f904f..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule.go +++ /dev/null @@ -1,168 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAlicloudEssScalingRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEssScalingRuleCreate, - Read: resourceAliyunEssScalingRuleRead, - Update: resourceAliyunEssScalingRuleUpdate, - Delete: resourceAliyunEssScalingRuleDelete, - - Schema: map[string]*schema.Schema{ - "scaling_group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "adjustment_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAllowedStringValue([]string{string(ess.QuantityChangeInCapacity), - string(ess.PercentChangeInCapacity), string(ess.TotalCapacity)}), - }, - "adjustment_value": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "scaling_rule_name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "ari": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "cooldown": &schema.Schema{ - Type: 
schema.TypeInt, - Optional: true, - ValidateFunc: validateIntegerInRange(0, 86400), - }, - }, - } -} - -func resourceAliyunEssScalingRuleCreate(d *schema.ResourceData, meta interface{}) error { - - args, err := buildAlicloudEssScalingRuleArgs(d, meta) - if err != nil { - return err - } - - essconn := meta.(*AliyunClient).essconn - - rule, err := essconn.CreateScalingRule(args) - if err != nil { - return err - } - - d.SetId(d.Get("scaling_group_id").(string) + COLON_SEPARATED + rule.ScalingRuleId) - - return resourceAliyunEssScalingRuleUpdate(d, meta) -} - -func resourceAliyunEssScalingRuleRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - ids := strings.Split(d.Id(), COLON_SEPARATED) - - rule, err := client.DescribeScalingRuleById(ids[0], ids[1]) - if err != nil { - if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe ESS scaling rule Attribute: %#v", err) - } - - d.Set("scaling_group_id", rule.ScalingGroupId) - d.Set("ari", rule.ScalingRuleAri) - d.Set("adjustment_type", rule.AdjustmentType) - d.Set("adjustment_value", rule.AdjustmentValue) - d.Set("scaling_rule_name", rule.ScalingRuleName) - d.Set("cooldown", rule.Cooldown) - - return nil -} - -func resourceAliyunEssScalingRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - ids := strings.Split(d.Id(), COLON_SEPARATED) - - return resource.Retry(2*time.Minute, func() *resource.RetryError { - err := client.DeleteScalingRuleById(ids[1]) - - if err != nil { - return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted.")) - } - - _, err = client.DescribeScalingRuleById(ids[0], ids[1]) - if err != nil { - if notFoundError(err) { - return nil - } - return resource.NonRetryableError(err) - } - - return resource.RetryableError(fmt.Errorf("Scaling rule in use - trying again while it is deleted.")) - }) -} - -func 
resourceAliyunEssScalingRuleUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).essconn - ids := strings.Split(d.Id(), COLON_SEPARATED) - - args := &ess.ModifyScalingRuleArgs{ - ScalingRuleId: ids[1], - } - - if d.HasChange("adjustment_type") { - args.AdjustmentType = ess.AdjustmentType(d.Get("adjustment_type").(string)) - } - - if d.HasChange("adjustment_value") { - args.AdjustmentValue = d.Get("adjustment_value").(int) - } - - if d.HasChange("scaling_rule_name") { - args.ScalingRuleName = d.Get("scaling_rule_name").(string) - } - - if d.HasChange("cooldown") { - args.Cooldown = d.Get("cooldown").(int) - } - - if _, err := conn.ModifyScalingRule(args); err != nil { - return err - } - - return resourceAliyunEssScalingRuleRead(d, meta) -} - -func buildAlicloudEssScalingRuleArgs(d *schema.ResourceData, meta interface{}) (*ess.CreateScalingRuleArgs, error) { - args := &ess.CreateScalingRuleArgs{ - RegionId: getRegion(d, meta), - ScalingGroupId: d.Get("scaling_group_id").(string), - AdjustmentType: ess.AdjustmentType(d.Get("adjustment_type").(string)), - AdjustmentValue: d.Get("adjustment_value").(int), - } - - if v := d.Get("scaling_rule_name").(string); v != "" { - args.ScalingRuleName = v - } - - if v := d.Get("cooldown").(int); v != 0 { - args.Cooldown = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go b/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go deleted file mode 100644 index 81020a747..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_scalingrule_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "strings" - "testing" -) - -func TestAccAlicloudEssScalingRule_basic(t *testing.T) { - var sc ess.ScalingRuleItemType - 
- resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_rule.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingRuleConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingRuleExists( - "alicloud_ess_scaling_rule.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_type", - "TotalCapacity"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_value", - "1"), - ), - }, - }, - }) -} - -func TestAccAlicloudEssScalingRule_update(t *testing.T) { - var sc ess.ScalingRuleItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_scaling_rule.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScalingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScalingRule, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingRuleExists( - "alicloud_ess_scaling_rule.foo", &sc), - testAccCheckEssScalingRuleExists( - "alicloud_ess_scaling_rule.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_type", - "TotalCapacity"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_value", - "1"), - ), - }, - - resource.TestStep{ - Config: testAccEssScalingRule_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScalingRuleExists( - "alicloud_ess_scaling_rule.foo", &sc), - testAccCheckEssScalingRuleExists( - "alicloud_ess_scaling_rule.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_type", - "TotalCapacity"), - resource.TestCheckResourceAttr( - "alicloud_ess_scaling_rule.foo", - "adjustment_value", - "2"), - ), - }, - }, - }) -} - 
-func testAccCheckEssScalingRuleExists(n string, d *ess.ScalingRuleItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ESS Scaling Rule ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) - attr, err := client.DescribeScalingRuleById(ids[0], ids[1]) - log.Printf("[DEBUG] check scaling rule %s attribute %#v", rs.Primary.ID, attr) - - if err != nil { - return err - } - - if attr == nil { - return fmt.Errorf("Scaling rule not found") - } - - *d = *attr - return nil - } -} - -func testAccCheckEssScalingRuleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_ess_scaling_rule" { - continue - } - ids := strings.Split(rs.Primary.ID, COLON_SEPARATED) - ins, err := client.DescribeScalingRuleById(ids[0], ids[1]) - - if ins != nil { - return fmt.Errorf("Error ESS scaling rule still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InstanceNotfound { - continue - } - return err - } - } - - return nil -} - -const testAccEssScalingRuleConfig = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "bar" { - min_size = 1 - max_size = 1 - scaling_group_name = "bar" - removal_policies = 
["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} - -resource "alicloud_ess_scaling_rule" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - adjustment_type = "TotalCapacity" - adjustment_value = 1 - cooldown = 120 -} -` - -const testAccEssScalingRule = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "bar" { - min_size = 1 - max_size = 1 - scaling_group_name = "bar" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} - -resource "alicloud_ess_scaling_rule" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - adjustment_type = "TotalCapacity" - adjustment_value = 1 - cooldown = 120 -} -` - -const testAccEssScalingRule_update = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - 
ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "bar" { - min_size = 1 - max_size = 1 - scaling_group_name = "bar" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} - -resource "alicloud_ess_scaling_rule" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - adjustment_type = "TotalCapacity" - adjustment_value = 2 - cooldown = 60 -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_ess_schedule.go b/builtin/providers/alicloud/resource_alicloud_ess_schedule.go deleted file mode 100644 index 4e5660a50..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_schedule.go +++ /dev/null @@ -1,220 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "time" -) - -func resourceAlicloudEssSchedule() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunEssScheduleCreate, - Read: resourceAliyunEssScheduleRead, - Update: resourceAliyunEssScheduleUpdate, - Delete: resourceAliyunEssScheduleDelete, - - Schema: map[string]*schema.Schema{ - "scheduled_action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "launch_time": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "scheduled_task_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - 
Computed: true, - Optional: true, - }, - "launch_expiration_time": &schema.Schema{ - Type: schema.TypeInt, - Default: 600, - Optional: true, - ValidateFunc: validateIntegerInRange(0, 21600), - }, - "recurrence_type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateAllowedStringValue([]string{string(ess.Daily), - string(ess.Weekly), string(ess.Monthly)}), - }, - "recurrence_value": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "recurrence_end_time": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "task_enabled": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - }, - } -} - -func resourceAliyunEssScheduleCreate(d *schema.ResourceData, meta interface{}) error { - - args, err := buildAlicloudEssScheduleArgs(d, meta) - if err != nil { - return err - } - - essconn := meta.(*AliyunClient).essconn - - rule, err := essconn.CreateScheduledTask(args) - if err != nil { - return err - } - - d.SetId(rule.ScheduledTaskId) - - return resourceAliyunEssScheduleUpdate(d, meta) -} - -func resourceAliyunEssScheduleRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - - rule, err := client.DescribeScheduleById(d.Id()) - if err != nil { - if e, ok := err.(*common.Error); ok && e.Code == InstanceNotfound { - d.SetId("") - return nil - } - return fmt.Errorf("Error Describe ESS schedule Attribute: %#v", err) - } - - d.Set("scheduled_action", rule.ScheduledAction) - d.Set("launch_time", rule.LaunchTime) - d.Set("scheduled_task_name", rule.ScheduledTaskName) - d.Set("description", rule.Description) - d.Set("launch_expiration_time", rule.LaunchExpirationTime) - d.Set("recurrence_type", rule.RecurrenceType) - d.Set("recurrence_value", rule.RecurrenceValue) - d.Set("recurrence_end_time", rule.RecurrenceEndTime) - d.Set("task_enabled", rule.TaskEnabled) - - return nil -} - -func 
resourceAliyunEssScheduleUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).essconn - - args := &ess.ModifyScheduledTaskArgs{ - ScheduledTaskId: d.Id(), - } - - if d.HasChange("scheduled_task_name") { - args.ScheduledTaskName = d.Get("scheduled_task_name").(string) - } - - if d.HasChange("description") { - args.Description = d.Get("description").(string) - } - - if d.HasChange("scheduled_action") { - args.ScheduledAction = d.Get("scheduled_action").(string) - } - - if d.HasChange("launch_time") { - args.LaunchTime = d.Get("launch_time").(string) - } - - if d.HasChange("launch_expiration_time") { - args.LaunchExpirationTime = d.Get("launch_expiration_time").(int) - } - - if d.HasChange("recurrence_type") { - args.RecurrenceType = ess.RecurrenceType(d.Get("recurrence_type").(string)) - } - - if d.HasChange("recurrence_value") { - args.RecurrenceValue = d.Get("recurrence_value").(string) - } - - if d.HasChange("recurrence_end_time") { - args.RecurrenceEndTime = d.Get("recurrence_end_time").(string) - } - - if d.HasChange("task_enabled") { - args.TaskEnabled = d.Get("task_enabled").(bool) - } - - if _, err := conn.ModifyScheduledTask(args); err != nil { - return err - } - - return resourceAliyunEssScheduleRead(d, meta) -} - -func resourceAliyunEssScheduleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - return resource.Retry(2*time.Minute, func() *resource.RetryError { - err := client.DeleteScheduleById(d.Id()) - - if err != nil { - return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted.")) - } - - _, err = client.DescribeScheduleById(d.Id()) - if err != nil { - if notFoundError(err) { - return nil - } - return resource.NonRetryableError(err) - } - - return resource.RetryableError(fmt.Errorf("Scaling schedule in use - trying again while it is deleted.")) - }) -} - -func buildAlicloudEssScheduleArgs(d *schema.ResourceData, meta interface{}) 
(*ess.CreateScheduledTaskArgs, error) { - args := &ess.CreateScheduledTaskArgs{ - RegionId: getRegion(d, meta), - ScheduledAction: d.Get("scheduled_action").(string), - LaunchTime: d.Get("launch_time").(string), - TaskEnabled: d.Get("task_enabled").(bool), - } - - if v := d.Get("scheduled_task_name").(string); v != "" { - args.ScheduledTaskName = v - } - - if v := d.Get("description").(string); v != "" { - args.Description = v - } - - if v := d.Get("recurrence_type").(string); v != "" { - args.RecurrenceType = ess.RecurrenceType(v) - } - - if v := d.Get("recurrence_value").(string); v != "" { - args.RecurrenceValue = v - } - - if v := d.Get("recurrence_end_time").(string); v != "" { - args.RecurrenceEndTime = v - } - - if v := d.Get("launch_expiration_time").(int); v != 0 { - args.LaunchExpirationTime = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go b/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go deleted file mode 100644 index cb8044cc4..000000000 --- a/builtin/providers/alicloud/resource_alicloud_ess_schedule_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ess" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "testing" -) - -func TestAccAlicloudEssSchedule_basic(t *testing.T) { - var sc ess.ScheduledTaskItemType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_ess_schedule.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckEssScheduleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEssScheduleConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEssScheduleExists( - "alicloud_ess_schedule.foo", &sc), - resource.TestCheckResourceAttr( - "alicloud_ess_schedule.foo", - "launch_time", - 
"2017-04-29T07:30Z"), - resource.TestCheckResourceAttr( - "alicloud_ess_schedule.foo", - "task_enabled", - "true"), - ), - }, - }, - }) -} - -func testAccCheckEssScheduleExists(n string, d *ess.ScheduledTaskItemType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ESS Schedule ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - attr, err := client.DescribeScheduleById(rs.Primary.ID) - log.Printf("[DEBUG] check schedule %s attribute %#v", rs.Primary.ID, attr) - - if err != nil { - return err - } - - if attr == nil { - return fmt.Errorf("Ess schedule not found") - } - - *d = *attr - return nil - } -} - -func testAccCheckEssScheduleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_ess_schedule" { - continue - } - ins, err := client.DescribeScheduleById(rs.Primary.ID) - - if ins != nil { - return fmt.Errorf("Error ESS schedule still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InstanceNotfound { - continue - } - return err - } - } - - return nil -} - -const testAccEssScheduleConfig = ` -data "alicloud_images" "ecs_image" { - most_recent = true - name_regex = "^centos_6\\w{1,5}[64].*" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_ess_scaling_group" "bar" { - min_size = 1 - max_size = 1 - scaling_group_name = 
"bar" - removal_policies = ["OldestInstance", "NewestInstance"] -} - -resource "alicloud_ess_scaling_configuration" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - - image_id = "${data.alicloud_images.ecs_image.images.0.id}" - instance_type = "ecs.s2.large" - io_optimized = "optimized" - security_group_id = "${alicloud_security_group.tf_test_foo.id}" -} - -resource "alicloud_ess_scaling_rule" "foo" { - scaling_group_id = "${alicloud_ess_scaling_group.bar.id}" - adjustment_type = "TotalCapacity" - adjustment_value = 2 - cooldown = 60 -} - -resource "alicloud_ess_schedule" "foo" { - scheduled_action = "${alicloud_ess_scaling_rule.foo.ari}" - launch_time = "2017-04-29T07:30Z" - scheduled_task_name = "tf-foo" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_forward.go b/builtin/providers/alicloud/resource_alicloud_forward.go deleted file mode 100644 index 8f75c54d0..000000000 --- a/builtin/providers/alicloud/resource_alicloud_forward.go +++ /dev/null @@ -1,165 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAliyunForwardEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunForwardEntryCreate, - Read: resourceAliyunForwardEntryRead, - Update: resourceAliyunForwardEntryUpdate, - Delete: resourceAliyunForwardEntryDelete, - - Schema: map[string]*schema.Schema{ - "forward_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "external_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "external_port": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateForwardPort, - }, - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAllowedStringValue([]string{"tcp", "udp", "any"}), - }, - "internal_ip": &schema.Schema{ - Type: schema.TypeString, - Required: 
true, - }, - "internal_port": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateForwardPort, - }, - }, - } -} - -func resourceAliyunForwardEntryCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).vpcconn - - args := &ecs.CreateForwardEntryArgs{ - RegionId: getRegion(d, meta), - ForwardTableId: d.Get("forward_table_id").(string), - ExternalIp: d.Get("external_ip").(string), - ExternalPort: d.Get("external_port").(string), - IpProtocol: d.Get("ip_protocol").(string), - InternalIp: d.Get("internal_ip").(string), - InternalPort: d.Get("internal_port").(string), - } - - resp, err := conn.CreateForwardEntry(args) - if err != nil { - return fmt.Errorf("CreateForwardEntry got error: %#v", err) - } - - d.SetId(resp.ForwardEntryId) - d.Set("forward_table_id", d.Get("forward_table_id").(string)) - - return resourceAliyunForwardEntryRead(d, meta) -} - -func resourceAliyunForwardEntryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id()) - - if err != nil { - if notFoundError(err) { - return nil - } - return err - } - - d.Set("forward_table_id", forwardEntry.ForwardTableId) - d.Set("external_ip", forwardEntry.ExternalIp) - d.Set("external_port", forwardEntry.ExternalPort) - d.Set("ip_protocol", forwardEntry.IpProtocol) - d.Set("internal_ip", forwardEntry.InternalIp) - d.Set("internal_port", forwardEntry.InternalPort) - - return nil -} - -func resourceAliyunForwardEntryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.vpcconn - - forwardEntry, err := client.DescribeForwardEntry(d.Get("forward_table_id").(string), d.Id()) - if err != nil { - return err - } - - d.Partial(true) - attributeUpdate := false - args := &ecs.ModifyForwardEntryArgs{ - RegionId: getRegion(d, meta), - ForwardTableId: forwardEntry.ForwardTableId, - 
ForwardEntryId: forwardEntry.ForwardEntryId, - ExternalIp: forwardEntry.ExternalIp, - IpProtocol: forwardEntry.IpProtocol, - ExternalPort: forwardEntry.ExternalPort, - InternalIp: forwardEntry.InternalIp, - InternalPort: forwardEntry.InternalPort, - } - - if d.HasChange("external_port") { - d.SetPartial("external_port") - args.ExternalPort = d.Get("external_port").(string) - attributeUpdate = true - } - - if d.HasChange("ip_protocol") { - d.SetPartial("ip_protocol") - args.IpProtocol = d.Get("ip_protocol").(string) - attributeUpdate = true - } - - if d.HasChange("internal_port") { - d.SetPartial("internal_port") - args.InternalPort = d.Get("internal_port").(string) - attributeUpdate = true - } - - if attributeUpdate { - if err := conn.ModifyForwardEntry(args); err != nil { - return err - } - } - - d.Partial(false) - - return resourceAliyunForwardEntryRead(d, meta) -} - -func resourceAliyunForwardEntryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.vpcconn - - forwardEntryId := d.Id() - forwardTableId := d.Get("forward_table_id").(string) - - args := &ecs.DeleteForwardEntryArgs{ - RegionId: getRegion(d, meta), - ForwardTableId: forwardTableId, - ForwardEntryId: forwardEntryId, - } - - if err := conn.DeleteForwardEntry(args); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_forward_test.go b/builtin/providers/alicloud/resource_alicloud_forward_test.go deleted file mode 100644 index 60a67f322..000000000 --- a/builtin/providers/alicloud/resource_alicloud_forward_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "testing" -) - -func TestAccAlicloudForward_basic(t *testing.T) { - var forward ecs.ForwardTableEntrySetType - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_forward_entry.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckForwardEntryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccForwardEntryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckForwardEntryExists( - "alicloud_forward_entry.foo", &forward), - ), - }, - - resource.TestStep{ - Config: testAccForwardEntryUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckForwardEntryExists( - "alicloud_forward_entry.foo", &forward), - ), - }, - }, - }) - -} - -func testAccCheckForwardEntryDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_snat_entry" { - continue - } - - // Try to find the Snat entry - instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID) - - //this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error" - if instance.ForwardEntryId == "" { - return nil - } - - if instance.ForwardEntryId != "" { - return fmt.Errorf("Forward entry still exist") - } - - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - - if !notFoundError(e) { - return err - } - } - - } - - return nil -} - -func testAccCheckForwardEntryExists(n string, snat *ecs.ForwardTableEntrySetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ForwardEntry ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.DescribeForwardEntry(rs.Primary.Attributes["forward_table_id"], rs.Primary.ID) - - if err != nil { - return err - } - if instance.ForwardEntryId == "" { - return 
fmt.Errorf("ForwardEntry not found") - } - - *snat = instance - return nil - } -} - -const testAccForwardEntryConfig = ` -provider "alicloud"{ - region = "cn-hangzhou" -} - -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Small" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 1 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.0.id}" - },{ - ip_count = 1 - bandwidth = 6 - zone = "${data.alicloud_zones.default.zones.0.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} - -resource "alicloud_forward_entry" "foo"{ - forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}" - external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" - external_port = "80" - ip_protocol = "tcp" - internal_ip = "172.16.0.3" - internal_port = "8080" -} - -resource "alicloud_forward_entry" "foo1"{ - forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}" - external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" - external_port = "443" - ip_protocol = "udp" - internal_ip = "172.16.0.4" - internal_port = "8080" -} -` - -const testAccForwardEntryUpdate = ` -provider "alicloud"{ - region = "cn-hangzhou" -} - -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = 
"${alicloud_vpc.foo.id}" - spec = "Small" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 1 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.0.id}" - },{ - ip_count = 1 - bandwidth = 6 - zone = "${data.alicloud_zones.default.zones.0.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} - -resource "alicloud_forward_entry" "foo"{ - forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}" - external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" - external_port = "80" - ip_protocol = "tcp" - internal_ip = "172.16.0.3" - internal_port = "8081" -} - - -resource "alicloud_forward_entry" "foo1"{ - forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}" - external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" - external_port = "22" - ip_protocol = "udp" - internal_ip = "172.16.0.4" - internal_port = "8080" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_instance.go b/builtin/providers/alicloud/resource_alicloud_instance.go deleted file mode 100644 index f6884ea83..000000000 --- a/builtin/providers/alicloud/resource_alicloud_instance.go +++ /dev/null @@ -1,700 +0,0 @@ -package alicloud - -import ( - "fmt" - "log" - - "encoding/base64" - "encoding/json" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAliyunInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunInstanceCreate, - Read: resourceAliyunInstanceRead, - Update: resourceAliyunInstanceUpdate, - Delete: resourceAliyunInstanceDelete, - - Schema: map[string]*schema.Schema{ - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "image_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "instance_type": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - }, - - "allocate_public_ip": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "instance_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "ECS-Instance", - ValidateFunc: validateInstanceName, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateInstanceDescription, - }, - - "internet_charge_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateInternetChargeType, - }, - "internet_max_bandwidth_in": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "internet_max_bandwidth_out": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validateInternetMaxBandWidthOut, - }, - "host_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - "io_optimized": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateIoOptimized, - }, - - "system_disk_category": &schema.Schema{ - Type: schema.TypeString, - Default: "cloud", - Optional: true, - ForceNew: true, - ValidateFunc: validateAllowedStringValue([]string{ - string(ecs.DiskCategoryCloud), - string(ecs.DiskCategoryCloudSSD), - string(ecs.DiskCategoryCloudEfficiency), - string(ecs.DiskCategoryEphemeralSSD), - }), - }, - "system_disk_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validateIntegerInRange(40, 500), - }, - - //subnet_id and vswitch_id both exists, cause compatible old version, and aws habit. 
- "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, //add this schema cause subnet_id not used enter parameter, will different, so will be ForceNew - }, - - "vswitch_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "instance_charge_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateInstanceChargeType, - }, - "period": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "public_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAliyunInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - // create postpaid instance by runInstances API - if v := d.Get("instance_charge_type").(string); v != string(common.PrePaid) { - return resourceAliyunRunInstance(d, meta) - } - - args, err := buildAliyunInstanceArgs(d, meta) - if err != nil { - return err - } - - instanceID, err := conn.CreateInstance(args) - if err != nil { - return fmt.Errorf("Error creating Aliyun ecs instance: %#v", err) - } - - d.SetId(instanceID) - - d.Set("password", d.Get("password")) - - // after instance created, its status is pending, - // so we need to wait it become to stopped and then start it - if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Stopped, defaultTimeout); err != nil { - return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Stopped, err) - } - - if err := allocateIpAndBandWidthRelative(d, meta); err != nil { - return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err) - } - - 
if err := conn.StartInstance(d.Id()); err != nil { - return fmt.Errorf("Start instance got error: %#v", err) - } - - if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil { - return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err) - } - - return resourceAliyunInstanceUpdate(d, meta) -} - -func resourceAliyunRunInstance(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - newConn := meta.(*AliyunClient).ecsNewconn - - args, err := buildAliyunInstanceArgs(d, meta) - if err != nil { - return err - } - - if args.IoOptimized == "optimized" { - args.IoOptimized = ecs.IoOptimized("true") - } else { - args.IoOptimized = ecs.IoOptimized("false") - } - - runArgs, err := buildAliyunRunInstancesArgs(d, meta) - if err != nil { - return err - } - - runArgs.CreateInstanceArgs = *args - - // runInstances is support in version 2016-03-14 - instanceIds, err := newConn.RunInstances(runArgs) - - if err != nil { - return fmt.Errorf("Error creating Aliyun ecs instance: %#v", err) - } - - d.SetId(instanceIds[0]) - - d.Set("password", d.Get("password")) - d.Set("system_disk_category", d.Get("system_disk_category")) - d.Set("system_disk_size", d.Get("system_disk_size")) - - // after instance created, its status change from pending, starting to running - if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil { - return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err) - } - - if err := allocateIpAndBandWidthRelative(d, meta); err != nil { - return fmt.Errorf("allocateIpAndBandWidthRelative err: %#v", err) - } - - if err := conn.WaitForInstanceAsyn(d.Id(), ecs.Running, defaultTimeout); err != nil { - return fmt.Errorf("[DEBUG] WaitForInstance %s got error: %#v", ecs.Running, err) - } - - return resourceAliyunInstanceUpdate(d, meta) -} - -func resourceAliyunInstanceRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*AliyunClient) - conn := client.ecsconn - - instance, err := client.QueryInstancesById(d.Id()) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error DescribeInstanceAttribute: %#v", err) - } - - disk, diskErr := client.QueryInstanceSystemDisk(d.Id()) - - if diskErr != nil { - if notFoundError(diskErr) { - d.SetId("") - return nil - } - return fmt.Errorf("Error DescribeSystemDisk: %#v", err) - } - - d.Set("instance_name", instance.InstanceName) - d.Set("description", instance.Description) - d.Set("status", instance.Status) - d.Set("availability_zone", instance.ZoneId) - d.Set("host_name", instance.HostName) - d.Set("image_id", instance.ImageId) - d.Set("instance_type", instance.InstanceType) - d.Set("system_disk_category", disk.Category) - d.Set("system_disk_size", disk.Size) - - // In Classic network, internet_charge_type is valid in any case, and its default value is 'PayByBanwidth'. - // In VPC network, internet_charge_type is valid when instance has public ip, and its default value is 'PayByBanwidth'. 
- d.Set("internet_charge_type", instance.InternetChargeType) - - if d.Get("allocate_public_ip").(bool) { - d.Set("public_ip", instance.PublicIpAddress.IpAddress[0]) - } - - if ecs.StringOrBool(instance.IoOptimized).Value { - d.Set("io_optimized", "optimized") - } else { - d.Set("io_optimized", "none") - } - - if d.Get("subnet_id").(string) != "" || d.Get("vswitch_id").(string) != "" { - ipAddress := instance.VpcAttributes.PrivateIpAddress.IpAddress[0] - d.Set("private_ip", ipAddress) - d.Set("subnet_id", instance.VpcAttributes.VSwitchId) - d.Set("vswitch_id", instance.VpcAttributes.VSwitchId) - } else { - ipAddress := strings.Join(ecs.IpAddressSetType(instance.InnerIpAddress).IpAddress, ",") - d.Set("private_ip", ipAddress) - } - - if d.Get("user_data").(string) != "" { - ud, err := conn.DescribeUserdata(&ecs.DescribeUserdataArgs{ - RegionId: getRegion(d, meta), - InstanceId: d.Id(), - }) - - if err != nil { - log.Printf("[ERROR] DescribeUserData for instance got error: %#v", err) - } - d.Set("user_data", userDataHashSum(ud.UserData)) - } - - tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{ - RegionId: getRegion(d, meta), - ResourceType: ecs.TagResourceInstance, - ResourceId: d.Id(), - }) - - if err != nil { - log.Printf("[ERROR] DescribeTags for instance got error: %#v", err) - } - d.Set("tags", tagsToMap(tags)) - - return nil -} - -func resourceAliyunInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - conn := client.ecsconn - - d.Partial(true) - - if err := setTags(client, ecs.TagResourceInstance, d); err != nil { - log.Printf("[DEBUG] Set tags for instance got error: %#v", err) - return fmt.Errorf("Set tags for instance got error: %#v", err) - } else { - d.SetPartial("tags") - } - - imageUpdate := false - if d.HasChange("image_id") && !d.IsNewResource() { - log.Printf("[DEBUG] Replace instance system disk via changing image_id") - replaceSystemArgs := &ecs.ReplaceSystemDiskArgs{ - InstanceId: d.Id(), - 
ImageId: d.Get("image_id").(string), - SystemDisk: ecs.SystemDiskType{ - Size: d.Get("system_disk_size").(int), - }, - } - if v, ok := d.GetOk("status"); ok && v.(string) != "" { - if ecs.InstanceStatus(d.Get("status").(string)) == ecs.Running { - log.Printf("[DEBUG] StopInstance before change system disk") - if err := conn.StopInstance(d.Id(), true); err != nil { - return fmt.Errorf("Force Stop Instance got an error: %#v", err) - } - if err := conn.WaitForInstance(d.Id(), ecs.Stopped, 60); err != nil { - return fmt.Errorf("WaitForInstance got error: %#v", err) - } - } - } - _, err := conn.ReplaceSystemDisk(replaceSystemArgs) - if err != nil { - return fmt.Errorf("Replace system disk got an error: %#v", err) - } - // Ensure instance's image has been replaced successfully. - timeout := ecs.InstanceDefaultTimeout - for { - instance, errDesc := conn.DescribeInstanceAttribute(d.Id()) - if errDesc != nil { - return fmt.Errorf("Describe instance got an error: %#v", errDesc) - } - if instance.ImageId == d.Get("image_id") { - break - } - time.Sleep(ecs.DefaultWaitForInterval * time.Second) - timeout = timeout - ecs.DefaultWaitForInterval - if timeout <= 0 { - return common.GetClientErrorFromString("Timeout") - } - } - imageUpdate = true - d.SetPartial("system_disk_size") - d.SetPartial("image_id") - } - // Provider doesn't support change 'system_disk_size'separately. - if d.HasChange("system_disk_size") && !d.HasChange("image_id") { - return fmt.Errorf("Update resource failed. 'system_disk_size' isn't allowed to change separately. 
You can update it via renewing instance or replacing system disk.") - } - - attributeUpdate := false - args := &ecs.ModifyInstanceAttributeArgs{ - InstanceId: d.Id(), - } - - if d.HasChange("instance_name") && !d.IsNewResource() { - log.Printf("[DEBUG] ModifyInstanceAttribute instance_name") - d.SetPartial("instance_name") - args.InstanceName = d.Get("instance_name").(string) - - attributeUpdate = true - } - - if d.HasChange("description") && !d.IsNewResource() { - log.Printf("[DEBUG] ModifyInstanceAttribute description") - d.SetPartial("description") - args.Description = d.Get("description").(string) - - attributeUpdate = true - } - - if d.HasChange("host_name") && !d.IsNewResource() { - log.Printf("[DEBUG] ModifyInstanceAttribute host_name") - d.SetPartial("host_name") - args.HostName = d.Get("host_name").(string) - - attributeUpdate = true - } - - passwordUpdate := false - if d.HasChange("password") && !d.IsNewResource() { - log.Printf("[DEBUG] ModifyInstanceAttribute password") - d.SetPartial("password") - args.Password = d.Get("password").(string) - - attributeUpdate = true - passwordUpdate = true - } - - if attributeUpdate { - if err := conn.ModifyInstanceAttribute(args); err != nil { - return fmt.Errorf("Modify instance attribute got error: %#v", err) - } - } - - if imageUpdate || passwordUpdate { - instance, errDesc := conn.DescribeInstanceAttribute(d.Id()) - if errDesc != nil { - return fmt.Errorf("Describe instance got an error: %#v", errDesc) - } - if instance.Status != ecs.Running && instance.Status != ecs.Stopped { - return fmt.Errorf("ECS instance's status doesn't support to start or reboot operation after replace image_id or update password. 
The current instance's status is %#v", instance.Status) - } else if instance.Status == ecs.Running { - log.Printf("[DEBUG] Reboot instance after change image or password") - if err := conn.RebootInstance(d.Id(), false); err != nil { - return fmt.Errorf("RebootInstance got error: %#v", err) - } - } else { - log.Printf("[DEBUG] Start instance after change image or password") - if err := conn.StartInstance(d.Id()); err != nil { - return fmt.Errorf("StartInstance got error: %#v", err) - } - } - // Start instance sometimes costs more than 6 minutes when os type is centos. - if err := conn.WaitForInstance(d.Id(), ecs.Running, 400); err != nil { - return fmt.Errorf("WaitForInstance got error: %#v", err) - } - } - - if d.HasChange("security_groups") { - o, n := d.GetChange("security_groups") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - rl := expandStringList(os.Difference(ns).List()) - al := expandStringList(ns.Difference(os).List()) - - if len(al) > 0 { - err := client.JoinSecurityGroups(d.Id(), al) - if err != nil { - return err - } - } - if len(rl) > 0 { - err := client.LeaveSecurityGroups(d.Id(), rl) - if err != nil { - return err - } - } - - d.SetPartial("security_groups") - } - - d.Partial(false) - return resourceAliyunInstanceRead(d, meta) -} - -func resourceAliyunInstanceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - instance, err := client.QueryInstancesById(d.Id()) - if err != nil { - if notFoundError(err) { - return nil - } - } - - if instance.Status != ecs.Stopped { - if err := conn.StopInstance(d.Id(), true); err != nil { - return resource.RetryableError(fmt.Errorf("ECS stop error - trying again.")) - } - - if err := conn.WaitForInstance(d.Id(), ecs.Stopped, defaultTimeout); err != nil { - return resource.RetryableError(fmt.Errorf("Waiting for ecs stopped timeout - trying again.")) - } - } - - if err := 
conn.DeleteInstance(d.Id()); err != nil { - return resource.RetryableError(fmt.Errorf("ECS Instance in use - trying again while it is deleted.")) - } - - return nil - }) - -} - -func allocateIpAndBandWidthRelative(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - if d.Get("allocate_public_ip").(bool) { - if d.Get("internet_max_bandwidth_out") == 0 { - return fmt.Errorf("Error: if allocate_public_ip is true than the internet_max_bandwidth_out cannot equal zero.") - } - _, err := conn.AllocatePublicIpAddress(d.Id()) - if err != nil { - return fmt.Errorf("[DEBUG] AllocatePublicIpAddress for instance got error: %#v", err) - } - } - return nil -} - -func buildAliyunRunInstancesArgs(d *schema.ResourceData, meta interface{}) (*ecs.RunInstanceArgs, error) { - args := &ecs.RunInstanceArgs{ - MaxAmount: DEFAULT_INSTANCE_COUNT, - MinAmount: DEFAULT_INSTANCE_COUNT, - } - - bussStr, err := json.Marshal(DefaultBusinessInfo) - if err != nil { - log.Printf("Failed to translate bussiness info %#v from json to string", DefaultBusinessInfo) - } - - args.BusinessInfo = string(bussStr) - - subnetValue := d.Get("subnet_id").(string) - vswitchValue := d.Get("vswitch_id").(string) - //networkValue := d.Get("instance_network_type").(string) - - // because runInstance is not compatible with createInstance, force NetworkType value to classic - if subnetValue == "" && vswitchValue == "" { - args.NetworkType = string(ClassicNet) - } - - return args, nil -} - -func buildAliyunInstanceArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateInstanceArgs, error) { - client := meta.(*AliyunClient) - - args := &ecs.CreateInstanceArgs{ - RegionId: getRegion(d, meta), - InstanceType: d.Get("instance_type").(string), - } - - imageID := d.Get("image_id").(string) - - args.ImageId = imageID - - systemDiskCategory := ecs.DiskCategory(d.Get("system_disk_category").(string)) - systemDiskSize := d.Get("system_disk_size").(int) - - zoneID := 
d.Get("availability_zone").(string) - // check instanceType and systemDiskCategory, when zoneID is not empty - if zoneID != "" { - zone, err := client.DescribeZone(zoneID) - if err != nil { - return nil, err - } - - if err := client.ResourceAvailable(zone, ecs.ResourceTypeInstance); err != nil { - return nil, err - } - - if err := client.DiskAvailable(zone, systemDiskCategory); err != nil { - return nil, err - } - - args.ZoneId = zoneID - - } - - args.SystemDisk = ecs.SystemDiskType{ - Category: systemDiskCategory, - Size: systemDiskSize, - } - - sgs, ok := d.GetOk("security_groups") - - if ok { - sgList := expandStringList(sgs.(*schema.Set).List()) - sg0 := sgList[0] - // check security group instance exist - _, err := client.DescribeSecurity(sg0) - if err == nil { - args.SecurityGroupId = sg0 - } - } - - if v := d.Get("instance_name").(string); v != "" { - args.InstanceName = v - } - - if v := d.Get("description").(string); v != "" { - args.Description = v - } - - if v := d.Get("internet_charge_type").(string); v != "" { - args.InternetChargeType = common.InternetChargeType(v) - } - - if v := d.Get("internet_max_bandwidth_out").(int); v != 0 { - args.InternetMaxBandwidthOut = v - } - - if v := d.Get("host_name").(string); v != "" { - args.HostName = v - } - - if v := d.Get("password").(string); v != "" { - args.Password = v - } - - if v := d.Get("io_optimized").(string); v != "" { - args.IoOptimized = ecs.IoOptimized(v) - } - - vswitchValue := d.Get("subnet_id").(string) - if vswitchValue == "" { - vswitchValue = d.Get("vswitch_id").(string) - } - if vswitchValue != "" { - args.VSwitchId = vswitchValue - if d.Get("allocate_public_ip").(bool) && args.InternetMaxBandwidthOut <= 0 { - return nil, fmt.Errorf("Invalid internet_max_bandwidth_out result in allocation public ip failed in the VPC.") - } - } - - if v := d.Get("instance_charge_type").(string); v != "" { - args.InstanceChargeType = common.InstanceChargeType(v) - } - - log.Printf("[DEBUG] period is %d", 
d.Get("period").(int)) - if v := d.Get("period").(int); v != 0 { - args.Period = v - } else if args.InstanceChargeType == common.PrePaid { - return nil, fmt.Errorf("period is required for instance_charge_type is PrePaid") - } - - if v := d.Get("user_data").(string); v != "" { - args.UserData = v - } - - return args, nil -} - -func userDataHashSum(user_data string) string { - // Check whether the user_data is not Base64 encoded. - // Always calculate hash of base64 decoded value since we - // check against double-encoding when setting it - v, base64DecodeError := base64.StdEncoding.DecodeString(user_data) - if base64DecodeError != nil { - v = []byte(user_data) - } - return string(v) -} diff --git a/builtin/providers/alicloud/resource_alicloud_instance_test.go b/builtin/providers/alicloud/resource_alicloud_instance_test.go deleted file mode 100644 index 18bb33b9c..000000000 --- a/builtin/providers/alicloud/resource_alicloud_instance_test.go +++ /dev/null @@ -1,1333 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "log" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAlicloudInstance_basic(t *testing.T) { - var instance ecs.InstanceAttributesType - - testCheck := func(*terraform.State) error { - log.Printf("[WARN] instances: %#v", instance) - if instance.ZoneId == "" { - return fmt.Errorf("bad availability zone") - } - if len(instance.SecurityGroupIds.SecurityGroupId) == 0 { - return fmt.Errorf("no security group: %#v", instance.SecurityGroupIds.SecurityGroupId) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: testAccInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck, - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "internet_charge_type", - "PayByBandwidth"), - testAccCheckSystemDiskSize("alicloud_instance.foo", 80), - ), - }, - - // test for multi steps - resource.TestStep{ - Config: testAccInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck, - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - ), - }, - }, - }) - -} - -func TestAccAlicloudInstance_vpc(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfigVPC, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "system_disk_category", - "cloud_efficiency"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "internet_charge_type", - "PayByTraffic"), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_userData(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - 
CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfigUserData, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "system_disk_category", - "cloud_efficiency"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "internet_charge_type", - "PayByTraffic"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "user_data", - "echo 'net.ipv4.ip_forward=1'>> /etc/sysctl.conf"), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_multipleRegions(t *testing.T) { - var instance ecs.InstanceAttributesType - - // multi provideris - var providers []*schema.Provider - providerFactories := map[string]terraform.ResourceProviderFactory{ - "alicloud": func() (terraform.ResourceProvider, error) { - p := Provider() - providers = append(providers, p.(*schema.Provider)) - return p, nil - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - ProviderFactories: providerFactories, - CheckDestroy: testAccCheckInstanceDestroyWithProviders(&providers), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfigMultipleRegions, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExistsWithProviders( - "alicloud_instance.foo", &instance, &providers), - testAccCheckInstanceExistsWithProviders( - "alicloud_instance.bar", &instance, &providers), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_multiSecurityGroup(t *testing.T) { - var instance ecs.InstanceAttributesType - - testCheck := func(sgCount int) resource.TestCheckFunc { - return func(*terraform.State) error { - if len(instance.SecurityGroupIds.SecurityGroupId) < 0 { - return fmt.Errorf("no security group: %#v", instance.SecurityGroupIds.SecurityGroupId) - } - - if len(instance.SecurityGroupIds.SecurityGroupId) < sgCount { - return 
fmt.Errorf("less security group: %#v", instance.SecurityGroupIds.SecurityGroupId) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfig_multiSecurityGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck(2), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - ), - }, - resource.TestStep{ - Config: testAccInstanceConfig_multiSecurityGroup_add, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck(3), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - ), - }, - resource.TestStep{ - Config: testAccInstanceConfig_multiSecurityGroup_remove, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck(1), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - ), - }, - }, - }) - -} - -func TestAccAlicloudInstance_multiSecurityGroupByCount(t *testing.T) { - var instance ecs.InstanceAttributesType - - testCheck := func(sgCount int) resource.TestCheckFunc { - return func(*terraform.State) error { - if len(instance.SecurityGroupIds.SecurityGroupId) < 0 { - return fmt.Errorf("no security group: %#v", 
instance.SecurityGroupIds.SecurityGroupId) - } - - if len(instance.SecurityGroupIds.SecurityGroupId) < sgCount { - return fmt.Errorf("less security group: %#v", instance.SecurityGroupIds.SecurityGroupId) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_instance.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfig_multiSecurityGroupByCount, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - testCheck(2), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "image_id", - "ubuntu_140405_32_40G_cloudinit_20161115.vhd"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "test_foo"), - ), - }, - }, - }) - -} - -func TestAccAlicloudInstance_NetworkInstanceSecurityGroups(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceNetworkInstanceSecurityGroups, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "alicloud_instance.foo", &instance), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_tags(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckInstanceConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - 
"alicloud_instance.foo", - "tags.foo", - "bar"), - ), - }, - - resource.TestStep{ - Config: testAccCheckInstanceConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "tags.bar", - "zzz"), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_update(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckInstanceConfigOrigin, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "instance_foo"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "host_name", - "host-foo"), - ), - }, - - resource.TestStep{ - Config: testAccCheckInstanceConfigOriginUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "instance_name", - "instance_bar"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "host_name", - "host-bar"), - ), - }, - }, - }) -} - -func TestAccAlicloudInstanceImage_update(t *testing.T) { - var instance ecs.InstanceAttributesType - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckInstanceImageOrigin, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.update_image", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.update_image", - "system_disk_size", - "50"), - ), - }, - 
resource.TestStep{ - Config: testAccCheckInstanceImageUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.update_image", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.update_image", - "system_disk_size", - "60"), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_privateIP(t *testing.T) { - var instance ecs.InstanceAttributesType - - testCheckPrivateIP := func() resource.TestCheckFunc { - return func(*terraform.State) error { - privateIP := instance.VpcAttributes.PrivateIpAddress.IpAddress[0] - if privateIP == "" { - return fmt.Errorf("can't get private IP") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceConfigPrivateIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - testCheckPrivateIP(), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_associatePublicIP(t *testing.T) { - var instance ecs.InstanceAttributesType - - testCheckPrivateIP := func() resource.TestCheckFunc { - return func(*terraform.State) error { - privateIP := instance.VpcAttributes.PrivateIpAddress.IpAddress[0] - if privateIP == "" { - return fmt.Errorf("can't get private IP") - } - - return nil - } - } - - testCheckPublicIP := func() resource.TestCheckFunc { - return func(*terraform.State) error { - publicIP := instance.PublicIpAddress.IpAddress[0] - if publicIP == "" { - return fmt.Errorf("can't get public IP") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccInstanceConfigAssociatePublicIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - testCheckPrivateIP(), - testCheckPublicIP(), - ), - }, - }, - }) -} - -func TestAccAlicloudInstance_vpcRule(t *testing.T) { - var instance ecs.InstanceAttributesType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - IDRefreshName: "alicloud_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcInstanceWithSecurityRule, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("alicloud_instance.foo", &instance), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "internet_charge_type", - "PayByBandwidth"), - resource.TestCheckResourceAttr( - "alicloud_instance.foo", - "internet_max_bandwidth_out", - "5"), - ), - }, - }, - }) -} - -func testAccCheckInstanceExists(n string, i *ecs.InstanceAttributesType) resource.TestCheckFunc { - providers := []*schema.Provider{testAccProvider} - return testAccCheckInstanceExistsWithProviders(n, i, &providers) -} - -func testAccCheckInstanceExistsWithProviders(n string, i *ecs.InstanceAttributesType, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - for _, provider := range *providers { - // Ignore if Meta is empty, this can happen for validation providers - if provider.Meta() == nil { - continue - } - - client := provider.Meta().(*AliyunClient) - instance, err := client.QueryInstancesById(rs.Primary.ID) - log.Printf("[WARN]get ecs instance %#v", instance) - if err == nil && instance != nil { - *i = *instance - return nil - } - - // Verify the error is what we want - e, _ := err.(*common.Error) - if 
e.ErrorResponse.Message == InstanceNotfound { - continue - } - if err != nil { - return err - - } - } - - return fmt.Errorf("Instance not found") - } -} - -func testAccCheckInstanceDestroy(s *terraform.State) error { - return testAccCheckInstanceDestroyWithProvider(s, testAccProvider) -} - -func testAccCheckInstanceDestroyWithProviders(providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckInstanceDestroyWithProvider(s, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckInstanceDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - client := provider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_instance" { - continue - } - - // Try to find the resource - instance, err := client.QueryInstancesById(rs.Primary.ID) - if err == nil { - if instance.Status != "" && instance.Status != "Stopped" { - return fmt.Errorf("Found unstopped instance: %s", instance.InstanceId) - } - } - - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Message == InstanceNotfound { - continue - } - - return err - } - - return nil -} - -func testAccCheckSystemDiskSize(n string, size int) resource.TestCheckFunc { - return func(s *terraform.State) error { - providers := []*schema.Provider{testAccProvider} - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - for _, provider := range providers { - if provider.Meta() == nil { - continue - } - client := provider.Meta().(*AliyunClient) - systemDisk, err := client.QueryInstanceSystemDisk(rs.Primary.ID) - if err != nil { - log.Printf("[ERROR]get system disk size error: %#v", err) - return err - } - - if systemDisk.Size != size { - return fmt.Errorf("system disk size not equal %d, the instance system size is %d", - 
size, systemDisk.Size) - } - } - - return nil - } -} - -const testAccInstanceConfig = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group" "tf_test_bar" { - name = "tf_test_bar" - description = "bar" -} - -resource "alicloud_instance" "foo" { - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - system_disk_category = "cloud_ssd" - system_disk_size = 80 - - instance_type = "ecs.n1.small" - internet_charge_type = "PayByBandwidth" - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - io_optimized = "optimized" - - tags { - foo = "bar" - work = "test" - } -} -` -const testAccInstanceConfigVPC = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - vswitch_id = "${alicloud_vswitch.foo.id}" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - - internet_charge_type = "PayByTraffic" - internet_max_bandwidth_out = 5 - allocate_public_ip = true - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" -} - -` - -const testAccInstanceConfigUserData = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - 
cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - vswitch_id = "${alicloud_vswitch.foo.id}" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - internet_charge_type = "PayByTraffic" - internet_max_bandwidth_out = 5 - allocate_public_ip = true - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - user_data = "echo 'net.ipv4.ip_forward=1'>> /etc/sysctl.conf" -} -` - -const testAccInstanceConfigMultipleRegions = ` -provider "alicloud" { - alias = "beijing" - region = "cn-beijing" -} - -provider "alicloud" { - alias = "shanghai" - region = "cn-shanghai" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - provider = "alicloud.beijing" - description = "foo" -} - -resource "alicloud_security_group" "tf_test_bar" { - name = "tf_test_bar" - provider = "alicloud.shanghai" - description = "bar" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - provider = "alicloud.beijing" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - internet_charge_type = "PayByBandwidth" - - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" -} - -resource "alicloud_instance" "bar" { - # cn-shanghai - provider = "alicloud.shanghai" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - internet_charge_type = "PayByBandwidth" - - instance_type = "ecs.n1.medium" - io_optimized = 
"optimized" - system_disk_category = "cloud_efficiency" - security_groups = ["${alicloud_security_group.tf_test_bar.id}"] - instance_name = "test_bar" -} -` - -const testAccInstanceConfig_multiSecurityGroup = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group" "tf_test_bar" { - name = "tf_test_bar" - description = "bar" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - instance_type = "ecs.s2.large" - internet_charge_type = "PayByBandwidth" - security_groups = ["${alicloud_security_group.tf_test_foo.id}", "${alicloud_security_group.tf_test_bar.id}"] - instance_name = "test_foo" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" -}` - -const testAccInstanceConfig_multiSecurityGroup_add = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group" "tf_test_bar" { - name = "tf_test_bar" - description = "bar" -} - -resource "alicloud_security_group" "tf_test_add_sg" { - name = "tf_test_add_sg" - description = "sg" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - instance_type = "ecs.s2.large" - internet_charge_type = "PayByBandwidth" - security_groups = ["${alicloud_security_group.tf_test_foo.id}", "${alicloud_security_group.tf_test_bar.id}", - "${alicloud_security_group.tf_test_add_sg.id}"] - instance_name = "test_foo" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" -} -` - -const testAccInstanceConfig_multiSecurityGroup_remove = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group_rule" "http-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "80/80" - priority = 1 - 
security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - instance_type = "ecs.s2.large" - internet_charge_type = "PayByBandwidth" - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" -} -` - -const testAccInstanceConfig_multiSecurityGroupByCount = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - count = 2 - description = "foo" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - instance_type = "ecs.s2.large" - internet_charge_type = "PayByBandwidth" - security_groups = ["${alicloud_security_group.tf_test_foo.*.id}"] - instance_name = "test_foo" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" -} -` - -const testAccInstanceNetworkInstanceSecurityGroups = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - vswitch_id = "${alicloud_vswitch.foo.id}" - image_id = 
"ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - - internet_max_bandwidth_out = 5 - allocate_public_ip = "true" - internet_charge_type = "PayByBandwidth" -} -` -const testAccCheckInstanceConfigTags = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - internet_charge_type = "PayByBandwidth" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - - tags { - foo = "bar" - } -} -` - -const testAccCheckInstanceConfigTagsUpdate = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - internet_charge_type = "PayByBandwidth" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - instance_name = "test_foo" - - tags { - bar = "zzz" - } -} -` -const testAccCheckInstanceConfigOrigin = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group_rule" "http-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "80/80" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = 
"tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - internet_charge_type = "PayByBandwidth" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - instance_name = "instance_foo" - host_name = "host-foo" -} -` - -const testAccCheckInstanceConfigOriginUpdate = ` -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group_rule" "http-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "80/80" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - internet_charge_type = "PayByBandwidth" - system_disk_category = "cloud_efficiency" - - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - instance_name = "instance_bar" - host_name = "host-bar" -} -` - -const testAccInstanceConfigPrivateIP = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" 
"foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/24" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - vswitch_id = "${alicloud_vswitch.foo.id}" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - instance_name = "test_foo" -} -` -const testAccInstanceConfigAssociatePublicIP = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/24" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - vswitch_id = "${alicloud_vswitch.foo.id}" - allocate_public_ip = "true" - internet_max_bandwidth_out = 5 - internet_charge_type = "PayByBandwidth" - - # series II - instance_type = "ecs.n1.medium" - io_optimized = "optimized" - system_disk_category = "cloud_efficiency" - image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd" - instance_name = "test_foo" -} -` -const testAccVpcInstanceWithSecurityRule = ` -data "alicloud_zones" "default" { - "available_disk_category"= "cloud_efficiency" - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = 
"10.1.0.0/21" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_security_group_rule" "ingress" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "intranet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - vswitch_id = "${alicloud_vswitch.foo.id}" - allocate_public_ip = true - - # series II - instance_charge_type = "PostPaid" - instance_type = "ecs.n1.small" - internet_charge_type = "PayByBandwidth" - internet_max_bandwidth_out = 5 - - system_disk_category = "cloud_efficiency" - image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" - instance_name = "test_foo" - io_optimized = "optimized" -} -` -const testAccCheckInstanceImageOrigin = ` -data "alicloud_images" "centos" { - most_recent = true - owners = "system" - name_regex = "^centos_6\\w{1,5}[64]{1}.*" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_image" - cidr_block = "10.1.0.0/21" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "cn-beijing-a" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "update_image" { - image_id = "${data.alicloud_images.centos.images.0.id}" - availability_zone = "cn-beijing-a" - system_disk_category = "cloud_efficiency" - system_disk_size = 50 - - instance_type = "ecs.n1.small" - internet_charge_type = "PayByBandwidth" - instance_name = "update_image" - io_optimized 
= "optimized" - password = "Test12345" -} -` -const testAccCheckInstanceImageUpdate = ` -data "alicloud_images" "ubuntu" { - most_recent = true - owners = "system" - name_regex = "^ubuntu_14\\w{1,5}[64]{1}.*" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_image" - cidr_block = "10.1.0.0/21" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "cn-beijing-a" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_instance" "update_image" { - image_id = "${data.alicloud_images.ubuntu.images.0.id}" - availability_zone = "cn-beijing-a" - system_disk_category = "cloud_efficiency" - system_disk_size = 60 - - instance_type = "ecs.n1.small" - internet_charge_type = "PayByBandwidth" - instance_name = "update_image" - io_optimized = "optimized" - password = "Test12345" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_nat_gateway.go b/builtin/providers/alicloud/resource_alicloud_nat_gateway.go deleted file mode 100644 index b078ee0a2..000000000 --- a/builtin/providers/alicloud/resource_alicloud_nat_gateway.go +++ /dev/null @@ -1,374 +0,0 @@ -package alicloud - -import ( - "fmt" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "log" - "strconv" - "strings" - "time" -) - -func resourceAliyunNatGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunNatGatewayCreate, - Read: resourceAliyunNatGatewayRead, - Update: resourceAliyunNatGatewayUpdate, - Delete: resourceAliyunNatGatewayDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "spec": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ 
- Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "bandwidth_package_ids": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "snat_table_ids": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "forward_table_ids": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "bandwidth_packages": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "bandwidth": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "public_ip_addresses": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Required: true, - MaxItems: 4, - }, - }, - } -} - -func resourceAliyunNatGatewayCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).vpcconn - - args := &ecs.CreateNatGatewayArgs{ - RegionId: getRegion(d, meta), - VpcId: d.Get("vpc_id").(string), - Spec: d.Get("spec").(string), - } - - bandwidthPackages := d.Get("bandwidth_packages").([]interface{}) - - bandwidthPackageTypes := []ecs.BandwidthPackageType{} - - for _, e := range bandwidthPackages { - pack := e.(map[string]interface{}) - bandwidthPackage := ecs.BandwidthPackageType{ - IpCount: pack["ip_count"].(int), - Bandwidth: pack["bandwidth"].(int), - } - if pack["zone"].(string) != "" { - bandwidthPackage.Zone = pack["zone"].(string) - } - - bandwidthPackageTypes = append(bandwidthPackageTypes, bandwidthPackage) - } - - args.BandwidthPackage = bandwidthPackageTypes - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } - - args.Name = name - - if v, ok := d.GetOk("description"); ok { - args.Description = v.(string) - } - resp, err := conn.CreateNatGateway(args) - 
if err != nil { - return fmt.Errorf("CreateNatGateway got error: %#v", err) - } - - d.SetId(resp.NatGatewayId) - - return resourceAliyunNatGatewayRead(d, meta) -} - -func resourceAliyunNatGatewayRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - - natGateway, err := client.DescribeNatGateway(d.Id()) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return err - } - - d.Set("name", natGateway.Name) - d.Set("spec", natGateway.Spec) - d.Set("bandwidth_package_ids", strings.Join(natGateway.BandwidthPackageIds.BandwidthPackageId, ",")) - d.Set("snat_table_ids", strings.Join(natGateway.SnatTableIds.SnatTableId, ",")) - d.Set("forward_table_ids", strings.Join(natGateway.ForwardTableIds.ForwardTableId, ",")) - d.Set("description", natGateway.Description) - d.Set("vpc_id", natGateway.VpcId) - bindWidthPackages, err := flattenBandWidthPackages(natGateway.BandwidthPackageIds.BandwidthPackageId, meta, d) - if err != nil { - log.Printf("[ERROR] bindWidthPackages flattenBandWidthPackages failed. 
natgateway id is %#v", d.Id()) - } else { - d.Set("bandwidth_packages", bindWidthPackages) - } - - return nil -} - -func resourceAliyunNatGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - conn := client.vpcconn - - natGateway, err := client.DescribeNatGateway(d.Id()) - if err != nil { - return err - } - - d.Partial(true) - attributeUpdate := false - args := &ecs.ModifyNatGatewayAttributeArgs{ - RegionId: natGateway.RegionId, - NatGatewayId: natGateway.NatGatewayId, - } - - if d.HasChange("name") { - d.SetPartial("name") - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else { - return fmt.Errorf("cann't change name to empty string") - } - args.Name = name - - attributeUpdate = true - } - - if d.HasChange("description") { - d.SetPartial("description") - var description string - if v, ok := d.GetOk("description"); ok { - description = v.(string) - } else { - return fmt.Errorf("can to change description to empty string") - } - - args.Description = description - - attributeUpdate = true - } - - if attributeUpdate { - if err := conn.ModifyNatGatewayAttribute(args); err != nil { - return err - } - } - - if d.HasChange("spec") { - d.SetPartial("spec") - var spec ecs.NatGatewaySpec - if v, ok := d.GetOk("spec"); ok { - spec = ecs.NatGatewaySpec(v.(string)) - } else { - // set default to small spec - spec = ecs.NatGatewaySmallSpec - } - - args := &ecs.ModifyNatGatewaySpecArgs{ - RegionId: natGateway.RegionId, - NatGatewayId: natGateway.NatGatewayId, - Spec: spec, - } - - err := conn.ModifyNatGatewaySpec(args) - if err != nil { - return fmt.Errorf("%#v %#v", err, *args) - } - - } - d.Partial(false) - - return resourceAliyunNatGatewayRead(d, meta) -} - -func resourceAliyunNatGatewayDelete(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - conn := client.vpcconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - - packages, err := 
conn.DescribeBandwidthPackages(&ecs.DescribeBandwidthPackagesArgs{ - RegionId: getRegion(d, meta), - NatGatewayId: d.Id(), - }) - if err != nil { - log.Printf("[ERROR] Describe bandwidth package is failed, natGateway Id: %s", d.Id()) - return resource.NonRetryableError(err) - } - - retry := false - for _, pack := range packages { - err = conn.DeleteBandwidthPackage(&ecs.DeleteBandwidthPackageArgs{ - RegionId: getRegion(d, meta), - BandwidthPackageId: pack.BandwidthPackageId, - }) - - if err != nil { - er, _ := err.(*common.Error) - if er.ErrorResponse.Code == NatGatewayInvalidRegionId { - log.Printf("[ERROR] Delete bandwidth package is failed, bandwidthPackageId: %#v", pack.BandwidthPackageId) - return resource.NonRetryableError(err) - } - retry = true - } - } - - if retry { - return resource.RetryableError(fmt.Errorf("Bandwidth package in use - trying again while it is deleted.")) - } - - args := &ecs.DeleteNatGatewayArgs{ - RegionId: getRegion(d, meta), - NatGatewayId: d.Id(), - } - - err = conn.DeleteNatGateway(args) - if err != nil { - er, _ := err.(*common.Error) - if er.ErrorResponse.Code == DependencyViolationBandwidthPackages { - return resource.RetryableError(fmt.Errorf("NatGateway in use - trying again while it is deleted.")) - } - } - - describeArgs := &ecs.DescribeNatGatewaysArgs{ - RegionId: getRegion(d, meta), - NatGatewayId: d.Id(), - } - gw, _, gwErr := conn.DescribeNatGateways(describeArgs) - - if gwErr != nil { - log.Printf("[ERROR] Describe NatGateways failed.") - return resource.NonRetryableError(gwErr) - } else if gw == nil || len(gw) < 1 { - return nil - } - - return resource.RetryableError(fmt.Errorf("NatGateway in use - trying again while it is deleted.")) - }) -} - -func flattenBandWidthPackages(bandWidthPackageIds []string, meta interface{}, d *schema.ResourceData) ([]map[string]interface{}, error) { - - packageLen := len(bandWidthPackageIds) - result := make([]map[string]interface{}, 0, packageLen) - - for i := packageLen - 1; i >= 0; i-- 
{ - packageId := bandWidthPackageIds[i] - packages, err := getPackages(packageId, meta, d) - if err != nil { - log.Printf("[ERROR] NatGateways getPackages failed. packageId is %#v", packageId) - return result, err - } - ipAddress := flattenPackPublicIp(packages.PublicIpAddresses.PublicIpAddresse) - ipCont, ipContErr := strconv.Atoi(packages.IpCount) - bandWidth, bandWidthErr := strconv.Atoi(packages.Bandwidth) - if ipContErr != nil { - log.Printf("[ERROR] NatGateways getPackages failed: ipCont convert error. packageId is %#v", packageId) - return result, ipContErr - } - if bandWidthErr != nil { - log.Printf("[ERROR] NatGateways getPackages failed: bandWidthErr convert error. packageId is %#v", packageId) - return result, bandWidthErr - } - l := map[string]interface{}{ - "ip_count": ipCont, - "bandwidth": bandWidth, - "zone": packages.ZoneId, - "public_ip_addresses": ipAddress, - } - result = append(result, l) - } - return result, nil -} - -func getPackages(packageId string, meta interface{}, d *schema.ResourceData) (*ecs.DescribeBandwidthPackageType, error) { - client := meta.(*AliyunClient) - conn := client.vpcconn - packages, err := conn.DescribeBandwidthPackages(&ecs.DescribeBandwidthPackagesArgs{ - RegionId: getRegion(d, meta), - BandwidthPackageId: packageId, - }) - - if err != nil { - log.Printf("[ERROR] Describe bandwidth package is failed, BandwidthPackageId Id: %s", packageId) - return nil, err - } - - if len(packages) == 0 { - return nil, common.GetClientErrorFromString(InstanceNotfound) - } - - return &packages[0], nil - -} - -func flattenPackPublicIp(publicIpAddressList []ecs.PublicIpAddresseType) string { - var result []string - - for _, publicIpAddresses := range publicIpAddressList { - ipAddress := publicIpAddresses.IpAddress - result = append(result, ipAddress) - } - - return strings.Join(result, ",") -} diff --git a/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go b/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go 
deleted file mode 100644 index 963be3cb1..000000000 --- a/builtin/providers/alicloud/resource_alicloud_nat_gateway_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "testing" -) - -func TestAccAlicloudNatGateway_basic(t *testing.T) { - var nat ecs.NatGatewaySetType - - testCheck := func(*terraform.State) error { - if nat.BusinessStatus != "Normal" { - return fmt.Errorf("abnormal instance status") - } - - if len(nat.BandwidthPackageIds.BandwidthPackageId) == 0 { - return fmt.Errorf("no bandwidth package: %#v", nat.BandwidthPackageIds.BandwidthPackageId) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_nat_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckNatGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNatGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckNatGatewayExists( - "alicloud_nat_gateway.foo", &nat), - testCheck, - resource.TestCheckResourceAttr( - "alicloud_nat_gateway.foo", - "spec", - "Small"), - resource.TestCheckResourceAttr( - "alicloud_nat_gateway.foo", - "name", - "test_foo"), - testAccCheckNatgatewayIpAddress("alicloud_nat_gateway.foo", &nat), - ), - }, - }, - }) - -} - -func TestAccAlicloudNatGateway_spec(t *testing.T) { - var nat ecs.NatGatewaySetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_nat_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckNatGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNatGatewayConfigSpec, - Check: resource.ComposeTestCheckFunc( - testAccCheckNatGatewayExists( - 
"alicloud_nat_gateway.foo", &nat), - resource.TestCheckResourceAttr( - "alicloud_nat_gateway.foo", - "spec", - "Middle"), - ), - }, - - resource.TestStep{ - Config: testAccNatGatewayConfigSpecUpgrade, - Check: resource.ComposeTestCheckFunc( - testAccCheckNatGatewayExists( - "alicloud_nat_gateway.foo", &nat), - resource.TestCheckResourceAttr( - "alicloud_nat_gateway.foo", - "spec", - "Large"), - ), - }, - }, - }) - -} - -func testAccCheckNatgatewayIpAddress(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No NatGateway ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - natGateway, err := client.DescribeNatGateway(rs.Primary.ID) - - if err != nil { - return err - } - if natGateway == nil { - return fmt.Errorf("Natgateway not found") - } - - return nil - } -} - -func testAccCheckNatGatewayExists(n string, nat *ecs.NatGatewaySetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Gateway ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.DescribeNatGateway(rs.Primary.ID) - - if err != nil { - return err - } - if instance == nil { - return fmt.Errorf("Nat gateway not found") - } - - *nat = *instance - return nil - } -} - -func testAccCheckNatGatewayDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_nat_gateway" { - continue - } - - // Try to find the Nat gateway - instance, err := client.DescribeNatGateway(rs.Primary.ID) - - if instance != nil { - return fmt.Errorf("Nat gateway still exist") - } - - if err != nil { - // Verify the 
error is what we want - e, _ := err.(*common.Error) - - if !notFoundError(e) { - return err - } - } - - } - - return nil -} - -const testAccNatGatewayConfig = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.2.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Small" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 1 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.2.id}" - }, { - ip_count = 2 - bandwidth = 6 - zone = "${data.alicloud_zones.default.zones.2.id}" - }, { - ip_count = 3 - bandwidth = 7 - zone = "${data.alicloud_zones.default.zones.2.id}" - }, { - ip_count = 1 - bandwidth = 8 - zone = "${data.alicloud_zones.default.zones.2.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} -` - -const testAccNatGatewayConfigSpec = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Middle" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 1 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.0.id}" - }, { - ip_count = 2 - bandwidth = 10 - zone = "${data.alicloud_zones.default.zones.0.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} -` - -const testAccNatGatewayConfigSpecUpgrade = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = 
"tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Large" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 1 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.0.id}" - }, { - ip_count = 2 - bandwidth = 10 - zone = "${data.alicloud_zones.default.zones.0.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_security_group.go b/builtin/providers/alicloud/resource_alicloud_security_group.go deleted file mode 100644 index b1d60f704..000000000 --- a/builtin/providers/alicloud/resource_alicloud_security_group.go +++ /dev/null @@ -1,174 +0,0 @@ -package alicloud - -import ( - "fmt" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "time" -) - -func resourceAliyunSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSecurityGroupCreate, - Read: resourceAliyunSecurityGroupRead, - Update: resourceAliyunSecurityGroupUpdate, - Delete: resourceAliyunSecurityGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateSecurityGroupName, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateSecurityGroupDescription, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceAliyunSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - args, err := buildAliyunSecurityGroupArgs(d, meta) - if err != nil { - return 
err - } - - securityGroupID, err := conn.CreateSecurityGroup(args) - if err != nil { - return err - } - - d.SetId(securityGroupID) - - return resourceAliyunSecurityGroupRead(d, meta) -} - -func resourceAliyunSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - args := &ecs.DescribeSecurityGroupAttributeArgs{ - SecurityGroupId: d.Id(), - RegionId: getRegion(d, meta), - } - - sg, err := conn.DescribeSecurityGroupAttribute(args) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error DescribeSecurityGroupAttribute: %#v", err) - } - - if sg == nil { - d.SetId("") - return nil - } - - d.Set("name", sg.SecurityGroupName) - d.Set("description", sg.Description) - - return nil -} - -func resourceAliyunSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - d.Partial(true) - attributeUpdate := false - args := &ecs.ModifySecurityGroupAttributeArgs{ - SecurityGroupId: d.Id(), - RegionId: getRegion(d, meta), - } - - if d.HasChange("name") { - d.SetPartial("name") - args.SecurityGroupName = d.Get("name").(string) - - attributeUpdate = true - } - - if d.HasChange("description") { - d.SetPartial("description") - args.Description = d.Get("description").(string) - - attributeUpdate = true - } - if attributeUpdate { - if err := conn.ModifySecurityGroupAttribute(args); err != nil { - return err - } - } - - return nil -} - -func resourceAliyunSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DeleteSecurityGroup(getRegion(d, meta), d.Id()) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == SgDependencyViolation { - return resource.RetryableError(fmt.Errorf("Security group in use - trying again while it is deleted.")) - } - } - - sg, err := 
conn.DescribeSecurityGroupAttribute(&ecs.DescribeSecurityGroupAttributeArgs{ - RegionId: getRegion(d, meta), - SecurityGroupId: d.Id(), - }) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InvalidSecurityGroupIdNotFound { - return nil - } - return resource.NonRetryableError(err) - } else if sg == nil { - return nil - } - - return resource.RetryableError(fmt.Errorf("Security group in use - trying again while it is deleted.")) - }) - -} - -func buildAliyunSecurityGroupArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateSecurityGroupArgs, error) { - - args := &ecs.CreateSecurityGroupArgs{ - RegionId: getRegion(d, meta), - } - - if v := d.Get("name").(string); v != "" { - args.SecurityGroupName = v - } - - if v := d.Get("description").(string); v != "" { - args.Description = v - } - - if v := d.Get("vpc_id").(string); v != "" { - args.VpcId = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_security_group_rule.go b/builtin/providers/alicloud/resource_alicloud_security_group_rule.go deleted file mode 100644 index dd671879c..000000000 --- a/builtin/providers/alicloud/resource_alicloud_security_group_rule.go +++ /dev/null @@ -1,352 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAliyunSecurityGroupRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSecurityGroupRuleCreate, - Read: resourceAliyunSecurityGroupRuleRead, - Delete: resourceAliyunSecurityGroupRuleDelete, - - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateSecurityRuleType, - Description: "Type of rule, ingress (inbound) or egress (outbound).", - }, - - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: 
true, - ValidateFunc: validateSecurityRuleIpProtocol, - }, - - "nic_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validateSecurityRuleNicType, - }, - - "policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateSecurityRulePolicy, - }, - - "port_range": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "priority": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validateSecurityPriority, - }, - - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cidr_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_security_group_id"}, - }, - - "source_security_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"cidr_ip"}, - }, - - "source_group_owner_account": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceAliyunSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.ecsconn - - direction := d.Get("type").(string) - sgId := d.Get("security_group_id").(string) - ptl := d.Get("ip_protocol").(string) - port := d.Get("port_range").(string) - nicType := d.Get("nic_type").(string) - - var autherr error - switch GroupRuleDirection(direction) { - case GroupRuleIngress: - args, err := buildAliyunSecurityIngressArgs(d, meta) - if err != nil { - return err - } - autherr = conn.AuthorizeSecurityGroup(args) - case GroupRuleEgress: - args, err := buildAliyunSecurityEgressArgs(d, meta) - if err != nil { - return err - } - autherr = conn.AuthorizeSecurityGroupEgress(args) - default: - return fmt.Errorf("Security Group Rule must be type 'ingress' or type 'egress'") - } - 
- if autherr != nil { - return fmt.Errorf( - "Error authorizing security group rule type %s: %s", - direction, autherr) - } - - d.SetId(sgId + ":" + direction + ":" + ptl + ":" + port + ":" + nicType) - - return resourceAliyunSecurityGroupRuleRead(d, meta) -} - -func resourceAliyunSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - parts := strings.Split(d.Id(), ":") - sgId := parts[0] - direction := parts[1] - ip_protocol := parts[2] - port_range := parts[3] - nic_type := parts[4] - rule, err := client.DescribeSecurityGroupRule(sgId, direction, nic_type, ip_protocol, port_range) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error SecurityGroup rule: %#v", err) - } - - d.Set("type", rule.Direction) - d.Set("ip_protocol", strings.ToLower(string(rule.IpProtocol))) - d.Set("nic_type", rule.NicType) - d.Set("policy", strings.ToLower(string(rule.Policy))) - d.Set("port_range", rule.PortRange) - d.Set("priority", rule.Priority) - d.Set("security_group_id", sgId) - //support source and desc by type - if GroupRuleDirection(direction) == GroupRuleIngress { - d.Set("cidr_ip", rule.SourceCidrIp) - d.Set("source_security_group_id", rule.SourceGroupId) - d.Set("source_group_owner_account", rule.SourceGroupOwnerAccount) - } else { - d.Set("cidr_ip", rule.DestCidrIp) - d.Set("source_security_group_id", rule.DestGroupId) - d.Set("source_group_owner_account", rule.DestGroupOwnerAccount) - } - - return nil -} - -func deleteSecurityGroupRule(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - ruleType := d.Get("type").(string) - - if GroupRuleDirection(ruleType) == GroupRuleIngress { - args, err := buildAliyunSecurityIngressArgs(d, meta) - if err != nil { - return err - } - revokeArgs := &ecs.RevokeSecurityGroupArgs{ - AuthorizeSecurityGroupArgs: *args, - } - return client.RevokeSecurityGroup(revokeArgs) - } - - args, err := 
buildAliyunSecurityEgressArgs(d, meta) - - if err != nil { - return err - } - revokeArgs := &ecs.RevokeSecurityGroupEgressArgs{ - AuthorizeSecurityGroupEgressArgs: *args, - } - return client.RevokeSecurityGroupEgress(revokeArgs) -} - -func resourceAliyunSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - parts := strings.Split(d.Id(), ":") - sgId, direction, ip_protocol, port_range, nic_type := parts[0], parts[1], parts[2], parts[3], parts[4] - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := deleteSecurityGroupRule(d, meta) - - if err != nil { - resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted.")) - } - - _, err = client.DescribeSecurityGroupRule(sgId, direction, nic_type, ip_protocol, port_range) - if err != nil { - if notFoundError(err) { - return nil - } - return resource.NonRetryableError(err) - } - - return resource.RetryableError(fmt.Errorf("Security group rule in use - trying again while it is deleted.")) - }) - -} - -func checkCidrAndSourceGroupId(cidrIp, sourceGroupId string) error { - if cidrIp == "" && sourceGroupId == "" { - return fmt.Errorf("Either cidr_ip or source_security_group_id is required.") - } - - if cidrIp != "" && sourceGroupId != "" { - return fmt.Errorf("You should set only one value of cidr_ip or source_security_group_id.") - } - return nil -} -func buildAliyunSecurityIngressArgs(d *schema.ResourceData, meta interface{}) (*ecs.AuthorizeSecurityGroupArgs, error) { - conn := meta.(*AliyunClient).ecsconn - - args := &ecs.AuthorizeSecurityGroupArgs{ - RegionId: getRegion(d, meta), - } - - if v := d.Get("ip_protocol").(string); v != "" { - args.IpProtocol = ecs.IpProtocol(v) - } - - if v := d.Get("port_range").(string); v != "" { - args.PortRange = v - } - - if v := d.Get("policy").(string); v != "" { - args.Policy = ecs.PermissionPolicy(v) - } - - if v := d.Get("priority").(int); v != 0 { - args.Priority = v 
- } - - if v := d.Get("cidr_ip").(string); v != "" { - args.SourceCidrIp = v - } - - if v := d.Get("source_security_group_id").(string); v != "" { - args.SourceGroupId = v - } - - if v := d.Get("source_group_owner_account").(string); v != "" { - args.SourceGroupOwnerAccount = v - } - - sgId := d.Get("security_group_id").(string) - - sgArgs := &ecs.DescribeSecurityGroupAttributeArgs{ - SecurityGroupId: sgId, - RegionId: getRegion(d, meta), - } - - group, err := conn.DescribeSecurityGroupAttribute(sgArgs) - if err != nil { - return nil, fmt.Errorf("Error get security group %s error: %#v", sgId, err) - } - - if v := d.Get("nic_type").(string); v != "" { - if (group != nil && group.VpcId != "") || args.SourceGroupId != "" { - if GroupRuleNicType(v) != GroupRuleIntranet { - return nil, fmt.Errorf("When security group in the vpc or authorizing permission for source security group, " + - "the nic_type must be 'intranet'.") - } - } - args.NicType = ecs.NicType(v) - } - - args.SecurityGroupId = sgId - - return args, nil -} - -func buildAliyunSecurityEgressArgs(d *schema.ResourceData, meta interface{}) (*ecs.AuthorizeSecurityGroupEgressArgs, error) { - conn := meta.(*AliyunClient).ecsconn - - args := &ecs.AuthorizeSecurityGroupEgressArgs{ - RegionId: getRegion(d, meta), - } - - if v := d.Get("ip_protocol").(string); v != "" { - args.IpProtocol = ecs.IpProtocol(v) - } - - if v := d.Get("port_range").(string); v != "" { - args.PortRange = v - } - - if v := d.Get("policy").(string); v != "" { - args.Policy = ecs.PermissionPolicy(v) - } - - if v := d.Get("priority").(int); v != 0 { - args.Priority = v - } - - if v := d.Get("cidr_ip").(string); v != "" { - args.DestCidrIp = v - } - - if v := d.Get("source_security_group_id").(string); v != "" { - args.DestGroupId = v - } - - if v := d.Get("source_group_owner_account").(string); v != "" { - args.DestGroupOwnerAccount = v - } - - sgId := d.Get("security_group_id").(string) - - sgArgs := &ecs.DescribeSecurityGroupAttributeArgs{ - 
SecurityGroupId: sgId, - RegionId: getRegion(d, meta), - } - - group, err := conn.DescribeSecurityGroupAttribute(sgArgs) - if err != nil { - return nil, fmt.Errorf("Error get security group %s error: %#v", sgId, err) - } - - if v := d.Get("nic_type").(string); v != "" { - if (group != nil && group.VpcId != "") || args.DestGroupId != "" { - if GroupRuleNicType(v) != GroupRuleIntranet { - return nil, fmt.Errorf("When security group in the vpc or authorizing permission for destination security group, " + - "the nic_type must be 'intranet'.") - } - } - args.NicType = ecs.NicType(v) - } - - args.SecurityGroupId = sgId - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_security_group_rule_test.go b/builtin/providers/alicloud/resource_alicloud_security_group_rule_test.go deleted file mode 100644 index 0792966f2..000000000 --- a/builtin/providers/alicloud/resource_alicloud_security_group_rule_test.go +++ /dev/null @@ -1,428 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "regexp" - "strings" - "testing" -) - -func TestAccAlicloudSecurityGroupRule_Ingress(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.ingress", - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRuleIngress, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.ingress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "priority", - "1"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "nic_type", - "internet"), - 
resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "ip_protocol", - "tcp"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroupRule_Egress(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.egress", - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRuleEgress, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.egress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "port_range", - "80/80"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "ip_protocol", - "udp"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroupRule_EgressDefaultNicType(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.egress", - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRuleEgress_emptyNicType, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.egress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "port_range", - "80/80"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "nic_type", - "internet"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroupRule_Vpc_Ingress(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.ingress", - 
Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRuleVpcIngress, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.ingress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "port_range", - "1/200"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "ip_protocol", - "udp"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroupRule_MissParameterSourceCidrIp(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.egress", - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRule_missingSourceCidrIp, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.egress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "port_range", - "80/80"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "nic_type", - "internet"), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.egress", - "ip_protocol", - "udp"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroupRule_SourceSecurityGroup(t *testing.T) { - var pt ecs.PermissionType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group_rule.ingress", - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupRuleSourceSecurityGroup, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckSecurityGroupRuleExists( - "alicloud_security_group_rule.ingress", &pt), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "port_range", - "3306/3306"), - resource.TestMatchResourceAttr( - "alicloud_security_group_rule.ingress", - "source_security_group_id", - regexp.MustCompile("^sg-[a-zA-Z0-9_]+")), - resource.TestCheckResourceAttr( - "alicloud_security_group_rule.ingress", - "cidr_ip", - ""), - ), - }, - }, - }) - -} - -func testAccCheckSecurityGroupRuleExists(n string, m *ecs.PermissionType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SecurityGroup Rule ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - log.Printf("[WARN]get sg rule %s", rs.Primary.ID) - parts := strings.Split(rs.Primary.ID, ":") - // securityGroupId, direction, nicType, ipProtocol, portRange - rule, err := client.DescribeSecurityGroupRule(parts[0], parts[1], parts[4], parts[2], parts[3]) - - if err != nil { - return err - } - - if rule == nil { - return fmt.Errorf("SecurityGroup not found") - } - - *m = *rule - return nil - } -} - -func testAccCheckSecurityGroupRuleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_security_group_rule" { - continue - } - - parts := strings.Split(rs.Primary.ID, ":") - rule, err := client.DescribeSecurityGroupRule(parts[0], parts[1], parts[4], parts[2], parts[3]) - - if rule != nil { - return fmt.Errorf("Error SecurityGroup Rule still exist") - } - - // Verify the error is what we want - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == InvalidSecurityGroupIdNotFound { - continue - } - return err - } - } - - return nil -} - -const testAccSecurityGroupRuleIngress = 
` -resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - -resource "alicloud_security_group_rule" "ingress" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "1/200" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "10.159.6.18/12" -} - - -` - -const testAccSecurityGroupRuleEgress = ` -resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - - -resource "alicloud_security_group_rule" "egress" { - type = "egress" - ip_protocol = "udp" - nic_type = "internet" - policy = "accept" - port_range = "80/80" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "10.159.6.18/12" -} - -` - -const testAccSecurityGroupRuleEgress_emptyNicType = ` -resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - -resource "alicloud_security_group_rule" "egress" { - type = "egress" - ip_protocol = "udp" - policy = "accept" - port_range = "80/80" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "10.159.6.18/12" -} - -` - -const testAccSecurityGroupRuleVpcIngress = ` -resource "alicloud_security_group" "foo" { - vpc_id = "${alicloud_vpc.vpc.id}" - name = "sg_foo" -} - -resource "alicloud_vpc" "vpc" { - cidr_block = "10.1.0.0/21" -} - -resource "alicloud_security_group_rule" "ingress" { - type = "ingress" - ip_protocol = "udp" - nic_type = "intranet" - policy = "accept" - port_range = "1/200" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "10.159.6.18/12" -} - -` -const testAccSecurityGroupRule_missingSourceCidrIp = ` -resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - -resource "alicloud_security_group_rule" "egress" { - security_group_id = "${alicloud_security_group.foo.id}" - type = "egress" - cidr_ip= "0.0.0.0/0" - policy = "accept" - ip_protocol= "udp" - port_range= "80/80" - priority= 1 -} - -` - -const testAccSecurityGroupRuleMultiIngress = ` 
-resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - -resource "alicloud_security_group_rule" "ingress1" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "1/200" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "10.159.6.18/12" -} - -resource "alicloud_security_group_rule" "ingress2" { - type = "ingress" - ip_protocol = "gre" - nic_type = "internet" - policy = "accept" - port_range = "-1/-1" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "127.0.1.18/16" -} - -` - -const testAccSecurityGroupRuleSourceSecurityGroup = ` -resource "alicloud_security_group" "foo" { - name = "sg_foo" -} - -resource "alicloud_security_group" "bar" { - name = "sg_bar" -} - -resource "alicloud_security_group_rule" "ingress" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "intranet" - policy = "accept" - port_range = "3306/3306" - priority = 50 - security_group_id = "${alicloud_security_group.bar.id}" - source_security_group_id = "${alicloud_security_group.foo.id}" -} - - -` diff --git a/builtin/providers/alicloud/resource_alicloud_security_group_test.go b/builtin/providers/alicloud/resource_alicloud_security_group_test.go deleted file mode 100644 index 19211bc1f..000000000 --- a/builtin/providers/alicloud/resource_alicloud_security_group_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" -) - -func TestAccAlicloudSecurityGroup_basic(t *testing.T) { - var sg ecs.DescribeSecurityGroupAttributeResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupExists( - "alicloud_security_group.foo", &sg), - resource.TestCheckResourceAttr( - "alicloud_security_group.foo", - "name", - "sg_test"), - ), - }, - }, - }) - -} - -func TestAccAlicloudSecurityGroup_withVpc(t *testing.T) { - var sg ecs.DescribeSecurityGroupAttributeResponse - var vpc ecs.VpcSetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_security_group.foo", - - Providers: testAccProviders, - CheckDestroy: testAccCheckSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSecurityGroupConfig_withVpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckSecurityGroupExists( - "alicloud_security_group.foo", &sg), - testAccCheckVpcExists( - "alicloud_vpc.vpc", &vpc), - ), - }, - }, - }) - -} - -func testAccCheckSecurityGroupExists(n string, sg *ecs.DescribeSecurityGroupAttributeResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SecurityGroup ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - args := &ecs.DescribeSecurityGroupAttributeArgs{ - RegionId: client.Region, - SecurityGroupId: rs.Primary.ID, - } - d, err := conn.DescribeSecurityGroupAttribute(args) - - log.Printf("[WARN] security group id %#v", rs.Primary.ID) - - if err != nil { - return err - } - - if d == nil { - return fmt.Errorf("SecurityGroup not found") - } - - *sg = *d - return nil - } -} - -func testAccCheckSecurityGroupDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - conn := client.ecsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_security_group" { - continue - } - 
- // Try to find the SecurityGroup - args := &ecs.DescribeSecurityGroupsArgs{ - RegionId: client.Region, - } - - groups, _, err := conn.DescribeSecurityGroups(args) - - for _, sg := range groups { - if sg.SecurityGroupId == rs.Primary.ID { - return fmt.Errorf("Error SecurityGroup still exist") - } - } - - // Verify the error is what we want - if err != nil { - return err - } - } - - return nil -} - -const testAccSecurityGroupConfig = ` -resource "alicloud_security_group" "foo" { - name = "sg_test" -} -` - -const testAccSecurityGroupConfig_withVpc = ` -resource "alicloud_security_group" "foo" { - vpc_id = "${alicloud_vpc.vpc.id}" -} - -resource "alicloud_vpc" "vpc" { - cidr_block = "10.1.0.0/21" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_slb.go b/builtin/providers/alicloud/resource_alicloud_slb.go deleted file mode 100644 index de8a8906d..000000000 --- a/builtin/providers/alicloud/resource_alicloud_slb.go +++ /dev/null @@ -1,611 +0,0 @@ -package alicloud - -import ( - "bytes" - "fmt" - "strings" - - "errors" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/slb" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "time" -) - -func resourceAliyunSlb() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSlbCreate, - Read: resourceAliyunSlbRead, - Update: resourceAliyunSlbUpdate, - Delete: resourceAliyunSlbDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateSlbName, - Computed: true, - }, - - "internet": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "vswitch_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "internet_charge_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "paybytraffic", - 
ValidateFunc: validateSlbInternetChargeType, - }, - - "bandwidth": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validateSlbBandwidth, - Computed: true, - }, - - "listener": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_port": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateInstancePort, - Required: true, - }, - - "lb_port": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateInstancePort, - Required: true, - }, - - "lb_protocol": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateInstanceProtocol, - Required: true, - }, - - "bandwidth": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateSlbListenerBandwidth, - Required: true, - }, - "scheduler": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateSlbListenerScheduler, - Optional: true, - Default: slb.WRRScheduler, - }, - //http & https - "sticky_session": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{ - string(slb.OnFlag), - string(slb.OffFlag)}), - Optional: true, - Default: slb.OffFlag, - }, - //http & https - "sticky_session_type": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{ - string(slb.InsertStickySessionType), - string(slb.ServerStickySessionType)}), - Optional: true, - }, - //http & https - "cookie_timeout": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateSlbListenerCookieTimeout, - Optional: true, - }, - //http & https - "cookie": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateSlbListenerCookie, - Optional: true, - }, - //tcp & udp - "persistence_timeout": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateSlbListenerPersistenceTimeout, - Optional: true, - Default: 0, - }, - //http & https - "health_check": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: 
validateAllowedStringValue([]string{ - string(slb.OnFlag), - string(slb.OffFlag)}), - Optional: true, - Default: slb.OffFlag, - }, - //tcp - "health_check_type": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedStringValue([]string{ - string(slb.TCPHealthCheckType), - string(slb.HTTPHealthCheckType)}), - Optional: true, - Default: slb.TCPHealthCheckType, - }, - //http & https & tcp - "health_check_domain": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateSlbListenerHealthCheckDomain, - Optional: true, - }, - //http & https & tcp - "health_check_uri": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateSlbListenerHealthCheckUri, - Optional: true, - }, - "health_check_connect_port": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateSlbListenerHealthCheckConnectPort, - Optional: true, - }, - "healthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateIntegerInRange(1, 10), - Optional: true, - }, - "unhealthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateIntegerInRange(1, 10), - Optional: true, - }, - - "health_check_timeout": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateIntegerInRange(1, 50), - Optional: true, - }, - "health_check_interval": &schema.Schema{ - Type: schema.TypeInt, - ValidateFunc: validateIntegerInRange(1, 5), - Optional: true, - }, - //http & https & tcp - "health_check_http_code": &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateAllowedSplitStringValue([]string{ - string(slb.HTTP_2XX), - string(slb.HTTP_3XX), - string(slb.HTTP_4XX), - string(slb.HTTP_5XX)}, ","), - Optional: true, - }, - //https - "ssl_certificate_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - //https - //"ca_certificate_id": &schema.Schema{ - // Type: schema.TypeString, - // Optional: true, - //}, - }, - }, - Set: resourceAliyunSlbListenerHash, - }, - - //deprecated - "instances": &schema.Schema{ 
- Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Set: schema.HashString, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAliyunSlbCreate(d *schema.ResourceData, meta interface{}) error { - - slbconn := meta.(*AliyunClient).slbconn - - var slbName string - if v, ok := d.GetOk("name"); ok { - slbName = v.(string) - } else { - slbName = resource.PrefixedUniqueId("tf-lb-") - d.Set("name", slbName) - } - - slbArgs := &slb.CreateLoadBalancerArgs{ - RegionId: getRegion(d, meta), - LoadBalancerName: slbName, - } - - if internet, ok := d.GetOk("internet"); ok && internet.(bool) { - slbArgs.AddressType = slb.InternetAddressType - d.Set("internet", true) - } else { - slbArgs.AddressType = slb.IntranetAddressType - d.Set("internet", false) - } - - if v, ok := d.GetOk("internet_charge_type"); ok && v.(string) != "" { - slbArgs.InternetChargeType = slb.InternetChargeType(v.(string)) - } - - if v, ok := d.GetOk("bandwidth"); ok && v.(int) != 0 { - slbArgs.Bandwidth = v.(int) - } - - if v, ok := d.GetOk("vswitch_id"); ok && v.(string) != "" { - slbArgs.VSwitchId = v.(string) - } - slb, err := slbconn.CreateLoadBalancer(slbArgs) - if err != nil { - return err - } - - d.SetId(slb.LoadBalancerId) - - return resourceAliyunSlbUpdate(d, meta) -} - -func resourceAliyunSlbRead(d *schema.ResourceData, meta interface{}) error { - slbconn := meta.(*AliyunClient).slbconn - loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(d.Id()) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - - return err - } - - if loadBalancer == nil { - d.SetId("") - return nil - } - - d.Set("name", loadBalancer.LoadBalancerName) - - if loadBalancer.AddressType == slb.InternetAddressType { - d.Set("internal", true) - } else { - d.Set("internal", false) - } - d.Set("internet_charge_type", loadBalancer.InternetChargeType) - d.Set("bandwidth", loadBalancer.Bandwidth) - 
d.Set("vswitch_id", loadBalancer.VSwitchId) - d.Set("address", loadBalancer.Address) - - return nil -} - -func resourceAliyunSlbUpdate(d *schema.ResourceData, meta interface{}) error { - - slbconn := meta.(*AliyunClient).slbconn - - d.Partial(true) - - if d.HasChange("name") { - err := slbconn.SetLoadBalancerName(d.Id(), d.Get("name").(string)) - if err != nil { - return err - } - - d.SetPartial("name") - } - - if d.Get("internet") == true && d.Get("internet_charge_type") == "paybybandwidth" { - //don't intranet web and paybybandwidth, then can modify bandwidth - if d.HasChange("bandwidth") { - args := &slb.ModifyLoadBalancerInternetSpecArgs{ - LoadBalancerId: d.Id(), - Bandwidth: d.Get("bandwidth").(int), - } - err := slbconn.ModifyLoadBalancerInternetSpec(args) - if err != nil { - return err - } - - d.SetPartial("bandwidth") - } - } - - if d.HasChange("listener") { - o, n := d.GetChange("listener") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove, _ := expandListeners(os.Difference(ns).List()) - add, _ := expandListeners(ns.Difference(os).List()) - - if len(remove) > 0 { - for _, listener := range remove { - err := slbconn.DeleteLoadBalancerListener(d.Id(), listener.LoadBalancerPort) - if err != nil { - return fmt.Errorf("Failure removing outdated SLB listeners: %#v", err) - } - } - } - - if len(add) > 0 { - for _, listener := range add { - err := createListener(slbconn, d.Id(), listener) - if err != nil { - return fmt.Errorf("Failure add SLB listeners: %#v", err) - } - } - } - - d.SetPartial("listener") - } - - // If we currently have instances, or did have instances, - // we want to figure out what to add and remove from the load - // balancer - if d.HasChange("instances") { - o, n := d.GetChange("instances") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandBackendServers(os.Difference(ns).List()) - add := expandBackendServers(ns.Difference(os).List()) - - if len(add) > 0 { - _, err := slbconn.AddBackendServers(d.Id(), add) - if err 
!= nil { - return err - } - } - if len(remove) > 0 { - removeBackendServers := make([]string, 0, len(remove)) - for _, e := range remove { - removeBackendServers = append(removeBackendServers, e.ServerId) - } - _, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers) - if err != nil { - return err - } - } - - d.SetPartial("instances") - } - - d.Partial(false) - - return resourceAliyunSlbRead(d, meta) -} - -func resourceAliyunSlbDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).slbconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DeleteLoadBalancer(d.Id()) - - if err != nil { - return resource.NonRetryableError(err) - } - - loadBalancer, err := conn.DescribeLoadBalancerAttribute(d.Id()) - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == LoadBalancerNotFound { - return nil - } - return resource.NonRetryableError(err) - } - if loadBalancer != nil { - return resource.RetryableError(fmt.Errorf("LoadBalancer in use - trying again while it deleted.")) - } - return nil - }) -} - -func resourceAliyunSlbListenerHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", - strings.ToLower(m["lb_protocol"].(string)))) - - buf.WriteString(fmt.Sprintf("%d-", m["bandwidth"].(int))) - - if v, ok := m["ssl_certificate_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func createListener(conn *slb.Client, loadBalancerId string, listener *Listener) error { - - errTypeJudge := func(err error) error { - if err != nil { - if listenerType, ok := err.(*ListenerErr); ok { - if listenerType.ErrType == HealthCheckErrType { - return fmt.Errorf("When the HealthCheck is %s, then related HealthCheck parameter "+ - "must have.", slb.OnFlag) - 
} else if listenerType.ErrType == StickySessionErrType { - return fmt.Errorf("When the StickySession is %s, then StickySessionType parameter "+ - "must have.", slb.OnFlag) - } else if listenerType.ErrType == CookieTimeOutErrType { - return fmt.Errorf("When the StickySession is %s and StickySessionType is %s, "+ - "then CookieTimeout parameter must have.", slb.OnFlag, slb.InsertStickySessionType) - } else if listenerType.ErrType == CookieErrType { - return fmt.Errorf("When the StickySession is %s and StickySessionType is %s, "+ - "then Cookie parameter must have.", slb.OnFlag, slb.ServerStickySessionType) - } - return fmt.Errorf("slb listener check errtype not found.") - } - } - return nil - } - - if listener.Protocol == strings.ToLower("tcp") { - - args := getTcpListenerArgs(loadBalancerId, listener) - - if err := conn.CreateLoadBalancerTCPListener(&args); err != nil { - return err - } - } else if listener.Protocol == strings.ToLower("http") { - args, argsErr := getHttpListenerArgs(loadBalancerId, listener) - if paramErr := errTypeJudge(argsErr); paramErr != nil { - return paramErr - } - - if err := conn.CreateLoadBalancerHTTPListener(&args); err != nil { - return err - } - } else if listener.Protocol == strings.ToLower("https") { - listenerType, err := getHttpListenerType(loadBalancerId, listener) - if paramErr := errTypeJudge(err); paramErr != nil { - return paramErr - } - - args := &slb.CreateLoadBalancerHTTPSListenerArgs{ - HTTPListenerType: listenerType, - } - if listener.SSLCertificateId == "" { - return fmt.Errorf("Server Certificated Id cann't be null") - } - - args.ServerCertificateId = listener.SSLCertificateId - - if err := conn.CreateLoadBalancerHTTPSListener(args); err != nil { - return err - } - } else if listener.Protocol == strings.ToLower("udp") { - args := getUdpListenerArgs(loadBalancerId, listener) - - if err := conn.CreateLoadBalancerUDPListener(&args); err != nil { - return err - } - } - - if err := 
conn.StartLoadBalancerListener(loadBalancerId, listener.LoadBalancerPort); err != nil { - return err - } - - return nil -} - -func getTcpListenerArgs(loadBalancerId string, listener *Listener) slb.CreateLoadBalancerTCPListenerArgs { - args := slb.CreateLoadBalancerTCPListenerArgs{ - LoadBalancerId: loadBalancerId, - ListenerPort: listener.LoadBalancerPort, - BackendServerPort: listener.InstancePort, - Bandwidth: listener.Bandwidth, - Scheduler: listener.Scheduler, - PersistenceTimeout: listener.PersistenceTimeout, - HealthCheckType: listener.HealthCheckType, - HealthCheckDomain: listener.HealthCheckDomain, - HealthCheckURI: listener.HealthCheckURI, - HealthCheckConnectPort: listener.HealthCheckConnectPort, - HealthyThreshold: listener.HealthyThreshold, - UnhealthyThreshold: listener.UnhealthyThreshold, - HealthCheckConnectTimeout: listener.HealthCheckTimeout, - HealthCheckInterval: listener.HealthCheckInterval, - HealthCheckHttpCode: listener.HealthCheckHttpCode, - } - return args -} - -func getUdpListenerArgs(loadBalancerId string, listener *Listener) slb.CreateLoadBalancerUDPListenerArgs { - args := slb.CreateLoadBalancerUDPListenerArgs{ - LoadBalancerId: loadBalancerId, - ListenerPort: listener.LoadBalancerPort, - BackendServerPort: listener.InstancePort, - Bandwidth: listener.Bandwidth, - PersistenceTimeout: listener.PersistenceTimeout, - HealthCheckConnectTimeout: listener.HealthCheckTimeout, - HealthCheckInterval: listener.HealthCheckInterval, - } - return args -} - -func getHttpListenerType(loadBalancerId string, listener *Listener) (listenType slb.HTTPListenerType, err error) { - - if listener.HealthCheck == slb.OnFlag { - if listener.HealthCheckURI == "" || listener.HealthCheckDomain == "" || listener.HealthCheckConnectPort == 0 || - listener.HealthyThreshold == 0 || listener.UnhealthyThreshold == 0 || listener.HealthCheckTimeout == 0 || - listener.HealthCheckHttpCode == "" || listener.HealthCheckInterval == 0 { - - errMsg := errors.New("err: HealthCheck 
empty.") - return listenType, &ListenerErr{HealthCheckErrType, errMsg} - } - } - - if listener.StickySession == slb.OnFlag { - if listener.StickySessionType == "" { - errMsg := errors.New("err: stickySession empty.") - return listenType, &ListenerErr{StickySessionErrType, errMsg} - } - - if listener.StickySessionType == slb.InsertStickySessionType { - if listener.CookieTimeout == 0 { - errMsg := errors.New("err: cookieTimeout empty.") - return listenType, &ListenerErr{CookieTimeOutErrType, errMsg} - } - } else if listener.StickySessionType == slb.ServerStickySessionType { - if listener.Cookie == "" { - errMsg := errors.New("err: cookie empty.") - return listenType, &ListenerErr{CookieErrType, errMsg} - } - } - } - - httpListenertType := slb.HTTPListenerType{ - LoadBalancerId: loadBalancerId, - ListenerPort: listener.LoadBalancerPort, - BackendServerPort: listener.InstancePort, - Bandwidth: listener.Bandwidth, - Scheduler: listener.Scheduler, - HealthCheck: listener.HealthCheck, - StickySession: listener.StickySession, - StickySessionType: listener.StickySessionType, - CookieTimeout: listener.CookieTimeout, - Cookie: listener.Cookie, - HealthCheckDomain: listener.HealthCheckDomain, - HealthCheckURI: listener.HealthCheckURI, - HealthCheckConnectPort: listener.HealthCheckConnectPort, - HealthyThreshold: listener.HealthyThreshold, - UnhealthyThreshold: listener.UnhealthyThreshold, - HealthCheckTimeout: listener.HealthCheckTimeout, - HealthCheckInterval: listener.HealthCheckInterval, - HealthCheckHttpCode: listener.HealthCheckHttpCode, - } - - return httpListenertType, err -} - -func getHttpListenerArgs(loadBalancerId string, listener *Listener) (listenType slb.CreateLoadBalancerHTTPListenerArgs, err error) { - httpListenerType, err := getHttpListenerType(loadBalancerId, listener) - if err != nil { - return listenType, err - } - - httpArgs := slb.CreateLoadBalancerHTTPListenerArgs(httpListenerType) - return httpArgs, err -} diff --git 
a/builtin/providers/alicloud/resource_alicloud_slb_attachment.go b/builtin/providers/alicloud/resource_alicloud_slb_attachment.go deleted file mode 100644 index 74e13c26c..000000000 --- a/builtin/providers/alicloud/resource_alicloud_slb_attachment.go +++ /dev/null @@ -1,148 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceAliyunSlbAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSlbAttachmentCreate, - Read: resourceAliyunSlbAttachmentRead, - Update: resourceAliyunSlbAttachmentUpdate, - Delete: resourceAliyunSlbAttachmentDelete, - - Schema: map[string]*schema.Schema{ - - "slb_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "instances": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - }, - - "backend_servers": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAliyunSlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - - slbId := d.Get("slb_id").(string) - - slbconn := meta.(*AliyunClient).slbconn - - loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(slbId) - if err != nil { - if notFoundError(err) { - d.SetId("") - return fmt.Errorf("Special SLB Id not found: %#v", err) - } - - return err - } - - d.SetId(loadBalancer.LoadBalancerId) - - return resourceAliyunSlbAttachmentUpdate(d, meta) -} - -func resourceAliyunSlbAttachmentRead(d *schema.ResourceData, meta interface{}) error { - - slbconn := meta.(*AliyunClient).slbconn - loadBalancer, err := slbconn.DescribeLoadBalancerAttribute(d.Id()) - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Read special SLB Id not found: %#v", err) - } - - if loadBalancer == nil { - d.SetId("") - return nil - } - - backendServerType := loadBalancer.BackendServers - servers := 
backendServerType.BackendServer - instanceIds := make([]string, 0, len(servers)) - if len(servers) > 0 { - for _, e := range servers { - instanceIds = append(instanceIds, e.ServerId) - } - if err != nil { - return err - } - } - - d.Set("slb_id", d.Id()) - d.Set("instances", instanceIds) - d.Set("backend_servers", strings.Join(instanceIds, ",")) - - return nil -} - -func resourceAliyunSlbAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { - - slbconn := meta.(*AliyunClient).slbconn - if d.HasChange("instances") { - o, n := d.GetChange("instances") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandBackendServers(os.Difference(ns).List()) - add := expandBackendServers(ns.Difference(os).List()) - - if len(add) > 0 { - _, err := slbconn.AddBackendServers(d.Id(), add) - if err != nil { - return err - } - } - if len(remove) > 0 { - removeBackendServers := make([]string, 0, len(remove)) - for _, e := range remove { - removeBackendServers = append(removeBackendServers, e.ServerId) - } - _, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers) - if err != nil { - return err - } - } - - } - - return resourceAliyunSlbAttachmentRead(d, meta) - -} - -func resourceAliyunSlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - - slbconn := meta.(*AliyunClient).slbconn - o := d.Get("instances") - os := o.(*schema.Set) - remove := expandBackendServers(os.List()) - - if len(remove) > 0 { - removeBackendServers := make([]string, 0, len(remove)) - for _, e := range remove { - removeBackendServers = append(removeBackendServers, e.ServerId) - } - _, err := slbconn.RemoveBackendServers(d.Id(), removeBackendServers) - if err != nil { - return err - } - } - - return nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_slb_attachment_test.go b/builtin/providers/alicloud/resource_alicloud_slb_attachment_test.go deleted file mode 100644 index 5caa4a710..000000000 --- 
a/builtin/providers/alicloud/resource_alicloud_slb_attachment_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/slb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "testing" -) - -func TestAccAlicloudSlbAttachment_basic(t *testing.T) { - var slb slb.LoadBalancerType - - testCheckAttr := func() resource.TestCheckFunc { - return func(*terraform.State) error { - log.Printf("testCheckAttr slb BackendServers is: %#v", slb.BackendServers) - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_slb_attachment.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckSlbDestroy, - Steps: []resource.TestStep{ - //test internet_charge_type is paybybandwidth - resource.TestStep{ - Config: testAccSlbAttachment, - Check: resource.ComposeTestCheckFunc( - testAccCheckSlbExists("alicloud_slb_attachment.foo", &slb), - testCheckAttr(), - testAccCheckAttachment("alicloud_instance.foo", &slb), - ), - }, - }, - }) -} - -func testAccCheckAttachment(n string, slb *slb.LoadBalancerType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ECS ID is set") - } - - ecsInstanceId := rs.Primary.ID - - backendServers := slb.BackendServers.BackendServer - - if len(backendServers) == 0 { - return fmt.Errorf("no SLB backendServer: %#v", backendServers) - } - - log.Printf("slb bacnendservers: %#v", backendServers) - - backendServersInstanceId := backendServers[0].ServerId - - if ecsInstanceId != backendServersInstanceId { - return fmt.Errorf("SLB attachment check invalid: ECS instance %s is not equal SLB backendServer %s", - ecsInstanceId, backendServersInstanceId) - } - return nil - } -} - -const testAccSlbAttachment 
= ` -resource "alicloud_security_group" "foo" { - name = "tf_test_foo" - description = "foo" -} - -resource "alicloud_security_group_rule" "http-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "80/80" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_security_group_rule" "ssh-in" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "internet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" - - # series II - instance_type = "ecs.n1.medium" - internet_charge_type = "PayByBandwidth" - internet_max_bandwidth_out = "5" - system_disk_category = "cloud_efficiency" - io_optimized = "optimized" - - security_groups = ["${alicloud_security_group.foo.id}"] - instance_name = "test_foo" -} - -resource "alicloud_slb" "foo" { - name = "tf_test_slb_bind" - internet_charge_type = "paybybandwidth" - bandwidth = "5" - internet = "true" -} - -resource "alicloud_slb_attachment" "foo" { - slb_id = "${alicloud_slb.foo.id}" - instances = ["${alicloud_instance.foo.id}"] -} - -` diff --git a/builtin/providers/alicloud/resource_alicloud_slb_test.go b/builtin/providers/alicloud/resource_alicloud_slb_test.go deleted file mode 100644 index 42308f187..000000000 --- a/builtin/providers/alicloud/resource_alicloud_slb_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/slb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "log" - "testing" -) - -func TestAccAlicloudSlb_basic(t *testing.T) { - var slb slb.LoadBalancerType - - testCheckAttr := func() resource.TestCheckFunc { - return func(*terraform.State) error { - 
log.Printf("testCheckAttr slb AddressType is: %s", slb.AddressType) - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_slb.bindwidth", - - Providers: testAccProviders, - CheckDestroy: testAccCheckSlbDestroy, - Steps: []resource.TestStep{ - //test internet_charge_type is paybybandwidth - resource.TestStep{ - Config: testAccSlbBindWidth, - Check: resource.ComposeTestCheckFunc( - testAccCheckSlbExists("alicloud_slb.bindwidth", &slb), - testCheckAttr(), - resource.TestCheckResourceAttr( - "alicloud_slb.bindwidth", "internet_charge_type", "paybybandwidth"), - ), - }, - }, - }) -} - -func TestAccAlicloudSlb_traffic(t *testing.T) { - var slb slb.LoadBalancerType - - testCheckAttr := func() resource.TestCheckFunc { - return func(*terraform.State) error { - log.Printf("testCheckAttr slb AddressType is: %s", slb.AddressType) - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_slb.traffic", - Providers: testAccProviders, - CheckDestroy: testAccCheckSlbDestroy, - Steps: []resource.TestStep{ - //test internet_charge_type is paybytraffic - resource.TestStep{ - Config: testAccSlbTraffic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSlbExists("alicloud_slb.traffic", &slb), - testCheckAttr(), - resource.TestCheckResourceAttr( - "alicloud_slb.traffic", "name", "tf_test_slb_classic"), - ), - }, - }, - }) -} - -func TestAccAlicloudSlb_listener(t *testing.T) { - var slb slb.LoadBalancerType - - testListener := func() resource.TestCheckFunc { - return func(*terraform.State) error { - listenerPorts := slb.ListenerPorts.ListenerPort[0] - if listenerPorts != 2001 { - return fmt.Errorf("bad loadbalancer listener: %#v", listenerPorts) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: 
"alicloud_slb.listener", - Providers: testAccProviders, - CheckDestroy: testAccCheckSlbDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSlbListener, - Check: resource.ComposeTestCheckFunc( - testAccCheckSlbExists("alicloud_slb.listener", &slb), - resource.TestCheckResourceAttr( - "alicloud_slb.listener", "name", "tf_test_slb"), - testAccCheckListenersExists("alicloud_slb.listener", &slb, "http"), - testListener(), - ), - }, - }, - }) -} - -func TestAccAlicloudSlb_vpc(t *testing.T) { - var slb slb.LoadBalancerType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_slb.vpc", - Providers: testAccProviders, - CheckDestroy: testAccCheckSlbDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSlb4Vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckSlbExists("alicloud_slb.vpc", &slb), - resource.TestCheckResourceAttr( - "alicloud_slb.vpc", "name", "tf_test_slb_vpc"), - ), - }, - }, - }) -} - -func testAccCheckSlbExists(n string, slb *slb.LoadBalancerType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SLB ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.DescribeLoadBalancerAttribute(rs.Primary.ID) - - if err != nil { - return err - } - if instance == nil { - return fmt.Errorf("SLB not found") - } - - *slb = *instance - return nil - } -} - -func testAccCheckListenersExists(n string, slb *slb.LoadBalancerType, p string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SLB ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := 
client.DescribeLoadBalancerAttribute(rs.Primary.ID) - - if err != nil { - return err - } - if instance == nil { - return fmt.Errorf("SLB not found") - } - - exist := false - for _, listener := range instance.ListenerPortsAndProtocol.ListenerPortAndProtocol { - if listener.ListenerProtocol == p { - exist = true - break - } - } - - if !exist { - return fmt.Errorf("The %s protocol Listener not found.", p) - } - return nil - } -} - -func testAccCheckSlbDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_slb" { - continue - } - - // Try to find the Slb - instance, err := client.DescribeLoadBalancerAttribute(rs.Primary.ID) - - if instance != nil { - return fmt.Errorf("SLB still exist") - } - - if err != nil { - e, _ := err.(*common.Error) - // Verify the error is what we want - if e.ErrorResponse.Code != LoadBalancerNotFound { - return err - } - - } - - } - - return nil -} - -const testAccSlbBindWidth = ` -resource "alicloud_slb" "bindwidth" { - name = "tf_test_slb_bindwidth" - internet_charge_type = "paybybandwidth" - bandwidth = 5 - internet = true -} -` - -const testAccSlbTraffic = ` -resource "alicloud_slb" "traffic" { - name = "tf_test_slb_classic" -} -` - -const testAccSlbListener = ` -resource "alicloud_slb" "listener" { - name = "tf_test_slb" - internet_charge_type = "paybybandwidth" - bandwidth = 5 - internet = true - listener = [ - { - "instance_port" = "2111" - "lb_port" = "21" - "lb_protocol" = "tcp" - "bandwidth" = 1 - "persistence_timeout" = 500 - "health_check_type" = "http" - },{ - "instance_port" = "8000" - "lb_port" = "80" - "lb_protocol" = "http" - "sticky_session" = "on" - "sticky_session_type" = "insert" - "cookie_timeout" = 800 - "bandwidth" = 1 - },{ - "instance_port" = "8001" - "lb_port" = "81" - "lb_protocol" = "http" - "sticky_session" = "on" - "sticky_session_type" = "server" - "cookie" = "testslblistenercookie" - "cookie_timeout" = 
1800 - "health_check" = "on" - "health_check_domain" = "$_ip" - "health_check_uri" = "/console" - "health_check_connect_port" = 20 - "healthy_threshold" = 8 - "unhealthy_threshold" = 8 - "health_check_timeout" = 8 - "health_check_interval" = 4 - "health_check_http_code" = "http_2xx" - "bandwidth" = 1 - },{ - "instance_port" = "2001" - "lb_port" = "2001" - "lb_protocol" = "udp" - "bandwidth" = 1 - "persistence_timeout" = 700 - }] -} -` - -const testAccSlb4Vpc = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_slb" "vpc" { - name = "tf_test_slb_vpc" - //internet_charge_type = "paybybandwidth" - vswitch_id = "${alicloud_vswitch.foo.id}" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_snat.go b/builtin/providers/alicloud/resource_alicloud_snat.go deleted file mode 100644 index 887d50388..000000000 --- a/builtin/providers/alicloud/resource_alicloud_snat.go +++ /dev/null @@ -1,134 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAliyunSnatEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSnatEntryCreate, - Read: resourceAliyunSnatEntryRead, - Update: resourceAliyunSnatEntryUpdate, - Delete: resourceAliyunSnatEntryDelete, - - Schema: map[string]*schema.Schema{ - "snat_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "source_vswitch_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "snat_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAliyunSnatEntryCreate(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).vpcconn - - args := &ecs.CreateSnatEntryArgs{ - RegionId: getRegion(d, meta), - SnatTableId: d.Get("snat_table_id").(string), - SourceVSwitchId: d.Get("source_vswitch_id").(string), - SnatIp: d.Get("snat_ip").(string), - } - - resp, err := conn.CreateSnatEntry(args) - if err != nil { - return fmt.Errorf("CreateSnatEntry got error: %#v", err) - } - - d.SetId(resp.SnatEntryId) - d.Set("snat_table_id", d.Get("snat_table_id").(string)) - - return resourceAliyunSnatEntryRead(d, meta) -} - -func resourceAliyunSnatEntryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - - snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id()) - - if err != nil { - if notFoundError(err) { - return nil - } - return err - } - - d.Set("snat_table_id", snatEntry.SnatTableId) - d.Set("source_vswitch_id", snatEntry.SourceVSwitchId) - d.Set("snat_ip", snatEntry.SnatIp) - - return nil -} - -func resourceAliyunSnatEntryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - conn := client.vpcconn - - snatEntry, err := client.DescribeSnatEntry(d.Get("snat_table_id").(string), d.Id()) - if err != nil { - return err - } - - d.Partial(true) - attributeUpdate := false - args := &ecs.ModifySnatEntryArgs{ - RegionId: getRegion(d, meta), - SnatTableId: snatEntry.SnatTableId, - SnatEntryId: snatEntry.SnatEntryId, - } - - if d.HasChange("snat_ip") { - d.SetPartial("snat_ip") - var snat_ip string - if v, ok := d.GetOk("snat_ip"); ok { - snat_ip = v.(string) - } else { - return fmt.Errorf("cann't change snap_ip to empty string") - } - args.SnatIp = snat_ip - - attributeUpdate = true - } - - if attributeUpdate { - if err := conn.ModifySnatEntry(args); err != nil { - return err - } - } - - d.Partial(false) - - return resourceAliyunSnatEntryRead(d, meta) -} - -func resourceAliyunSnatEntryDelete(d *schema.ResourceData, meta 
interface{}) error { - client := meta.(*AliyunClient) - conn := client.vpcconn - - snatEntryId := d.Id() - snatTableId := d.Get("snat_table_id").(string) - - args := &ecs.DeleteSnatEntryArgs{ - RegionId: getRegion(d, meta), - SnatTableId: snatTableId, - SnatEntryId: snatEntryId, - } - - if err := conn.DeleteSnatEntry(args); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_snat_test.go b/builtin/providers/alicloud/resource_alicloud_snat_test.go deleted file mode 100644 index 673ff59dd..000000000 --- a/builtin/providers/alicloud/resource_alicloud_snat_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "testing" -) - -func TestAccAlicloudSnat_basic(t *testing.T) { - var snat ecs.SnatEntrySetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_snat_entry.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckSnatEntryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSnatEntryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSnatEntryExists( - "alicloud_snat_entry.foo", &snat), - ), - }, - resource.TestStep{ - Config: testAccSnatEntryUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckSnatEntryExists( - "alicloud_snat_entry.foo", &snat), - ), - }, - }, - }) - -} - -func testAccCheckSnatEntryDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_snat_entry" { - continue - } - - // Try to find the Snat entry - instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID) - - //this special deal cause the DescribeSnatEntry can't find 
the records would be throw "cant find the snatTable error" - if instance.SnatEntryId == "" { - return nil - } - - if instance.SnatEntryId != "" { - return fmt.Errorf("Snat entry still exist") - } - - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - - if !notFoundError(e) { - return err - } - } - - } - - return nil -} - -func testAccCheckSnatEntryExists(n string, snat *ecs.SnatEntrySetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SnatEntry ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.DescribeSnatEntry(rs.Primary.Attributes["snat_table_id"], rs.Primary.ID) - - if err != nil { - return err - } - if instance.SnatEntryId == "" { - return fmt.Errorf("SnatEntry not found") - } - - *snat = instance - return nil - } -} - -const testAccSnatEntryConfig = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.2.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Small" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 2 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.2.id}" - },{ - ip_count = 1 - bandwidth = 6 - zone = "${data.alicloud_zones.default.zones.2.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} -resource "alicloud_snat_entry" "foo"{ - snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}" - source_vswitch_id = "${alicloud_vswitch.foo.id}" - snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}" -} -` - -const 
testAccSnatEntryUpdate = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.2.id}" -} - -resource "alicloud_nat_gateway" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - spec = "Small" - name = "test_foo" - bandwidth_packages = [{ - ip_count = 2 - bandwidth = 5 - zone = "${data.alicloud_zones.default.zones.2.id}" - },{ - ip_count = 1 - bandwidth = 6 - zone = "${data.alicloud_zones.default.zones.2.id}" - }] - depends_on = [ - "alicloud_vswitch.foo"] -} -resource "alicloud_snat_entry" "foo"{ - snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}" - source_vswitch_id = "${alicloud_vswitch.foo.id}" - snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.1.public_ip_addresses}" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_vpc.go b/builtin/providers/alicloud/resource_alicloud_vpc.go deleted file mode 100644 index 5362c638a..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vpc.go +++ /dev/null @@ -1,201 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "strings" - "time" -) - -func resourceAliyunVpc() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunVpcCreate, - Read: resourceAliyunVpcRead, - Update: resourceAliyunVpcUpdate, - Delete: resourceAliyunVpcDelete, - - Schema: map[string]*schema.Schema{ - "cidr_block": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateCIDRNetworkAddress, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) 
(ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 128 { - errors = append(errors, fmt.Errorf("%s cannot be longer than 128 characters", k)) - } - - if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { - errors = append(errors, fmt.Errorf("%s cannot starts with http:// or https://", k)) - } - - return - }, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 256 { - errors = append(errors, fmt.Errorf("%s cannot be longer than 256 characters", k)) - - } - return - }, - }, - "router_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "router_table_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAliyunVpcCreate(d *schema.ResourceData, meta interface{}) error { - - args, err := buildAliyunVpcArgs(d, meta) - if err != nil { - return err - } - - ecsconn := meta.(*AliyunClient).ecsconn - - var vpc *ecs.CreateVpcResponse - err = resource.Retry(3*time.Minute, func() *resource.RetryError { - resp, err := ecsconn.CreateVpc(args) - if err != nil { - if e, ok := err.(*common.Error); ok && (e.StatusCode == 400 || e.Code == UnknownError) { - return resource.RetryableError(fmt.Errorf("Vpc is still creating result from some unknown error -- try again")) - } - return resource.NonRetryableError(err) - } - vpc = resp - return nil - }) - if err != nil { - return fmt.Errorf("Create vpc got an error :%#v", err) - } - - d.SetId(vpc.VpcId) - d.Set("router_table_id", vpc.RouteTableId) - - err = ecsconn.WaitForVpcAvailable(args.RegionId, vpc.VpcId, 60) - if err != nil { - return fmt.Errorf("Timeout when WaitForVpcAvailable") - } - - return resourceAliyunVpcUpdate(d, meta) -} - -func resourceAliyunVpcRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*AliyunClient) - - 
vpc, err := client.DescribeVpc(d.Id()) - if err != nil { - return err - } - - if vpc == nil { - d.SetId("") - return nil - } - - d.Set("cidr_block", vpc.CidrBlock) - d.Set("name", vpc.VpcName) - d.Set("description", vpc.Description) - d.Set("router_id", vpc.VRouterId) - - return nil -} - -func resourceAliyunVpcUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - d.Partial(true) - - attributeUpdate := false - args := &ecs.ModifyVpcAttributeArgs{ - VpcId: d.Id(), - } - - if d.HasChange("name") { - d.SetPartial("name") - args.VpcName = d.Get("name").(string) - - attributeUpdate = true - } - - if d.HasChange("description") { - d.SetPartial("description") - args.Description = d.Get("description").(string) - - attributeUpdate = true - } - - if attributeUpdate { - if err := conn.ModifyVpcAttribute(args); err != nil { - return err - } - } - - d.Partial(false) - - return resourceAliyunVpcRead(d, meta) -} - -func resourceAliyunVpcDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := conn.DeleteVpc(d.Id()) - - if err != nil { - return resource.RetryableError(fmt.Errorf("Vpc in use - trying again while it is deleted.")) - } - - args := &ecs.DescribeVpcsArgs{ - RegionId: getRegion(d, meta), - VpcId: d.Id(), - } - vpc, _, descErr := conn.DescribeVpcs(args) - if descErr != nil { - return resource.NonRetryableError(err) - } else if vpc == nil || len(vpc) < 1 { - return nil - } - - return resource.RetryableError(fmt.Errorf("Vpc in use - trying again while it is deleted.")) - }) -} - -func buildAliyunVpcArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateVpcArgs, error) { - args := &ecs.CreateVpcArgs{ - RegionId: getRegion(d, meta), - CidrBlock: d.Get("cidr_block").(string), - } - - if v := d.Get("name").(string); v != "" { - args.VpcName = v - } - - if v := d.Get("description").(string); v != "" { - 
args.Description = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_vpc_test.go b/builtin/providers/alicloud/resource_alicloud_vpc_test.go deleted file mode 100644 index b67c65001..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vpc_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package alicloud - -import ( - "fmt" - "testing" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAlicloudVpc_basic(t *testing.T) { - var vpc ecs.VpcSetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_vpc.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("alicloud_vpc.foo", &vpc), - resource.TestCheckResourceAttr( - "alicloud_vpc.foo", "cidr_block", "172.16.0.0/12"), - resource.TestCheckResourceAttrSet( - "alicloud_vpc.foo", "router_id"), - resource.TestCheckResourceAttrSet( - "alicloud_vpc.foo", "router_table_id"), - ), - }, - }, - }) - -} - -func TestAccAlicloudVpc_update(t *testing.T) { - var vpc ecs.VpcSetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("alicloud_vpc.foo", &vpc), - resource.TestCheckResourceAttr( - "alicloud_vpc.foo", "cidr_block", "172.16.0.0/12"), - ), - }, - resource.TestStep{ - Config: testAccVpcConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("alicloud_vpc.foo", &vpc), - resource.TestCheckResourceAttr( - "alicloud_vpc.foo", "name", 
"tf_test_bar"), - ), - }, - }, - }) -} - -func testAccCheckVpcExists(n string, vpc *ecs.VpcSetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.DescribeVpc(rs.Primary.ID) - - if err != nil { - return err - } - if instance == nil { - return fmt.Errorf("VPC not found") - } - - *vpc = *instance - return nil - } -} - -func testAccCheckVpcDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_vpc" { - continue - } - - // Try to find the VPC - instance, err := client.DescribeVpc(rs.Primary.ID) - - if instance != nil { - return fmt.Errorf("VPCs still exist") - } - - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - - if e.ErrorResponse.Code != "InvalidVpcID.NotFound" { - return err - } - } - - } - - return nil -} - -const testAccVpcConfig = ` -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} -` - -const testAccVpcConfigUpdate = ` -resource "alicloud_vpc" "foo" { - cidr_block = "172.16.0.0/12" - name = "tf_test_bar" -} -` diff --git a/builtin/providers/alicloud/resource_alicloud_vroute_entry.go b/builtin/providers/alicloud/resource_alicloud_vroute_entry.go deleted file mode 100644 index b71e92ffe..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vroute_entry.go +++ /dev/null @@ -1,145 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceAliyunRouteEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunRouteEntryCreate, - Read: resourceAliyunRouteEntryRead, - Delete: 
resourceAliyunRouteEntryDelete, - - Schema: map[string]*schema.Schema{ - "router_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "route_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "destination_cidrblock": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "nexthop_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateRouteEntryNextHopType, - }, - "nexthop_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceAliyunRouteEntryCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - rtId := d.Get("route_table_id").(string) - rId := d.Get("router_id").(string) - cidr := d.Get("destination_cidrblock").(string) - nt := d.Get("nexthop_type").(string) - ni := d.Get("nexthop_id").(string) - - args, err := buildAliyunRouteEntryArgs(d, meta) - if err != nil { - return err - } - err = conn.CreateRouteEntry(args) - - if err != nil { - return err - } - // route_table_id:router_id:destination_cidrblock:nexthop_type:nexthop_id - d.SetId(rtId + ":" + rId + ":" + cidr + ":" + nt + ":" + ni) - d.Set("router_id", rId) - - if err := conn.WaitForAllRouteEntriesAvailable(rId, rtId, defaultTimeout); err != nil { - return fmt.Errorf("WaitFor route entry got error: %#v", err) - } - return resourceAliyunRouteEntryRead(d, meta) -} - -func resourceAliyunRouteEntryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AliyunClient) - parts := strings.Split(d.Id(), ":") - rtId := parts[0] - //rId := parts[1] - cidr := parts[2] - nexthop_type := parts[3] - nexthop_id := parts[4] - - en, err := client.QueryRouteEntry(rtId, cidr, nexthop_type, nexthop_id) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error route entry: %#v", err) - } - - 
d.Set("route_table_id", en.RouteTableId) - d.Set("destination_cidrblock", en.DestinationCidrBlock) - d.Set("nexthop_type", en.NextHopType) - d.Set("nexthop_id", en.InstanceId) - return nil -} - -func resourceAliyunRouteEntryDelete(d *schema.ResourceData, meta interface{}) error { - con := meta.(*AliyunClient).ecsconn - args, err := buildAliyunRouteEntryDeleteArgs(d, meta) - - if err != nil { - return err - } - return con.DeleteRouteEntry(args) -} - -func buildAliyunRouteEntryArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateRouteEntryArgs, error) { - - args := &ecs.CreateRouteEntryArgs{ - RouteTableId: d.Get("route_table_id").(string), - DestinationCidrBlock: d.Get("destination_cidrblock").(string), - } - - if v := d.Get("nexthop_type").(string); v != "" { - args.NextHopType = ecs.NextHopType(v) - } - - if v := d.Get("nexthop_id").(string); v != "" { - args.NextHopId = v - } - - return args, nil -} - -func buildAliyunRouteEntryDeleteArgs(d *schema.ResourceData, meta interface{}) (*ecs.DeleteRouteEntryArgs, error) { - - args := &ecs.DeleteRouteEntryArgs{ - RouteTableId: d.Get("route_table_id").(string), - DestinationCidrBlock: d.Get("destination_cidrblock").(string), - } - - if v := d.Get("destination_cidrblock").(string); v != "" { - args.DestinationCidrBlock = v - } - - if v := d.Get("nexthop_id").(string); v != "" { - args.NextHopId = v - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_vroute_entry_test.go b/builtin/providers/alicloud/resource_alicloud_vroute_entry_test.go deleted file mode 100644 index 8726de64c..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vroute_entry_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package alicloud - -import ( - "fmt" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "strings" - "testing" -) - -func TestAccAlicloudRouteEntry_Basic(t *testing.T) { - var rt ecs.RouteTableSetType - var rn 
ecs.RouteEntrySetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_route_entry.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteEntryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRouteEntryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableEntryExists( - "alicloud_route_entry.foo", &rt, &rn), - resource.TestCheckResourceAttrSet( - "alicloud_route_entry.foo", "nexthop_id"), - ), - }, - }, - }) - -} - -func testAccCheckRouteTableExists(rtId string, t *ecs.RouteTableSetType) error { - client := testAccProvider.Meta().(*AliyunClient) - //query route table - rt, terr := client.QueryRouteTableById(rtId) - - if terr != nil { - return terr - } - - if rt == nil { - return fmt.Errorf("Route Table not found") - } - - *t = *rt - return nil -} - -func testAccCheckRouteEntryExists(routeTableId, cidrBlock, nextHopType, nextHopId string, e *ecs.RouteEntrySetType) error { - client := testAccProvider.Meta().(*AliyunClient) - //query route table entry - re, rerr := client.QueryRouteEntry(routeTableId, cidrBlock, nextHopType, nextHopId) - - if rerr != nil { - return rerr - } - - if re == nil { - return fmt.Errorf("Route Table Entry not found") - } - - *e = *re - return nil -} - -func testAccCheckRouteTableEntryExists(n string, t *ecs.RouteTableSetType, e *ecs.RouteEntrySetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Route Entry ID is set") - } - - parts := strings.Split(rs.Primary.ID, ":") - - //query route table - err := testAccCheckRouteTableExists(parts[0], t) - - if err != nil { - return err - } - //query route table entry - err = testAccCheckRouteEntryExists(parts[0], parts[2], parts[3], parts[4], e) - return err - } -} - -func 
testAccCheckRouteEntryDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_route_entry" { - continue - } - - parts := strings.Split(rs.Primary.ID, ":") - re, err := client.QueryRouteEntry(parts[0], parts[2], parts[3], parts[4]) - - if re != nil { - return fmt.Errorf("Error Route Entry still exist") - } - - // Verify the error is what we want - if err != nil { - if notFoundError(err) { - return nil - } - return err - } - } - - return nil -} - -const testAccRouteEntryConfig = ` -data "alicloud_zones" "default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "10.1.0.0/21" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} - -resource "alicloud_route_entry" "foo" { - router_id = "${alicloud_vpc.foo.router_id}" - route_table_id = "${alicloud_vpc.foo.router_table_id}" - destination_cidrblock = "172.11.1.1/32" - nexthop_type = "Instance" - nexthop_id = "${alicloud_instance.foo.id}" -} - -resource "alicloud_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id = "${alicloud_vpc.foo.id}" -} - -resource "alicloud_security_group_rule" "ingress" { - type = "ingress" - ip_protocol = "tcp" - nic_type = "intranet" - policy = "accept" - port_range = "22/22" - priority = 1 - security_group_id = "${alicloud_security_group.tf_test_foo.id}" - cidr_ip = "0.0.0.0/0" -} - -resource "alicloud_instance" "foo" { - # cn-beijing - security_groups = ["${alicloud_security_group.tf_test_foo.id}"] - - vswitch_id = "${alicloud_vswitch.foo.id}" - allocate_public_ip = true - - # series II - instance_charge_type = "PostPaid" - instance_type = "ecs.n1.small" - internet_charge_type = "PayByTraffic" - internet_max_bandwidth_out = 5 - io_optimized = "optimized" - - 
system_disk_category = "cloud_efficiency" - image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd" - instance_name = "test_foo" -} - -` diff --git a/builtin/providers/alicloud/resource_alicloud_vswitch.go b/builtin/providers/alicloud/resource_alicloud_vswitch.go deleted file mode 100644 index 89e24108e..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vswitch.go +++ /dev/null @@ -1,232 +0,0 @@ -package alicloud - -import ( - "fmt" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "log" - "time" -) - -func resourceAliyunSubnet() *schema.Resource { - return &schema.Resource{ - Create: resourceAliyunSwitchCreate, - Read: resourceAliyunSwitchRead, - Update: resourceAliyunSwitchUpdate, - Delete: resourceAliyunSwitchDelete, - - Schema: map[string]*schema.Schema{ - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cidr_block": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateSwitchCIDRNetworkAddress, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAliyunSwitchCreate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - args, err := buildAliyunSwitchArgs(d, meta) - if err != nil { - return err - } - - var vswitchID string - err = resource.Retry(3*time.Minute, func() *resource.RetryError { - vswId, err := conn.CreateVSwitch(args) - if err != nil { - if e, ok := err.(*common.Error); ok && (e.StatusCode == 400 || e.Code == UnknownError) { - return resource.RetryableError(fmt.Errorf("Vswitch is still creating result from some unknown error -- 
try again")) - } - return resource.NonRetryableError(err) - } - vswitchID = vswId - return nil - }) - - if err != nil { - return fmt.Errorf("Create subnet got an error :%s", err) - } - - d.SetId(vswitchID) - - err = conn.WaitForVSwitchAvailable(args.VpcId, vswitchID, 60) - if err != nil { - return fmt.Errorf("WaitForVSwitchAvailable got a error: %s", err) - } - - return resourceAliyunSwitchUpdate(d, meta) -} - -func resourceAliyunSwitchRead(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - args := &ecs.DescribeVSwitchesArgs{ - VpcId: d.Get("vpc_id").(string), - VSwitchId: d.Id(), - } - - vswitches, _, err := conn.DescribeVSwitches(args) - - if err != nil { - if notFoundError(err) { - d.SetId("") - return nil - } - return err - } - - if len(vswitches) == 0 { - d.SetId("") - return nil - } - - vswitch := vswitches[0] - - d.Set("availability_zone", vswitch.ZoneId) - d.Set("vpc_id", vswitch.VpcId) - d.Set("cidr_block", vswitch.CidrBlock) - d.Set("name", vswitch.VSwitchName) - d.Set("description", vswitch.Description) - - return nil -} - -func resourceAliyunSwitchUpdate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AliyunClient).ecsconn - - d.Partial(true) - - attributeUpdate := false - args := &ecs.ModifyVSwitchAttributeArgs{ - VSwitchId: d.Id(), - } - - if d.HasChange("name") { - d.SetPartial("name") - args.VSwitchName = d.Get("name").(string) - - attributeUpdate = true - } - - if d.HasChange("description") { - d.SetPartial("description") - args.Description = d.Get("description").(string) - - attributeUpdate = true - } - if attributeUpdate { - if err := conn.ModifyVSwitchAttribute(args); err != nil { - return err - } - - } - - d.Partial(false) - - return resourceAliyunSwitchRead(d, meta) -} - -func resourceAliyunSwitchDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AliyunClient).ecsconn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - err := 
conn.DeleteVSwitch(d.Id()) - - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code == VswitcInvalidRegionId { - log.Printf("[ERROR] Delete Switch is failed.") - return resource.NonRetryableError(err) - } - - return resource.RetryableError(fmt.Errorf("Switch in use. -- trying again while it is deleted.")) - } - - vsw, _, vswErr := conn.DescribeVSwitches(&ecs.DescribeVSwitchesArgs{ - VpcId: d.Get("vpc_id").(string), - VSwitchId: d.Id(), - }) - - if vswErr != nil { - return resource.NonRetryableError(vswErr) - } else if vsw == nil || len(vsw) < 1 { - return nil - } - - return resource.RetryableError(fmt.Errorf("Switch in use. -- trying again while it is deleted.")) - }) -} - -func buildAliyunSwitchArgs(d *schema.ResourceData, meta interface{}) (*ecs.CreateVSwitchArgs, error) { - - client := meta.(*AliyunClient) - - vpcID := d.Get("vpc_id").(string) - - vpc, err := client.DescribeVpc(vpcID) - if err != nil { - return nil, err - } - - if vpc == nil { - return nil, fmt.Errorf("vpc_id not found") - } - - zoneID := d.Get("availability_zone").(string) - - zone, err := client.DescribeZone(zoneID) - if err != nil { - return nil, err - } - - err = client.ResourceAvailable(zone, ecs.ResourceTypeVSwitch) - if err != nil { - return nil, err - } - - cidrBlock := d.Get("cidr_block").(string) - - args := &ecs.CreateVSwitchArgs{ - VpcId: vpcID, - ZoneId: zoneID, - CidrBlock: cidrBlock, - } - - if v, ok := d.GetOk("name"); ok && v != "" { - args.VSwitchName = v.(string) - } - - if v, ok := d.GetOk("description"); ok && v != "" { - args.Description = v.(string) - } - - return args, nil -} diff --git a/builtin/providers/alicloud/resource_alicloud_vswitch_test.go b/builtin/providers/alicloud/resource_alicloud_vswitch_test.go deleted file mode 100644 index 1a1a75bd6..000000000 --- a/builtin/providers/alicloud/resource_alicloud_vswitch_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package alicloud - -import ( - "testing" - - "fmt" - "github.com/denverdino/aliyungo/common" - 
"github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAlicloudVswitch_basic(t *testing.T) { - var vsw ecs.VSwitchSetType - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - - // module name - IDRefreshName: "alicloud_vswitch.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVswitchDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVswitchConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVswitchExists("alicloud_vswitch.foo", &vsw), - resource.TestCheckResourceAttr( - "alicloud_vswitch.foo", "cidr_block", "172.16.0.0/21"), - ), - }, - }, - }) - -} - -func testAccCheckVswitchExists(n string, vpc *ecs.VSwitchSetType) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Vswitch ID is set") - } - - client := testAccProvider.Meta().(*AliyunClient) - instance, err := client.QueryVswitchById(rs.Primary.Attributes["vpc_id"], rs.Primary.ID) - - if err != nil { - return err - } - if instance == nil { - return fmt.Errorf("Vswitch not found") - } - - *vpc = *instance - return nil - } -} - -func testAccCheckVswitchDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AliyunClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "alicloud_vswitch" { - continue - } - - // Try to find the Vswitch - instance, err := client.QueryVswitchById(rs.Primary.Attributes["vpc_id"], rs.Primary.ID) - - if instance != nil { - return fmt.Errorf("Vswitch still exist") - } - - if err != nil { - // Verify the error is what we want - e, _ := err.(*common.Error) - - if e.ErrorResponse.Code != "InvalidVswitchID.NotFound" { - return err - } - } - - } - - return nil -} - -const testAccVswitchConfig = ` -data "alicloud_zones" 
"default" { - "available_resource_creation"= "VSwitch" -} - -resource "alicloud_vpc" "foo" { - name = "tf_test_foo" - cidr_block = "172.16.0.0/12" -} - -resource "alicloud_vswitch" "foo" { - vpc_id = "${alicloud_vpc.foo.id}" - cidr_block = "172.16.0.0/21" - availability_zone = "${data.alicloud_zones.default.zones.0.id}" -} -` diff --git a/builtin/providers/alicloud/service_alicloud_ecs.go b/builtin/providers/alicloud/service_alicloud_ecs.go deleted file mode 100644 index 79b6b07fb..000000000 --- a/builtin/providers/alicloud/service_alicloud_ecs.go +++ /dev/null @@ -1,259 +0,0 @@ -package alicloud - -import ( - "encoding/json" - "fmt" - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "strings" -) - -func (client *AliyunClient) DescribeImage(imageId string) (*ecs.ImageType, error) { - - pagination := common.Pagination{ - PageNumber: 1, - } - args := ecs.DescribeImagesArgs{ - Pagination: pagination, - RegionId: client.Region, - Status: ecs.ImageStatusAvailable, - } - - var allImages []ecs.ImageType - - for { - images, _, err := client.ecsconn.DescribeImages(&args) - if err != nil { - break - } - - if len(images) == 0 { - break - } - - allImages = append(allImages, images...) 
- - args.Pagination.PageNumber++ - } - - if len(allImages) == 0 { - return nil, common.GetClientErrorFromString("Not found") - } - - var image *ecs.ImageType - imageIds := []string{} - for _, im := range allImages { - if im.ImageId == imageId { - image = &im - } - imageIds = append(imageIds, im.ImageId) - } - - if image == nil { - return nil, fmt.Errorf("image_id %s not exists in range %s, all images are %s", imageId, client.Region, imageIds) - } - - return image, nil -} - -// DescribeZone validate zoneId is valid in region -func (client *AliyunClient) DescribeZone(zoneID string) (*ecs.ZoneType, error) { - zones, err := client.ecsconn.DescribeZones(client.Region) - if err != nil { - return nil, fmt.Errorf("error to list zones not found") - } - - var zone *ecs.ZoneType - zoneIds := []string{} - for _, z := range zones { - if z.ZoneId == zoneID { - zone = &ecs.ZoneType{ - ZoneId: z.ZoneId, - LocalName: z.LocalName, - AvailableResourceCreation: z.AvailableResourceCreation, - AvailableDiskCategories: z.AvailableDiskCategories, - } - } - zoneIds = append(zoneIds, z.ZoneId) - } - - if zone == nil { - return nil, fmt.Errorf("availability_zone not exists in range %s, all zones are %s", client.Region, zoneIds) - } - - return zone, nil -} - -// return multiIZ list of current region -func (client *AliyunClient) DescribeMultiIZByRegion() (izs []string, err error) { - resp, err := client.rdsconn.DescribeRegions() - if err != nil { - return nil, fmt.Errorf("error to list regions not found") - } - regions := resp.Regions.RDSRegion - - zoneIds := []string{} - for _, r := range regions { - if r.RegionId == string(client.Region) && strings.Contains(r.ZoneId, MULTI_IZ_SYMBOL) { - zoneIds = append(zoneIds, r.ZoneId) - } - } - - return zoneIds, nil -} - -func (client *AliyunClient) QueryInstancesByIds(ids []string) (instances []ecs.InstanceAttributesType, err error) { - idsStr, jerr := json.Marshal(ids) - if jerr != nil { - return nil, jerr - } - - args := ecs.DescribeInstancesArgs{ - 
RegionId: client.Region, - InstanceIds: string(idsStr), - } - - instances, _, errs := client.ecsconn.DescribeInstances(&args) - - if errs != nil { - return nil, errs - } - - return instances, nil -} - -func (client *AliyunClient) QueryInstancesById(id string) (instance *ecs.InstanceAttributesType, err error) { - ids := []string{id} - - instances, errs := client.QueryInstancesByIds(ids) - if errs != nil { - return nil, errs - } - - if len(instances) == 0 { - return nil, GetNotFoundErrorFromString(InstanceNotfound) - } - - return &instances[0], nil -} - -func (client *AliyunClient) QueryInstanceSystemDisk(id string) (disk *ecs.DiskItemType, err error) { - args := ecs.DescribeDisksArgs{ - RegionId: client.Region, - InstanceId: string(id), - DiskType: ecs.DiskTypeAllSystem, - } - disks, _, err := client.ecsconn.DescribeDisks(&args) - if err != nil { - return nil, err - } - if len(disks) == 0 { - return nil, common.GetClientErrorFromString(SystemDiskNotFound) - } - - return &disks[0], nil -} - -// ResourceAvailable check resource available for zone -func (client *AliyunClient) ResourceAvailable(zone *ecs.ZoneType, resourceType ecs.ResourceType) error { - available := false - for _, res := range zone.AvailableResourceCreation.ResourceTypes { - if res == resourceType { - available = true - } - } - if !available { - return fmt.Errorf("%s is not available in %s zone of %s region", resourceType, zone.ZoneId, client.Region) - } - - return nil -} - -func (client *AliyunClient) DiskAvailable(zone *ecs.ZoneType, diskCategory ecs.DiskCategory) error { - available := false - for _, dist := range zone.AvailableDiskCategories.DiskCategories { - if dist == diskCategory { - available = true - } - } - if !available { - return fmt.Errorf("%s is not available in %s zone of %s region", diskCategory, zone.ZoneId, client.Region) - } - return nil -} - -// todo: support syc -func (client *AliyunClient) JoinSecurityGroups(instanceId string, securityGroupIds []string) error { - for _, sid := 
range securityGroupIds { - err := client.ecsconn.JoinSecurityGroup(instanceId, sid) - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code != InvalidInstanceIdAlreadyExists { - return err - } - } - } - - return nil -} - -func (client *AliyunClient) LeaveSecurityGroups(instanceId string, securityGroupIds []string) error { - for _, sid := range securityGroupIds { - err := client.ecsconn.LeaveSecurityGroup(instanceId, sid) - if err != nil { - e, _ := err.(*common.Error) - if e.ErrorResponse.Code != InvalidSecurityGroupIdNotFound { - return err - } - } - } - - return nil -} - -func (client *AliyunClient) DescribeSecurity(securityGroupId string) (*ecs.DescribeSecurityGroupAttributeResponse, error) { - - args := &ecs.DescribeSecurityGroupAttributeArgs{ - RegionId: client.Region, - SecurityGroupId: securityGroupId, - } - - return client.ecsconn.DescribeSecurityGroupAttribute(args) -} - -func (client *AliyunClient) DescribeSecurityByAttr(securityGroupId, direction, nicType string) (*ecs.DescribeSecurityGroupAttributeResponse, error) { - - args := &ecs.DescribeSecurityGroupAttributeArgs{ - RegionId: client.Region, - SecurityGroupId: securityGroupId, - Direction: direction, - NicType: ecs.NicType(nicType), - } - - return client.ecsconn.DescribeSecurityGroupAttribute(args) -} - -func (client *AliyunClient) DescribeSecurityGroupRule(securityGroupId, direction, nicType, ipProtocol, portRange string) (*ecs.PermissionType, error) { - sg, err := client.DescribeSecurityByAttr(securityGroupId, direction, nicType) - if err != nil { - return nil, err - } - - for _, p := range sg.Permissions.Permission { - if strings.ToLower(string(p.IpProtocol)) == ipProtocol && p.PortRange == portRange { - return &p, nil - } - } - return nil, GetNotFoundErrorFromString("Security group rule not found") - -} - -func (client *AliyunClient) RevokeSecurityGroup(args *ecs.RevokeSecurityGroupArgs) error { - //when the rule is not exist, api will return success(200) - return 
client.ecsconn.RevokeSecurityGroup(args) -} - -func (client *AliyunClient) RevokeSecurityGroupEgress(args *ecs.RevokeSecurityGroupEgressArgs) error { - //when the rule is not exist, api will return success(200) - return client.ecsconn.RevokeSecurityGroupEgress(args) -} diff --git a/builtin/providers/alicloud/service_alicloud_ess.go b/builtin/providers/alicloud/service_alicloud_ess.go deleted file mode 100644 index 69d514ef2..000000000 --- a/builtin/providers/alicloud/service_alicloud_ess.go +++ /dev/null @@ -1,167 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/ess" -) - -func (client *AliyunClient) DescribeScalingGroupById(sgId string) (*ess.ScalingGroupItemType, error) { - args := ess.DescribeScalingGroupsArgs{ - RegionId: client.Region, - ScalingGroupId: []string{sgId}, - } - - sgs, _, err := client.essconn.DescribeScalingGroups(&args) - if err != nil { - return nil, err - } - - if len(sgs) == 0 { - return nil, GetNotFoundErrorFromString("Scaling group not found") - } - - return &sgs[0], nil -} - -func (client *AliyunClient) DeleteScalingGroupById(sgId string) error { - args := ess.DeleteScalingGroupArgs{ - ScalingGroupId: sgId, - ForceDelete: true, - } - - _, err := client.essconn.DeleteScalingGroup(&args) - return err -} - -func (client *AliyunClient) DescribeScalingConfigurationById(sgId, configId string) (*ess.ScalingConfigurationItemType, error) { - args := ess.DescribeScalingConfigurationsArgs{ - RegionId: client.Region, - ScalingGroupId: sgId, - ScalingConfigurationId: []string{configId}, - } - - cs, _, err := client.essconn.DescribeScalingConfigurations(&args) - if err != nil { - return nil, err - } - - if len(cs) == 0 { - return nil, GetNotFoundErrorFromString("Scaling configuration not found") - } - - return &cs[0], nil -} - -func (client *AliyunClient) ActiveScalingConfigurationById(sgId, configId string) error { - args := ess.ModifyScalingGroupArgs{ - ScalingGroupId: sgId, - ActiveScalingConfigurationId: configId, - } - - _, 
err := client.essconn.ModifyScalingGroup(&args) - return err -} - -func (client *AliyunClient) EnableScalingConfigurationById(sgId, configId string, ids []string) error { - args := ess.EnableScalingGroupArgs{ - ScalingGroupId: sgId, - ActiveScalingConfigurationId: configId, - } - - if len(ids) > 0 { - args.InstanceId = ids - } - - _, err := client.essconn.EnableScalingGroup(&args) - return err -} - -func (client *AliyunClient) DisableScalingConfigurationById(sgId string) error { - args := ess.DisableScalingGroupArgs{ - ScalingGroupId: sgId, - } - - _, err := client.essconn.DisableScalingGroup(&args) - return err -} - -func (client *AliyunClient) DeleteScalingConfigurationById(sgId, configId string) error { - args := ess.DeleteScalingConfigurationArgs{ - ScalingGroupId: sgId, - ScalingConfigurationId: configId, - } - - _, err := client.essconn.DeleteScalingConfiguration(&args) - return err -} - -// Flattens an array of datadisk into a []map[string]interface{} -func flattenDataDiskMappings(list []ess.DataDiskItemType) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "size": i.Size, - "category": i.Category, - "snapshot_id": i.SnapshotId, - "device": i.Device, - } - result = append(result, l) - } - return result -} - -func (client *AliyunClient) DescribeScalingRuleById(sgId, ruleId string) (*ess.ScalingRuleItemType, error) { - args := ess.DescribeScalingRulesArgs{ - RegionId: client.Region, - ScalingGroupId: sgId, - ScalingRuleId: []string{ruleId}, - } - - cs, _, err := client.essconn.DescribeScalingRules(&args) - if err != nil { - return nil, err - } - - if len(cs) == 0 { - return nil, GetNotFoundErrorFromString("Scaling rule not found") - } - - return &cs[0], nil -} - -func (client *AliyunClient) DeleteScalingRuleById(ruleId string) error { - args := ess.DeleteScalingRuleArgs{ - RegionId: client.Region, - ScalingRuleId: ruleId, - } - - _, err := 
client.essconn.DeleteScalingRule(&args) - return err -} - -func (client *AliyunClient) DescribeScheduleById(scheduleId string) (*ess.ScheduledTaskItemType, error) { - args := ess.DescribeScheduledTasksArgs{ - RegionId: client.Region, - ScheduledTaskId: []string{scheduleId}, - } - - cs, _, err := client.essconn.DescribeScheduledTasks(&args) - if err != nil { - return nil, err - } - - if len(cs) == 0 { - return nil, GetNotFoundErrorFromString("Schedule not found") - } - - return &cs[0], nil -} - -func (client *AliyunClient) DeleteScheduleById(scheduleId string) error { - args := ess.DeleteScheduledTaskArgs{ - RegionId: client.Region, - ScheduledTaskId: scheduleId, - } - - _, err := client.essconn.DeleteScheduledTask(&args) - return err -} diff --git a/builtin/providers/alicloud/service_alicloud_rds.go b/builtin/providers/alicloud/service_alicloud_rds.go deleted file mode 100644 index 700a5d138..000000000 --- a/builtin/providers/alicloud/service_alicloud_rds.go +++ /dev/null @@ -1,288 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/rds" - "strings" -) - -// -// _______________ _______________ _______________ -// | | ______param______\ | | _____request_____\ | | -// | Business | | Service | | SDK/API | -// | | __________________ | | __________________ | | -// |______________| \ (obj, err) |______________| \ (status, cont) |______________| -// | | -// |A. {instance, nil} |a. {200, content} -// |B. {nil, error} |b. {200, nil} -// |c. {4xx, nil} -// -// The API return 200 for resource not found. -// When getInstance is empty, then throw InstanceNotfound error. -// That the business layer only need to check error. 
-func (client *AliyunClient) DescribeDBInstanceById(id string) (instance *rds.DBInstanceAttribute, err error) { - arrtArgs := rds.DescribeDBInstancesArgs{ - DBInstanceId: id, - } - resp, err := client.rdsconn.DescribeDBInstanceAttribute(&arrtArgs) - if err != nil { - return nil, err - } - - attr := resp.Items.DBInstanceAttribute - - if len(attr) <= 0 { - return nil, GetNotFoundErrorFromString("DB instance not found") - } - - return &attr[0], nil -} - -func (client *AliyunClient) CreateAccountByInfo(instanceId, username, pwd string) error { - conn := client.rdsconn - args := rds.CreateAccountArgs{ - DBInstanceId: instanceId, - AccountName: username, - AccountPassword: pwd, - } - - if _, err := conn.CreateAccount(&args); err != nil { - return err - } - - if err := conn.WaitForAccount(instanceId, username, rds.Available, 200); err != nil { - return err - } - return nil -} - -func (client *AliyunClient) CreateDatabaseByInfo(instanceId, dbName, charset, desp string) error { - conn := client.rdsconn - args := rds.CreateDatabaseArgs{ - DBInstanceId: instanceId, - DBName: dbName, - CharacterSetName: charset, - DBDescription: desp, - } - _, err := conn.CreateDatabase(&args) - return err -} - -func (client *AliyunClient) DescribeDatabaseByName(instanceId, dbName string) (ds []rds.Database, err error) { - conn := client.rdsconn - args := rds.DescribeDatabasesArgs{ - DBInstanceId: instanceId, - DBName: dbName, - } - - resp, err := conn.DescribeDatabases(&args) - if err != nil { - return nil, err - } - - return resp.Databases.Database, nil -} - -func (client *AliyunClient) GrantDBPrivilege2Account(instanceId, username, dbName string) error { - conn := client.rdsconn - pargs := rds.GrantAccountPrivilegeArgs{ - DBInstanceId: instanceId, - AccountName: username, - DBName: dbName, - AccountPrivilege: rds.ReadWrite, - } - if _, err := conn.GrantAccountPrivilege(&pargs); err != nil { - return err - } - - if err := conn.WaitForAccountPrivilege(instanceId, username, dbName, 
rds.ReadWrite, 200); err != nil { - return err - } - return nil -} - -func (client *AliyunClient) AllocateDBPublicConnection(instanceId, port string) error { - conn := client.rdsconn - args := rds.AllocateInstancePublicConnectionArgs{ - DBInstanceId: instanceId, - ConnectionStringPrefix: instanceId + "o", - Port: port, - } - - if _, err := conn.AllocateInstancePublicConnection(&args); err != nil { - return err - } - - if err := conn.WaitForPublicConnection(instanceId, 600); err != nil { - return err - } - return nil -} - -func (client *AliyunClient) ConfigDBBackup(instanceId, backupTime, backupPeriod string, retentionPeriod int) error { - bargs := rds.BackupPolicy{ - PreferredBackupTime: backupTime, - PreferredBackupPeriod: backupPeriod, - BackupRetentionPeriod: retentionPeriod, - } - args := rds.ModifyBackupPolicyArgs{ - DBInstanceId: instanceId, - BackupPolicy: bargs, - } - - if _, err := client.rdsconn.ModifyBackupPolicy(&args); err != nil { - return err - } - - if err := client.rdsconn.WaitForInstance(instanceId, rds.Running, 600); err != nil { - return err - } - return nil -} - -func (client *AliyunClient) ModifyDBSecurityIps(instanceId, ips string) error { - sargs := rds.DBInstanceIPArray{ - SecurityIps: ips, - } - - args := rds.ModifySecurityIpsArgs{ - DBInstanceId: instanceId, - DBInstanceIPArray: sargs, - } - - if _, err := client.rdsconn.ModifySecurityIps(&args); err != nil { - return err - } - - if err := client.rdsconn.WaitForInstance(instanceId, rds.Running, 600); err != nil { - return err - } - return nil -} - -func (client *AliyunClient) DescribeDBSecurityIps(instanceId string) (ips []rds.DBInstanceIPList, err error) { - args := rds.DescribeDBInstanceIPsArgs{ - DBInstanceId: instanceId, - } - - resp, err := client.rdsconn.DescribeDBInstanceIPs(&args) - if err != nil { - return nil, err - } - return resp.Items.DBInstanceIPArray, nil -} - -func (client *AliyunClient) GetSecurityIps(instanceId string) ([]string, error) { - arr, err := 
client.DescribeDBSecurityIps(instanceId) - if err != nil { - return nil, err - } - var ips, separator string - for _, ip := range arr { - ips += separator + ip.SecurityIPList - separator = COMMA_SEPARATED - } - return strings.Split(ips, COMMA_SEPARATED), nil -} - -func (client *AliyunClient) ModifyDBClassStorage(instanceId, class, storage string) error { - conn := client.rdsconn - args := rds.ModifyDBInstanceSpecArgs{ - DBInstanceId: instanceId, - PayType: rds.Postpaid, - DBInstanceClass: class, - DBInstanceStorage: storage, - } - - if _, err := conn.ModifyDBInstanceSpec(&args); err != nil { - return err - } - - if err := conn.WaitForInstance(instanceId, rds.Running, 600); err != nil { - return err - } - return nil -} - -// turn period to TimeType -func TransformPeriod2Time(period int, chargeType string) (ut int, tt common.TimeType) { - if chargeType == string(rds.Postpaid) { - return 1, common.Day - } - - if period >= 1 && period <= 9 { - return period, common.Month - } - - if period == 12 { - return 1, common.Year - } - - if period == 24 { - return 2, common.Year - } - return 0, common.Day - -} - -// turn TimeType to Period -func TransformTime2Period(ut int, tt common.TimeType) (period int) { - if tt == common.Year { - return 12 * ut - } - - return ut - -} - -// Flattens an array of databases into a []map[string]interface{} -func flattenDatabaseMappings(list []rds.Database) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "db_name": i.DBName, - "character_set_name": i.CharacterSetName, - "db_description": i.DBDescription, - } - result = append(result, l) - } - return result -} - -func flattenDBBackup(list []rds.BackupPolicy) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "preferred_backup_period": i.PreferredBackupPeriod, - "preferred_backup_time": i.PreferredBackupTime, - 
"backup_retention_period": i.LogBackupRetentionPeriod, - } - result = append(result, l) - } - return result -} - -func flattenDBSecurityIPs(list []rds.DBInstanceIPList) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "security_ips": i.SecurityIPList, - } - result = append(result, l) - } - return result -} - -// Flattens an array of databases connection into a []map[string]interface{} -func flattenDBConnections(list []rds.DBInstanceNetInfo) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "connection_string": i.ConnectionString, - "ip_type": i.IPType, - "ip_address": i.IPAddress, - } - result = append(result, l) - } - return result -} diff --git a/builtin/providers/alicloud/service_alicloud_slb.go b/builtin/providers/alicloud/service_alicloud_slb.go deleted file mode 100644 index 9fc473018..000000000 --- a/builtin/providers/alicloud/service_alicloud_slb.go +++ /dev/null @@ -1,21 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/slb" -) - -func (client *AliyunClient) DescribeLoadBalancerAttribute(slbId string) (*slb.LoadBalancerType, error) { - loadBalancer, err := client.slbconn.DescribeLoadBalancerAttribute(slbId) - if err != nil { - if notFoundError(err) { - return nil, nil - } - return nil, err - } - - if loadBalancer != nil { - return loadBalancer, nil - } - - return nil, nil -} diff --git a/builtin/providers/alicloud/service_alicloud_vpc.go b/builtin/providers/alicloud/service_alicloud_vpc.go deleted file mode 100644 index 491ab034f..000000000 --- a/builtin/providers/alicloud/service_alicloud_vpc.go +++ /dev/null @@ -1,227 +0,0 @@ -package alicloud - -import ( - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "strings" -) - -func (client *AliyunClient) DescribeEipAddress(allocationId string) (*ecs.EipAddressSetType, 
error) { - - args := ecs.DescribeEipAddressesArgs{ - RegionId: client.Region, - AllocationId: allocationId, - } - - eips, _, err := client.ecsconn.DescribeEipAddresses(&args) - if err != nil { - return nil, err - } - if len(eips) == 0 { - return nil, common.GetClientErrorFromString("Not found") - } - - return &eips[0], nil -} - -func (client *AliyunClient) DescribeNatGateway(natGatewayId string) (*ecs.NatGatewaySetType, error) { - - args := &ecs.DescribeNatGatewaysArgs{ - RegionId: client.Region, - NatGatewayId: natGatewayId, - } - - natGateways, _, err := client.vpcconn.DescribeNatGateways(args) - //fmt.Println("natGateways %#v", natGateways) - if err != nil { - return nil, err - } - - if len(natGateways) == 0 { - return nil, common.GetClientErrorFromString("Not found") - } - - return &natGateways[0], nil -} - -func (client *AliyunClient) DescribeVpc(vpcId string) (*ecs.VpcSetType, error) { - args := ecs.DescribeVpcsArgs{ - RegionId: client.Region, - VpcId: vpcId, - } - - vpcs, _, err := client.ecsconn.DescribeVpcs(&args) - if err != nil { - if notFoundError(err) { - return nil, nil - } - return nil, err - } - - if len(vpcs) == 0 { - return nil, nil - } - - return &vpcs[0], nil -} - -func (client *AliyunClient) DescribeSnatEntry(snatTableId string, snatEntryId string) (ecs.SnatEntrySetType, error) { - - var resultSnat ecs.SnatEntrySetType - - args := &ecs.DescribeSnatTableEntriesArgs{ - RegionId: client.Region, - SnatTableId: snatTableId, - } - - snatEntries, _, err := client.vpcconn.DescribeSnatTableEntries(args) - - //this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error" - //so judge the snatEntries length priority - if len(snatEntries) == 0 { - return resultSnat, common.GetClientErrorFromString(InstanceNotfound) - } - - if err != nil { - return resultSnat, err - } - - findSnat := false - - for _, snat := range snatEntries { - if snat.SnatEntryId == snatEntryId { - resultSnat = snat - findSnat = true 
- } - } - if !findSnat { - return resultSnat, common.GetClientErrorFromString(NotFindSnatEntryBySnatId) - } - - return resultSnat, nil -} - -func (client *AliyunClient) DescribeForwardEntry(forwardTableId string, forwardEntryId string) (ecs.ForwardTableEntrySetType, error) { - - var resultFoward ecs.ForwardTableEntrySetType - - args := &ecs.DescribeForwardTableEntriesArgs{ - RegionId: client.Region, - ForwardTableId: forwardTableId, - } - - forwardEntries, _, err := client.vpcconn.DescribeForwardTableEntries(args) - - //this special deal cause the DescribeSnatEntry can't find the records would be throw "cant find the snatTable error" - //so judge the snatEntries length priority - if len(forwardEntries) == 0 { - return resultFoward, common.GetClientErrorFromString(InstanceNotfound) - } - - findForward := false - - for _, forward := range forwardEntries { - if forward.ForwardEntryId == forwardEntryId { - resultFoward = forward - findForward = true - } - } - if !findForward { - return resultFoward, common.GetClientErrorFromString(NotFindForwardEntryByForwardId) - } - - if err != nil { - return resultFoward, err - } - - return resultFoward, nil -} - -// describe vswitch by param filters -func (client *AliyunClient) QueryVswitches(args *ecs.DescribeVSwitchesArgs) (vswitches []ecs.VSwitchSetType, err error) { - vsws, _, err := client.ecsconn.DescribeVSwitches(args) - if err != nil { - if notFoundError(err) { - return nil, nil - } - return nil, err - } - - return vsws, nil -} - -func (client *AliyunClient) QueryVswitchById(vpcId, vswitchId string) (vsw *ecs.VSwitchSetType, err error) { - args := &ecs.DescribeVSwitchesArgs{ - VpcId: vpcId, - VSwitchId: vswitchId, - } - vsws, err := client.QueryVswitches(args) - if err != nil { - return nil, err - } - - if len(vsws) == 0 { - return nil, nil - } - - return &vsws[0], nil -} - -func (client *AliyunClient) QueryRouteTables(args *ecs.DescribeRouteTablesArgs) (routeTables []ecs.RouteTableSetType, err error) { - rts, _, err := 
client.ecsconn.DescribeRouteTables(args) - if err != nil { - return nil, err - } - - return rts, nil -} - -func (client *AliyunClient) QueryRouteTableById(routeTableId string) (rt *ecs.RouteTableSetType, err error) { - args := &ecs.DescribeRouteTablesArgs{ - RouteTableId: routeTableId, - } - rts, err := client.QueryRouteTables(args) - if err != nil { - return nil, err - } - - if len(rts) == 0 { - return nil, &common.Error{ErrorResponse: common.ErrorResponse{Message: Notfound}} - } - - return &rts[0], nil -} - -func (client *AliyunClient) QueryRouteEntry(routeTableId, cidrBlock, nextHopType, nextHopId string) (rn *ecs.RouteEntrySetType, err error) { - rt, errs := client.QueryRouteTableById(routeTableId) - if errs != nil { - return nil, errs - } - - for _, e := range rt.RouteEntrys.RouteEntry { - if strings.ToLower(string(e.DestinationCidrBlock)) == cidrBlock { - return &e, nil - } - } - return nil, GetNotFoundErrorFromString("Vpc router entry not found") -} - -func (client *AliyunClient) GetVpcIdByVSwitchId(vswitchId string) (vpcId string, err error) { - - vs, _, err := client.ecsconn.DescribeVpcs(&ecs.DescribeVpcsArgs{ - RegionId: client.Region, - }) - if err != nil { - return "", err - } - - for _, v := range vs { - for _, sw := range v.VSwitchIds.VSwitchId { - if sw == vswitchId { - return v.VpcId, nil - } - } - } - - return "", &common.Error{ErrorResponse: common.ErrorResponse{Message: Notfound}} -} diff --git a/builtin/providers/alicloud/tags.go b/builtin/providers/alicloud/tags.go deleted file mode 100644 index a2d906ab9..000000000 --- a/builtin/providers/alicloud/tags.go +++ /dev/null @@ -1,126 +0,0 @@ -package alicloud - -import ( - "fmt" - "log" - - "github.com/denverdino/aliyungo/ecs" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func String(v string) *string { - return &v -} - -// tagsSchema returns the schema to use for tags. 
-// -func tagsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - //Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - } -} - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTags(client *AliyunClient, resourceType ecs.TagResourceType, d *schema.ResourceData) error { - - conn := client.ecsconn - - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTags(tagsFromMap(o), tagsFromMap(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - err := RemoveTags(conn, &RemoveTagsArgs{ - RegionId: client.Region, - ResourceId: d.Id(), - ResourceType: resourceType, - Tag: remove, - }) - if err != nil { - return fmt.Errorf("Remove tags got error: %s", err) - } - } - - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - err := AddTags(conn, &AddTagsArgs{ - RegionId: client.Region, - ResourceId: d.Id(), - ResourceType: resourceType, - Tag: create, - }) - if err != nil { - return fmt.Errorf("Creating tags got error: %s", err) - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTags(oldTags, newTags []Tag) ([]Tag, []Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[t.Key] = t.Value - } - - // Build the list of what to remove - var remove []Tag - for _, t := range oldTags { - old, ok := create[t.Key] - if !ok || old != t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMap(create), remove -} - -// tagsFromMap returns the tags for the given map of data. 
-func tagsFromMap(m map[string]interface{}) []Tag { - result := make([]Tag, 0, len(m)) - for k, v := range m { - result = append(result, Tag{ - Key: k, - Value: v.(string), - }) - } - - return result -} - -func tagsToMap(tags []ecs.TagItemType) map[string]string { - result := make(map[string]string) - for _, t := range tags { - result[t.TagKey] = t.TagValue - } - - return result -} - -func tagsToString(tags []ecs.TagItemType) string { - result := make([]string, 0, len(tags)) - - for _, tag := range tags { - ecsTags := ecs.TagItemType{ - TagKey: tag.TagKey, - TagValue: tag.TagValue, - } - result = append(result, ecsTags.TagKey+":"+ecsTags.TagValue) - } - - return strings.Join(result, ",") -} diff --git a/builtin/providers/alicloud/validators.go b/builtin/providers/alicloud/validators.go deleted file mode 100644 index 4c3c82f3e..000000000 --- a/builtin/providers/alicloud/validators.go +++ /dev/null @@ -1,578 +0,0 @@ -package alicloud - -import ( - "fmt" - "net" - "strconv" - "strings" - - "github.com/denverdino/aliyungo/common" - "github.com/denverdino/aliyungo/ecs" - "github.com/denverdino/aliyungo/slb" - "github.com/hashicorp/terraform/helper/schema" - "regexp" -) - -// common -func validateInstancePort(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 65535 { - errors = append(errors, fmt.Errorf( - "%q must be a valid port between 1 and 65535", - k)) - return - } - return -} - -func validateInstanceProtocol(v interface{}, k string) (ws []string, errors []error) { - protocol := v.(string) - if !isProtocolValid(protocol) { - errors = append(errors, fmt.Errorf( - "%q is an invalid value. 
Valid values are either http, https, tcp or udp", - k)) - return - } - return -} - -// ecs -func validateDiskCategory(v interface{}, k string) (ws []string, errors []error) { - category := ecs.DiskCategory(v.(string)) - if category != ecs.DiskCategoryCloud && category != ecs.DiskCategoryCloudEfficiency && category != ecs.DiskCategoryCloudSSD { - errors = append(errors, fmt.Errorf("%s must be one of %s %s %s", k, ecs.DiskCategoryCloud, ecs.DiskCategoryCloudEfficiency, ecs.DiskCategoryCloudSSD)) - } - - return -} - -func validateInstanceName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k)) - } - - if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { - errors = append(errors, fmt.Errorf("%s cannot starts with http:// or https://", k)) - } - - return -} - -func validateInstanceDescription(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 256 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k)) - - } - return -} - -func validateDiskName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if value == "" { - return - } - - if len(value) < 2 || len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k)) - } - - if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { - errors = append(errors, fmt.Errorf("%s cannot starts with http:// or https://", k)) - } - - return -} - -func validateDiskDescription(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 256 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k)) - - } - return -} - -//security group -func validateSecurityGroupName(v interface{}, k string) (ws 
[]string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k)) - } - - if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { - errors = append(errors, fmt.Errorf("%s cannot starts with http:// or https://", k)) - } - - return -} - -func validateSecurityGroupDescription(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 || len(value) > 256 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k)) - - } - return -} - -func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []error) { - rt := GroupRuleDirection(v.(string)) - if rt != GroupRuleIngress && rt != GroupRuleEgress { - errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleIngress, GroupRuleEgress)) - } - - return -} - -func validateSecurityRuleIpProtocol(v interface{}, k string) (ws []string, errors []error) { - pt := GroupRuleIpProtocol(v.(string)) - if pt != GroupRuleTcp && pt != GroupRuleUdp && pt != GroupRuleIcmp && pt != GroupRuleGre && pt != GroupRuleAll { - errors = append(errors, fmt.Errorf("%s must be one of %s %s %s %s %s", k, - GroupRuleTcp, GroupRuleUdp, GroupRuleIcmp, GroupRuleGre, GroupRuleAll)) - } - - return -} - -func validateSecurityRuleNicType(v interface{}, k string) (ws []string, errors []error) { - pt := GroupRuleNicType(v.(string)) - if pt != GroupRuleInternet && pt != GroupRuleIntranet { - errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleInternet, GroupRuleIntranet)) - } - - return -} - -func validateSecurityRulePolicy(v interface{}, k string) (ws []string, errors []error) { - pt := GroupRulePolicy(v.(string)) - if pt != GroupRulePolicyAccept && pt != GroupRulePolicyDrop { - errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRulePolicyAccept, GroupRulePolicyDrop)) - } - - return -} - -func 
validateSecurityPriority(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 100 { - errors = append(errors, fmt.Errorf( - "%q must be a valid authorization policy priority between 1 and 100", - k)) - return - } - return -} - -// validateCIDRNetworkAddress ensures that the string value is a valid CIDR that -// represents a network address - it adds an error otherwise -func validateCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, ipnet, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q must contain a valid CIDR, got error parsing: %s", k, err)) - return - } - - if ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf( - "%q must contain a valid network CIDR, expected %q, got %q", - k, ipnet, value)) - } - - return -} - -func validateRouteEntryNextHopType(v interface{}, k string) (ws []string, errors []error) { - nht := ecs.NextHopType(v.(string)) - if nht != ecs.NextHopIntance && nht != ecs.NextHopTunnel { - errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, - ecs.NextHopIntance, ecs.NextHopTunnel)) - } - - return -} - -func validateSwitchCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, ipnet, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q must contain a valid CIDR, got error parsing: %s", k, err)) - return - } - - if ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf( - "%q must contain a valid network CIDR, expected %q, got %q", - k, ipnet, value)) - return - } - - mark, _ := strconv.Atoi(strings.Split(ipnet.String(), "/")[1]) - if mark < 16 || mark > 29 { - errors = append(errors, fmt.Errorf( - "%q must contain a network CIDR which mark between 16 and 29", - k)) - } - - return -} - -// validateIoOptimized ensures that the string value is a valid IoOptimized 
that -// represents a IoOptimized - it adds an error otherwise -func validateIoOptimized(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - ioOptimized := ecs.IoOptimized(value) - if ioOptimized != ecs.IoOptimizedNone && - ioOptimized != ecs.IoOptimizedOptimized { - errors = append(errors, fmt.Errorf( - "%q must contain a valid IoOptimized, expected %s or %s, got %q", - k, ecs.IoOptimizedNone, ecs.IoOptimizedOptimized, ioOptimized)) - } - } - - return -} - -// validateInstanceNetworkType ensures that the string value is a classic or vpc -func validateInstanceNetworkType(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - network := InstanceNetWork(value) - if network != ClassicNet && - network != VpcNet { - errors = append(errors, fmt.Errorf( - "%q must contain a valid InstanceNetworkType, expected %s or %s, go %q", - k, ClassicNet, VpcNet, network)) - } - } - return -} - -func validateInstanceChargeType(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - chargeType := common.InstanceChargeType(value) - if chargeType != common.PrePaid && - chargeType != common.PostPaid { - errors = append(errors, fmt.Errorf( - "%q must contain a valid InstanceChargeType, expected %s or %s, got %q", - k, common.PrePaid, common.PostPaid, chargeType)) - } - } - - return -} - -func validateInternetChargeType(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - chargeType := common.InternetChargeType(value) - if chargeType != common.PayByBandwidth && - chargeType != common.PayByTraffic { - errors = append(errors, fmt.Errorf( - "%q must contain a valid InstanceChargeType, expected %s or %s, got %q", - k, common.PayByBandwidth, common.PayByTraffic, chargeType)) - } - } - - return -} - -func validateInternetMaxBandWidthOut(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if 
value < 0 || value > 100 { - errors = append(errors, fmt.Errorf( - "%q must be a valid internet bandwidth out between 0 and 100", - k)) - return - } - return -} - -// SLB -func validateSlbName(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - if len(value) < 1 || len(value) > 80 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer name characters between 1 and 80", - k)) - return - } - } - - return -} - -func validateSlbInternetChargeType(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - chargeType := common.InternetChargeType(value) - - if chargeType != "paybybandwidth" && - chargeType != "paybytraffic" { - errors = append(errors, fmt.Errorf( - "%q must contain a valid InstanceChargeType, expected %s or %s, got %q", - k, "paybybandwidth", "paybytraffic", value)) - } - } - - return -} - -func validateSlbBandwidth(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 1000 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer bandwidth between 1 and 1000", - k)) - return - } - return -} - -func validateSlbListenerBandwidth(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if (value < 1 || value > 1000) && value != -1 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer bandwidth between 1 and 1000 or -1", - k)) - return - } - return -} - -func validateSlbListenerScheduler(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - scheduler := slb.SchedulerType(value) - - if scheduler != "wrr" && scheduler != "wlc" { - errors = append(errors, fmt.Errorf( - "%q must contain a valid SchedulerType, expected %s or %s, got %q", - k, "wrr", "wlc", value)) - } - } - - return -} - -func validateSlbListenerCookie(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value 
!= "" { - if len(value) < 1 || len(value) > 200 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 200 characters", k)) - } - } - return -} - -func validateSlbListenerCookieTimeout(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 0 || value > 86400 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer cookie timeout between 0 and 86400", - k)) - return - } - return -} - -func validateSlbListenerPersistenceTimeout(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 0 || value > 3600 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer persistence timeout between 0 and 86400", - k)) - return - } - return -} - -func validateSlbListenerHealthCheckDomain(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - //the len add "$_ip",so to max is 84 - if len(value) < 1 || len(value) > 84 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 84 characters", k)) - } - } - return -} - -func validateSlbListenerHealthCheckUri(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - if len(value) < 1 || len(value) > 80 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) - } - } - return -} - -func validateSlbListenerHealthCheckConnectPort(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 65535 { - if value != -520 { - errors = append(errors, fmt.Errorf( - "%q must be a valid load balancer health check connect port between 1 and 65535 or -520", - k)) - return - } - - } - return -} - -func validateDBBackupPeriod(v interface{}, k string) (ws []string, errors []error) { - days := []string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"} - value := v.(string) - exist := false - for _, d := range days { - if value == d { - exist = true - break - 
} - } - if !exist { - errors = append(errors, fmt.Errorf( - "%q must contain a valid backup period value should in array %#v, got %q", - k, days, value)) - } - - return -} - -func validateAllowedStringValue(ss []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - existed := false - for _, s := range ss { - if s == value { - existed = true - break - } - } - if !existed { - errors = append(errors, fmt.Errorf( - "%q must contain a valid string value should in array %#v, got %q", - k, ss, value)) - } - return - - } -} - -func validateAllowedSplitStringValue(ss []string, splitStr string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - existed := false - tsList := strings.Split(value, splitStr) - - for _, ts := range tsList { - existed = false - for _, s := range ss { - if ts == s { - existed = true - break - } - } - } - if !existed { - errors = append(errors, fmt.Errorf( - "%q must contain a valid string value should in %#v, got %q", - k, ss, value)) - } - return - - } -} - -func validateAllowedIntValue(is []int) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - existed := false - for _, i := range is { - if i == value { - existed = true - break - } - } - if !existed { - errors = append(errors, fmt.Errorf( - "%q must contain a valid int value should in array %#v, got %q", - k, is, value)) - } - return - - } -} - -func validateIntegerInRange(min, max int) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < min { - errors = append(errors, fmt.Errorf( - "%q cannot be lower than %d: %d", k, min, value)) - } - if value > max { - errors = append(errors, fmt.Errorf( - "%q cannot be higher than %d: %d", k, max, value)) - } - return - } -} - -//data source validate func 
-//data_source_alicloud_image -func validateNameRegex(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if _, err := regexp.Compile(value); err != nil { - errors = append(errors, fmt.Errorf( - "%q contains an invalid regular expression: %s", - k, err)) - } - return -} - -func validateImageOwners(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - owners := ecs.ImageOwnerAlias(value) - if owners != ecs.ImageOwnerSystem && - owners != ecs.ImageOwnerSelf && - owners != ecs.ImageOwnerOthers && - owners != ecs.ImageOwnerMarketplace && - owners != ecs.ImageOwnerDefault { - errors = append(errors, fmt.Errorf( - "%q must contain a valid Image owner , expected %s, %s, %s, %s or %s, got %q", - k, ecs.ImageOwnerSystem, ecs.ImageOwnerSelf, ecs.ImageOwnerOthers, ecs.ImageOwnerMarketplace, ecs.ImageOwnerDefault, owners)) - } - } - return -} - -func validateRegion(v interface{}, k string) (ws []string, errors []error) { - if value := v.(string); value != "" { - region := common.Region(value) - var valid string - for _, re := range common.ValidRegions { - if region == re { - return - } - valid = valid + ", " + string(re) - } - errors = append(errors, fmt.Errorf( - "%q must contain a valid Region ID , expected %#v, got %q", - k, valid, value)) - - } - return -} - -func validateForwardPort(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "any" { - valueConv, err := strconv.Atoi(value) - if err != nil || valueConv < 1 || valueConv > 65535 { - errors = append(errors, fmt.Errorf("%q must be a valid port between 1 and 65535 or any ", k)) - } - } - return -} diff --git a/builtin/providers/alicloud/validators_test.go b/builtin/providers/alicloud/validators_test.go deleted file mode 100644 index 3160c496c..000000000 --- a/builtin/providers/alicloud/validators_test.go +++ /dev/null @@ -1,502 +0,0 @@ -package alicloud - -import "testing" - -func 
TestValidateInstancePort(t *testing.T) { - validPorts := []int{1, 22, 80, 100, 8088, 65535} - for _, v := range validPorts { - _, errors := validateInstancePort(v, "instance_port") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance port number between 1 and 65535: %q", v, errors) - } - } - - invalidPorts := []int{-10, -1, 0} - for _, v := range invalidPorts { - _, errors := validateInstancePort(v, "instance_port") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance port number", v) - } - } -} - -func TestValidateInstanceProtocol(t *testing.T) { - validProtocols := []string{"http", "tcp", "https", "udp"} - for _, v := range validProtocols { - _, errors := validateInstanceProtocol(v, "instance_protocol") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance protocol: %q", v, errors) - } - } - - invalidProtocols := []string{"HTTP", "abc", "ecmp", "dubbo"} - for _, v := range invalidProtocols { - _, errors := validateInstanceProtocol(v, "instance_protocol") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance protocol", v) - } - } -} - -func TestValidateInstanceDiskCategory(t *testing.T) { - validDiskCategory := []string{"cloud", "cloud_efficiency", "cloud_ssd"} - for _, v := range validDiskCategory { - _, errors := validateDiskCategory(v, "instance_disk_category") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance disk category: %q", v, errors) - } - } - - invalidDiskCategory := []string{"all", "ephemeral", "ephemeral_ssd", "ALL", "efficiency"} - for _, v := range invalidDiskCategory { - _, errors := validateDiskCategory(v, "instance_disk_category") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance disk category", v) - } - } -} - -func TestValidateInstanceName(t *testing.T) { - validInstanceName := []string{"hi", "hi http://", "some word + any word &", "http", "中文"} - for _, v := range validInstanceName { - _, errors := validateInstanceName(v, "instance_name") - if 
len(errors) != 0 { - t.Fatalf("%q should be a valid instance name: %q", v, errors) - } - } - - invalidInstanceName := []string{"y", "http://", "https://", "+"} - for _, v := range invalidInstanceName { - _, errors := validateInstanceName(v, "instance_name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance name", v) - } - } -} - -func TestValidateInstanceDescription(t *testing.T) { - validInstanceDescription := []string{"hi", "hi http://", "some word + any word &", "http://", "中文"} - for _, v := range validInstanceDescription { - _, errors := validateInstanceDescription(v, "instance_description") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance description: %q", v, errors) - } - } - - invalidvalidInstanceDescription := []string{"y", ""} - for _, v := range invalidvalidInstanceDescription { - _, errors := validateInstanceName(v, "instance_description") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance description", v) - } - } -} - -func TestValidateSecurityGroupName(t *testing.T) { - validSecurityGroupName := []string{"hi", "hi http://", "some word + any word &", "http", "中文", "12345"} - for _, v := range validSecurityGroupName { - _, errors := validateSecurityGroupName(v, "security_group_name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid security group name: %q", v, errors) - } - } - - invalidSecurityGroupName := []string{"y", "http://", "https://", "+"} - for _, v := range invalidSecurityGroupName { - _, errors := validateSecurityGroupName(v, "security_group_name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid security group name", v) - } - } -} - -func TestValidateSecurityGroupDescription(t *testing.T) { - validSecurityGroupDescription := []string{"hi", "hi http://", "some word + any word &", "http://", "中文"} - for _, v := range validSecurityGroupDescription { - _, errors := validateSecurityGroupDescription(v, "security_group_description") - if len(errors) != 0 { - t.Fatalf("%q 
should be a valid security group description: %q", v, errors) - } - } - - invalidSecurityGroupDescription := []string{"y", ""} - for _, v := range invalidSecurityGroupDescription { - _, errors := validateSecurityGroupDescription(v, "security_group_description") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid security group description", v) - } - } -} - -func TestValidateSecurityRuleType(t *testing.T) { - validSecurityRuleType := []string{"ingress", "egress"} - for _, v := range validSecurityRuleType { - _, errors := validateSecurityRuleType(v, "security_rule_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid security rule type: %q", v, errors) - } - } - - invalidSecurityRuleType := []string{"y", "gress", "in", "out"} - for _, v := range invalidSecurityRuleType { - _, errors := validateSecurityRuleType(v, "security_rule_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid security rule type", v) - } - } -} - -func TestValidateSecurityRuleIpProtocol(t *testing.T) { - validIpProtocol := []string{"tcp", "udp", "icmp", "gre", "all"} - for _, v := range validIpProtocol { - _, errors := validateSecurityRuleIpProtocol(v, "security_rule_ip_protocol") - if len(errors) != 0 { - t.Fatalf("%q should be a valid ip protocol: %q", v, errors) - } - } - - invalidIpProtocol := []string{"y", "ecmp", "http", "https"} - for _, v := range invalidIpProtocol { - _, errors := validateSecurityRuleIpProtocol(v, "security_rule_ip_protocol") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid ip protocol", v) - } - } -} - -func TestValidateSecurityRuleNicType(t *testing.T) { - validRuleNicType := []string{"intranet", "internet"} - for _, v := range validRuleNicType { - _, errors := validateSecurityRuleNicType(v, "security_rule_nic_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid nic type: %q", v, errors) - } - } - - invalidRuleNicType := []string{"inter", "ecmp", "http", "https"} - for _, v := range invalidRuleNicType { - _, errors 
:= validateSecurityRuleNicType(v, "security_rule_nic_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid nic type", v) - } - } -} - -func TestValidateSecurityRulePolicy(t *testing.T) { - validRulePolicy := []string{"accept", "drop"} - for _, v := range validRulePolicy { - _, errors := validateSecurityRulePolicy(v, "security_rule_policy") - if len(errors) != 0 { - t.Fatalf("%q should be a valid security rule policy: %q", v, errors) - } - } - - invalidRulePolicy := []string{"inter", "ecmp", "http", "https"} - for _, v := range invalidRulePolicy { - _, errors := validateSecurityRulePolicy(v, "security_rule_policy") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid security rule policy", v) - } - } -} - -func TestValidateSecurityRulePriority(t *testing.T) { - validPriority := []int{1, 50, 100} - for _, v := range validPriority { - _, errors := validateSecurityPriority(v, "security_rule_priority") - if len(errors) != 0 { - t.Fatalf("%q should be a valid security rule priority: %q", v, errors) - } - } - - invalidPriority := []int{-1, 0, 101} - for _, v := range invalidPriority { - _, errors := validateSecurityPriority(v, "security_rule_priority") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid security rule priority", v) - } - } -} - -func TestValidateCIDRNetworkAddress(t *testing.T) { - validCIDRNetworkAddress := []string{"192.168.10.0/24", "0.0.0.0/0", "10.121.10.0/24"} - for _, v := range validCIDRNetworkAddress { - _, errors := validateCIDRNetworkAddress(v, "cidr_network_address") - if len(errors) != 0 { - t.Fatalf("%q should be a valid cidr network address: %q", v, errors) - } - } - - invalidCIDRNetworkAddress := []string{"1.2.3.4", "0x38732/21"} - for _, v := range invalidCIDRNetworkAddress { - _, errors := validateCIDRNetworkAddress(v, "cidr_network_address") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid cidr network address", v) - } - } -} - -func TestValidateRouteEntryNextHopType(t *testing.T) { - 
validNexthopType := []string{"Instance", "Tunnel"} - for _, v := range validNexthopType { - _, errors := validateRouteEntryNextHopType(v, "route_entry_nexthop_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid route entry nexthop type: %q", v, errors) - } - } - - invalidNexthopType := []string{"ri", "vpc"} - for _, v := range invalidNexthopType { - _, errors := validateRouteEntryNextHopType(v, "route_entry_nexthop_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid route entry nexthop type", v) - } - } -} - -func TestValidateSwitchCIDRNetworkAddress(t *testing.T) { - validSwitchCIDRNetworkAddress := []string{"192.168.10.0/24", "0.0.0.0/16", "127.0.0.0/29", "10.121.10.0/24"} - for _, v := range validSwitchCIDRNetworkAddress { - _, errors := validateSwitchCIDRNetworkAddress(v, "switch_cidr_network_address") - if len(errors) != 0 { - t.Fatalf("%q should be a valid switch cidr network address: %q", v, errors) - } - } - - invalidSwitchCIDRNetworkAddress := []string{"1.2.3.4", "0x38732/21", "10.121.10.0/15", "10.121.10.0/30", "256.121.10.0/22"} - for _, v := range invalidSwitchCIDRNetworkAddress { - _, errors := validateSwitchCIDRNetworkAddress(v, "switch_cidr_network_address") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid switch cidr network address", v) - } - } -} - -func TestValidateIoOptimized(t *testing.T) { - validIoOptimized := []string{"", "none", "optimized"} - for _, v := range validIoOptimized { - _, errors := validateIoOptimized(v, "ioOptimized") - if len(errors) != 0 { - t.Fatalf("%q should be a valid IoOptimized value: %q", v, errors) - } - } - - invalidIoOptimized := []string{"true", "ioOptimized"} - for _, v := range invalidIoOptimized { - _, errors := validateIoOptimized(v, "ioOptimized") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid IoOptimized value", v) - } - } -} - -func TestValidateInstanceNetworkType(t *testing.T) { - validInstanceNetworkType := []string{"", "classic", "vpc"} - for _, v := 
range validInstanceNetworkType { - _, errors := validateInstanceNetworkType(v, "instance_network_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance network type value: %q", v, errors) - } - } - - invalidInstanceNetworkType := []string{"Classic", "vswitch", "123"} - for _, v := range invalidInstanceNetworkType { - _, errors := validateInstanceNetworkType(v, "instance_network_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance network type value", v) - } - } -} - -func TestValidateInstanceChargeType(t *testing.T) { - validInstanceChargeType := []string{"", "PrePaid", "PostPaid"} - for _, v := range validInstanceChargeType { - _, errors := validateInstanceChargeType(v, "instance_charge_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid instance charge type value: %q", v, errors) - } - } - - invalidInstanceChargeType := []string{"prepay", "yearly", "123"} - for _, v := range invalidInstanceChargeType { - _, errors := validateInstanceChargeType(v, "instance_charge_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid instance charge type value", v) - } - } -} - -func TestValidateInternetChargeType(t *testing.T) { - validInternetChargeType := []string{"", "PayByBandwidth", "PayByTraffic"} - for _, v := range validInternetChargeType { - _, errors := validateInternetChargeType(v, "internet_charge_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid internet charge type value: %q", v, errors) - } - } - - invalidInternetChargeType := []string{"paybybandwidth", "paybytraffic", "123"} - for _, v := range invalidInternetChargeType { - _, errors := validateInternetChargeType(v, "internet_charge_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid internet charge type value", v) - } - } -} - -func TestValidateInternetMaxBandWidthOut(t *testing.T) { - validInternetMaxBandWidthOut := []int{1, 22, 100} - for _, v := range validInternetMaxBandWidthOut { - _, errors := 
validateInternetMaxBandWidthOut(v, "internet_max_bandwidth_out") - if len(errors) != 0 { - t.Fatalf("%q should be a valid internet max bandwidth out value: %q", v, errors) - } - } - - invalidInternetMaxBandWidthOut := []int{-2, 101, 123} - for _, v := range invalidInternetMaxBandWidthOut { - _, errors := validateInternetMaxBandWidthOut(v, "internet_max_bandwidth_out") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid internet max bandwidth out value", v) - } - } -} - -func TestValidateSlbName(t *testing.T) { - validSlbName := []string{"h", "http://", "123", "hello, aliyun! "} - for _, v := range validSlbName { - _, errors := validateSlbName(v, "slb_name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid slb name: %q", v, errors) - } - } - - // todo: add invalid case -} - -func TestValidateSlbInternetChargeType(t *testing.T) { - validSlbInternetChargeType := []string{"paybybandwidth", "paybytraffic"} - for _, v := range validSlbInternetChargeType { - _, errors := validateSlbInternetChargeType(v, "slb_internet_charge_type") - if len(errors) != 0 { - t.Fatalf("%q should be a valid slb internet charge type value: %q", v, errors) - } - } - - invalidSlbInternetChargeType := []string{"PayByBandwidth", "PayByTraffic"} - for _, v := range invalidSlbInternetChargeType { - _, errors := validateSlbInternetChargeType(v, "slb_internet_charge_type") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid slb internet charge type value", v) - } - } -} - -func TestValidateSlbBandwidth(t *testing.T) { - validSlbBandwidth := []int{1, 22, 1000} - for _, v := range validSlbBandwidth { - _, errors := validateSlbBandwidth(v, "slb_bandwidth") - if len(errors) != 0 { - t.Fatalf("%q should be a valid slb bandwidth value: %q", v, errors) - } - } - - invalidSlbBandwidth := []int{-2, 0, 1001} - for _, v := range invalidSlbBandwidth { - _, errors := validateSlbBandwidth(v, "slb_bandwidth") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid slb bandwidth value", v) 
- } - } -} - -func TestValidateSlbListenerBandwidth(t *testing.T) { - validSlbListenerBandwidth := []int{-1, 1, 22, 1000} - for _, v := range validSlbListenerBandwidth { - _, errors := validateSlbListenerBandwidth(v, "slb_bandwidth") - if len(errors) != 0 { - t.Fatalf("%q should be a valid slb listener bandwidth value: %q", v, errors) - } - } - - invalidSlbListenerBandwidth := []int{-2, 0, -10, 1001} - for _, v := range invalidSlbListenerBandwidth { - _, errors := validateSlbListenerBandwidth(v, "slb_bandwidth") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid slb listener bandwidth value", v) - } - } -} - -func TestValidateAllowedStringValue(t *testing.T) { - exceptValues := []string{"aliyun", "alicloud", "alibaba"} - validValues := []string{"aliyun"} - for _, v := range validValues { - _, errors := validateAllowedStringValue(exceptValues)(v, "allowvalue") - if len(errors) != 0 { - t.Fatalf("%q should be a valid value in %#v: %q", v, exceptValues, errors) - } - } - - invalidValues := []string{"ali", "alidata", "terraform"} - for _, v := range invalidValues { - _, errors := validateAllowedStringValue(exceptValues)(v, "allowvalue") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid value", v) - } - } -} - -func TestValidateAllowedStringSplitValue(t *testing.T) { - exceptValues := []string{"aliyun", "alicloud", "alibaba"} - validValues := "aliyun,alicloud" - _, errors := validateAllowedSplitStringValue(exceptValues, ",")(validValues, "allowvalue") - if len(errors) != 0 { - t.Fatalf("%q should be a valid value in %#v: %q", validValues, exceptValues, errors) - } - - invalidValues := "ali,alidata" - _, invalidErr := validateAllowedSplitStringValue(exceptValues, ",")(invalidValues, "allowvalue") - if len(invalidErr) == 0 { - t.Fatalf("%q should be an invalid value", invalidValues) - } -} - -func TestValidateAllowedIntValue(t *testing.T) { - exceptValues := []int{1, 3, 5, 6} - validValues := []int{1, 3, 5, 6} - for _, v := range validValues { - _, 
errors := validateAllowedIntValue(exceptValues)(v, "allowvalue") - if len(errors) != 0 { - t.Fatalf("%q should be a valid value in %#v: %q", v, exceptValues, errors) - } - } - - invalidValues := []int{0, 7, 10} - for _, v := range invalidValues { - _, errors := validateAllowedIntValue(exceptValues)(v, "allowvalue") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid value", v) - } - } -} - -func TestValidateIntegerInRange(t *testing.T) { - validIntegers := []int{-259, 0, 1, 5, 999} - min := -259 - max := 999 - for _, v := range validIntegers { - _, errors := validateIntegerInRange(min, max)(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be an integer in range (%d, %d): %q", v, min, max, errors) - } - } - - invalidIntegers := []int{-260, -99999, 1000, 25678} - for _, v := range invalidIntegers { - _, errors := validateIntegerInRange(min, max)(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an integer outside range (%d, %d)", v, min, max) - } - } -} diff --git a/builtin/providers/archive/.gitignore b/builtin/providers/archive/.gitignore deleted file mode 100644 index c4c4ffc6a..000000000 --- a/builtin/providers/archive/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.zip diff --git a/builtin/providers/archive/archiver.go b/builtin/providers/archive/archiver.go deleted file mode 100644 index c216f4620..000000000 --- a/builtin/providers/archive/archiver.go +++ /dev/null @@ -1,48 +0,0 @@ -package archive - -import ( - "fmt" - "os" -) - -type Archiver interface { - ArchiveContent(content []byte, infilename string) error - ArchiveFile(infilename string) error - ArchiveDir(indirname string) error - ArchiveMultiple(content map[string][]byte) error -} - -type ArchiverBuilder func(filepath string) Archiver - -var archiverBuilders = map[string]ArchiverBuilder{ - "zip": NewZipArchiver, -} - -func getArchiver(archiveType string, filepath string) Archiver { - if builder, ok := archiverBuilders[archiveType]; ok { - return builder(filepath) - } - return nil 
-} - -func assertValidFile(infilename string) (os.FileInfo, error) { - fi, err := os.Stat(infilename) - if err != nil && os.IsNotExist(err) { - return fi, fmt.Errorf("could not archive missing file: %s", infilename) - } - return fi, err -} - -func assertValidDir(indirname string) (os.FileInfo, error) { - fi, err := os.Stat(indirname) - if err != nil { - if os.IsNotExist(err) { - return fi, fmt.Errorf("could not archive missing directory: %s", indirname) - } - return fi, err - } - if !fi.IsDir() { - return fi, fmt.Errorf("could not archive directory that is a file: %s", indirname) - } - return fi, nil -} diff --git a/builtin/providers/archive/data_source_archive_file.go b/builtin/providers/archive/data_source_archive_file.go deleted file mode 100644 index 218629cba..000000000 --- a/builtin/providers/archive/data_source_archive_file.go +++ /dev/null @@ -1,205 +0,0 @@ -package archive - -import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceFile() *schema.Resource { - return &schema.Resource{ - Read: dataSourceFileRead, - - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "source": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "filename": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - ConflictsWith: []string{"source_file", "source_dir", "source_content", "source_content_filename"}, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["filename"].(string))) - 
buf.WriteString(fmt.Sprintf("%s-", m["content"].(string))) - return hashcode.String(buf.String()) - }, - }, - "source_content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_file", "source_dir"}, - }, - "source_content_filename": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_file", "source_dir"}, - }, - "source_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_content", "source_content_filename", "source_dir"}, - }, - "source_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_content", "source_content_filename", "source_file"}, - }, - "output_path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "output_size": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - ForceNew: true, - }, - "output_sha": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: true, - Description: "SHA1 checksum of output file", - }, - "output_base64sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: true, - Description: "Base64 Encoded SHA256 checksum of output file", - }, - "output_md5": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: true, - Description: "MD5 of output file", - }, - }, - } -} - -func dataSourceFileRead(d *schema.ResourceData, meta interface{}) error { - outputPath := d.Get("output_path").(string) - - outputDirectory := path.Dir(outputPath) - if outputDirectory != "" { - if _, err := os.Stat(outputDirectory); err != nil { - if err := os.MkdirAll(outputDirectory, 0755); err != nil { - return err - } - } - } - - if err := archive(d); err != nil { - return err - } - - // Generate archived file stats - fi, err := os.Stat(outputPath) - if err != nil { - return err - } - - sha1, base64sha256, md5, err := 
genFileShas(outputPath) - if err != nil { - - return fmt.Errorf("could not generate file checksum sha256: %s", err) - } - d.Set("output_sha", sha1) - d.Set("output_base64sha256", base64sha256) - d.Set("output_md5", md5) - - d.Set("output_size", fi.Size()) - d.SetId(d.Get("output_sha").(string)) - - return nil -} - -func archive(d *schema.ResourceData) error { - archiveType := d.Get("type").(string) - outputPath := d.Get("output_path").(string) - - archiver := getArchiver(archiveType, outputPath) - if archiver == nil { - return fmt.Errorf("archive type not supported: %s", archiveType) - } - - if dir, ok := d.GetOk("source_dir"); ok { - if err := archiver.ArchiveDir(dir.(string)); err != nil { - return fmt.Errorf("error archiving directory: %s", err) - } - } else if file, ok := d.GetOk("source_file"); ok { - if err := archiver.ArchiveFile(file.(string)); err != nil { - return fmt.Errorf("error archiving file: %s", err) - } - } else if filename, ok := d.GetOk("source_content_filename"); ok { - content := d.Get("source_content").(string) - if err := archiver.ArchiveContent([]byte(content), filename.(string)); err != nil { - return fmt.Errorf("error archiving content: %s", err) - } - } else if v, ok := d.GetOk("source"); ok { - vL := v.(*schema.Set).List() - content := make(map[string][]byte) - for _, v := range vL { - src := v.(map[string]interface{}) - content[src["filename"].(string)] = []byte(src["content"].(string)) - } - if err := archiver.ArchiveMultiple(content); err != nil { - return fmt.Errorf("error archiving content: %s", err) - } - } else { - return fmt.Errorf("one of 'source_dir', 'source_file', 'source_content_filename' must be specified") - } - return nil -} - -func genFileShas(filename string) (string, string, string, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return "", "", "", fmt.Errorf("could not compute file '%s' checksum: %s", filename, err) - } - h := sha1.New() - h.Write([]byte(data)) - sha1 := 
hex.EncodeToString(h.Sum(nil)) - - h256 := sha256.New() - h256.Write([]byte(data)) - shaSum := h256.Sum(nil) - sha256base64 := base64.StdEncoding.EncodeToString(shaSum[:]) - - md5 := md5.New() - md5.Write([]byte(data)) - md5Sum := hex.EncodeToString(md5.Sum(nil)) - - return sha1, sha256base64, md5Sum, nil -} diff --git a/builtin/providers/archive/data_source_archive_file_test.go b/builtin/providers/archive/data_source_archive_file_test.go deleted file mode 100644 index e1ccc4705..000000000 --- a/builtin/providers/archive/data_source_archive_file_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive - -import ( - "fmt" - "os" - "regexp" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccArchiveFile_Basic(t *testing.T) { - var fileSize string - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: testAccArchiveFileContentConfig, - Check: r.ComposeTestCheckFunc( - testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize), - r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize), - - // We just check the hashes for syntax rather than exact - // content since we don't want to break if the archive - // library starts generating different bytes that are - // functionally equivalent. 
- r.TestMatchResourceAttr( - "data.archive_file.foo", "output_base64sha256", - regexp.MustCompile(`^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$`), - ), - r.TestMatchResourceAttr( - "data.archive_file.foo", "output_md5", regexp.MustCompile(`^[0-9a-f]{32}$`), - ), - r.TestMatchResourceAttr( - "data.archive_file.foo", "output_sha", regexp.MustCompile(`^[0-9a-f]{40}$`), - ), - ), - }, - r.TestStep{ - Config: testAccArchiveFileFileConfig, - Check: r.ComposeTestCheckFunc( - testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize), - r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize), - ), - }, - r.TestStep{ - Config: testAccArchiveFileDirConfig, - Check: r.ComposeTestCheckFunc( - testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize), - r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize), - ), - }, - r.TestStep{ - Config: testAccArchiveFileMultiConfig, - Check: r.ComposeTestCheckFunc( - testAccArchiveFileExists("zip_file_acc_test.zip", &fileSize), - r.TestCheckResourceAttrPtr("data.archive_file.foo", "output_size", &fileSize), - ), - }, - r.TestStep{ - Config: testAccArchiveFileOutputPath, - Check: r.ComposeTestCheckFunc( - testAccArchiveFileExists(fmt.Sprintf("%s/test.zip", tmpDir), &fileSize), - ), - }, - }, - }) -} - -func testAccArchiveFileExists(filename string, fileSize *string) r.TestCheckFunc { - return func(s *terraform.State) error { - *fileSize = "" - fi, err := os.Stat(filename) - if err != nil { - return err - } - *fileSize = fmt.Sprintf("%d", fi.Size()) - return nil - } -} - -var testAccArchiveFileContentConfig = ` -data "archive_file" "foo" { - type = "zip" - source_content = "This is some content" - source_content_filename = "content.txt" - output_path = "zip_file_acc_test.zip" -} -` - -var tmpDir = os.TempDir() + "/test" -var testAccArchiveFileOutputPath = fmt.Sprintf(` -data "archive_file" "foo" { - type = "zip" - source_content = "This is some content" - 
source_content_filename = "content.txt" - output_path = "%s/test.zip" -} -`, tmpDir) - -var testAccArchiveFileFileConfig = ` -data "archive_file" "foo" { - type = "zip" - source_file = "test-fixtures/test-file.txt" - output_path = "zip_file_acc_test.zip" -} -` - -var testAccArchiveFileDirConfig = ` -data "archive_file" "foo" { - type = "zip" - source_dir = "test-fixtures/test-dir" - output_path = "zip_file_acc_test.zip" -} -` - -var testAccArchiveFileMultiConfig = ` -data "archive_file" "foo" { - type = "zip" - source { - filename = "content.txt" - content = "This is some content" - } - output_path = "zip_file_acc_test.zip" -} -` diff --git a/builtin/providers/archive/provider.go b/builtin/providers/archive/provider.go deleted file mode 100644 index 195b9342a..000000000 --- a/builtin/providers/archive/provider.go +++ /dev/null @@ -1,20 +0,0 @@ -package archive - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ - "archive_file": dataSourceFile(), - }, - ResourcesMap: map[string]*schema.Resource{ - "archive_file": schema.DataSourceResourceShim( - "archive_file", - dataSourceFile(), - ), - }, - } -} diff --git a/builtin/providers/archive/provider_test.go b/builtin/providers/archive/provider_test.go deleted file mode 100644 index 13cd92e30..000000000 --- a/builtin/providers/archive/provider_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package archive - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testProviders = map[string]terraform.ResourceProvider{ - "archive": Provider(), -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/builtin/providers/archive/test-fixtures/test-dir/file1.txt 
b/builtin/providers/archive/test-fixtures/test-dir/file1.txt deleted file mode 100644 index 28bf5b1fb..000000000 --- a/builtin/providers/archive/test-fixtures/test-dir/file1.txt +++ /dev/null @@ -1 +0,0 @@ -This is file 1 \ No newline at end of file diff --git a/builtin/providers/archive/test-fixtures/test-dir/file2.txt b/builtin/providers/archive/test-fixtures/test-dir/file2.txt deleted file mode 100644 index 4419d52b7..000000000 --- a/builtin/providers/archive/test-fixtures/test-dir/file2.txt +++ /dev/null @@ -1 +0,0 @@ -This is file 2 \ No newline at end of file diff --git a/builtin/providers/archive/test-fixtures/test-dir/file3.txt b/builtin/providers/archive/test-fixtures/test-dir/file3.txt deleted file mode 100644 index 819cd4d48..000000000 --- a/builtin/providers/archive/test-fixtures/test-dir/file3.txt +++ /dev/null @@ -1 +0,0 @@ -This is file 3 \ No newline at end of file diff --git a/builtin/providers/archive/test-fixtures/test-file.txt b/builtin/providers/archive/test-fixtures/test-file.txt deleted file mode 100644 index 417be07f3..000000000 --- a/builtin/providers/archive/test-fixtures/test-file.txt +++ /dev/null @@ -1 +0,0 @@ -This is test content \ No newline at end of file diff --git a/builtin/providers/archive/zip_archiver.go b/builtin/providers/archive/zip_archiver.go deleted file mode 100644 index 0bbdd8825..000000000 --- a/builtin/providers/archive/zip_archiver.go +++ /dev/null @@ -1,136 +0,0 @@ -package archive - -import ( - "archive/zip" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" -) - -type ZipArchiver struct { - filepath string - filewriter *os.File - writer *zip.Writer -} - -func NewZipArchiver(filepath string) Archiver { - return &ZipArchiver{ - filepath: filepath, - } -} - -func (a *ZipArchiver) ArchiveContent(content []byte, infilename string) error { - if err := a.open(); err != nil { - return err - } - defer a.close() - - f, err := a.writer.Create(infilename) - if err != nil { - return err - } - - _, err = f.Write(content) - 
return err -} - -func (a *ZipArchiver) ArchiveFile(infilename string) error { - fi, err := assertValidFile(infilename) - if err != nil { - return err - } - - content, err := ioutil.ReadFile(infilename) - if err != nil { - return err - } - - return a.ArchiveContent(content, fi.Name()) -} - -func (a *ZipArchiver) ArchiveDir(indirname string) error { - _, err := assertValidDir(indirname) - if err != nil { - return err - } - - if err := a.open(); err != nil { - return err - } - defer a.close() - - return filepath.Walk(indirname, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - return nil - } - if err != nil { - return err - } - relname, err := filepath.Rel(indirname, path) - if err != nil { - return fmt.Errorf("error relativizing file for archival: %s", err) - } - f, err := a.writer.Create(relname) - if err != nil { - return fmt.Errorf("error creating file inside archive: %s", err) - } - content, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("error reading file for archival: %s", err) - } - _, err = f.Write(content) - return err - }) - -} - -func (a *ZipArchiver) ArchiveMultiple(content map[string][]byte) error { - if err := a.open(); err != nil { - return err - } - defer a.close() - - // Ensure files are processed in the same order so hashes don't change - keys := make([]string, len(content)) - i := 0 - for k := range content { - keys[i] = k - i++ - } - sort.Strings(keys) - - for _, filename := range keys { - f, err := a.writer.Create(filename) - if err != nil { - return err - } - _, err = f.Write(content[filename]) - if err != nil { - return err - } - } - return nil -} - -func (a *ZipArchiver) open() error { - f, err := os.Create(a.filepath) - if err != nil { - return err - } - a.filewriter = f - a.writer = zip.NewWriter(f) - return nil -} - -func (a *ZipArchiver) close() { - if a.writer != nil { - a.writer.Close() - a.writer = nil - } - if a.filewriter != nil { - a.filewriter.Close() - a.filewriter = nil - } -} diff 
--git a/builtin/providers/archive/zip_archiver_test.go b/builtin/providers/archive/zip_archiver_test.go deleted file mode 100644 index 34a272724..000000000 --- a/builtin/providers/archive/zip_archiver_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package archive - -import ( - "archive/zip" - "io/ioutil" - "testing" -) - -func TestZipArchiver_Content(t *testing.T) { - zipfilepath := "archive-content.zip" - archiver := NewZipArchiver(zipfilepath) - if err := archiver.ArchiveContent([]byte("This is some content"), "content.txt"); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - ensureContents(t, zipfilepath, map[string][]byte{ - "content.txt": []byte("This is some content"), - }) -} - -func TestZipArchiver_File(t *testing.T) { - zipfilepath := "archive-file.zip" - archiver := NewZipArchiver(zipfilepath) - if err := archiver.ArchiveFile("./test-fixtures/test-file.txt"); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - ensureContents(t, zipfilepath, map[string][]byte{ - "test-file.txt": []byte("This is test content"), - }) -} - -func TestZipArchiver_Dir(t *testing.T) { - zipfilepath := "archive-dir.zip" - archiver := NewZipArchiver(zipfilepath) - if err := archiver.ArchiveDir("./test-fixtures/test-dir"); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - ensureContents(t, zipfilepath, map[string][]byte{ - "file1.txt": []byte("This is file 1"), - "file2.txt": []byte("This is file 2"), - "file3.txt": []byte("This is file 3"), - }) -} - -func TestZipArchiver_Multiple(t *testing.T) { - zipfilepath := "archive-content.zip" - content := map[string][]byte{ - "file1.txt": []byte("This is file 1"), - "file2.txt": []byte("This is file 2"), - "file3.txt": []byte("This is file 3"), - } - - archiver := NewZipArchiver(zipfilepath) - if err := archiver.ArchiveMultiple(content); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - ensureContents(t, zipfilepath, content) - -} - -func ensureContents(t *testing.T, zipfilepath string, wants 
map[string][]byte) { - r, err := zip.OpenReader(zipfilepath) - if err != nil { - t.Fatalf("could not open zip file: %s", err) - } - defer r.Close() - - if len(r.File) != len(wants) { - t.Errorf("mismatched file count, got %d, want %d", len(r.File), len(wants)) - } - for _, cf := range r.File { - ensureContent(t, wants, cf) - } -} - -func ensureContent(t *testing.T, wants map[string][]byte, got *zip.File) { - want, ok := wants[got.Name] - if !ok { - t.Errorf("additional file in zip: %s", got.Name) - return - } - - r, err := got.Open() - if err != nil { - t.Errorf("could not open file: %s", err) - } - defer r.Close() - gotContentBytes, err := ioutil.ReadAll(r) - if err != nil { - t.Errorf("could not read file: %s", err) - } - - wantContent := string(want) - gotContent := string(gotContentBytes) - if gotContent != wantContent { - t.Errorf("mismatched content\ngot\n%s\nwant\n%s", gotContent, wantContent) - } -} diff --git a/builtin/providers/arukas/config.go b/builtin/providers/arukas/config.go deleted file mode 100644 index 330c4a76b..000000000 --- a/builtin/providers/arukas/config.go +++ /dev/null @@ -1,52 +0,0 @@ -package arukas - -import ( - API "github.com/arukasio/cli" - "os" - "time" -) - -const ( - JSONTokenParamName = "ARUKAS_JSON_API_TOKEN" - JSONSecretParamName = "ARUKAS_JSON_API_SECRET" - JSONUrlParamName = "ARUKAS_JSON_API_URL" - JSONDebugParamName = "ARUKAS_DEBUG" - JSONTimeoutParamName = "ARUKAS_TIMEOUT" -) - -type Config struct { - Token string - Secret string - URL string - Trace string - Timeout int -} - -func (c *Config) NewClient() (*ArukasClient, error) { - - os.Setenv(JSONTokenParamName, c.Token) - os.Setenv(JSONSecretParamName, c.Secret) - os.Setenv(JSONUrlParamName, c.URL) - os.Setenv(JSONDebugParamName, c.Trace) - - client, err := API.NewClient() - if err != nil { - return nil, err - } - client.UserAgent = "Terraform for Arukas" - - timeout := time.Duration(0) - if c.Timeout > 0 { - timeout = time.Duration(c.Timeout) * time.Second - } - - 
return &ArukasClient{ - Client: client, - Timeout: timeout, - }, nil -} - -type ArukasClient struct { - *API.Client - Timeout time.Duration -} diff --git a/builtin/providers/arukas/provider.go b/builtin/providers/arukas/provider.go deleted file mode 100644 index 81f4a3264..000000000 --- a/builtin/providers/arukas/provider.go +++ /dev/null @@ -1,59 +0,0 @@ -package arukas - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc(JSONTokenParamName, nil), - Description: "your Arukas APIKey(token)", - }, - "secret": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc(JSONSecretParamName, nil), - Description: "your Arukas APIKey(secret)", - }, - "api_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(JSONUrlParamName, "https://app.arukas.io/api/"), - Description: "default Arukas API url", - }, - "trace": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(JSONDebugParamName, ""), - }, - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc(JSONTimeoutParamName, "900"), - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "arukas_container": resourceArukasContainer(), - }, - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - - config := Config{ - Token: d.Get("token").(string), - Secret: d.Get("secret").(string), - URL: d.Get("api_url").(string), - Trace: d.Get("trace").(string), - Timeout: d.Get("timeout").(int), - } - - return config.NewClient() -} diff --git 
a/builtin/providers/arukas/provider_test.go b/builtin/providers/arukas/provider_test.go deleted file mode 100644 index d1b5d87e6..000000000 --- a/builtin/providers/arukas/provider_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package arukas - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "arukas": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("ARUKAS_JSON_API_TOKEN"); v == "" { - t.Fatal("ARUKAS_JSON_API_TOKEN must be set for acceptance tests") - } - if v := os.Getenv("ARUKAS_JSON_API_SECRET"); v == "" { - t.Fatal("ARUKAS_JSON_API_SECRET must be set for acceptance tests") - } -} diff --git a/builtin/providers/arukas/resource_arukas_container.go b/builtin/providers/arukas/resource_arukas_container.go deleted file mode 100644 index cb40dfc0f..000000000 --- a/builtin/providers/arukas/resource_arukas_container.go +++ /dev/null @@ -1,289 +0,0 @@ -package arukas - -import ( - "fmt" - "strings" - - API "github.com/arukasio/cli" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArukasContainer() *schema.Resource { - return &schema.Resource{ - Create: resourceArukasContainerCreate, - Read: resourceArukasContainerRead, - Update: resourceArukasContainerUpdate, - Delete: resourceArukasContainerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "image": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "instances": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validateIntegerInRange(1, 10), - }, - "memory": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 256, - ValidateFunc: validateIntInWord([]string{"256", "512"}), - }, - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "ports": &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 20, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "tcp", - ValidateFunc: validateStringInWord([]string{"tcp", "udp"}), - }, - "number": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: "80", - ValidateFunc: validateIntegerInRange(1, 65535), - }, - }, - }, - }, - "environments": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 20, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cmd": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "port_mappings": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "ipaddress": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "container_port": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - "service_port": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "endpoint_full_hostname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - 
"endpoint_full_url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "app_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceArukasContainerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArukasClient) - - var appSet API.AppSet - - // create an app - newApp := API.App{Name: d.Get("name").(string)} - - var parsedEnvs API.Envs - var parsedPorts API.Ports - - if rawEnvs, ok := d.GetOk("environments"); ok { - parsedEnvs = expandEnvs(rawEnvs) - } - if rawPorts, ok := d.GetOk("ports"); ok { - parsedPorts = expandPorts(rawPorts) - } - - newContainer := API.Container{ - Envs: parsedEnvs, - Ports: parsedPorts, - ImageName: d.Get("image").(string), - Mem: d.Get("memory").(int), - Instances: d.Get("instances").(int), - Cmd: d.Get("cmd").(string), - - Name: d.Get("endpoint").(string), - } - newAppSet := API.AppSet{ - App: newApp, - Container: newContainer, - } - - // create - if err := client.Post(&appSet, "/app-sets", newAppSet); err != nil { - return err - } - - // start container - if err := client.Post(nil, fmt.Sprintf("/containers/%s/power", appSet.Container.ID), nil); err != nil { - return err - } - - d.SetId(appSet.Container.ID) - - stateConf := &resource.StateChangeConf{ - Target: []string{"running"}, - Pending: []string{"stopped", "booting"}, - Timeout: client.Timeout, - Refresh: func() (interface{}, string, error) { - var container API.Container - err := client.Get(&container, fmt.Sprintf("/containers/%s", appSet.Container.ID)) - if err != nil { - return nil, "", err - } - - return container, container.StatusText, nil - }, - } - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - return resourceArukasContainerRead(d, meta) -} - -func resourceArukasContainerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArukasClient) - - var container API.Container - var app API.App - - if err := client.Get(&container, 
fmt.Sprintf("/containers/%s", d.Id())); err != nil { - return err - } - if err := client.Get(&app, fmt.Sprintf("/apps/%s", container.AppID)); err != nil { - return err - } - - d.Set("app_id", container.AppID) - d.Set("name", app.Name) - d.Set("image", container.ImageName) - d.Set("instances", container.Instances) - d.Set("memory", container.Mem) - endpoint := container.Endpoint - if strings.HasSuffix(endpoint, ".arukascloud.io") { - endpoint = strings.Replace(endpoint, ".arukascloud.io", "", -1) - } - - d.Set("endpoint", endpoint) - d.Set("endpoint_full_hostname", container.Endpoint) - d.Set("endpoint_full_url", fmt.Sprintf("https://%s", container.Endpoint)) - - d.Set("cmd", container.Cmd) - - //ports - d.Set("ports", flattenPorts(container.Ports)) - - //port mappings - d.Set("port_mappings", flattenPortMappings(container.PortMappings)) - - //envs - d.Set("environments", flattenEnvs(container.Envs)) - - return nil -} - -func resourceArukasContainerUpdate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*ArukasClient) - var container API.Container - - if err := client.Get(&container, fmt.Sprintf("/containers/%s", d.Id())); err != nil { - return err - } - - var parsedEnvs API.Envs - var parsedPorts API.Ports - - if rawEnvs, ok := d.GetOk("environments"); ok { - parsedEnvs = expandEnvs(rawEnvs) - } - if rawPorts, ok := d.GetOk("ports"); ok { - parsedPorts = expandPorts(rawPorts) - } - - newContainer := API.Container{ - Envs: parsedEnvs, - Ports: parsedPorts, - ImageName: d.Get("image").(string), - Mem: d.Get("memory").(int), - Instances: d.Get("instances").(int), - Cmd: d.Get("cmd").(string), - Name: d.Get("endpoint").(string), - } - - // update - if err := client.Patch(nil, fmt.Sprintf("/containers/%s", d.Id()), newContainer); err != nil { - return err - } - - return resourceArukasContainerRead(d, meta) - -} - -func resourceArukasContainerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArukasClient) - var container 
API.Container - - if err := client.Get(&container, fmt.Sprintf("/containers/%s", d.Id())); err != nil { - return err - } - - if err := client.Delete(fmt.Sprintf("/apps/%s", container.AppID)); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/arukas/resource_arukas_container_test.go b/builtin/providers/arukas/resource_arukas_container_test.go deleted file mode 100644 index 3fabc9b0d..000000000 --- a/builtin/providers/arukas/resource_arukas_container_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package arukas - -import ( - "fmt" - API "github.com/arukasio/cli" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "testing" -) - -func TestAccArukasContainer_Basic(t *testing.T) { - var container API.Container - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("terraform_acc_test_%s", randString) - endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckArukasContainerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckArukasContainerConfig_basic(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckArukasContainerExists("arukas_container.foobar", &container), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "name", name), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "image", "nginx:latest"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "instances", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "memory", "256"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "endpoint", endpoint), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.#", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", 
"ports.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.number", "80"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.#", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.key", "key"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.value", "value"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "port_mappings.#", "1"), - ), - }, - }, - }) -} - -func TestAccArukasContainer_Update(t *testing.T) { - var container API.Container - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("terraform_acc_test_%s", randString) - updatedName := fmt.Sprintf("terraform_acc_test_update_%s", randString) - endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString) - updatedEndpoint := fmt.Sprintf("terraform-acc-test-endpoint-update-%s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckArukasContainerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckArukasContainerConfig_basic(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckArukasContainerExists("arukas_container.foobar", &container), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "name", name), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "image", "nginx:latest"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "instances", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "memory", "256"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "endpoint", endpoint), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.#", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", 
"ports.0.number", "80"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.#", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.key", "key"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.value", "value"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "port_mappings.#", "1"), - ), - }, - resource.TestStep{ - Config: testAccCheckArukasContainerConfig_update(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckArukasContainerExists("arukas_container.foobar", &container), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "name", updatedName), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "image", "nginx:latest"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "instances", "2"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "memory", "512"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "endpoint", updatedEndpoint), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.#", "2"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.number", "80"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.1.protocol", "tcp"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.1.number", "443"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.#", "2"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.key", "key"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.0.value", "value"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.1.key", "key_upd"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "environments.1.value", "value_upd"), - resource.TestCheckResourceAttr( - 
"arukas_container.foobar", "port_mappings.#", "4"), - ), - }, - }, - }) -} - -func TestAccArukasContainer_Minimum(t *testing.T) { - var container API.Container - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("terraform_acc_test_minimum_%s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckArukasContainerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckArukasContainerConfig_minimum(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckArukasContainerExists("arukas_container.foobar", &container), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "name", name), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "image", "nginx:latest"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "instances", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "memory", "256"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.#", "1"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "ports.0.number", "80"), - resource.TestCheckResourceAttr( - "arukas_container.foobar", "port_mappings.#", "1"), - ), - }, - }, - }) -} - -func TestAccArukasContainer_Import(t *testing.T) { - resourceName := "arukas_container.foobar" - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckArukasContainerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckArukasContainerConfig_basic(randString), - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func 
testAccCheckArukasContainerExists(n string, container *API.Container) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Container ID is set") - } - client := testAccProvider.Meta().(*ArukasClient) - var foundContainer API.Container - err := client.Get(&foundContainer, fmt.Sprintf("/containers/%s", rs.Primary.ID)) - - if err != nil { - return err - } - - if foundContainer.ID != rs.Primary.ID { - return fmt.Errorf("Container not found") - } - - *container = foundContainer - - return nil - } -} - -func testAccCheckArukasContainerDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ArukasClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "arukas_container" { - continue - } - - err := client.Get(nil, fmt.Sprintf("/containers/%s", rs.Primary.ID)) - - if err == nil { - return fmt.Errorf("Note still exists") - } - } - - return nil -} - -func testAccCheckArukasContainerConfig_basic(randString string) string { - return fmt.Sprintf(` -resource "arukas_container" "foobar" { - name = "terraform_acc_test_%s" - image = "nginx:latest" - instances = 1 - memory = 256 - endpoint = "terraform-acc-test-endpoint-%s" - ports = { - protocol = "tcp" - number = "80" - } - environments { - key = "key" - value = "value" - } -}`, randString, randString) -} - -func testAccCheckArukasContainerConfig_update(randString string) string { - return fmt.Sprintf(` -resource "arukas_container" "foobar" { - name = "terraform_acc_test_update_%s" - image = "nginx:latest" - instances = 2 - memory = 512 - endpoint = "terraform-acc-test-endpoint-update-%s" - ports = { - protocol = "tcp" - number = "80" - } - ports = { - protocol = "tcp" - number = "443" - } - environments { - key = "key" - value = "value" - } - environments { - key = "key_upd" - value = "value_upd" - } -}`, randString, randString) -} - 
-func testAccCheckArukasContainerConfig_minimum(randString string) string { - return fmt.Sprintf(` -resource "arukas_container" "foobar" { - name = "terraform_acc_test_minimum_%s" - image = "nginx:latest" - ports = { - number = "80" - } -}`, randString) -} diff --git a/builtin/providers/arukas/structure.go b/builtin/providers/arukas/structure.go deleted file mode 100644 index 155de034c..000000000 --- a/builtin/providers/arukas/structure.go +++ /dev/null @@ -1,110 +0,0 @@ -package arukas - -import ( - API "github.com/arukasio/cli" - "github.com/hashicorp/terraform/helper/schema" - "net" -) - -// Takes the result of flatmap.Expand for an array of strings -// and returns a []string -func expandStringList(configured []interface{}) []string { - vs := make([]string, 0, len(configured)) - for _, v := range configured { - vs = append(vs, string(v.(string))) - } - return vs -} - -// Takes the result of schema.Set of strings and returns a []string -func expandStringSet(configured *schema.Set) []string { - return expandStringList(configured.List()) -} - -// Takes list of pointers to strings. 
Expand to an array -// of raw strings and returns a []interface{} -// to keep compatibility w/ schema.NewSetschema.NewSet -func flattenStringList(list []string) []interface{} { - vs := make([]interface{}, 0, len(list)) - for _, v := range list { - vs = append(vs, v) - } - return vs -} - -func expandEnvs(configured interface{}) API.Envs { - var envs API.Envs - if configured == nil { - return envs - } - rawEnvs := configured.([]interface{}) - for _, raw := range rawEnvs { - env := raw.(map[string]interface{}) - envs = append(envs, API.Env{Key: env["key"].(string), Value: env["value"].(string)}) - } - return envs -} - -func flattenEnvs(envs API.Envs) []interface{} { - var ret []interface{} - for _, env := range envs { - r := map[string]interface{}{} - r["key"] = env.Key - r["value"] = env.Value - ret = append(ret, r) - } - return ret -} - -func expandPorts(configured interface{}) API.Ports { - var ports API.Ports - if configured == nil { - return ports - } - rawPorts := configured.([]interface{}) - for _, raw := range rawPorts { - port := raw.(map[string]interface{}) - ports = append(ports, API.Port{Protocol: port["protocol"].(string), Number: port["number"].(int)}) - } - return ports -} - -func flattenPorts(ports API.Ports) []interface{} { - var ret []interface{} - for _, port := range ports { - r := map[string]interface{}{} - r["protocol"] = port.Protocol - r["number"] = port.Number - ret = append(ret, r) - } - return ret -} -func flattenPortMappings(ports API.PortMappings) []interface{} { - var ret []interface{} - for _, tasks := range ports { - for _, port := range tasks { - r := map[string]interface{}{} - ip := "" - - addrs, err := net.LookupHost(port.Host) - if err == nil && len(addrs) > 0 { - ip = addrs[0] - } - - r["host"] = port.Host - r["ipaddress"] = ip - r["container_port"] = port.ContainerPort - r["service_port"] = port.ServicePort - ret = append(ret, r) - } - } - return ret -} - -func forceString(target interface{}) string { - if target == nil { - return 
"" - } - - return target.(string) -} diff --git a/builtin/providers/arukas/validators.go b/builtin/providers/arukas/validators.go deleted file mode 100644 index e1c6a866c..000000000 --- a/builtin/providers/arukas/validators.go +++ /dev/null @@ -1,92 +0,0 @@ -package arukas - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func validateMaxLength(minLength, maxLength int) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < minLength { - errors = append(errors, fmt.Errorf( - "%q cannot be shorter than %d characters: %q", k, minLength, value)) - } - if len(value) > maxLength { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than %d characters: %q", k, maxLength, value)) - } - return - } -} - -func validateIntegerInRange(min, max int) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < min { - errors = append(errors, fmt.Errorf( - "%q cannot be lower than %d: %d", k, min, value)) - } - if value > max { - errors = append(errors, fmt.Errorf( - "%q cannot be higher than %d: %d", k, max, value)) - } - return - } -} - -func validateStringInWord(allowWords []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - var found bool - for _, t := range allowWords { - if v.(string) == t { - found = true - } - } - if !found { - errors = append(errors, fmt.Errorf("%q must be one of [%s]", k, strings.Join(allowWords, "/"))) - - } - return - } -} - -func validateIntInWord(allowWords []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - var found bool - for _, t := range allowWords { - if fmt.Sprintf("%d", v.(int)) == t { - found = true - } - } - if !found { - errors = append(errors, fmt.Errorf("%q must be one of [%s]", k, strings.Join(allowWords, "/"))) - - } - 
return - } -} - -func validateDNSRecordValue() schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - var rtype, value string - - values := v.(map[string]interface{}) - rtype = values["type"].(string) - value = values["value"].(string) - switch rtype { - case "MX", "NS", "CNAME": - if rtype == "MX" { - if values["priority"] == nil { - errors = append(errors, fmt.Errorf("%q required when TYPE was MX", k)) - } - } - if !strings.HasSuffix(value, ".") { - errors = append(errors, fmt.Errorf("%q must be period at the end [%s]", k, value)) - } - } - return - } - -} diff --git a/builtin/providers/atlas/data_source_artifact.go b/builtin/providers/atlas/data_source_artifact.go deleted file mode 100644 index 7dcfdbe81..000000000 --- a/builtin/providers/atlas/data_source_artifact.go +++ /dev/null @@ -1,149 +0,0 @@ -package atlas - -import ( - "fmt" - - "github.com/hashicorp/atlas-go/v1" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAtlasArtifact() *schema.Resource { - return &schema.Resource{ - Read: dataSourceArtifactRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "build": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata_keys": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "file_url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "metadata_full": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - - "slug": &schema.Schema{ - Type: 
schema.TypeString, - Computed: true, - }, - - "version_real": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceArtifactRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*atlas.Client) - - // Parse the slug from the name given of the artifact since the API - // expects these to be split. - user, name, err := atlas.ParseSlug(d.Get("name").(string)) - if err != nil { - return err - } - - // Filter by version or build if given - var build, version string - if v, ok := d.GetOk("version"); ok { - version = v.(string) - } else if b, ok := d.GetOk("build"); ok { - build = b.(string) - } - - // If we have neither, default to latest version - if build == "" && version == "" { - version = "latest" - } - - // Compile the metadata search params - md := make(map[string]string) - for _, v := range d.Get("metadata_keys").(*schema.Set).List() { - md[v.(string)] = atlas.MetadataAnyValue - } - for k, v := range d.Get("metadata").(map[string]interface{}) { - md[k] = v.(string) - } - - // Do the search! 
- vs, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{ - User: user, - Name: name, - Type: d.Get("type").(string), - Build: build, - Version: version, - Metadata: md, - }) - if err != nil { - return fmt.Errorf( - "Error searching for artifact '%s/%s': %s", - user, name, err) - } - - if len(vs) == 0 { - return fmt.Errorf("No matching artifact for '%s/%s'", user, name) - } else if len(vs) > 1 { - return fmt.Errorf( - "Got %d results for '%s/%s', only one is allowed", - len(vs), user, name) - } - v := vs[0] - - d.SetId(v.ID) - if v.ID == "" { - d.SetId(fmt.Sprintf("%s %d", v.Tag, v.Version)) - } - d.Set("version_real", v.Version) - d.Set("metadata_full", cleanMetadata(v.Metadata)) - d.Set("slug", v.Slug) - - d.Set("file_url", "") - if u, err := client.ArtifactFileURL(v); err != nil { - return fmt.Errorf( - "Error reading file URL: %s", err) - } else if u != nil { - d.Set("file_url", u.String()) - } - - return nil -} diff --git a/builtin/providers/atlas/data_source_artifact_test.go b/builtin/providers/atlas/data_source_artifact_test.go deleted file mode 100644 index c6d503b3b..000000000 --- a/builtin/providers/atlas/data_source_artifact_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package atlas - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceArtifact_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataArtifact_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func TestAccDataSourceArtifact_metadata(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataArtifact_metadata, - 
Check: resource.ComposeTestCheckFunc( - testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"), - testAccCheckDataArtifactState("id", "x86"), - testAccCheckDataArtifactState("metadata_full.arch", "x86"), - ), - }, - }, - }) -} - -func TestAccDataSourceArtifact_metadataSet(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataArtifact_metadataSet, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"), - testAccCheckDataArtifactState("id", "x64"), - testAccCheckDataArtifactState("metadata_full.arch", "x64"), - ), - }, - }, - }) -} - -func TestAccDataSourceArtifact_buildLatest(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataArtifact_buildLatest, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func TestAccDataSourceArtifact_versionAny(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataArtifact_versionAny, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func testAccCheckDataArtifactState(key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources["data.atlas_artifact.foobar"] - if !ok { - return fmt.Errorf("Not found: %s", "data.atlas_artifact.foobar") - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - p := rs.Primary - if p.Attributes[key] != value { - return fmt.Errorf( - "%s != %s (actual: %s)", 
key, value, p.Attributes[key]) - } - - return nil - } -} - -const testAccDataArtifact_basic = ` -data "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" -}` - -const testAccDataArtifact_metadata = ` -data "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - metadata { - arch = "x86" - } - version = "any" -}` - -const testAccDataArtifact_metadataSet = ` -data "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - metadata_keys = ["arch"] - version = "any" -}` - -const testAccDataArtifact_buildLatest = ` -data "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - build = "latest" - metadata { - arch = "x86" - } -}` - -const testAccDataArtifact_versionAny = ` -data "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - version = "any" -}` diff --git a/builtin/providers/atlas/provider.go b/builtin/providers/atlas/provider.go deleted file mode 100644 index 14928de63..000000000 --- a/builtin/providers/atlas/provider.go +++ /dev/null @@ -1,71 +0,0 @@ -package atlas - -import ( - "github.com/hashicorp/atlas-go/v1" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -const ( - // defaultAtlasServer is the default endpoint for Atlas if - // none is specified. - defaultAtlasServer = "https://atlas.hashicorp.com" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ATLAS_TOKEN", nil), - Description: descriptions["token"], - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer), - Description: descriptions["address"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "atlas_artifact": dataSourceAtlasArtifact(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "atlas_artifact": resourceArtifact(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var err error - client := atlas.DefaultClient() - if v := d.Get("address").(string); v != "" { - client, err = atlas.NewClient(v) - if err != nil { - return nil, err - } - } - client.DefaultHeader.Set(terraform.VersionHeader, terraform.VersionString()) - client.Token = d.Get("token").(string) - - return client, nil -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "address": "The address of the Atlas server. If blank, the public\n" + - "server at atlas.hashicorp.com will be used.", - - "token": "The access token for reading artifacts. 
This is required\n" + - "if reading private artifacts.", - } -} diff --git a/builtin/providers/atlas/provider_test.go b/builtin/providers/atlas/provider_test.go deleted file mode 100644 index cd03dc3a5..000000000 --- a/builtin/providers/atlas/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package atlas - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "atlas": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("ATLAS_TOKEN"); v == "" { - t.Fatal("ATLAS_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/atlas/resource_artifact.go b/builtin/providers/atlas/resource_artifact.go deleted file mode 100644 index 4967de412..000000000 --- a/builtin/providers/atlas/resource_artifact.go +++ /dev/null @@ -1,176 +0,0 @@ -package atlas - -import ( - "fmt" - "regexp" - - "github.com/hashicorp/atlas-go/v1" - "github.com/hashicorp/terraform/helper/schema" -) - -var ( - // saneMetaKey is used to sanitize the metadata keys so that - // they can be accessed as a variable interpolation from TF - saneMetaKey = regexp.MustCompile("[^a-zA-Z0-9-_]") -) - -func resourceArtifact() *schema.Resource { - return &schema.Resource{ - Create: resourceArtifactRead, - Read: resourceArtifactRead, - Delete: resourceArtifactDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Deprecated: `atlas_artifact is now 
deprecated. Use the Atlas Artifact Data Source instead. See https://www.terraform.io/docs/providers/terraform-enterprise/d/artifact.html`, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "build": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata_keys": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "file_url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "metadata_full": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - - "slug": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "version_real": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceArtifactRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*atlas.Client) - - // Parse the slug from the name given of the artifact since the API - // expects these to be split. - user, name, err := atlas.ParseSlug(d.Get("name").(string)) - if err != nil { - return err - } - - // Filter by version or build if given - var build, version string - if v, ok := d.GetOk("version"); ok { - version = v.(string) - } else if b, ok := d.GetOk("build"); ok { - build = b.(string) - } - - // If we have neither, default to latest version - if build == "" && version == "" { - version = "latest" - } - - // Compile the metadata search params - md := make(map[string]string) - for _, v := range d.Get("metadata_keys").(*schema.Set).List() { - md[v.(string)] = atlas.MetadataAnyValue - } - for k, v := range d.Get("metadata").(map[string]interface{}) { - md[k] = v.(string) - } - - // Do the search! 
- vs, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{ - User: user, - Name: name, - Type: d.Get("type").(string), - Build: build, - Version: version, - Metadata: md, - }) - if err != nil { - return fmt.Errorf( - "Error searching for artifact '%s/%s': %s", - user, name, err) - } - - if len(vs) == 0 { - return fmt.Errorf("No matching artifact for '%s/%s'", user, name) - } else if len(vs) > 1 { - return fmt.Errorf( - "Got %d results for '%s/%s', only one is allowed", - len(vs), user, name) - } - v := vs[0] - - d.SetId(v.ID) - if v.ID == "" { - d.SetId(fmt.Sprintf("%s %d", v.Tag, v.Version)) - } - d.Set("version_real", v.Version) - d.Set("metadata_full", cleanMetadata(v.Metadata)) - d.Set("slug", v.Slug) - - d.Set("file_url", "") - if u, err := client.ArtifactFileURL(v); err != nil { - return fmt.Errorf( - "Error reading file URL: %s", err) - } else if u != nil { - d.Set("file_url", u.String()) - } - - return nil -} - -func resourceArtifactDelete(d *schema.ResourceData, meta interface{}) error { - // This just always succeeds since this is a readonly element. - d.SetId("") - return nil -} - -// cleanMetadata is used to ensure the metadata is accessible as -// a variable by doing a simple re-write. 
-func cleanMetadata(in map[string]string) map[string]string { - out := make(map[string]string, len(in)) - for k, v := range in { - sane := saneMetaKey.ReplaceAllString(k, "-") - out[sane] = v - } - return out -} diff --git a/builtin/providers/atlas/resource_artifact_test.go b/builtin/providers/atlas/resource_artifact_test.go deleted file mode 100644 index 440b954e4..000000000 --- a/builtin/providers/atlas/resource_artifact_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package atlas - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccArtifact_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccArtifact_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func TestAccArtifact_metadata(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccArtifact_metadata, - Check: resource.ComposeTestCheckFunc( - testAccCheckArtifactState("name", "hashicorp/tf-provider-test"), - testAccCheckArtifactState("id", "x86"), - testAccCheckArtifactState("metadata_full.arch", "x86"), - ), - }, - }, - }) -} - -func TestAccArtifact_metadataSet(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccArtifact_metadataSet, - Check: resource.ComposeTestCheckFunc( - testAccCheckArtifactState("name", "hashicorp/tf-provider-test"), - testAccCheckArtifactState("id", "x64"), - testAccCheckArtifactState("metadata_full.arch", "x64"), - ), - }, - }, - }) -} - -func TestAccArtifact_buildLatest(t *testing.T) 
{ - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccArtifact_buildLatest, - Check: resource.ComposeTestCheckFunc( - testAccCheckArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func TestAccArtifact_versionAny(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccArtifact_versionAny, - Check: resource.ComposeTestCheckFunc( - testAccCheckArtifactState("name", "hashicorp/tf-provider-test"), - ), - }, - }, - }) -} - -func testAccCheckArtifactState(key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources["atlas_artifact.foobar"] - if !ok { - return fmt.Errorf("Not found: %s", "atlas_artifact.foobar") - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - p := rs.Primary - if p.Attributes[key] != value { - return fmt.Errorf( - "%s != %s (actual: %s)", key, value, p.Attributes[key]) - } - - return nil - } -} - -const testAccArtifact_basic = ` -resource "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" -}` - -const testAccArtifact_metadata = ` -resource "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - metadata { - arch = "x86" - } - version = "any" -}` - -const testAccArtifact_metadataSet = ` -resource "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - metadata_keys = ["arch"] - version = "any" -}` - -const testAccArtifact_buildLatest = ` -resource "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" - type = "foo" - build = "latest" - metadata { - arch = "x86" - } -}` - -const testAccArtifact_versionAny = ` -resource "atlas_artifact" "foobar" { - name = "hashicorp/tf-provider-test" 
- type = "foo" - version = "any" -}` diff --git a/builtin/providers/aws/auth_helpers.go b/builtin/providers/aws/auth_helpers.go deleted file mode 100644 index e808d4d39..000000000 --- a/builtin/providers/aws/auth_helpers.go +++ /dev/null @@ -1,217 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - awsCredentials "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-cleanhttp" -) - -func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) { - // If we have creds from instance profile, we can use metadata API - if authProviderName == ec2rolecreds.ProviderName { - log.Println("[DEBUG] Trying to get account ID via AWS Metadata API") - - cfg := &aws.Config{} - setOptionalEndpoint(cfg) - sess, err := session.NewSession(cfg) - if err != nil { - return "", "", errwrap.Wrapf("Error creating AWS session: {{err}}", err) - } - - metadataClient := ec2metadata.New(sess) - info, err := metadataClient.IAMInfo() - if err != nil { - // This can be triggered when no IAM Role is assigned - // or AWS just happens to return invalid response - return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err) - } - - return parseAccountInfoFromArn(info.InstanceProfileArn) - } - - // Then try IAM GetUser - log.Println("[DEBUG] Trying to get account ID via iam:GetUser") - outUser, err := iamconn.GetUser(nil) - if err == nil { - return parseAccountInfoFromArn(*outUser.User.Arn) - } - - awsErr, ok := err.(awserr.Error) - // AccessDenied and ValidationError can be raised - // if credentials belong to 
federated profile, so we ignore these - if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") { - return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err) - } - log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err) - - // Then try STS GetCallerIdentity - log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity") - outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{}) - if err == nil { - return parseAccountInfoFromArn(*outCallerIdentity.Arn) - } - log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err) - - // Then try IAM ListRoles - log.Println("[DEBUG] Trying to get account ID via iam:ListRoles") - outRoles, err := iamconn.ListRoles(&iam.ListRolesInput{ - MaxItems: aws.Int64(int64(1)), - }) - if err != nil { - return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err) - } - - if len(outRoles.Roles) < 1 { - return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available") - } - - return parseAccountInfoFromArn(*outRoles.Roles[0].Arn) -} - -func parseAccountInfoFromArn(arn string) (string, string, error) { - parts := strings.Split(arn, ":") - if len(parts) < 5 { - return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn) - } - return parts[1], parts[4], nil -} - -// This function is responsible for reading credentials from the -// environment in the case that they're not explicitly specified -// in the Terraform configuration. 
-func GetCredentials(c *Config) (*awsCredentials.Credentials, error) { - // build a chain provider, lazy-evaulated by aws-sdk - providers := []awsCredentials.Provider{ - &awsCredentials.StaticProvider{Value: awsCredentials.Value{ - AccessKeyID: c.AccessKey, - SecretAccessKey: c.SecretKey, - SessionToken: c.Token, - }}, - &awsCredentials.EnvProvider{}, - &awsCredentials.SharedCredentialsProvider{ - Filename: c.CredsFilename, - Profile: c.Profile, - }, - } - - // Build isolated HTTP client to avoid issues with globally-shared settings - client := cleanhttp.DefaultClient() - - // Keep the timeout low as we don't want to wait in non-EC2 environments - client.Timeout = 100 * time.Millisecond - cfg := &aws.Config{ - HTTPClient: client, - } - usedEndpoint := setOptionalEndpoint(cfg) - - if !c.SkipMetadataApiCheck { - // Real AWS should reply to a simple metadata request. - // We check it actually does to ensure something else didn't just - // happen to be listening on the same IP:Port - metadataClient := ec2metadata.New(session.New(cfg)) - if metadataClient.Available() { - providers = append(providers, &ec2rolecreds.EC2RoleProvider{ - Client: metadataClient, - }) - log.Print("[INFO] AWS EC2 instance detected via default metadata" + - " API endpoint, EC2RoleProvider added to the auth chain") - } else { - if usedEndpoint == "" { - usedEndpoint = "default location" - } - log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+ - "as it doesn't return any instance-id", usedEndpoint) - } - } - - // This is the "normal" flow (i.e. not assuming a role) - if c.AssumeRoleARN == "" { - return awsCredentials.NewChainCredentials(providers), nil - } - - // Otherwise we need to construct and STS client with the main credentials, and verify - // that we can assume the defined role. 
- log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)", - c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy) - - creds := awsCredentials.NewChainCredentials(providers) - cp, err := creds.Get() - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { - return nil, errors.New(`No valid credential sources found for AWS Provider. - Please see https://terraform.io/docs/providers/aws/index.html for more information on - providing credentials for the AWS Provider`) - } - - return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) - } - - log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) - - awsConfig := &aws.Config{ - Credentials: creds, - Region: aws.String(c.Region), - MaxRetries: aws.Int(c.MaxRetries), - HTTPClient: cleanhttp.DefaultClient(), - S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), - } - - stsclient := sts.New(session.New(awsConfig)) - assumeRoleProvider := &stscreds.AssumeRoleProvider{ - Client: stsclient, - RoleARN: c.AssumeRoleARN, - } - if c.AssumeRoleSessionName != "" { - assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName - } - if c.AssumeRoleExternalID != "" { - assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID) - } - if c.AssumeRolePolicy != "" { - assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy) - } - - providers = []awsCredentials.Provider{assumeRoleProvider} - - assumeRoleCreds := awsCredentials.NewChainCredentials(providers) - _, err = assumeRoleCreds.Get() - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { - return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+ - " There are a number of possible causes of this - the most common are:\n"+ - " * The credentials used in order to assume the role are invalid\n"+ - " * The credentials do not have appropriate permission to assume the role\n"+ - " * The 
role ARN is not valid", - c.AssumeRoleARN) - } - - return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) - } - - return assumeRoleCreds, nil -} - -func setOptionalEndpoint(cfg *aws.Config) string { - endpoint := os.Getenv("AWS_METADATA_URL") - if endpoint != "" { - log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint) - cfg.Endpoint = aws.String(endpoint) - return endpoint - } - return "" -} diff --git a/builtin/providers/aws/auth_helpers_test.go b/builtin/providers/aws/auth_helpers_test.go deleted file mode 100644 index 25120c43b..000000000 --- a/builtin/providers/aws/auth_helpers_test.go +++ /dev/null @@ -1,902 +0,0 @@ -package aws - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/sts" -) - -func TestAWSGetAccountInfo_shouldBeValid_fromEC2Role(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - // capture the test server's close method, to call after the test returns - awsTs := awsEnv(t) - defer awsTs() - - closeEmpty, emptySess, err := getMockedAwsApiSession("zero", []*awsMockEndpoint{}) - defer closeEmpty() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(emptySess) - stsConn := sts.New(emptySess) - - part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName) - if err != nil { - t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789013" - if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) - } -} - -func TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - 
// capture the test server's close method, to call after the test returns - awsTs := awsEnv(t) - defer awsTs() - - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"}, - }, - } - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - iamConn := iam.New(iamSess) - closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{}) - defer closeSts() - if err != nil { - t.Fatal(err) - } - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName) - if err != nil { - t.Fatalf("Getting account ID from EC2 metadata API failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789013" - if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) - } -} - -func TestAWSGetAccountInfo_shouldBeValid_fromIamUser(t *testing.T) { - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"}, - }, - } - - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{}) - defer closeSts() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(iamSess) - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, "") - if err != nil { - t.Fatalf("Getting account ID via GetUser failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789012" 
- if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) - } -} - -func TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity(t *testing.T) { - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"}, - }, - } - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - - stsEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"}, - Response: &awsMockResponse{200, stsResponse_GetCallerIdentity_valid, "text/xml"}, - }, - } - closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints) - defer closeSts() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(iamSess) - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, "") - if err != nil { - t.Fatalf("Getting account ID via GetUser failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789012" - if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) - } -} - -func TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles(t *testing.T) { - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"}, - }, - { - Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"}, - Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"}, - }, - } - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - - stsEndpoints := []*awsMockEndpoint{ - { - 
Request: &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"}, - Response: &awsMockResponse{403, stsResponse_GetCallerIdentity_unauthorized, "text/xml"}, - }, - } - closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints) - defer closeSts() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(iamSess) - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, "") - if err != nil { - t.Fatalf("Getting account ID via ListRoles failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789012" - if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) - } -} - -func TestAWSGetAccountInfo_shouldBeValid_federatedRole(t *testing.T) { - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{400, iamResponse_GetUser_federatedFailure, "text/xml"}, - }, - { - Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"}, - Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"}, - }, - } - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - - closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{}) - defer closeSts() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(iamSess) - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, "") - if err != nil { - t.Fatalf("Getting account ID via ListRoles failed: %s", err) - } - - expectedPart := "aws" - if part != expectedPart { - t.Fatalf("Expected partition: %s, given: %s", expectedPart, part) - } - - expectedAccountId := "123456789012" - if id != expectedAccountId { - t.Fatalf("Expected account ID: %s, given: %s", expectedAccountId, id) 
- } -} - -func TestAWSGetAccountInfo_shouldError_unauthorizedFromIam(t *testing.T) { - iamEndpoints := []*awsMockEndpoint{ - { - Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"}, - Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"}, - }, - { - Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"}, - Response: &awsMockResponse{403, iamResponse_ListRoles_unauthorized, "text/xml"}, - }, - } - closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints) - defer closeIam() - if err != nil { - t.Fatal(err) - } - - closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{}) - defer closeSts() - if err != nil { - t.Fatal(err) - } - - iamConn := iam.New(iamSess) - stsConn := sts.New(stsSess) - - part, id, err := GetAccountInfo(iamConn, stsConn, "") - if err == nil { - t.Fatal("Expected error when getting account ID") - } - - if part != "" { - t.Fatalf("Expected no partition, given: %s", part) - } - - if id != "" { - t.Fatalf("Expected no account ID, given: %s", id) - } -} - -func TestAWSParseAccountInfoFromArn(t *testing.T) { - validArn := "arn:aws:iam::101636750127:instance-profile/aws-elasticbeanstalk-ec2-role" - expectedPart := "aws" - expectedId := "101636750127" - part, id, err := parseAccountInfoFromArn(validArn) - if err != nil { - t.Fatalf("Expected no error when parsing valid ARN: %s", err) - } - if part != expectedPart { - t.Fatalf("Parsed part doesn't match with expected (%q != %q)", part, expectedPart) - } - if id != expectedId { - t.Fatalf("Parsed id doesn't match with expected (%q != %q)", id, expectedId) - } - - invalidArn := "blablah" - part, id, err = parseAccountInfoFromArn(invalidArn) - if err == nil { - t.Fatalf("Expected error when parsing invalid ARN (%q)", invalidArn) - } -} - -func TestAWSGetCredentials_shouldError(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - cfg := Config{} - - c, err := GetCredentials(&cfg) - if 
awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() != "NoCredentialProviders" { - t.Fatal("Expected NoCredentialProviders error") - } - } - _, err = c.Get() - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() != "NoCredentialProviders" { - t.Fatal("Expected NoCredentialProviders error") - } - } - if err == nil { - t.Fatal("Expected an error with empty env, keys, and IAM in AWS Config") - } -} - -func TestAWSGetCredentials_shouldBeStatic(t *testing.T) { - simple := []struct { - Key, Secret, Token string - }{ - { - Key: "test", - Secret: "secret", - }, { - Key: "test", - Secret: "test", - Token: "test", - }, - } - - for _, c := range simple { - cfg := Config{ - AccessKey: c.Key, - SecretKey: c.Secret, - Token: c.Token, - } - - creds, err := GetCredentials(&cfg) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected a static creds provider to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - - if v.AccessKeyID != c.Key { - t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID) - } - if v.SecretAccessKey != c.Secret { - t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey) - } - if v.SessionToken != c.Token { - t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken) - } - } -} - -// TestAWSGetCredentials_shouldIAM is designed to test the scenario of running Terraform -// from an EC2 instance, without environment variables or manually supplied -// credentials. 
-func TestAWSGetCredentials_shouldIAM(t *testing.T) { - // clear AWS_* environment variables - resetEnv := unsetEnv(t) - defer resetEnv() - - // capture the test server's close method, to call after the test returns - ts := awsEnv(t) - defer ts() - - // An empty config, no key supplied - cfg := Config{} - - creds, err := GetCredentials(&cfg) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected a static creds provider to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if v.AccessKeyID != "somekey" { - t.Fatalf("AccessKeyID mismatch, expected: (somekey), got (%s)", v.AccessKeyID) - } - if v.SecretAccessKey != "somesecret" { - t.Fatalf("SecretAccessKey mismatch, expected: (somesecret), got (%s)", v.SecretAccessKey) - } - if v.SessionToken != "sometoken" { - t.Fatalf("SessionToken mismatch, expected: (sometoken), got (%s)", v.SessionToken) - } -} - -// TestAWSGetCredentials_shouldIAM is designed to test the scenario of running Terraform -// from an EC2 instance, without environment variables or manually supplied -// credentials. 
-func TestAWSGetCredentials_shouldIgnoreIAM(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - // capture the test server's close method, to call after the test returns - ts := awsEnv(t) - defer ts() - simple := []struct { - Key, Secret, Token string - }{ - { - Key: "test", - Secret: "secret", - }, { - Key: "test", - Secret: "test", - Token: "test", - }, - } - - for _, c := range simple { - cfg := Config{ - AccessKey: c.Key, - SecretKey: c.Secret, - Token: c.Token, - } - - creds, err := GetCredentials(&cfg) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected a static creds provider to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if v.AccessKeyID != c.Key { - t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID) - } - if v.SecretAccessKey != c.Secret { - t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey) - } - if v.SessionToken != c.Token { - t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken) - } - } -} - -func TestAWSGetCredentials_shouldErrorWithInvalidEndpoint(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - // capture the test server's close method, to call after the test returns - ts := invalidAwsEnv(t) - defer ts() - - creds, err := GetCredentials(&Config{}) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected a static creds provider to be returned") - } - - v, err := creds.Get() - if err == nil { - t.Fatal("Expected error returned when getting creds w/ invalid EC2 endpoint") - } - - if v.ProviderName != "" { - t.Fatalf("Expected provider name to be empty, %q given", v.ProviderName) - } -} - -func TestAWSGetCredentials_shouldIgnoreInvalidEndpoint(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - // capture the test server's close method, to call after 
the test returns - ts := invalidAwsEnv(t) - defer ts() - - creds, err := GetCredentials(&Config{AccessKey: "accessKey", SecretKey: "secretKey"}) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - v, err := creds.Get() - if err != nil { - t.Fatalf("Getting static credentials w/ invalid EC2 endpoint failed: %s", err) - } - if creds == nil { - t.Fatal("Expected a static creds provider to be returned") - } - - if v.ProviderName != "StaticProvider" { - t.Fatalf("Expected provider name to be %q, %q given", "StaticProvider", v.ProviderName) - } - - if v.AccessKeyID != "accessKey" { - t.Fatalf("Static Access Key %q doesn't match: %s", "accessKey", v.AccessKeyID) - } - - if v.SecretAccessKey != "secretKey" { - t.Fatalf("Static Secret Key %q doesn't match: %s", "secretKey", v.SecretAccessKey) - } -} - -func TestAWSGetCredentials_shouldCatchEC2RoleProvider(t *testing.T) { - resetEnv := unsetEnv(t) - defer resetEnv() - // capture the test server's close method, to call after the test returns - ts := awsEnv(t) - defer ts() - - creds, err := GetCredentials(&Config{}) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected an EC2Role creds provider to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Expected no error when getting creds: %s", err) - } - expectedProvider := "EC2RoleProvider" - if v.ProviderName != expectedProvider { - t.Fatalf("Expected provider name to be %q, %q given", - expectedProvider, v.ProviderName) - } -} - -var credentialsFileContents = `[myprofile] -aws_access_key_id = accesskey -aws_secret_access_key = secretkey -` - -func TestAWSGetCredentials_shouldBeShared(t *testing.T) { - file, err := ioutil.TempFile(os.TempDir(), "terraform_aws_cred") - if err != nil { - t.Fatalf("Error writing temporary credentials file: %s", err) - } - _, err = file.WriteString(credentialsFileContents) - if err != nil { - t.Fatalf("Error writing temporary credentials to file: %s", err) - } 
- err = file.Close() - if err != nil { - t.Fatalf("Error closing temporary credentials file: %s", err) - } - - defer os.Remove(file.Name()) - - resetEnv := unsetEnv(t) - defer resetEnv() - - if err := os.Setenv("AWS_PROFILE", "myprofile"); err != nil { - t.Fatalf("Error resetting env var AWS_PROFILE: %s", err) - } - if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", file.Name()); err != nil { - t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err) - } - - creds, err := GetCredentials(&Config{Profile: "myprofile", CredsFilename: file.Name()}) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatal("Expected a provider chain to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - - if v.AccessKeyID != "accesskey" { - t.Fatalf("AccessKeyID mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID) - } - - if v.SecretAccessKey != "secretkey" { - t.Fatalf("SecretAccessKey mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID) - } -} - -func TestAWSGetCredentials_shouldBeENV(t *testing.T) { - // need to set the environment variables to a dummy string, as we don't know - // what they may be at runtime without hardcoding here - s := "some_env" - resetEnv := setEnv(s, t) - - defer resetEnv() - - cfg := Config{} - creds, err := GetCredentials(&cfg) - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if creds == nil { - t.Fatalf("Expected a static creds provider to be returned") - } - - v, err := creds.Get() - if err != nil { - t.Fatalf("Error gettings creds: %s", err) - } - if v.AccessKeyID != s { - t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", s, v.AccessKeyID) - } - if v.SecretAccessKey != s { - t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", s, v.SecretAccessKey) - } - if v.SessionToken != s { - t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", s, v.SessionToken) - } -} - -// unsetEnv 
unsets environment variables for testing a "clean slate" with no -// credentials in the environment -func unsetEnv(t *testing.T) func() { - // Grab any existing AWS keys and preserve. In some tests we'll unset these, so - // we need to have them and restore them after - e := getEnv() - if err := os.Unsetenv("AWS_ACCESS_KEY_ID"); err != nil { - t.Fatalf("Error unsetting env var AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Unsetenv("AWS_SECRET_ACCESS_KEY"); err != nil { - t.Fatalf("Error unsetting env var AWS_SECRET_ACCESS_KEY: %s", err) - } - if err := os.Unsetenv("AWS_SESSION_TOKEN"); err != nil { - t.Fatalf("Error unsetting env var AWS_SESSION_TOKEN: %s", err) - } - if err := os.Unsetenv("AWS_PROFILE"); err != nil { - t.Fatalf("Error unsetting env var AWS_PROFILE: %s", err) - } - if err := os.Unsetenv("AWS_SHARED_CREDENTIALS_FILE"); err != nil { - t.Fatalf("Error unsetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err) - } - - return func() { - // re-set all the envs we unset above - if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil { - t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil { - t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err) - } - if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil { - t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err) - } - if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil { - t.Fatalf("Error resetting env var AWS_PROFILE: %s", err) - } - if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", e.CredsFilename); err != nil { - t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err) - } - } -} - -func setEnv(s string, t *testing.T) func() { - e := getEnv() - // Set all the envs to a dummy value - if err := os.Setenv("AWS_ACCESS_KEY_ID", s); err != nil { - t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Setenv("AWS_SECRET_ACCESS_KEY", s); err != nil 
{ - t.Fatalf("Error setting env var AWS_SECRET_ACCESS_KEY: %s", err) - } - if err := os.Setenv("AWS_SESSION_TOKEN", s); err != nil { - t.Fatalf("Error setting env var AWS_SESSION_TOKEN: %s", err) - } - if err := os.Setenv("AWS_PROFILE", s); err != nil { - t.Fatalf("Error setting env var AWS_PROFILE: %s", err) - } - if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", s); err != nil { - t.Fatalf("Error setting env var AWS_SHARED_CREDENTIALS_FLE: %s", err) - } - - return func() { - // re-set all the envs we unset above - if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil { - t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil { - t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err) - } - if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil { - t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err) - } - if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil { - t.Fatalf("Error setting env var AWS_PROFILE: %s", err) - } - if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", s); err != nil { - t.Fatalf("Error setting env var AWS_SHARED_CREDENTIALS_FLE: %s", err) - } - } -} - -// awsEnv establishes a httptest server to mock out the internal AWS Metadata -// service. IAM Credentials are retrieved by the EC2RoleProvider, which makes -// API calls to this internal URL. 
By replacing the server with a test server, -// we can simulate an AWS environment -func awsEnv(t *testing.T) func() { - routes := routes{} - if err := json.Unmarshal([]byte(metadataApiRoutes), &routes); err != nil { - t.Fatalf("Failed to unmarshal JSON in AWS ENV test: %s", err) - } - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - w.Header().Add("Server", "MockEC2") - log.Printf("[DEBUG] Mocker server received request to %q", r.RequestURI) - for _, e := range routes.Endpoints { - if r.RequestURI == e.Uri { - fmt.Fprintln(w, e.Body) - w.WriteHeader(200) - return - } - } - w.WriteHeader(400) - })) - - os.Setenv("AWS_METADATA_URL", ts.URL+"/latest") - return ts.Close -} - -// invalidAwsEnv establishes a httptest server to simulate behaviour -// when endpoint doesn't respond as expected -func invalidAwsEnv(t *testing.T) func() { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(400) - })) - - os.Setenv("AWS_METADATA_URL", ts.URL+"/latest") - return ts.Close -} - -func getEnv() *currentEnv { - // Grab any existing AWS keys and preserve. 
In some tests we'll unset these, so - // we need to have them and restore them after - return ¤tEnv{ - Key: os.Getenv("AWS_ACCESS_KEY_ID"), - Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), - Token: os.Getenv("AWS_SESSION_TOKEN"), - Profile: os.Getenv("AWS_PROFILE"), - CredsFilename: os.Getenv("AWS_SHARED_CREDENTIALS_FILE"), - } -} - -// struct to preserve the current environment -type currentEnv struct { - Key, Secret, Token, Profile, CredsFilename string -} - -type routes struct { - Endpoints []*endpoint `json:"endpoints"` -} -type endpoint struct { - Uri string `json:"uri"` - Body string `json:"body"` -} - -const metadataApiRoutes = ` -{ - "endpoints": [ - { - "uri": "/latest/meta-data/instance-id", - "body": "mock-instance-id" - }, - { - "uri": "/latest/meta-data/iam/info", - "body": "{\"Code\": \"Success\",\"LastUpdated\": \"2016-03-17T12:27:32Z\",\"InstanceProfileArn\": \"arn:aws:iam::123456789013:instance-profile/my-instance-profile\",\"InstanceProfileId\": \"AIPAABCDEFGHIJKLMN123\"}" - }, - { - "uri": "/latest/meta-data/iam/security-credentials", - "body": "test_role" - }, - { - "uri": "/latest/meta-data/iam/security-credentials/test_role", - "body": "{\"Code\":\"Success\",\"LastUpdated\":\"2015-12-11T17:17:25Z\",\"Type\":\"AWS-HMAC\",\"AccessKeyId\":\"somekey\",\"SecretAccessKey\":\"somesecret\",\"Token\":\"sometoken\"}" - } - ] -} -` - -const iamResponse_GetUser_valid = ` - - - AIDACKCEVSQ6C2EXAMPLE - /division_abc/subdivision_xyz/ - Bob - arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob - 2013-10-02T17:01:44Z - 2014-10-10T14:37:51Z - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - -` - -const iamResponse_GetUser_unauthorized = ` - - Sender - AccessDenied - User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:GetUser on resource: arn:aws:iam::123456789012:user/Bob - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE -` - -const stsResponse_GetCallerIdentity_valid = ` - - arn:aws:iam::123456789012:user/Alice - AKIAI44QH8DHBEXAMPLE - 
123456789012 - - - 01234567-89ab-cdef-0123-456789abcdef - -` - -const stsResponse_GetCallerIdentity_unauthorized = ` - - Sender - AccessDenied - User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: sts:GetCallerIdentity - - 01234567-89ab-cdef-0123-456789abcdef -` - -const iamResponse_GetUser_federatedFailure = ` - - Sender - ValidationError - Must specify userName when calling with non-User credentials - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE -` - -const iamResponse_ListRoles_valid = ` - - true - AWceSSsKsazQ4IEplT9o4hURCzBs00iavlEvEXAMPLE - - - / - %7B%22Version%22%3A%222008-10-17%22%2C%22Statement%22%3A%5B%7B%22Sid%22%3A%22%22%2C%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D - AROACKCEVSQ6C2EXAMPLE - elasticbeanstalk-role - arn:aws:iam::123456789012:role/elasticbeanstalk-role - 2013-10-02T17:01:44Z - - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - -` - -const iamResponse_ListRoles_unauthorized = ` - - Sender - AccessDenied - User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:ListRoles on resource: arn:aws:iam::123456789012:role/ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE -` diff --git a/builtin/providers/aws/autoscaling_tags.go b/builtin/providers/aws/autoscaling_tags.go deleted file mode 100644 index 5c0911505..000000000 --- a/builtin/providers/aws/autoscaling_tags.go +++ /dev/null @@ -1,317 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -// autoscalingTagSchema returns the schema to use for the tag element. 
-func autoscalingTagSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "propagate_at_launch": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Set: autoscalingTagToHash, - } -} - -func autoscalingTagToHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["key"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool))) - - return hashcode.String(buf.String()) -} - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tag" -func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error { - resourceID := d.Get("name").(string) - var createTags, removeTags []*autoscaling.Tag - - if d.HasChange("tag") || d.HasChange("tags") { - oraw, nraw := d.GetChange("tag") - o := setToMapByKey(oraw.(*schema.Set), "key") - n := setToMapByKey(nraw.(*schema.Set), "key") - - old, err := autoscalingTagsFromMap(o, resourceID) - if err != nil { - return err - } - - new, err := autoscalingTagsFromMap(n, resourceID) - if err != nil { - return err - } - - c, r, err := diffAutoscalingTags(old, new, resourceID) - if err != nil { - return err - } - - createTags = append(createTags, c...) - removeTags = append(removeTags, r...) 
- - oraw, nraw = d.GetChange("tags") - old, err = autoscalingTagsFromList(oraw.([]interface{}), resourceID) - if err != nil { - return err - } - - new, err = autoscalingTagsFromList(nraw.([]interface{}), resourceID) - if err != nil { - return err - } - - c, r, err = diffAutoscalingTags(old, new, resourceID) - if err != nil { - return err - } - - createTags = append(createTags, c...) - removeTags = append(removeTags, r...) - } - - // Set tags - if len(removeTags) > 0 { - log.Printf("[DEBUG] Removing autoscaling tags: %#v", removeTags) - - remove := autoscaling.DeleteTagsInput{ - Tags: removeTags, - } - - if _, err := conn.DeleteTags(&remove); err != nil { - return err - } - } - - if len(createTags) > 0 { - log.Printf("[DEBUG] Creating autoscaling tags: %#v", createTags) - - create := autoscaling.CreateOrUpdateTagsInput{ - Tags: createTags, - } - - if _, err := conn.CreateOrUpdateTags(&create); err != nil { - return err - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffAutoscalingTags(oldTags, newTags []*autoscaling.Tag, resourceID string) ([]*autoscaling.Tag, []*autoscaling.Tag, error) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - tag := map[string]interface{}{ - "key": *t.Key, - "value": *t.Value, - "propagate_at_launch": *t.PropagateAtLaunch, - } - create[*t.Key] = tag - } - - // Build the list of what to remove - var remove []*autoscaling.Tag - for _, t := range oldTags { - old, ok := create[*t.Key].(map[string]interface{}) - - if !ok || old["value"] != *t.Value || old["propagate_at_launch"] != *t.PropagateAtLaunch { - // Delete it! 
- remove = append(remove, t) - } - } - - createTags, err := autoscalingTagsFromMap(create, resourceID) - if err != nil { - return nil, nil, err - } - - return createTags, remove, nil -} - -func autoscalingTagsFromList(vs []interface{}, resourceID string) ([]*autoscaling.Tag, error) { - result := make([]*autoscaling.Tag, 0, len(vs)) - for _, tag := range vs { - attr, ok := tag.(map[string]interface{}) - if !ok { - continue - } - - t, err := autoscalingTagFromMap(attr, resourceID) - if err != nil { - return nil, err - } - - if t != nil { - result = append(result, t) - } - } - return result, nil -} - -// tagsFromMap returns the tags for the given map of data. -func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) ([]*autoscaling.Tag, error) { - result := make([]*autoscaling.Tag, 0, len(m)) - for _, v := range m { - attr, ok := v.(map[string]interface{}) - if !ok { - continue - } - - t, err := autoscalingTagFromMap(attr, resourceID) - if err != nil { - return nil, err - } - - if t != nil { - result = append(result, t) - } - } - - return result, nil -} - -func autoscalingTagFromMap(attr map[string]interface{}, resourceID string) (*autoscaling.Tag, error) { - if _, ok := attr["key"]; !ok { - return nil, fmt.Errorf("%s: invalid tag attributes: key missing", resourceID) - } - - if _, ok := attr["value"]; !ok { - return nil, fmt.Errorf("%s: invalid tag attributes: value missing", resourceID) - } - - if _, ok := attr["propagate_at_launch"]; !ok { - return nil, fmt.Errorf("%s: invalid tag attributes: propagate_at_launch missing", resourceID) - } - - var propagateAtLaunch bool - var err error - - if v, ok := attr["propagate_at_launch"].(bool); ok { - propagateAtLaunch = v - } - - if v, ok := attr["propagate_at_launch"].(string); ok { - if propagateAtLaunch, err = strconv.ParseBool(v); err != nil { - return nil, fmt.Errorf( - "%s: invalid tag attribute: invalid value for propagate_at_launch: %s", - resourceID, - v, - ) - } - } - - t := &autoscaling.Tag{ - 
Key: aws.String(attr["key"].(string)), - Value: aws.String(attr["value"].(string)), - PropagateAtLaunch: aws.Bool(propagateAtLaunch), - ResourceId: aws.String(resourceID), - ResourceType: aws.String("auto-scaling-group"), - } - - if tagIgnoredAutoscaling(t) { - return nil, nil - } - - return t, nil -} - -// autoscalingTagsToMap turns the list of tags into a map. -func autoscalingTagsToMap(ts []*autoscaling.Tag) map[string]interface{} { - tags := make(map[string]interface{}) - for _, t := range ts { - tag := map[string]interface{}{ - "key": *t.Key, - "value": *t.Value, - "propagate_at_launch": *t.PropagateAtLaunch, - } - tags[*t.Key] = tag - } - - return tags -} - -// autoscalingTagDescriptionsToMap turns the list of tags into a map. -func autoscalingTagDescriptionsToMap(ts *[]*autoscaling.TagDescription) map[string]map[string]interface{} { - tags := make(map[string]map[string]interface{}) - for _, t := range *ts { - tag := map[string]interface{}{ - "key": *t.Key, - "value": *t.Value, - "propagate_at_launch": *t.PropagateAtLaunch, - } - tags[*t.Key] = tag - } - - return tags -} - -// autoscalingTagDescriptionsToSlice turns the list of tags into a slice. 
-func autoscalingTagDescriptionsToSlice(ts []*autoscaling.TagDescription) []map[string]interface{} { - tags := make([]map[string]interface{}, 0, len(ts)) - for _, t := range ts { - tags = append(tags, map[string]interface{}{ - "key": *t.Key, - "value": *t.Value, - "propagate_at_launch": *t.PropagateAtLaunch, - }) - } - - return tags -} - -func setToMapByKey(s *schema.Set, key string) map[string]interface{} { - result := make(map[string]interface{}) - for _, rawData := range s.List() { - data := rawData.(map[string]interface{}) - result[data[key].(string)] = data - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredAutoscaling(t *autoscaling.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/autoscaling_tags_test.go b/builtin/providers/aws/autoscaling_tags_test.go deleted file mode 100644 index 0107764d1..000000000 --- a/builtin/providers/aws/autoscaling_tags_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffAutoscalingTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]interface{} - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "bar", - "propagate_at_launch": true, - }, - }, - New: map[string]interface{}{ - "DifferentTag": map[string]interface{}{ - "key": "DifferentTag", - "value": "baz", - "propagate_at_launch": true, - }, 
- }, - Create: map[string]interface{}{ - "DifferentTag": map[string]interface{}{ - "key": "DifferentTag", - "value": "baz", - "propagate_at_launch": true, - }, - }, - Remove: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "bar", - "propagate_at_launch": true, - }, - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "bar", - "propagate_at_launch": true, - }, - }, - New: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "baz", - "propagate_at_launch": false, - }, - }, - Create: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "baz", - "propagate_at_launch": false, - }, - }, - Remove: map[string]interface{}{ - "Name": map[string]interface{}{ - "key": "Name", - "value": "bar", - "propagate_at_launch": true, - }, - }, - }, - } - - var resourceID = "sample" - - for i, tc := range cases { - awsTagsOld, err := autoscalingTagsFromMap(tc.Old, resourceID) - if err != nil { - t.Fatalf("%d: unexpected error convertig old tags: %v", i, err) - } - - awsTagsNew, err := autoscalingTagsFromMap(tc.New, resourceID) - if err != nil { - t.Fatalf("%d: unexpected error convertig new tags: %v", i, err) - } - - c, r, err := diffAutoscalingTags(awsTagsOld, awsTagsNew, resourceID) - if err != nil { - t.Fatalf("%d: unexpected error diff'ing tags: %v", i, err) - } - - cm := autoscalingTagsToMap(c) - rm := autoscalingTagsToMap(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: \n%#v\n%#v", i, cm, tc.Create) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: \n%#v\n%#v", i, rm, tc.Remove) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckAutoscalingTags( - ts *[]*autoscaling.TagDescription, key string, expected map[string]interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := autoscalingTagDescriptionsToMap(ts) - v, ok := m[key] - if !ok { - return fmt.Errorf("Missing tag: %s", key) - } - - if v["value"] != expected["value"].(string) || - v["propagate_at_launch"] != expected["propagate_at_launch"].(bool) { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} - -func testAccCheckAutoscalingTagNotExists(ts *[]*autoscaling.TagDescription, key string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := autoscalingTagDescriptionsToMap(ts) - if _, ok := m[key]; ok { - return fmt.Errorf("Tag exists when it should not: %s", key) - } - - return nil - } -} - -func TestIgnoringTagsAutoscaling(t *testing.T) { - var ignoredTags []*autoscaling.Tag - ignoredTags = append(ignoredTags, &autoscaling.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &autoscaling.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredAutoscaling(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} diff --git a/builtin/providers/aws/aws_sweeper_test.go b/builtin/providers/aws/aws_sweeper_test.go deleted file mode 100644 index 57f403c76..000000000 --- a/builtin/providers/aws/aws_sweeper_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package aws - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestMain(m *testing.M) { - resource.TestMain(m) -} - -// sharedClientForRegion returns a common AWSClient setup needed for the sweeper -// functions for a given region -func sharedClientForRegion(region string) (interface{}, error) { - if os.Getenv("AWS_ACCESS_KEY_ID") == "" { - return nil, fmt.Errorf("empty 
AWS_ACCESS_KEY_ID") - } - - if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { - return nil, fmt.Errorf("empty AWS_SECRET_ACCESS_KEY") - } - - conf := &Config{ - Region: region, - } - - // configures a default client for the region, using the above env vars - client, err := conf.Client() - if err != nil { - return nil, fmt.Errorf("error getting AWS client") - } - - return client, nil -} diff --git a/builtin/providers/aws/awserr.go b/builtin/providers/aws/awserr.go deleted file mode 100644 index 8fc056801..000000000 --- a/builtin/providers/aws/awserr.go +++ /dev/null @@ -1,14 +0,0 @@ -package aws - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -func isAWSErr(err error, code string, message string) bool { - if err, ok := err.(awserr.Error); ok { - return err.Code() == code && strings.Contains(err.Message(), message) - } - return false -} diff --git a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go b/builtin/providers/aws/cloudfront_distribution_configuration_structure.go deleted file mode 100644 index a47217647..000000000 --- a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go +++ /dev/null @@ -1,1140 +0,0 @@ -// CloudFront DistributionConfig structure helpers. -// -// These functions assist in pulling in data from Terraform resource -// configuration for the aws_cloudfront_distribution resource, as there are -// several sub-fields that require their own data type, and do not necessarily -// 1-1 translate to resource configuration. - -package aws - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -// cloudFrontRoute53ZoneID defines the route 53 zone ID for CloudFront. This -// is used to set the zone_id attribute. 
-const cloudFrontRoute53ZoneID = "Z2FDTNDATAQYW2" - -// Define Sort interface for []*string so we can ensure the order of -// geo_restrictions.locations -type StringPtrSlice []*string - -func (p StringPtrSlice) Len() int { return len(p) } -func (p StringPtrSlice) Less(i, j int) bool { return *p[i] < *p[j] } -func (p StringPtrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Assemble the *cloudfront.DistributionConfig variable. Calls out to various -// expander functions to convert attributes and sub-attributes to the various -// complex structures which are necessary to properly build the -// DistributionConfig structure. -// -// Used by the aws_cloudfront_distribution Create and Update functions. -func expandDistributionConfig(d *schema.ResourceData) *cloudfront.DistributionConfig { - distributionConfig := &cloudfront.DistributionConfig{ - CacheBehaviors: expandCacheBehaviors(d.Get("cache_behavior").(*schema.Set)), - CustomErrorResponses: expandCustomErrorResponses(d.Get("custom_error_response").(*schema.Set)), - DefaultCacheBehavior: expandDefaultCacheBehavior(d.Get("default_cache_behavior").(*schema.Set).List()[0].(map[string]interface{})), - Enabled: aws.Bool(d.Get("enabled").(bool)), - IsIPV6Enabled: aws.Bool(d.Get("is_ipv6_enabled").(bool)), - HttpVersion: aws.String(d.Get("http_version").(string)), - Origins: expandOrigins(d.Get("origin").(*schema.Set)), - PriceClass: aws.String(d.Get("price_class").(string)), - } - // This sets CallerReference if it's still pending computation (ie: new resource) - if v, ok := d.GetOk("caller_reference"); ok == false { - distributionConfig.CallerReference = aws.String(time.Now().Format(time.RFC3339Nano)) - } else { - distributionConfig.CallerReference = aws.String(v.(string)) - } - if v, ok := d.GetOk("comment"); ok { - distributionConfig.Comment = aws.String(v.(string)) - } else { - distributionConfig.Comment = aws.String("") - } - if v, ok := d.GetOk("default_root_object"); ok { - distributionConfig.DefaultRootObject 
= aws.String(v.(string)) - } else { - distributionConfig.DefaultRootObject = aws.String("") - } - if v, ok := d.GetOk("logging_config"); ok { - distributionConfig.Logging = expandLoggingConfig(v.(*schema.Set).List()[0].(map[string]interface{})) - } else { - distributionConfig.Logging = expandLoggingConfig(nil) - } - if v, ok := d.GetOk("aliases"); ok { - distributionConfig.Aliases = expandAliases(v.(*schema.Set)) - } else { - distributionConfig.Aliases = expandAliases(schema.NewSet(aliasesHash, []interface{}{})) - } - if v, ok := d.GetOk("restrictions"); ok { - distributionConfig.Restrictions = expandRestrictions(v.(*schema.Set).List()[0].(map[string]interface{})) - } - if v, ok := d.GetOk("viewer_certificate"); ok { - distributionConfig.ViewerCertificate = expandViewerCertificate(v.(*schema.Set).List()[0].(map[string]interface{})) - } - if v, ok := d.GetOk("web_acl_id"); ok { - distributionConfig.WebACLId = aws.String(v.(string)) - } else { - distributionConfig.WebACLId = aws.String("") - } - - return distributionConfig -} - -// Unpack the *cloudfront.DistributionConfig variable and set resource data. -// Calls out to flatten functions to convert the DistributionConfig -// sub-structures to their respective attributes in the -// aws_cloudfront_distribution resource. -// -// Used by the aws_cloudfront_distribution Read function. 
-func flattenDistributionConfig(d *schema.ResourceData, distributionConfig *cloudfront.DistributionConfig) error { - var err error - - d.Set("enabled", distributionConfig.Enabled) - d.Set("is_ipv6_enabled", distributionConfig.IsIPV6Enabled) - d.Set("price_class", distributionConfig.PriceClass) - d.Set("hosted_zone_id", cloudFrontRoute53ZoneID) - - err = d.Set("default_cache_behavior", flattenDefaultCacheBehavior(distributionConfig.DefaultCacheBehavior)) - if err != nil { - return err - } - err = d.Set("viewer_certificate", flattenViewerCertificate(distributionConfig.ViewerCertificate)) - if err != nil { - return err - } - - if distributionConfig.CallerReference != nil { - d.Set("caller_reference", distributionConfig.CallerReference) - } - if distributionConfig.Comment != nil { - if *distributionConfig.Comment != "" { - d.Set("comment", distributionConfig.Comment) - } - } - if distributionConfig.DefaultRootObject != nil { - d.Set("default_root_object", distributionConfig.DefaultRootObject) - } - if distributionConfig.HttpVersion != nil { - d.Set("http_version", distributionConfig.HttpVersion) - } - if distributionConfig.WebACLId != nil { - d.Set("web_acl_id", distributionConfig.WebACLId) - } - - if distributionConfig.CustomErrorResponses != nil { - err = d.Set("custom_error_response", flattenCustomErrorResponses(distributionConfig.CustomErrorResponses)) - if err != nil { - return err - } - } - if distributionConfig.CacheBehaviors != nil { - err = d.Set("cache_behavior", flattenCacheBehaviors(distributionConfig.CacheBehaviors)) - if err != nil { - return err - } - } - - if distributionConfig.Logging != nil && *distributionConfig.Logging.Enabled { - err = d.Set("logging_config", flattenLoggingConfig(distributionConfig.Logging)) - } else { - err = d.Set("logging_config", schema.NewSet(loggingConfigHash, []interface{}{})) - } - if err != nil { - return err - } - - if distributionConfig.Aliases != nil { - err = d.Set("aliases", flattenAliases(distributionConfig.Aliases)) 
- if err != nil { - return err - } - } - if distributionConfig.Restrictions != nil { - err = d.Set("restrictions", flattenRestrictions(distributionConfig.Restrictions)) - if err != nil { - return err - } - } - if *distributionConfig.Origins.Quantity > 0 { - err = d.Set("origin", flattenOrigins(distributionConfig.Origins)) - if err != nil { - return err - } - } - - return nil -} - -func expandDefaultCacheBehavior(m map[string]interface{}) *cloudfront.DefaultCacheBehavior { - cb := expandCacheBehavior(m) - var dcb cloudfront.DefaultCacheBehavior - - simpleCopyStruct(cb, &dcb) - return &dcb -} - -func flattenDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) *schema.Set { - m := make(map[string]interface{}) - var cb cloudfront.CacheBehavior - - simpleCopyStruct(dcb, &cb) - m = flattenCacheBehavior(&cb) - return schema.NewSet(defaultCacheBehaviorHash, []interface{}{m}) -} - -// Assemble the hash for the aws_cloudfront_distribution default_cache_behavior -// TypeSet attribute. -func defaultCacheBehaviorHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["compress"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["viewer_protocol_policy"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["target_origin_id"].(string))) - buf.WriteString(fmt.Sprintf("%d-", forwardedValuesHash(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})))) - buf.WriteString(fmt.Sprintf("%d-", m["min_ttl"].(int))) - if d, ok := m["trusted_signers"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["max_ttl"]; ok { - buf.WriteString(fmt.Sprintf("%d-", d.(int))) - } - if d, ok := m["smooth_streaming"]; ok { - buf.WriteString(fmt.Sprintf("%t-", d.(bool))) - } - if d, ok := m["default_ttl"]; ok { - buf.WriteString(fmt.Sprintf("%d-", d.(int))) - } - if d, ok := m["allowed_methods"]; ok { - for _, e := range 
sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["cached_methods"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["lambda_function_association"]; ok { - var associations []interface{} - switch d.(type) { - case *schema.Set: - associations = d.(*schema.Set).List() - default: - associations = d.([]interface{}) - } - for _, lfa := range associations { - buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{})))) - } - } - return hashcode.String(buf.String()) -} - -func expandCacheBehaviors(s *schema.Set) *cloudfront.CacheBehaviors { - var qty int64 - var items []*cloudfront.CacheBehavior - for _, v := range s.List() { - items = append(items, expandCacheBehavior(v.(map[string]interface{}))) - qty++ - } - return &cloudfront.CacheBehaviors{ - Quantity: aws.Int64(qty), - Items: items, - } -} - -func flattenCacheBehaviors(cbs *cloudfront.CacheBehaviors) *schema.Set { - s := []interface{}{} - for _, v := range cbs.Items { - s = append(s, flattenCacheBehavior(v)) - } - return schema.NewSet(cacheBehaviorHash, s) -} - -func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { - cb := &cloudfront.CacheBehavior{ - Compress: aws.Bool(m["compress"].(bool)), - ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), - TargetOriginId: aws.String(m["target_origin_id"].(string)), - ForwardedValues: expandForwardedValues(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})), - MinTTL: aws.Int64(int64(m["min_ttl"].(int))), - MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), - DefaultTTL: aws.Int64(int64(m["default_ttl"].(int))), - } - if v, ok := m["trusted_signers"]; ok { - cb.TrustedSigners = expandTrustedSigners(v.([]interface{})) - } else { - cb.TrustedSigners = expandTrustedSigners([]interface{}{}) - } - - if v, ok := 
m["lambda_function_association"]; ok { - cb.LambdaFunctionAssociations = expandLambdaFunctionAssociations(v.(*schema.Set).List()) - } - - if v, ok := m["smooth_streaming"]; ok { - cb.SmoothStreaming = aws.Bool(v.(bool)) - } - if v, ok := m["allowed_methods"]; ok { - cb.AllowedMethods = expandAllowedMethods(v.([]interface{})) - } - if v, ok := m["cached_methods"]; ok { - cb.AllowedMethods.CachedMethods = expandCachedMethods(v.([]interface{})) - } - if v, ok := m["path_pattern"]; ok { - cb.PathPattern = aws.String(v.(string)) - } - return cb -} - -func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { - m := make(map[string]interface{}) - - m["compress"] = *cb.Compress - m["viewer_protocol_policy"] = *cb.ViewerProtocolPolicy - m["target_origin_id"] = *cb.TargetOriginId - m["forwarded_values"] = schema.NewSet(forwardedValuesHash, []interface{}{flattenForwardedValues(cb.ForwardedValues)}) - m["min_ttl"] = int(*cb.MinTTL) - - if len(cb.TrustedSigners.Items) > 0 { - m["trusted_signers"] = flattenTrustedSigners(cb.TrustedSigners) - } - if len(cb.LambdaFunctionAssociations.Items) > 0 { - m["lambda_function_association"] = flattenLambdaFunctionAssociations(cb.LambdaFunctionAssociations) - } - if cb.MaxTTL != nil { - m["max_ttl"] = int(*cb.MaxTTL) - } - if cb.SmoothStreaming != nil { - m["smooth_streaming"] = *cb.SmoothStreaming - } - if cb.DefaultTTL != nil { - m["default_ttl"] = int(*cb.DefaultTTL) - } - if cb.AllowedMethods != nil { - m["allowed_methods"] = flattenAllowedMethods(cb.AllowedMethods) - } - if cb.AllowedMethods.CachedMethods != nil { - m["cached_methods"] = flattenCachedMethods(cb.AllowedMethods.CachedMethods) - } - if cb.PathPattern != nil { - m["path_pattern"] = *cb.PathPattern - } - return m -} - -// Assemble the hash for the aws_cloudfront_distribution cache_behavior -// TypeSet attribute. 
-func cacheBehaviorHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["compress"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["viewer_protocol_policy"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["target_origin_id"].(string))) - buf.WriteString(fmt.Sprintf("%d-", forwardedValuesHash(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})))) - buf.WriteString(fmt.Sprintf("%d-", m["min_ttl"].(int))) - if d, ok := m["trusted_signers"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["max_ttl"]; ok { - buf.WriteString(fmt.Sprintf("%d-", d.(int))) - } - if d, ok := m["smooth_streaming"]; ok { - buf.WriteString(fmt.Sprintf("%t-", d.(bool))) - } - if d, ok := m["default_ttl"]; ok { - buf.WriteString(fmt.Sprintf("%d-", d.(int))) - } - if d, ok := m["allowed_methods"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["cached_methods"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["path_pattern"]; ok { - buf.WriteString(fmt.Sprintf("%s-", d)) - } - if d, ok := m["lambda_function_association"]; ok { - var associations []interface{} - switch d.(type) { - case *schema.Set: - associations = d.(*schema.Set).List() - default: - associations = d.([]interface{}) - } - for _, lfa := range associations { - buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{})))) - } - } - return hashcode.String(buf.String()) -} - -func expandTrustedSigners(s []interface{}) *cloudfront.TrustedSigners { - var ts cloudfront.TrustedSigners - if len(s) > 0 { - ts.Quantity = aws.Int64(int64(len(s))) - ts.Items = expandStringList(s) - ts.Enabled = aws.Bool(true) - } else { - ts.Quantity = aws.Int64(0) - 
ts.Enabled = aws.Bool(false) - } - return &ts -} - -func flattenTrustedSigners(ts *cloudfront.TrustedSigners) []interface{} { - if ts.Items != nil { - return flattenStringList(ts.Items) - } - return []interface{}{} -} - -func lambdaFunctionAssociationHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["event_type"].(string))) - buf.WriteString(fmt.Sprintf("%s", m["lambda_arn"].(string))) - return hashcode.String(buf.String()) -} - -func expandLambdaFunctionAssociations(v interface{}) *cloudfront.LambdaFunctionAssociations { - if v == nil { - return &cloudfront.LambdaFunctionAssociations{ - Quantity: aws.Int64(0), - } - } - - s := v.([]interface{}) - var lfa cloudfront.LambdaFunctionAssociations - lfa.Quantity = aws.Int64(int64(len(s))) - lfa.Items = make([]*cloudfront.LambdaFunctionAssociation, len(s)) - for i, lf := range s { - lfa.Items[i] = expandLambdaFunctionAssociation(lf.(map[string]interface{})) - } - return &lfa -} - -func expandLambdaFunctionAssociation(lf map[string]interface{}) *cloudfront.LambdaFunctionAssociation { - var lfa cloudfront.LambdaFunctionAssociation - if v, ok := lf["event_type"]; ok { - lfa.EventType = aws.String(v.(string)) - } - if v, ok := lf["lambda_arn"]; ok { - lfa.LambdaFunctionARN = aws.String(v.(string)) - } - return &lfa -} - -func flattenLambdaFunctionAssociations(lfa *cloudfront.LambdaFunctionAssociations) *schema.Set { - s := schema.NewSet(lambdaFunctionAssociationHash, []interface{}{}) - for _, v := range lfa.Items { - s.Add(flattenLambdaFunctionAssociation(v)) - } - return s -} - -func flattenLambdaFunctionAssociation(lfa *cloudfront.LambdaFunctionAssociation) map[string]interface{} { - m := map[string]interface{}{} - if lfa != nil { - m["event_type"] = *lfa.EventType - m["lambda_arn"] = *lfa.LambdaFunctionARN - } - return m -} - -func expandForwardedValues(m map[string]interface{}) *cloudfront.ForwardedValues { - fv := &cloudfront.ForwardedValues{ - 
QueryString: aws.Bool(m["query_string"].(bool)), - } - if v, ok := m["cookies"]; ok && v.(*schema.Set).Len() > 0 { - fv.Cookies = expandCookiePreference(v.(*schema.Set).List()[0].(map[string]interface{})) - } - if v, ok := m["headers"]; ok { - fv.Headers = expandHeaders(v.([]interface{})) - } - if v, ok := m["query_string_cache_keys"]; ok { - fv.QueryStringCacheKeys = expandQueryStringCacheKeys(v.([]interface{})) - } - return fv -} - -func flattenForwardedValues(fv *cloudfront.ForwardedValues) map[string]interface{} { - m := make(map[string]interface{}) - m["query_string"] = *fv.QueryString - if fv.Cookies != nil { - m["cookies"] = schema.NewSet(cookiePreferenceHash, []interface{}{flattenCookiePreference(fv.Cookies)}) - } - if fv.Headers != nil { - m["headers"] = flattenHeaders(fv.Headers) - } - if fv.QueryStringCacheKeys != nil { - m["query_string_cache_keys"] = flattenQueryStringCacheKeys(fv.QueryStringCacheKeys) - } - return m -} - -// Assemble the hash for the aws_cloudfront_distribution forwarded_values -// TypeSet attribute. 
-func forwardedValuesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["query_string"].(bool))) - if d, ok := m["cookies"]; ok && d.(*schema.Set).Len() > 0 { - buf.WriteString(fmt.Sprintf("%d-", cookiePreferenceHash(d.(*schema.Set).List()[0].(map[string]interface{})))) - } - if d, ok := m["headers"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - if d, ok := m["query_string_cache_keys"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - return hashcode.String(buf.String()) -} - -func expandHeaders(d []interface{}) *cloudfront.Headers { - return &cloudfront.Headers{ - Quantity: aws.Int64(int64(len(d))), - Items: expandStringList(d), - } -} - -func flattenHeaders(h *cloudfront.Headers) []interface{} { - if h.Items != nil { - return flattenStringList(h.Items) - } - return []interface{}{} -} - -func expandQueryStringCacheKeys(d []interface{}) *cloudfront.QueryStringCacheKeys { - return &cloudfront.QueryStringCacheKeys{ - Quantity: aws.Int64(int64(len(d))), - Items: expandStringList(d), - } -} - -func flattenQueryStringCacheKeys(k *cloudfront.QueryStringCacheKeys) []interface{} { - if k.Items != nil { - return flattenStringList(k.Items) - } - return []interface{}{} -} - -func expandCookiePreference(m map[string]interface{}) *cloudfront.CookiePreference { - cp := &cloudfront.CookiePreference{ - Forward: aws.String(m["forward"].(string)), - } - if v, ok := m["whitelisted_names"]; ok { - cp.WhitelistedNames = expandCookieNames(v.([]interface{})) - } - return cp -} - -func flattenCookiePreference(cp *cloudfront.CookiePreference) map[string]interface{} { - m := make(map[string]interface{}) - m["forward"] = *cp.Forward - if cp.WhitelistedNames != nil { - m["whitelisted_names"] = flattenCookieNames(cp.WhitelistedNames) - } - return m -} - -// Assemble the 
hash for the aws_cloudfront_distribution cookies -// TypeSet attribute. -func cookiePreferenceHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["forward"].(string))) - if d, ok := m["whitelisted_names"]; ok { - for _, e := range sortInterfaceSlice(d.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", e.(string))) - } - } - return hashcode.String(buf.String()) -} - -func expandCookieNames(d []interface{}) *cloudfront.CookieNames { - return &cloudfront.CookieNames{ - Quantity: aws.Int64(int64(len(d))), - Items: expandStringList(d), - } -} - -func flattenCookieNames(cn *cloudfront.CookieNames) []interface{} { - if cn.Items != nil { - return flattenStringList(cn.Items) - } - return []interface{}{} -} - -func expandAllowedMethods(s []interface{}) *cloudfront.AllowedMethods { - return &cloudfront.AllowedMethods{ - Quantity: aws.Int64(int64(len(s))), - Items: expandStringList(s), - } -} - -func flattenAllowedMethods(am *cloudfront.AllowedMethods) []interface{} { - if am.Items != nil { - return flattenStringList(am.Items) - } - return []interface{}{} -} - -func expandCachedMethods(s []interface{}) *cloudfront.CachedMethods { - return &cloudfront.CachedMethods{ - Quantity: aws.Int64(int64(len(s))), - Items: expandStringList(s), - } -} - -func flattenCachedMethods(cm *cloudfront.CachedMethods) []interface{} { - if cm.Items != nil { - return flattenStringList(cm.Items) - } - return []interface{}{} -} - -func expandOrigins(s *schema.Set) *cloudfront.Origins { - qty := 0 - items := []*cloudfront.Origin{} - for _, v := range s.List() { - items = append(items, expandOrigin(v.(map[string]interface{}))) - qty++ - } - return &cloudfront.Origins{ - Quantity: aws.Int64(int64(qty)), - Items: items, - } -} - -func flattenOrigins(ors *cloudfront.Origins) *schema.Set { - s := []interface{}{} - for _, v := range ors.Items { - s = append(s, flattenOrigin(v)) - } - return schema.NewSet(originHash, s) -} - -func 
expandOrigin(m map[string]interface{}) *cloudfront.Origin { - origin := &cloudfront.Origin{ - Id: aws.String(m["origin_id"].(string)), - DomainName: aws.String(m["domain_name"].(string)), - } - if v, ok := m["custom_header"]; ok { - origin.CustomHeaders = expandCustomHeaders(v.(*schema.Set)) - } - if v, ok := m["custom_origin_config"]; ok { - if s := v.(*schema.Set).List(); len(s) > 0 { - origin.CustomOriginConfig = expandCustomOriginConfig(s[0].(map[string]interface{})) - } - } - if v, ok := m["origin_path"]; ok { - origin.OriginPath = aws.String(v.(string)) - } - if v, ok := m["s3_origin_config"]; ok { - if s := v.(*schema.Set).List(); len(s) > 0 { - origin.S3OriginConfig = expandS3OriginConfig(s[0].(map[string]interface{})) - } - } - - // if both custom and s3 origin are missing, add an empty s3 origin - // One or the other must be specified, but the S3 origin can be "empty" - if origin.S3OriginConfig == nil && origin.CustomOriginConfig == nil { - origin.S3OriginConfig = &cloudfront.S3OriginConfig{ - OriginAccessIdentity: aws.String(""), - } - } - - return origin -} - -func flattenOrigin(or *cloudfront.Origin) map[string]interface{} { - m := make(map[string]interface{}) - m["origin_id"] = *or.Id - m["domain_name"] = *or.DomainName - if or.CustomHeaders != nil { - m["custom_header"] = flattenCustomHeaders(or.CustomHeaders) - } - if or.CustomOriginConfig != nil { - m["custom_origin_config"] = schema.NewSet(customOriginConfigHash, []interface{}{flattenCustomOriginConfig(or.CustomOriginConfig)}) - } - if or.OriginPath != nil { - m["origin_path"] = *or.OriginPath - } - if or.S3OriginConfig != nil { - if or.S3OriginConfig.OriginAccessIdentity != nil && *or.S3OriginConfig.OriginAccessIdentity != "" { - m["s3_origin_config"] = schema.NewSet(s3OriginConfigHash, []interface{}{flattenS3OriginConfig(or.S3OriginConfig)}) - } - } - return m -} - -// Assemble the hash for the aws_cloudfront_distribution origin -// TypeSet attribute. 
-func originHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["origin_id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["domain_name"].(string))) - if v, ok := m["custom_header"]; ok { - buf.WriteString(fmt.Sprintf("%d-", customHeadersHash(v.(*schema.Set)))) - } - if v, ok := m["custom_origin_config"]; ok { - if s := v.(*schema.Set).List(); len(s) > 0 { - buf.WriteString(fmt.Sprintf("%d-", customOriginConfigHash((s[0].(map[string]interface{}))))) - } - } - if v, ok := m["origin_path"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["s3_origin_config"]; ok { - if s := v.(*schema.Set).List(); len(s) > 0 { - buf.WriteString(fmt.Sprintf("%d-", s3OriginConfigHash((s[0].(map[string]interface{}))))) - } - } - return hashcode.String(buf.String()) -} - -func expandCustomHeaders(s *schema.Set) *cloudfront.CustomHeaders { - qty := 0 - items := []*cloudfront.OriginCustomHeader{} - for _, v := range s.List() { - items = append(items, expandOriginCustomHeader(v.(map[string]interface{}))) - qty++ - } - return &cloudfront.CustomHeaders{ - Quantity: aws.Int64(int64(qty)), - Items: items, - } -} - -func flattenCustomHeaders(chs *cloudfront.CustomHeaders) *schema.Set { - s := []interface{}{} - for _, v := range chs.Items { - s = append(s, flattenOriginCustomHeader(v)) - } - return schema.NewSet(originCustomHeaderHash, s) -} - -func expandOriginCustomHeader(m map[string]interface{}) *cloudfront.OriginCustomHeader { - return &cloudfront.OriginCustomHeader{ - HeaderName: aws.String(m["name"].(string)), - HeaderValue: aws.String(m["value"].(string)), - } -} - -func flattenOriginCustomHeader(och *cloudfront.OriginCustomHeader) map[string]interface{} { - return map[string]interface{}{ - "name": *och.HeaderName, - "value": *och.HeaderValue, - } -} - -// Helper function used by originHash to get a composite hash for all -// aws_cloudfront_distribution custom_header attributes. 
-func customHeadersHash(s *schema.Set) int { - var buf bytes.Buffer - for _, v := range s.List() { - buf.WriteString(fmt.Sprintf("%d-", originCustomHeaderHash(v))) - } - return hashcode.String(buf.String()) -} - -// Assemble the hash for the aws_cloudfront_distribution custom_header -// TypeSet attribute. -func originCustomHeaderHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) - return hashcode.String(buf.String()) -} - -func expandCustomOriginConfig(m map[string]interface{}) *cloudfront.CustomOriginConfig { - - customOrigin := &cloudfront.CustomOriginConfig{ - OriginProtocolPolicy: aws.String(m["origin_protocol_policy"].(string)), - HTTPPort: aws.Int64(int64(m["http_port"].(int))), - HTTPSPort: aws.Int64(int64(m["https_port"].(int))), - OriginSslProtocols: expandCustomOriginConfigSSL(m["origin_ssl_protocols"].([]interface{})), - OriginReadTimeout: aws.Int64(int64(m["origin_read_timeout"].(int))), - OriginKeepaliveTimeout: aws.Int64(int64(m["origin_keepalive_timeout"].(int))), - } - - return customOrigin -} - -func flattenCustomOriginConfig(cor *cloudfront.CustomOriginConfig) map[string]interface{} { - - customOrigin := map[string]interface{}{ - "origin_protocol_policy": *cor.OriginProtocolPolicy, - "http_port": int(*cor.HTTPPort), - "https_port": int(*cor.HTTPSPort), - "origin_ssl_protocols": flattenCustomOriginConfigSSL(cor.OriginSslProtocols), - "origin_read_timeout": int(*cor.OriginReadTimeout), - "origin_keepalive_timeout": int(*cor.OriginKeepaliveTimeout), - } - - return customOrigin -} - -// Assemble the hash for the aws_cloudfront_distribution custom_origin_config -// TypeSet attribute. 
-func customOriginConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["origin_protocol_policy"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["http_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["https_port"].(int))) - for _, v := range sortInterfaceSlice(m["origin_ssl_protocols"].([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - buf.WriteString(fmt.Sprintf("%d-", m["origin_keepalive_timeout"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["origin_read_timeout"].(int))) - - return hashcode.String(buf.String()) -} - -func expandCustomOriginConfigSSL(s []interface{}) *cloudfront.OriginSslProtocols { - items := expandStringList(s) - return &cloudfront.OriginSslProtocols{ - Quantity: aws.Int64(int64(len(items))), - Items: items, - } -} - -func flattenCustomOriginConfigSSL(osp *cloudfront.OriginSslProtocols) []interface{} { - return flattenStringList(osp.Items) -} - -func expandS3OriginConfig(m map[string]interface{}) *cloudfront.S3OriginConfig { - return &cloudfront.S3OriginConfig{ - OriginAccessIdentity: aws.String(m["origin_access_identity"].(string)), - } -} - -func flattenS3OriginConfig(s3o *cloudfront.S3OriginConfig) map[string]interface{} { - return map[string]interface{}{ - "origin_access_identity": *s3o.OriginAccessIdentity, - } -} - -// Assemble the hash for the aws_cloudfront_distribution s3_origin_config -// TypeSet attribute. 
-func s3OriginConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["origin_access_identity"].(string))) - return hashcode.String(buf.String()) -} - -func expandCustomErrorResponses(s *schema.Set) *cloudfront.CustomErrorResponses { - qty := 0 - items := []*cloudfront.CustomErrorResponse{} - for _, v := range s.List() { - items = append(items, expandCustomErrorResponse(v.(map[string]interface{}))) - qty++ - } - return &cloudfront.CustomErrorResponses{ - Quantity: aws.Int64(int64(qty)), - Items: items, - } -} - -func flattenCustomErrorResponses(ers *cloudfront.CustomErrorResponses) *schema.Set { - s := []interface{}{} - for _, v := range ers.Items { - s = append(s, flattenCustomErrorResponse(v)) - } - return schema.NewSet(customErrorResponseHash, s) -} - -func expandCustomErrorResponse(m map[string]interface{}) *cloudfront.CustomErrorResponse { - er := cloudfront.CustomErrorResponse{ - ErrorCode: aws.Int64(int64(m["error_code"].(int))), - } - if v, ok := m["error_caching_min_ttl"]; ok { - er.ErrorCachingMinTTL = aws.Int64(int64(v.(int))) - } - if v, ok := m["response_code"]; ok && v.(int) != 0 { - er.ResponseCode = aws.String(strconv.Itoa(v.(int))) - } else { - er.ResponseCode = aws.String("") - } - if v, ok := m["response_page_path"]; ok { - er.ResponsePagePath = aws.String(v.(string)) - } - - return &er -} - -func flattenCustomErrorResponse(er *cloudfront.CustomErrorResponse) map[string]interface{} { - m := make(map[string]interface{}) - m["error_code"] = int(*er.ErrorCode) - if er.ErrorCachingMinTTL != nil { - m["error_caching_min_ttl"] = int(*er.ErrorCachingMinTTL) - } - if er.ResponseCode != nil { - m["response_code"], _ = strconv.Atoi(*er.ResponseCode) - } - if er.ResponsePagePath != nil { - m["response_page_path"] = *er.ResponsePagePath - } - return m -} - -// Assemble the hash for the aws_cloudfront_distribution custom_error_response -// TypeSet attribute. 
-func customErrorResponseHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["error_code"].(int))) - if v, ok := m["error_caching_min_ttl"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["response_code"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["response_page_path"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} - -func expandLoggingConfig(m map[string]interface{}) *cloudfront.LoggingConfig { - var lc cloudfront.LoggingConfig - if m != nil { - lc.Prefix = aws.String(m["prefix"].(string)) - lc.Bucket = aws.String(m["bucket"].(string)) - lc.IncludeCookies = aws.Bool(m["include_cookies"].(bool)) - lc.Enabled = aws.Bool(true) - } else { - lc.Prefix = aws.String("") - lc.Bucket = aws.String("") - lc.IncludeCookies = aws.Bool(false) - lc.Enabled = aws.Bool(false) - } - return &lc -} - -func flattenLoggingConfig(lc *cloudfront.LoggingConfig) *schema.Set { - m := make(map[string]interface{}) - m["prefix"] = *lc.Prefix - m["bucket"] = *lc.Bucket - m["include_cookies"] = *lc.IncludeCookies - return schema.NewSet(loggingConfigHash, []interface{}{m}) -} - -// Assemble the hash for the aws_cloudfront_distribution logging_config -// TypeSet attribute. 
-func loggingConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["bucket"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["include_cookies"].(bool))) - return hashcode.String(buf.String()) -} - -func expandAliases(as *schema.Set) *cloudfront.Aliases { - s := as.List() - var aliases cloudfront.Aliases - if len(s) > 0 { - aliases.Quantity = aws.Int64(int64(len(s))) - aliases.Items = expandStringList(s) - } else { - aliases.Quantity = aws.Int64(0) - } - return &aliases -} - -func flattenAliases(aliases *cloudfront.Aliases) *schema.Set { - if aliases.Items != nil { - return schema.NewSet(aliasesHash, flattenStringList(aliases.Items)) - } - return schema.NewSet(aliasesHash, []interface{}{}) -} - -// Assemble the hash for the aws_cloudfront_distribution aliases -// TypeSet attribute. -func aliasesHash(v interface{}) int { - return hashcode.String(v.(string)) -} - -func expandRestrictions(m map[string]interface{}) *cloudfront.Restrictions { - return &cloudfront.Restrictions{ - GeoRestriction: expandGeoRestriction(m["geo_restriction"].(*schema.Set).List()[0].(map[string]interface{})), - } -} - -func flattenRestrictions(r *cloudfront.Restrictions) *schema.Set { - m := make(map[string]interface{}) - s := schema.NewSet(geoRestrictionHash, []interface{}{flattenGeoRestriction(r.GeoRestriction)}) - m["geo_restriction"] = s - return schema.NewSet(restrictionsHash, []interface{}{m}) -} - -// Assemble the hash for the aws_cloudfront_distribution restrictions -// TypeSet attribute. 
-func restrictionsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", geoRestrictionHash(m["geo_restriction"].(*schema.Set).List()[0].(map[string]interface{})))) - return hashcode.String(buf.String()) -} - -func expandGeoRestriction(m map[string]interface{}) *cloudfront.GeoRestriction { - gr := cloudfront.GeoRestriction{ - RestrictionType: aws.String(m["restriction_type"].(string)), - } - if v, ok := m["locations"]; ok { - gr.Quantity = aws.Int64(int64(len(v.([]interface{})))) - gr.Items = expandStringList(v.([]interface{})) - sort.Sort(StringPtrSlice(gr.Items)) - } else { - gr.Quantity = aws.Int64(0) - } - return &gr -} - -func flattenGeoRestriction(gr *cloudfront.GeoRestriction) map[string]interface{} { - m := make(map[string]interface{}) - - m["restriction_type"] = *gr.RestrictionType - if gr.Items != nil { - sort.Sort(StringPtrSlice(gr.Items)) - m["locations"] = flattenStringList(gr.Items) - } - return m -} - -// Assemble the hash for the aws_cloudfront_distribution geo_restriction -// TypeSet attribute. -func geoRestrictionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - // All keys added in alphabetical order. 
- buf.WriteString(fmt.Sprintf("%s-", m["restriction_type"].(string))) - if v, ok := m["locations"]; ok { - for _, w := range sortInterfaceSlice(v.([]interface{})) { - buf.WriteString(fmt.Sprintf("%s-", w.(string))) - } - } - return hashcode.String(buf.String()) -} - -func expandViewerCertificate(m map[string]interface{}) *cloudfront.ViewerCertificate { - var vc cloudfront.ViewerCertificate - if v, ok := m["iam_certificate_id"]; ok && v != "" { - vc.IAMCertificateId = aws.String(v.(string)) - vc.SSLSupportMethod = aws.String(m["ssl_support_method"].(string)) - } else if v, ok := m["acm_certificate_arn"]; ok && v != "" { - vc.ACMCertificateArn = aws.String(v.(string)) - vc.SSLSupportMethod = aws.String(m["ssl_support_method"].(string)) - } else { - vc.CloudFrontDefaultCertificate = aws.Bool(m["cloudfront_default_certificate"].(bool)) - } - if v, ok := m["minimum_protocol_version"]; ok && v != "" { - vc.MinimumProtocolVersion = aws.String(v.(string)) - } - return &vc -} - -func flattenViewerCertificate(vc *cloudfront.ViewerCertificate) *schema.Set { - m := make(map[string]interface{}) - - if vc.IAMCertificateId != nil { - m["iam_certificate_id"] = *vc.IAMCertificateId - m["ssl_support_method"] = *vc.SSLSupportMethod - } - if vc.ACMCertificateArn != nil { - m["acm_certificate_arn"] = *vc.ACMCertificateArn - m["ssl_support_method"] = *vc.SSLSupportMethod - } - if vc.CloudFrontDefaultCertificate != nil { - m["cloudfront_default_certificate"] = *vc.CloudFrontDefaultCertificate - } - if vc.MinimumProtocolVersion != nil { - m["minimum_protocol_version"] = *vc.MinimumProtocolVersion - } - return schema.NewSet(viewerCertificateHash, []interface{}{m}) -} - -// Assemble the hash for the aws_cloudfront_distribution viewer_certificate -// TypeSet attribute. 
-func viewerCertificateHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["iam_certificate_id"]; ok && v.(string) != "" { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - buf.WriteString(fmt.Sprintf("%s-", m["ssl_support_method"].(string))) - } else if v, ok := m["acm_certificate_arn"]; ok && v.(string) != "" { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - buf.WriteString(fmt.Sprintf("%s-", m["ssl_support_method"].(string))) - } else { - buf.WriteString(fmt.Sprintf("%t-", m["cloudfront_default_certificate"].(bool))) - } - if v, ok := m["minimum_protocol_version"]; ok && v.(string) != "" { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} - -// Do a top-level copy of struct fields from one struct to another. Used to -// copy fields between CacheBehavior and DefaultCacheBehavior structs. -func simpleCopyStruct(src, dst interface{}) { - s := reflect.ValueOf(src).Elem() - d := reflect.ValueOf(dst).Elem() - - for i := 0; i < s.NumField(); i++ { - if s.Field(i).CanSet() == true { - if s.Field(i).Interface() != nil { - for j := 0; j < d.NumField(); j++ { - if d.Type().Field(j).Name == s.Type().Field(i).Name { - d.Field(j).Set(s.Field(i)) - } - } - } - } - } -} - -// Convert *cloudfront.ActiveTrustedSigners to a flatmap.Map type, which ensures -// it can probably be inserted into the schema.TypeMap type used by the -// active_trusted_signers attribute. 
-func flattenActiveTrustedSigners(ats *cloudfront.ActiveTrustedSigners) flatmap.Map { - m := make(map[string]interface{}) - s := []interface{}{} - m["enabled"] = *ats.Enabled - - for _, v := range ats.Items { - signer := make(map[string]interface{}) - signer["aws_account_number"] = *v.AwsAccountNumber - signer["key_pair_ids"] = aws.StringValueSlice(v.KeyPairIds.Items) - s = append(s, signer) - } - m["items"] = s - return flatmap.Flatten(m) -} diff --git a/builtin/providers/aws/cloudfront_distribution_configuration_structure_test.go b/builtin/providers/aws/cloudfront_distribution_configuration_structure_test.go deleted file mode 100644 index cb594d48e..000000000 --- a/builtin/providers/aws/cloudfront_distribution_configuration_structure_test.go +++ /dev/null @@ -1,1161 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" -) - -func defaultCacheBehaviorConf() map[string]interface{} { - return map[string]interface{}{ - "viewer_protocol_policy": "allow-all", - "target_origin_id": "myS3Origin", - "forwarded_values": schema.NewSet(forwardedValuesHash, []interface{}{forwardedValuesConf()}), - "min_ttl": 86400, - "trusted_signers": trustedSignersConf(), - "lambda_function_association": lambdaFunctionAssociationsConf(), - "max_ttl": 365000000, - "smooth_streaming": false, - "default_ttl": 86400, - "allowed_methods": allowedMethodsConf(), - "cached_methods": cachedMethodsConf(), - "compress": true, - } -} - -func cacheBehaviorConf1() map[string]interface{} { - cb := defaultCacheBehaviorConf() - cb["path_pattern"] = "/path1" - return cb -} - -func cacheBehaviorConf2() map[string]interface{} { - cb := defaultCacheBehaviorConf() - cb["path_pattern"] = "/path2" - return cb -} - -func cacheBehaviorsConf() *schema.Set { - return schema.NewSet(cacheBehaviorHash, []interface{}{cacheBehaviorConf1(), cacheBehaviorConf2()}) -} - -func 
trustedSignersConf() []interface{} { - return []interface{}{"1234567890EX", "1234567891EX"} -} - -func lambdaFunctionAssociationsConf() *schema.Set { - x := []interface{}{ - map[string]interface{}{ - "event_type": "viewer-request", - "lambda_arn": "arn:aws:lambda:us-east-1:999999999:function1:alias", - }, - map[string]interface{}{ - "event_type": "origin-response", - "lambda_arn": "arn:aws:lambda:us-east-1:999999999:function2:alias", - }, - } - - return schema.NewSet(lambdaFunctionAssociationHash, x) -} - -func forwardedValuesConf() map[string]interface{} { - return map[string]interface{}{ - "query_string": true, - "query_string_cache_keys": queryStringCacheKeysConf(), - "cookies": schema.NewSet(cookiePreferenceHash, []interface{}{cookiePreferenceConf()}), - "headers": headersConf(), - } -} - -func headersConf() []interface{} { - return []interface{}{"X-Example1", "X-Example2"} -} - -func queryStringCacheKeysConf() []interface{} { - return []interface{}{"foo", "bar"} -} - -func cookiePreferenceConf() map[string]interface{} { - return map[string]interface{}{ - "forward": "whitelist", - "whitelisted_names": cookieNamesConf(), - } -} - -func cookieNamesConf() []interface{} { - return []interface{}{"Example1", "Example2"} -} - -func allowedMethodsConf() []interface{} { - return []interface{}{"DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"} -} - -func cachedMethodsConf() []interface{} { - return []interface{}{"GET", "HEAD", "OPTIONS"} -} - -func originCustomHeadersConf() *schema.Set { - return schema.NewSet(originCustomHeaderHash, []interface{}{originCustomHeaderConf1(), originCustomHeaderConf2()}) -} - -func originCustomHeaderConf1() map[string]interface{} { - return map[string]interface{}{ - "name": "X-Custom-Header1", - "value": "samplevalue", - } -} - -func originCustomHeaderConf2() map[string]interface{} { - return map[string]interface{}{ - "name": "X-Custom-Header2", - "value": "samplevalue", - } -} - -func customOriginConf() map[string]interface{} { - 
return map[string]interface{}{ - "origin_protocol_policy": "http-only", - "http_port": 80, - "https_port": 443, - "origin_ssl_protocols": customOriginSslProtocolsConf(), - "origin_read_timeout": 30, - "origin_keepalive_timeout": 5, - } -} - -func customOriginSslProtocolsConf() []interface{} { - return []interface{}{"SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2"} -} - -func s3OriginConf() map[string]interface{} { - return map[string]interface{}{ - "origin_access_identity": "origin-access-identity/cloudfront/E127EXAMPLE51Z", - } -} - -func originWithCustomConf() map[string]interface{} { - return map[string]interface{}{ - "origin_id": "CustomOrigin", - "domain_name": "www.example.com", - "origin_path": "/", - "custom_origin_config": schema.NewSet(customOriginConfigHash, []interface{}{customOriginConf()}), - "custom_header": originCustomHeadersConf(), - } -} -func originWithS3Conf() map[string]interface{} { - return map[string]interface{}{ - "origin_id": "S3Origin", - "domain_name": "s3.example.com", - "origin_path": "/", - "s3_origin_config": schema.NewSet(s3OriginConfigHash, []interface{}{s3OriginConf()}), - "custom_header": originCustomHeadersConf(), - } -} - -func multiOriginConf() *schema.Set { - return schema.NewSet(originHash, []interface{}{originWithCustomConf(), originWithS3Conf()}) -} - -func geoRestrictionWhitelistConf() map[string]interface{} { - return map[string]interface{}{ - "restriction_type": "whitelist", - "locations": []interface{}{"CA", "GB", "US"}, - } -} - -func geoRestrictionsConf() map[string]interface{} { - return map[string]interface{}{ - "geo_restriction": schema.NewSet(geoRestrictionHash, []interface{}{geoRestrictionWhitelistConf()}), - } -} - -func geoRestrictionConfNoItems() map[string]interface{} { - return map[string]interface{}{ - "restriction_type": "none", - } -} - -func customErrorResponsesConf() []interface{} { - return []interface{}{ - map[string]interface{}{ - "error_code": 404, - "error_caching_min_ttl": 30, - "response_code": 200, - 
"response_page_path": "/error-pages/404.html", - }, - map[string]interface{}{ - "error_code": 403, - "error_caching_min_ttl": 15, - "response_code": 404, - "response_page_path": "/error-pages/404.html", - }, - } -} - -func aliasesConf() *schema.Set { - return schema.NewSet(aliasesHash, []interface{}{"example.com", "www.example.com"}) -} - -func loggingConfigConf() map[string]interface{} { - return map[string]interface{}{ - "include_cookies": false, - "bucket": "mylogs.s3.amazonaws.com", - "prefix": "myprefix", - } -} - -func customErrorResponsesConfSet() *schema.Set { - return schema.NewSet(customErrorResponseHash, customErrorResponsesConf()) -} - -func customErrorResponsesConfFirst() map[string]interface{} { - return customErrorResponsesConf()[0].(map[string]interface{}) -} - -func customErrorResponseConfNoResponseCode() map[string]interface{} { - er := customErrorResponsesConf()[0].(map[string]interface{}) - er["response_code"] = 0 - er["response_page_path"] = "" - return er -} - -func viewerCertificateConfSetCloudFrontDefault() map[string]interface{} { - return map[string]interface{}{ - "acm_certificate_arn": "", - "cloudfront_default_certificate": true, - "iam_certificate_id": "", - "minimum_protocol_version": "", - "ssl_support_method": "", - } -} - -func viewerCertificateConfSetIAM() map[string]interface{} { - return map[string]interface{}{ - "acm_certificate_arn": "", - "cloudfront_default_certificate": false, - "iam_certificate_id": "iamcert-01234567", - "ssl_support_method": "vip", - "minimum_protocol_version": "TLSv1", - } -} - -func viewerCertificateConfSetACM() map[string]interface{} { - return map[string]interface{}{ - "acm_certificate_arn": "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012", - "cloudfront_default_certificate": false, - "iam_certificate_id": "", - "ssl_support_method": "sni-only", - "minimum_protocol_version": "TLSv1", - } -} - -func TestCloudFrontStructure_expandDefaultCacheBehavior(t *testing.T) { - 
data := defaultCacheBehaviorConf() - dcb := expandDefaultCacheBehavior(data) - if dcb == nil { - t.Fatalf("ExpandDefaultCacheBehavior returned nil") - } - if *dcb.Compress != true { - t.Fatalf("Expected Compress to be true, got %v", *dcb.Compress) - } - if *dcb.ViewerProtocolPolicy != "allow-all" { - t.Fatalf("Expected ViewerProtocolPolicy to be allow-all, got %v", *dcb.ViewerProtocolPolicy) - } - if *dcb.TargetOriginId != "myS3Origin" { - t.Fatalf("Expected TargetOriginId to be allow-all, got %v", *dcb.TargetOriginId) - } - if reflect.DeepEqual(dcb.ForwardedValues.Headers.Items, expandStringList(headersConf())) != true { - t.Fatalf("Expected Items to be %v, got %v", headersConf(), dcb.ForwardedValues.Headers.Items) - } - if *dcb.MinTTL != 86400 { - t.Fatalf("Expected MinTTL to be 86400, got %v", *dcb.MinTTL) - } - if reflect.DeepEqual(dcb.TrustedSigners.Items, expandStringList(trustedSignersConf())) != true { - t.Fatalf("Expected TrustedSigners.Items to be %v, got %v", trustedSignersConf(), dcb.TrustedSigners.Items) - } - if *dcb.MaxTTL != 365000000 { - t.Fatalf("Expected MaxTTL to be 365000000, got %v", *dcb.MaxTTL) - } - if *dcb.SmoothStreaming != false { - t.Fatalf("Expected SmoothStreaming to be false, got %v", *dcb.SmoothStreaming) - } - if *dcb.DefaultTTL != 86400 { - t.Fatalf("Expected DefaultTTL to be 86400, got %v", *dcb.DefaultTTL) - } - if *dcb.LambdaFunctionAssociations.Quantity != 2 { - t.Fatalf("Expected LambdaFunctionAssociations to be 2, got %v", *dcb.LambdaFunctionAssociations.Quantity) - } - if reflect.DeepEqual(dcb.AllowedMethods.Items, expandStringList(allowedMethodsConf())) != true { - t.Fatalf("Expected TrustedSigners.Items to be %v, got %v", allowedMethodsConf(), dcb.AllowedMethods.Items) - } - if reflect.DeepEqual(dcb.AllowedMethods.CachedMethods.Items, expandStringList(cachedMethodsConf())) != true { - t.Fatalf("Expected TrustedSigners.Items to be %v, got %v", cachedMethodsConf(), dcb.AllowedMethods.CachedMethods.Items) - } -} - -func 
TestCloudFrontStructure_flattenDefaultCacheBehavior(t *testing.T) { - in := defaultCacheBehaviorConf() - dcb := expandDefaultCacheBehavior(in) - out := flattenDefaultCacheBehavior(dcb) - diff := schema.NewSet(defaultCacheBehaviorHash, []interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandCacheBehavior(t *testing.T) { - data := cacheBehaviorConf1() - cb := expandCacheBehavior(data) - if *cb.Compress != true { - t.Fatalf("Expected Compress to be true, got %v", *cb.Compress) - } - if *cb.ViewerProtocolPolicy != "allow-all" { - t.Fatalf("Expected ViewerProtocolPolicy to be allow-all, got %v", *cb.ViewerProtocolPolicy) - } - if *cb.TargetOriginId != "myS3Origin" { - t.Fatalf("Expected TargetOriginId to be myS3Origin, got %v", *cb.TargetOriginId) - } - if reflect.DeepEqual(cb.ForwardedValues.Headers.Items, expandStringList(headersConf())) != true { - t.Fatalf("Expected Items to be %v, got %v", headersConf(), cb.ForwardedValues.Headers.Items) - } - if *cb.MinTTL != 86400 { - t.Fatalf("Expected MinTTL to be 86400, got %v", *cb.MinTTL) - } - if reflect.DeepEqual(cb.TrustedSigners.Items, expandStringList(trustedSignersConf())) != true { - t.Fatalf("Expected TrustedSigners.Items to be %v, got %v", trustedSignersConf(), cb.TrustedSigners.Items) - } - if *cb.MaxTTL != 365000000 { - t.Fatalf("Expected MaxTTL to be 365000000, got %v", *cb.MaxTTL) - } - if *cb.SmoothStreaming != false { - t.Fatalf("Expected SmoothStreaming to be false, got %v", *cb.SmoothStreaming) - } - if *cb.DefaultTTL != 86400 { - t.Fatalf("Expected DefaultTTL to be 86400, got %v", *cb.DefaultTTL) - } - if *cb.LambdaFunctionAssociations.Quantity != 2 { - t.Fatalf("Expected LambdaFunctionAssociations to be 2, got %v", *cb.LambdaFunctionAssociations.Quantity) - } - if reflect.DeepEqual(cb.AllowedMethods.Items, expandStringList(allowedMethodsConf())) != true { - t.Fatalf("Expected 
AllowedMethods.Items to be %v, got %v", allowedMethodsConf(), cb.AllowedMethods.Items) - } - if reflect.DeepEqual(cb.AllowedMethods.CachedMethods.Items, expandStringList(cachedMethodsConf())) != true { - t.Fatalf("Expected AllowedMethods.CachedMethods.Items to be %v, got %v", cachedMethodsConf(), cb.AllowedMethods.CachedMethods.Items) - } - if *cb.PathPattern != "/path1" { - t.Fatalf("Expected PathPattern to be /path1, got %v", *cb.PathPattern) - } -} - -func TestCloudFrontStructure_flattenCacheBehavior(t *testing.T) { - in := cacheBehaviorConf1() - cb := expandCacheBehavior(in) - out := flattenCacheBehavior(cb) - var diff *schema.Set - if out["compress"] != true { - t.Fatalf("Expected out[compress] to be true, got %v", out["compress"]) - } - if out["viewer_protocol_policy"] != "allow-all" { - t.Fatalf("Expected out[viewer_protocol_policy] to be allow-all, got %v", out["viewer_protocol_policy"]) - } - if out["target_origin_id"] != "myS3Origin" { - t.Fatalf("Expected out[target_origin_id] to be myS3Origin, got %v", out["target_origin_id"]) - } - - var outSet, ok = out["lambda_function_association"].(*schema.Set) - if !ok { - t.Fatalf("out['lambda_function_association'] is not a slice as expected: %#v", out["lambda_function_association"]) - } - - inSet, ok := in["lambda_function_association"].(*schema.Set) - if !ok { - t.Fatalf("in['lambda_function_association'] is not a set as expected: %#v", in["lambda_function_association"]) - } - - if !inSet.Equal(outSet) { - t.Fatalf("in / out sets are not equal, in: \n%#v\n\nout: \n%#v\n", inSet, outSet) - } - - diff = out["forwarded_values"].(*schema.Set).Difference(in["forwarded_values"].(*schema.Set)) - if len(diff.List()) > 0 { - t.Fatalf("Expected out[forwarded_values] to be %v, got %v, diff: %v", out["forwarded_values"], in["forwarded_values"], diff) - } - if out["min_ttl"] != int(86400) { - t.Fatalf("Expected out[min_ttl] to be 86400 (int), got %v", out["forwarded_values"]) - } - if 
reflect.DeepEqual(out["trusted_signers"], in["trusted_signers"]) != true { - t.Fatalf("Expected out[trusted_signers] to be %v, got %v", in["trusted_signers"], out["trusted_signers"]) - } - if out["max_ttl"] != int(365000000) { - t.Fatalf("Expected out[max_ttl] to be 365000000 (int), got %v", out["max_ttl"]) - } - if out["smooth_streaming"] != false { - t.Fatalf("Expected out[smooth_streaming] to be false, got %v", out["smooth_streaming"]) - } - if out["default_ttl"] != int(86400) { - t.Fatalf("Expected out[default_ttl] to be 86400 (int), got %v", out["default_ttl"]) - } - if reflect.DeepEqual(out["allowed_methods"], in["allowed_methods"]) != true { - t.Fatalf("Expected out[allowed_methods] to be %v, got %v", in["allowed_methods"], out["allowed_methods"]) - } - if reflect.DeepEqual(out["cached_methods"], in["cached_methods"]) != true { - t.Fatalf("Expected out[cached_methods] to be %v, got %v", in["cached_methods"], out["cached_methods"]) - } - if out["path_pattern"] != "/path1" { - t.Fatalf("Expected out[path_pattern] to be /path1, got %v", out["path_pattern"]) - } -} - -func TestCloudFrontStructure_expandCacheBehaviors(t *testing.T) { - data := cacheBehaviorsConf() - cbs := expandCacheBehaviors(data) - if *cbs.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *cbs.Quantity) - } - if *cbs.Items[0].TargetOriginId != "myS3Origin" { - t.Fatalf("Expected first Item's TargetOriginId to be myS3Origin, got %v", *cbs.Items[0].TargetOriginId) - } -} - -func TestCloudFrontStructure_flattenCacheBehaviors(t *testing.T) { - in := cacheBehaviorsConf() - cbs := expandCacheBehaviors(in) - out := flattenCacheBehaviors(cbs) - diff := in.Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandTrustedSigners(t *testing.T) { - data := trustedSignersConf() - ts := expandTrustedSigners(data) - if *ts.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", 
*ts.Quantity) - } - if *ts.Enabled != true { - t.Fatalf("Expected Enabled to be true, got %v", *ts.Enabled) - } - if reflect.DeepEqual(ts.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, ts.Items) - } -} - -func TestCloudFrontStructure_flattenTrustedSigners(t *testing.T) { - in := trustedSignersConf() - ts := expandTrustedSigners(in) - out := flattenTrustedSigners(ts) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandTrustedSigners_empty(t *testing.T) { - data := []interface{}{} - ts := expandTrustedSigners(data) - if *ts.Quantity != 0 { - t.Fatalf("Expected Quantity to be 0, got %v", *ts.Quantity) - } - if *ts.Enabled != false { - t.Fatalf("Expected Enabled to be true, got %v", *ts.Enabled) - } - if ts.Items != nil { - t.Fatalf("Expected Items to be nil, got %v", ts.Items) - } -} - -func TestCloudFrontStructure_expandLambdaFunctionAssociations(t *testing.T) { - data := lambdaFunctionAssociationsConf() - lfa := expandLambdaFunctionAssociations(data.List()) - if *lfa.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *lfa.Quantity) - } - if len(lfa.Items) != 2 { - t.Fatalf("Expected Items to be len 2, got %v", len(lfa.Items)) - } - if et := "viewer-request"; *lfa.Items[0].EventType != et { - t.Fatalf("Expected first Item's EventType to be %q, got %q", et, *lfa.Items[0].EventType) - } - if et := "origin-response"; *lfa.Items[1].EventType != et { - t.Fatalf("Expected second Item's EventType to be %q, got %q", et, *lfa.Items[1].EventType) - } -} - -func TestCloudFrontStructure_flattenlambdaFunctionAssociations(t *testing.T) { - in := lambdaFunctionAssociationsConf() - lfa := expandLambdaFunctionAssociations(in.List()) - out := flattenLambdaFunctionAssociations(lfa) - - if reflect.DeepEqual(in.List(), out.List()) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func 
TestCloudFrontStructure_expandlambdaFunctionAssociations_empty(t *testing.T) { - data := new(schema.Set) - lfa := expandLambdaFunctionAssociations(data.List()) - if *lfa.Quantity != 0 { - t.Fatalf("Expected Quantity to be 0, got %v", *lfa.Quantity) - } - if len(lfa.Items) != 0 { - t.Fatalf("Expected Items to be len 0, got %v", len(lfa.Items)) - } - if reflect.DeepEqual(lfa.Items, []*cloudfront.LambdaFunctionAssociation{}) != true { - t.Fatalf("Expected Items to be empty, got %v", lfa.Items) - } -} - -func TestCloudFrontStructure_expandForwardedValues(t *testing.T) { - data := forwardedValuesConf() - fv := expandForwardedValues(data) - if *fv.QueryString != true { - t.Fatalf("Expected QueryString to be true, got %v", *fv.QueryString) - } - if reflect.DeepEqual(fv.Cookies.WhitelistedNames.Items, expandStringList(cookieNamesConf())) != true { - t.Fatalf("Expected Cookies.WhitelistedNames.Items to be %v, got %v", cookieNamesConf(), fv.Cookies.WhitelistedNames.Items) - } - if reflect.DeepEqual(fv.Headers.Items, expandStringList(headersConf())) != true { - t.Fatalf("Expected Headers.Items to be %v, got %v", headersConf(), fv.Headers.Items) - } -} - -func TestCloudFrontStructure_flattenForwardedValues(t *testing.T) { - in := forwardedValuesConf() - fv := expandForwardedValues(in) - out := flattenForwardedValues(fv) - - if out["query_string"] != true { - t.Fatalf("Expected out[query_string] to be true, got %v", out["query_string"]) - } - if out["cookies"].(*schema.Set).Equal(in["cookies"].(*schema.Set)) != true { - t.Fatalf("Expected out[cookies] to be %v, got %v", in["cookies"], out["cookies"]) - } - if reflect.DeepEqual(out["headers"], in["headers"]) != true { - t.Fatalf("Expected out[headers] to be %v, got %v", in["headers"], out["headers"]) - } -} - -func TestCloudFrontStructure_expandHeaders(t *testing.T) { - data := headersConf() - h := expandHeaders(data) - if *h.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *h.Quantity) - } - if 
reflect.DeepEqual(h.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, h.Items) - } -} - -func TestCloudFrontStructure_flattenHeaders(t *testing.T) { - in := headersConf() - h := expandHeaders(in) - out := flattenHeaders(h) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandQueryStringCacheKeys(t *testing.T) { - data := queryStringCacheKeysConf() - k := expandQueryStringCacheKeys(data) - if *k.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *k.Quantity) - } - if reflect.DeepEqual(k.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, k.Items) - } -} - -func TestCloudFrontStructure_flattenQueryStringCacheKeys(t *testing.T) { - in := queryStringCacheKeysConf() - k := expandQueryStringCacheKeys(in) - out := flattenQueryStringCacheKeys(k) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCookiePreference(t *testing.T) { - data := cookiePreferenceConf() - cp := expandCookiePreference(data) - if *cp.Forward != "whitelist" { - t.Fatalf("Expected Forward to be whitelist, got %v", *cp.Forward) - } - if reflect.DeepEqual(cp.WhitelistedNames.Items, expandStringList(cookieNamesConf())) != true { - t.Fatalf("Expected WhitelistedNames.Items to be %v, got %v", cookieNamesConf(), cp.WhitelistedNames.Items) - } -} - -func TestCloudFrontStructure_flattenCookiePreference(t *testing.T) { - in := cookiePreferenceConf() - cp := expandCookiePreference(in) - out := flattenCookiePreference(cp) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCookieNames(t *testing.T) { - data := cookieNamesConf() - cn := expandCookieNames(data) - if *cn.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *cn.Quantity) - } - if 
reflect.DeepEqual(cn.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, cn.Items) - } -} - -func TestCloudFrontStructure_flattenCookieNames(t *testing.T) { - in := cookieNamesConf() - cn := expandCookieNames(in) - out := flattenCookieNames(cn) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandAllowedMethods(t *testing.T) { - data := allowedMethodsConf() - am := expandAllowedMethods(data) - if *am.Quantity != 7 { - t.Fatalf("Expected Quantity to be 7, got %v", *am.Quantity) - } - if reflect.DeepEqual(am.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, am.Items) - } -} - -func TestCloudFrontStructure_flattenAllowedMethods(t *testing.T) { - in := allowedMethodsConf() - am := expandAllowedMethods(in) - out := flattenAllowedMethods(am) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCachedMethods(t *testing.T) { - data := cachedMethodsConf() - cm := expandCachedMethods(data) - if *cm.Quantity != 3 { - t.Fatalf("Expected Quantity to be 3, got %v", *cm.Quantity) - } - if reflect.DeepEqual(cm.Items, expandStringList(data)) != true { - t.Fatalf("Expected Items to be %v, got %v", data, cm.Items) - } -} - -func TestCloudFrontStructure_flattenCachedMethods(t *testing.T) { - in := cachedMethodsConf() - cm := expandCachedMethods(in) - out := flattenCachedMethods(cm) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandOrigins(t *testing.T) { - data := multiOriginConf() - origins := expandOrigins(data) - if *origins.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *origins.Quantity) - } - if *origins.Items[0].OriginPath != "/" { - t.Fatalf("Expected first Item's OriginPath to be /, got %v", 
*origins.Items[0].OriginPath) - } -} - -func TestCloudFrontStructure_flattenOrigins(t *testing.T) { - in := multiOriginConf() - origins := expandOrigins(in) - out := flattenOrigins(origins) - diff := in.Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandOrigin(t *testing.T) { - data := originWithCustomConf() - or := expandOrigin(data) - if *or.Id != "CustomOrigin" { - t.Fatalf("Expected Id to be CustomOrigin, got %v", *or.Id) - } - if *or.DomainName != "www.example.com" { - t.Fatalf("Expected DomainName to be www.example.com, got %v", *or.DomainName) - } - if *or.OriginPath != "/" { - t.Fatalf("Expected OriginPath to be /, got %v", *or.OriginPath) - } - if *or.CustomOriginConfig.OriginProtocolPolicy != "http-only" { - t.Fatalf("Expected CustomOriginConfig.OriginProtocolPolicy to be http-only, got %v", *or.CustomOriginConfig.OriginProtocolPolicy) - } - if *or.CustomHeaders.Items[0].HeaderValue != "samplevalue" { - t.Fatalf("Expected CustomHeaders.Items[0].HeaderValue to be samplevalue, got %v", *or.CustomHeaders.Items[0].HeaderValue) - } -} - -func TestCloudFrontStructure_flattenOrigin(t *testing.T) { - in := originWithCustomConf() - or := expandOrigin(in) - out := flattenOrigin(or) - - if out["origin_id"] != "CustomOrigin" { - t.Fatalf("Expected out[origin_id] to be CustomOrigin, got %v", out["origin_id"]) - } - if out["domain_name"] != "www.example.com" { - t.Fatalf("Expected out[domain_name] to be www.example.com, got %v", out["domain_name"]) - } - if out["origin_path"] != "/" { - t.Fatalf("Expected out[origin_path] to be /, got %v", out["origin_path"]) - } - if out["custom_origin_config"].(*schema.Set).Equal(in["custom_origin_config"].(*schema.Set)) != true { - t.Fatalf("Expected out[custom_origin_config] to be %v, got %v", in["custom_origin_config"], out["custom_origin_config"]) - } -} - -func TestCloudFrontStructure_expandCustomHeaders(t *testing.T) { - 
in := originCustomHeadersConf() - chs := expandCustomHeaders(in) - if *chs.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *chs.Quantity) - } - if *chs.Items[0].HeaderValue != "samplevalue" { - t.Fatalf("Expected first Item's HeaderValue to be samplevalue, got %v", *chs.Items[0].HeaderValue) - } -} - -func TestCloudFrontStructure_flattenCustomHeaders(t *testing.T) { - in := originCustomHeadersConf() - chs := expandCustomHeaders(in) - out := flattenCustomHeaders(chs) - diff := in.Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_flattenOriginCustomHeader(t *testing.T) { - in := originCustomHeaderConf1() - och := expandOriginCustomHeader(in) - out := flattenOriginCustomHeader(och) - - if out["name"] != "X-Custom-Header1" { - t.Fatalf("Expected out[name] to be X-Custom-Header1, got %v", out["name"]) - } - if out["value"] != "samplevalue" { - t.Fatalf("Expected out[value] to be samplevalue, got %v", out["value"]) - } -} - -func TestCloudFrontStructure_expandOriginCustomHeader(t *testing.T) { - in := originCustomHeaderConf1() - och := expandOriginCustomHeader(in) - - if *och.HeaderName != "X-Custom-Header1" { - t.Fatalf("Expected HeaderName to be X-Custom-Header1, got %v", *och.HeaderName) - } - if *och.HeaderValue != "samplevalue" { - t.Fatalf("Expected HeaderValue to be samplevalue, got %v", *och.HeaderValue) - } -} - -func TestCloudFrontStructure_expandCustomOriginConfig(t *testing.T) { - data := customOriginConf() - co := expandCustomOriginConfig(data) - if *co.OriginProtocolPolicy != "http-only" { - t.Fatalf("Expected OriginProtocolPolicy to be http-only, got %v", *co.OriginProtocolPolicy) - } - if *co.HTTPPort != 80 { - t.Fatalf("Expected HTTPPort to be 80, got %v", *co.HTTPPort) - } - if *co.HTTPSPort != 443 { - t.Fatalf("Expected HTTPSPort to be 443, got %v", *co.HTTPSPort) - } - if *co.OriginReadTimeout != 30 { - t.Fatalf("Expected Origin Read 
Timeout to be 30, got %v", *co.OriginReadTimeout) - } - if *co.OriginKeepaliveTimeout != 5 { - t.Fatalf("Expected Origin Keepalive Timeout to be 5, got %v", *co.OriginKeepaliveTimeout) - } -} - -func TestCloudFrontStructure_flattenCustomOriginConfig(t *testing.T) { - in := customOriginConf() - co := expandCustomOriginConfig(in) - out := flattenCustomOriginConfig(co) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCustomOriginConfigSSL(t *testing.T) { - in := customOriginSslProtocolsConf() - ocs := expandCustomOriginConfigSSL(in) - if *ocs.Quantity != 4 { - t.Fatalf("Expected Quantity to be 4, got %v", *ocs.Quantity) - } - if *ocs.Items[0] != "SSLv3" { - t.Fatalf("Expected first Item to be SSLv3, got %v", *ocs.Items[0]) - } -} - -func TestCloudFrontStructure_flattenCustomOriginConfigSSL(t *testing.T) { - in := customOriginSslProtocolsConf() - ocs := expandCustomOriginConfigSSL(in) - out := flattenCustomOriginConfigSSL(ocs) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandS3OriginConfig(t *testing.T) { - data := s3OriginConf() - s3o := expandS3OriginConfig(data) - if *s3o.OriginAccessIdentity != "origin-access-identity/cloudfront/E127EXAMPLE51Z" { - t.Fatalf("Expected OriginAccessIdentity to be origin-access-identity/cloudfront/E127EXAMPLE51Z, got %v", *s3o.OriginAccessIdentity) - } -} - -func TestCloudFrontStructure_flattenS3OriginConfig(t *testing.T) { - in := s3OriginConf() - s3o := expandS3OriginConfig(in) - out := flattenS3OriginConfig(s3o) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCustomErrorResponses(t *testing.T) { - data := customErrorResponsesConfSet() - ers := expandCustomErrorResponses(data) - if *ers.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", 
*ers.Quantity) - } - if *ers.Items[0].ResponsePagePath != "/error-pages/404.html" { - t.Fatalf("Expected ResponsePagePath in first Item to be /error-pages/404.html, got %v", *ers.Items[0].ResponsePagePath) - } -} - -func TestCloudFrontStructure_flattenCustomErrorResponses(t *testing.T) { - in := customErrorResponsesConfSet() - ers := expandCustomErrorResponses(in) - out := flattenCustomErrorResponses(ers) - - if in.Equal(out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandCustomErrorResponse(t *testing.T) { - data := customErrorResponsesConfFirst() - er := expandCustomErrorResponse(data) - if *er.ErrorCode != 404 { - t.Fatalf("Expected ErrorCode to be 404, got %v", *er.ErrorCode) - } - if *er.ErrorCachingMinTTL != 30 { - t.Fatalf("Expected ErrorCachingMinTTL to be 30, got %v", *er.ErrorCachingMinTTL) - } - if *er.ResponseCode != "200" { - t.Fatalf("Expected ResponseCode to be 200 (as string), got %v", *er.ResponseCode) - } - if *er.ResponsePagePath != "/error-pages/404.html" { - t.Fatalf("Expected ResponsePagePath to be /error-pages/404.html, got %v", *er.ResponsePagePath) - } -} - -func TestCloudFrontStructure_expandCustomErrorResponse_emptyResponseCode(t *testing.T) { - data := customErrorResponseConfNoResponseCode() - er := expandCustomErrorResponse(data) - if *er.ResponseCode != "" { - t.Fatalf("Expected ResponseCode to be empty string, got %v", *er.ResponseCode) - } - if *er.ResponsePagePath != "" { - t.Fatalf("Expected ResponsePagePath to be empty string, got %v", *er.ResponsePagePath) - } -} - -func TestCloudFrontStructure_flattenCustomErrorResponse(t *testing.T) { - in := customErrorResponsesConfFirst() - er := expandCustomErrorResponse(in) - out := flattenCustomErrorResponse(er) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandLoggingConfig(t *testing.T) { - data := loggingConfigConf() - - lc := 
expandLoggingConfig(data) - if *lc.Enabled != true { - t.Fatalf("Expected Enabled to be true, got %v", *lc.Enabled) - } - if *lc.Prefix != "myprefix" { - t.Fatalf("Expected Prefix to be myprefix, got %v", *lc.Prefix) - } - if *lc.Bucket != "mylogs.s3.amazonaws.com" { - t.Fatalf("Expected Bucket to be mylogs.s3.amazonaws.com, got %v", *lc.Bucket) - } - if *lc.IncludeCookies != false { - t.Fatalf("Expected IncludeCookies to be false, got %v", *lc.IncludeCookies) - } -} - -func TestCloudFrontStructure_expandLoggingConfig_nilValue(t *testing.T) { - lc := expandLoggingConfig(nil) - if *lc.Enabled != false { - t.Fatalf("Expected Enabled to be false, got %v", *lc.Enabled) - } - if *lc.Prefix != "" { - t.Fatalf("Expected Prefix to be blank, got %v", *lc.Prefix) - } - if *lc.Bucket != "" { - t.Fatalf("Expected Bucket to be blank, got %v", *lc.Bucket) - } - if *lc.IncludeCookies != false { - t.Fatalf("Expected IncludeCookies to be false, got %v", *lc.IncludeCookies) - } -} - -func TestCloudFrontStructure_flattenLoggingConfig(t *testing.T) { - in := loggingConfigConf() - lc := expandLoggingConfig(in) - out := flattenLoggingConfig(lc) - diff := schema.NewSet(loggingConfigHash, []interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandAliases(t *testing.T) { - data := aliasesConf() - a := expandAliases(data) - if *a.Quantity != 2 { - t.Fatalf("Expected Quantity to be 2, got %v", *a.Quantity) - } - if reflect.DeepEqual(a.Items, expandStringList(data.List())) != true { - t.Fatalf("Expected Items to be [example.com www.example.com], got %v", a.Items) - } -} - -func TestCloudFrontStructure_flattenAliases(t *testing.T) { - in := aliasesConf() - a := expandAliases(in) - out := flattenAliases(a) - diff := in.Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func 
TestCloudFrontStructure_expandRestrictions(t *testing.T) { - data := geoRestrictionsConf() - r := expandRestrictions(data) - if *r.GeoRestriction.RestrictionType != "whitelist" { - t.Fatalf("Expected GeoRestriction.RestrictionType to be whitelist, got %v", *r.GeoRestriction.RestrictionType) - } -} - -func TestCloudFrontStructure_flattenRestrictions(t *testing.T) { - in := geoRestrictionsConf() - r := expandRestrictions(in) - out := flattenRestrictions(r) - diff := schema.NewSet(restrictionsHash, []interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandGeoRestriction_whitelist(t *testing.T) { - data := geoRestrictionWhitelistConf() - gr := expandGeoRestriction(data) - if *gr.RestrictionType != "whitelist" { - t.Fatalf("Expected RestrictionType to be whitelist, got %v", *gr.RestrictionType) - } - if *gr.Quantity != 3 { - t.Fatalf("Expected Quantity to be 3, got %v", *gr.Quantity) - } - if reflect.DeepEqual(gr.Items, aws.StringSlice([]string{"CA", "GB", "US"})) != true { - t.Fatalf("Expected Items be [CA, GB, US], got %v", gr.Items) - } -} - -func TestCloudFrontStructure_flattenGeoRestriction_whitelist(t *testing.T) { - in := geoRestrictionWhitelistConf() - gr := expandGeoRestriction(in) - out := flattenGeoRestriction(gr) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandGeoRestriction_no_items(t *testing.T) { - data := geoRestrictionConfNoItems() - gr := expandGeoRestriction(data) - if *gr.RestrictionType != "none" { - t.Fatalf("Expected RestrictionType to be none, got %v", *gr.RestrictionType) - } - if *gr.Quantity != 0 { - t.Fatalf("Expected Quantity to be 0, got %v", *gr.Quantity) - } - if gr.Items != nil { - t.Fatalf("Expected Items to not be set, got %v", gr.Items) - } -} - -func TestCloudFrontStructure_flattenGeoRestriction_no_items(t *testing.T) { - 
in := geoRestrictionConfNoItems() - gr := expandGeoRestriction(in) - out := flattenGeoRestriction(gr) - - if reflect.DeepEqual(in, out) != true { - t.Fatalf("Expected out to be %v, got %v", in, out) - } -} - -func TestCloudFrontStructure_expandViewerCertificate_cloudfront_default_certificate(t *testing.T) { - data := viewerCertificateConfSetCloudFrontDefault() - vc := expandViewerCertificate(data) - if vc.ACMCertificateArn != nil { - t.Fatalf("Expected ACMCertificateArn to be unset, got %v", *vc.ACMCertificateArn) - } - if *vc.CloudFrontDefaultCertificate != true { - t.Fatalf("Expected CloudFrontDefaultCertificate to be true, got %v", *vc.CloudFrontDefaultCertificate) - } - if vc.IAMCertificateId != nil { - t.Fatalf("Expected IAMCertificateId to not be set, got %v", *vc.IAMCertificateId) - } - if vc.SSLSupportMethod != nil { - t.Fatalf("Expected IAMCertificateId to not be set, got %v", *vc.SSLSupportMethod) - } - if vc.MinimumProtocolVersion != nil { - t.Fatalf("Expected IAMCertificateId to not be set, got %v", *vc.MinimumProtocolVersion) - } -} - -func TestCloudFrontStructure_flattenViewerCertificate_cloudfront_default_certificate(t *testing.T) { - in := viewerCertificateConfSetCloudFrontDefault() - vc := expandViewerCertificate(in) - out := flattenViewerCertificate(vc) - diff := schema.NewSet(viewerCertificateHash, []interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_expandViewerCertificate_iam_certificate_id(t *testing.T) { - data := viewerCertificateConfSetIAM() - vc := expandViewerCertificate(data) - if vc.ACMCertificateArn != nil { - t.Fatalf("Expected ACMCertificateArn to be unset, got %v", *vc.ACMCertificateArn) - } - if vc.CloudFrontDefaultCertificate != nil { - t.Fatalf("Expected CloudFrontDefaultCertificate to be unset, got %v", *vc.CloudFrontDefaultCertificate) - } - if *vc.IAMCertificateId != "iamcert-01234567" { - t.Fatalf("Expected 
IAMCertificateId to be iamcert-01234567, got %v", *vc.IAMCertificateId) - } - if *vc.SSLSupportMethod != "vip" { - t.Fatalf("Expected IAMCertificateId to be vip, got %v", *vc.SSLSupportMethod) - } - if *vc.MinimumProtocolVersion != "TLSv1" { - t.Fatalf("Expected IAMCertificateId to be TLSv1, got %v", *vc.MinimumProtocolVersion) - } -} - -func TestCloudFrontStructure_expandViewerCertificate_acm_certificate_arn(t *testing.T) { - data := viewerCertificateConfSetACM() - vc := expandViewerCertificate(data) - if *vc.ACMCertificateArn != "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012" { - t.Fatalf("Expected ACMCertificateArn to be arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012, got %v", *vc.ACMCertificateArn) - } - if vc.CloudFrontDefaultCertificate != nil { - t.Fatalf("Expected CloudFrontDefaultCertificate to be unset, got %v", *vc.CloudFrontDefaultCertificate) - } - if vc.IAMCertificateId != nil { - t.Fatalf("Expected IAMCertificateId to be unset, got %v", *vc.IAMCertificateId) - } - if *vc.SSLSupportMethod != "sni-only" { - t.Fatalf("Expected IAMCertificateId to be sni-only, got %v", *vc.SSLSupportMethod) - } - if *vc.MinimumProtocolVersion != "TLSv1" { - t.Fatalf("Expected IAMCertificateId to be TLSv1, got %v", *vc.MinimumProtocolVersion) - } -} - -func TestCloudFrontStructure_falttenViewerCertificate_iam_certificate_id(t *testing.T) { - in := viewerCertificateConfSetIAM() - vc := expandViewerCertificate(in) - out := flattenViewerCertificate(vc) - diff := schema.NewSet(viewerCertificateHash, []interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_falttenViewerCertificate_acm_certificate_arn(t *testing.T) { - in := viewerCertificateConfSetACM() - vc := expandViewerCertificate(in) - out := flattenViewerCertificate(vc) - diff := schema.NewSet(viewerCertificateHash, 
[]interface{}{in}).Difference(out) - - if len(diff.List()) > 0 { - t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) - } -} - -func TestCloudFrontStructure_viewerCertificateHash_IAM(t *testing.T) { - in := viewerCertificateConfSetIAM() - out := viewerCertificateHash(in) - expected := 1157261784 - - if expected != out { - t.Fatalf("Expected %v, got %v", expected, out) - } -} - -func TestCloudFrontStructure_viewerCertificateHash_ACM(t *testing.T) { - in := viewerCertificateConfSetACM() - out := viewerCertificateHash(in) - expected := 2883600425 - - if expected != out { - t.Fatalf("Expected %v, got %v", expected, out) - } -} - -func TestCloudFrontStructure_viewerCertificateHash_default(t *testing.T) { - in := viewerCertificateConfSetCloudFrontDefault() - out := viewerCertificateHash(in) - expected := 69840937 - - if expected != out { - t.Fatalf("Expected %v, got %v", expected, out) - } -} diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go deleted file mode 100644 index dd1149b91..000000000 --- a/builtin/providers/aws/config.go +++ /dev/null @@ -1,510 +0,0 @@ -package aws - -import ( - "crypto/tls" - "errors" - "fmt" - "log" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/acm" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/aws/aws-sdk-go/service/codecommit" - 
"github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/aws/aws-sdk-go/service/codepipeline" - "github.com/aws/aws-sdk-go/service/cognitoidentity" - "github.com/aws/aws-sdk-go/service/configservice" - "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/aws/aws-sdk-go/service/devicefarm" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ecr" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/aws/aws-sdk-go/service/emr" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/glacier" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/inspector" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lightsail" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/ses" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/aws/aws-sdk-go/service/simpledb" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/aws/aws-sdk-go/service/sqs" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" - 
"github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/terraform" -) - -type Config struct { - AccessKey string - SecretKey string - CredsFilename string - Profile string - Token string - Region string - MaxRetries int - - AssumeRoleARN string - AssumeRoleExternalID string - AssumeRoleSessionName string - AssumeRolePolicy string - - AllowedAccountIds []interface{} - ForbiddenAccountIds []interface{} - - CloudFormationEndpoint string - CloudWatchEndpoint string - CloudWatchEventsEndpoint string - CloudWatchLogsEndpoint string - DynamoDBEndpoint string - DeviceFarmEndpoint string - Ec2Endpoint string - ElbEndpoint string - IamEndpoint string - KinesisEndpoint string - KmsEndpoint string - RdsEndpoint string - S3Endpoint string - SnsEndpoint string - SqsEndpoint string - Insecure bool - - SkipCredsValidation bool - SkipGetEC2Platforms bool - SkipRegionValidation bool - SkipRequestingAccountId bool - SkipMetadataApiCheck bool - S3ForcePathStyle bool -} - -type AWSClient struct { - cfconn *cloudformation.CloudFormation - cloudfrontconn *cloudfront.CloudFront - cloudtrailconn *cloudtrail.CloudTrail - cloudwatchconn *cloudwatch.CloudWatch - cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs - cloudwatcheventsconn *cloudwatchevents.CloudWatchEvents - cognitoconn *cognitoidentity.CognitoIdentity - configconn *configservice.ConfigService - devicefarmconn *devicefarm.DeviceFarm - dmsconn *databasemigrationservice.DatabaseMigrationService - dsconn *directoryservice.DirectoryService - dynamodbconn *dynamodb.DynamoDB - ec2conn *ec2.EC2 - ecrconn *ecr.ECR - ecsconn *ecs.ECS - efsconn *efs.EFS - elbconn *elb.ELB - elbv2conn *elbv2.ELBV2 - emrconn *emr.EMR - esconn *elasticsearch.ElasticsearchService - acmconn *acm.ACM - apigateway *apigateway.APIGateway - appautoscalingconn *applicationautoscaling.ApplicationAutoScaling - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - sesConn *ses.SES - simpledbconn 
*simpledb.SimpleDB - sqsconn *sqs.SQS - snsconn *sns.SNS - stsconn *sts.STS - redshiftconn *redshift.Redshift - r53conn *route53.Route53 - partition string - accountid string - supportedplatforms []string - region string - rdsconn *rds.RDS - iamconn *iam.IAM - kinesisconn *kinesis.Kinesis - kmsconn *kms.KMS - firehoseconn *firehose.Firehose - inspectorconn *inspector.Inspector - elasticacheconn *elasticache.ElastiCache - elasticbeanstalkconn *elasticbeanstalk.ElasticBeanstalk - elastictranscoderconn *elastictranscoder.ElasticTranscoder - lambdaconn *lambda.Lambda - lightsailconn *lightsail.Lightsail - opsworksconn *opsworks.OpsWorks - glacierconn *glacier.Glacier - codebuildconn *codebuild.CodeBuild - codedeployconn *codedeploy.CodeDeploy - codecommitconn *codecommit.CodeCommit - codepipelineconn *codepipeline.CodePipeline - sfnconn *sfn.SFN - ssmconn *ssm.SSM - wafconn *waf.WAF - wafregionalconn *wafregional.WAFRegional -} - -func (c *AWSClient) S3() *s3.S3 { - return c.s3conn -} - -func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB { - return c.dynamodbconn -} - -func (c *AWSClient) IsGovCloud() bool { - if c.region == "us-gov-west-1" { - return true - } - return false -} - -func (c *AWSClient) IsChinaCloud() bool { - if c.region == "cn-north-1" { - return true - } - return false -} - -// Client configures and returns a fully initialized AWSClient -func (c *Config) Client() (interface{}, error) { - // Get the auth and region. This can fail if keys/regions were not - // specified and we're attempting to use the environment. 
- if c.SkipRegionValidation { - log.Println("[INFO] Skipping region validation") - } else { - log.Println("[INFO] Building AWS region structure") - err := c.ValidateRegion() - if err != nil { - return nil, err - } - } - - var client AWSClient - // store AWS region in client struct, for region specific operations such as - // bucket storage in S3 - client.region = c.Region - - log.Println("[INFO] Building AWS auth structure") - creds, err := GetCredentials(c) - if err != nil { - return nil, err - } - // Call Get to check for credential provider. If nothing found, we'll get an - // error, and we can present it nicely to the user - cp, err := creds.Get() - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { - return nil, errors.New(`No valid credential sources found for AWS Provider. - Please see https://terraform.io/docs/providers/aws/index.html for more information on - providing credentials for the AWS Provider`) - } - - return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) - } - - log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) - - awsConfig := &aws.Config{ - Credentials: creds, - Region: aws.String(c.Region), - MaxRetries: aws.Int(c.MaxRetries), - HTTPClient: cleanhttp.DefaultClient(), - S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), - } - - if logging.IsDebugOrHigher() { - awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) - awsConfig.Logger = awsLogger{} - } - - if c.Insecure { - transport := awsConfig.HTTPClient.Transport.(*http.Transport) - transport.TLSClientConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } - - // Set up base session - sess, err := session.NewSession(awsConfig) - if err != nil { - return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err) - } - - sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent) - - if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" { - 
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure) - } - - // This restriction should only be used for Route53 sessions. - // Other resources that have restrictions should allow the API to fail, rather - // than Terraform abstracting the region for the user. This can lead to breaking - // changes if that resource is ever opened up to more regions. - r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")}) - - // Some services have user-configurable endpoints - awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)}) - awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)}) - awsCweSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEventsEndpoint)}) - awsCwlSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchLogsEndpoint)}) - awsDynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) - awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) - awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) - awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) - awsKinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) - awsKmsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KmsEndpoint)}) - awsRdsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.RdsEndpoint)}) - awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)}) - awsSnsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SnsEndpoint)}) - awsSqsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SqsEndpoint)}) - awsDeviceFarmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DeviceFarmEndpoint)}) - - log.Println("[INFO] Initializing DeviceFarm SDK connection") - client.devicefarmconn = devicefarm.New(awsDeviceFarmSess) - - // These two services need to be set up early so we can check on AccountID - client.iamconn = iam.New(awsIamSess) - client.stsconn = sts.New(sess) - - if !c.SkipCredsValidation { 
- err = c.ValidateCredentials(client.stsconn) - if err != nil { - return nil, err - } - } - - if !c.SkipRequestingAccountId { - partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName) - if err == nil { - client.partition = partition - client.accountid = accountId - } - } - - authErr := c.ValidateAccountId(client.accountid) - if authErr != nil { - return nil, authErr - } - - client.ec2conn = ec2.New(awsEc2Sess) - - if !c.SkipGetEC2Platforms { - supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn) - if err != nil { - // We intentionally fail *silently* because there's a chance - // user just doesn't have ec2:DescribeAccountAttributes permissions - log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err) - } else { - client.supportedplatforms = supportedPlatforms - } - } - - client.acmconn = acm.New(sess) - client.apigateway = apigateway.New(sess) - client.appautoscalingconn = applicationautoscaling.New(sess) - client.autoscalingconn = autoscaling.New(sess) - client.cfconn = cloudformation.New(awsCfSess) - client.cloudfrontconn = cloudfront.New(sess) - client.cloudtrailconn = cloudtrail.New(sess) - client.cloudwatchconn = cloudwatch.New(awsCwSess) - client.cloudwatcheventsconn = cloudwatchevents.New(awsCweSess) - client.cloudwatchlogsconn = cloudwatchlogs.New(awsCwlSess) - client.codecommitconn = codecommit.New(sess) - client.codebuildconn = codebuild.New(sess) - client.codedeployconn = codedeploy.New(sess) - client.configconn = configservice.New(sess) - client.cognitoconn = cognitoidentity.New(sess) - client.dmsconn = databasemigrationservice.New(sess) - client.codepipelineconn = codepipeline.New(sess) - client.dsconn = directoryservice.New(sess) - client.dynamodbconn = dynamodb.New(awsDynamoSess) - client.ecrconn = ecr.New(sess) - client.ecsconn = ecs.New(sess) - client.efsconn = efs.New(sess) - client.elasticacheconn = elasticache.New(sess) - client.elasticbeanstalkconn = elasticbeanstalk.New(sess) - 
client.elastictranscoderconn = elastictranscoder.New(sess) - client.elbconn = elb.New(awsElbSess) - client.elbv2conn = elbv2.New(awsElbSess) - client.emrconn = emr.New(sess) - client.esconn = elasticsearch.New(sess) - client.firehoseconn = firehose.New(sess) - client.inspectorconn = inspector.New(sess) - client.glacierconn = glacier.New(sess) - client.kinesisconn = kinesis.New(awsKinesisSess) - client.kmsconn = kms.New(awsKmsSess) - client.lambdaconn = lambda.New(sess) - client.lightsailconn = lightsail.New(sess) - client.opsworksconn = opsworks.New(sess) - client.r53conn = route53.New(r53Sess) - client.rdsconn = rds.New(awsRdsSess) - client.redshiftconn = redshift.New(sess) - client.simpledbconn = simpledb.New(sess) - client.s3conn = s3.New(awsS3Sess) - client.sesConn = ses.New(sess) - client.sfnconn = sfn.New(sess) - client.snsconn = sns.New(awsSnsSess) - client.sqsconn = sqs.New(awsSqsSess) - client.ssmconn = ssm.New(sess) - client.wafconn = waf.New(sess) - client.wafregionalconn = wafregional.New(sess) - - return &client, nil -} - -// ValidateRegion returns an error if the configured region is not a -// valid aws region and nil otherwise. -func (c *Config) ValidateRegion() error { - var regions = []string{ - "ap-northeast-1", - "ap-northeast-2", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "cn-north-1", - "eu-central-1", - "eu-west-1", - "eu-west-2", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-west-1", - "us-west-1", - "us-west-2", - } - - for _, valid := range regions { - if c.Region == valid { - return nil - } - } - return fmt.Errorf("Not a valid region: %s", c.Region) -} - -// Validate credentials early and fail before we do any graph walking. 
-func (c *Config) ValidateCredentials(stsconn *sts.STS) error { - _, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{}) - return err -} - -// ValidateAccountId returns a context-specific error if the configured account -// id is explicitly forbidden or not authorised; and nil if it is authorised. -func (c *Config) ValidateAccountId(accountId string) error { - if c.AllowedAccountIds == nil && c.ForbiddenAccountIds == nil { - return nil - } - - log.Println("[INFO] Validating account ID") - - if c.ForbiddenAccountIds != nil { - for _, id := range c.ForbiddenAccountIds { - if id == accountId { - return fmt.Errorf("Forbidden account ID (%s)", id) - } - } - } - - if c.AllowedAccountIds != nil { - for _, id := range c.AllowedAccountIds { - if id == accountId { - return nil - } - } - return fmt.Errorf("Account ID not allowed (%s)", accountId) - } - - return nil -} - -func GetSupportedEC2Platforms(conn *ec2.EC2) ([]string, error) { - attrName := "supported-platforms" - - input := ec2.DescribeAccountAttributesInput{ - AttributeNames: []*string{aws.String(attrName)}, - } - attributes, err := conn.DescribeAccountAttributes(&input) - if err != nil { - return nil, err - } - - var platforms []string - for _, attr := range attributes.AccountAttributes { - if *attr.AttributeName == attrName { - for _, v := range attr.AttributeValues { - platforms = append(platforms, *v.AttributeValue) - } - break - } - } - - if len(platforms) == 0 { - return nil, fmt.Errorf("No EC2 platforms detected") - } - - return platforms, nil -} - -// addTerraformVersionToUserAgent is a named handler that will add Terraform's -// version information to requests made by the AWS SDK. 
-var addTerraformVersionToUserAgent = request.NamedHandler{ - Name: "terraform.TerraformVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler( - "APN/1.0 HashiCorp/1.0 Terraform", terraform.VersionString()), -} - -var debugAuthFailure = request.NamedHandler{ - Name: "terraform.AuthFailureAdditionalDebugHandler", - Fn: func(req *request.Request) { - if isAWSErr(req.Error, "AuthFailure", "AWS was not able to validate the provided access credentials") { - log.Printf("[INFO] Additional AuthFailure Debugging Context") - log.Printf("[INFO] Current system UTC time: %s", time.Now().UTC()) - log.Printf("[INFO] Request object: %s", spew.Sdump(req)) - } - }, -} - -type awsLogger struct{} - -func (l awsLogger) Log(args ...interface{}) { - tokens := make([]string, 0, len(args)) - for _, arg := range args { - if token, ok := arg.(string); ok { - tokens = append(tokens, token) - } - } - log.Printf("[DEBUG] [aws-sdk-go] %s", strings.Join(tokens, " ")) -} diff --git a/builtin/providers/aws/config_test.go b/builtin/providers/aws/config_test.go deleted file mode 100644 index 50b175c1e..000000000 --- a/builtin/providers/aws/config_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/http/httptest" - "reflect" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - awsCredentials "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func TestGetSupportedEC2Platforms(t *testing.T) { - ec2Endpoints := []*awsMockEndpoint{ - &awsMockEndpoint{ - Request: &awsMockRequest{"POST", "/", "Action=DescribeAccountAttributes&" + - "AttributeName.1=supported-platforms&Version=2016-11-15"}, - Response: &awsMockResponse{200, test_ec2_describeAccountAttributes_response, "text/xml"}, - }, - } - closeFunc, sess, err := getMockedAwsApiSession("EC2", ec2Endpoints) - if err != nil { - t.Fatal(err) - } - defer closeFunc() - conn := ec2.New(sess) - - 
platforms, err := GetSupportedEC2Platforms(conn) - if err != nil { - t.Fatalf("Expected no error, received: %s", err) - } - expectedPlatforms := []string{"VPC", "EC2"} - if !reflect.DeepEqual(platforms, expectedPlatforms) { - t.Fatalf("Received platforms: %q\nExpected: %q\n", platforms, expectedPlatforms) - } -} - -// getMockedAwsApiSession establishes a httptest server to simulate behaviour -// of a real AWS API server -func getMockedAwsApiSession(svcName string, endpoints []*awsMockEndpoint) (func(), *session.Session, error) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buf := new(bytes.Buffer) - buf.ReadFrom(r.Body) - requestBody := buf.String() - - log.Printf("[DEBUG] Received %s API %q request to %q: %s", - svcName, r.Method, r.RequestURI, requestBody) - - for _, e := range endpoints { - if r.Method == e.Request.Method && r.RequestURI == e.Request.Uri && requestBody == e.Request.Body { - log.Printf("[DEBUG] Mocked %s API responding with %d: %s", - svcName, e.Response.StatusCode, e.Response.Body) - - w.WriteHeader(e.Response.StatusCode) - w.Header().Set("Content-Type", e.Response.ContentType) - w.Header().Set("X-Amzn-Requestid", "1b206dd1-f9a8-11e5-becf-051c60f11c4a") - w.Header().Set("Date", time.Now().Format(time.RFC1123)) - - fmt.Fprintln(w, e.Response.Body) - return - } - } - - w.WriteHeader(400) - return - })) - - sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "") - - sess, err := session.NewSession(&aws.Config{ - Credentials: sc, - Region: aws.String("us-east-1"), - Endpoint: aws.String(ts.URL), - CredentialsChainVerboseErrors: aws.Bool(true), - }) - - return ts.Close, sess, err -} - -type awsMockEndpoint struct { - Request *awsMockRequest - Response *awsMockResponse -} - -type awsMockRequest struct { - Method string - Uri string - Body string -} - -type awsMockResponse struct { - StatusCode int - Body string - ContentType string -} - -var test_ec2_describeAccountAttributes_response = ` 
- 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - - supported-platforms - - - VPC - - - EC2 - - - - -` diff --git a/builtin/providers/aws/core_acceptance_test.go b/builtin/providers/aws/core_acceptance_test.go deleted file mode 100644 index 796380ad9..000000000 --- a/builtin/providers/aws/core_acceptance_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVPC_coreMismatchedDiffs(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testMatchedDiffs, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.test", &vpc), - testAccCheckVpcCidr(&vpc, "10.0.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.test", "cidr_block", "10.0.0.0/16"), - ), - }, - }, - }) -} - -const testMatchedDiffs = `resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "Repro GH-4965" - } - - lifecycle { - ignore_changes = ["tags"] - } -}` diff --git a/builtin/providers/aws/data_source_aws_acm_certificate.go b/builtin/providers/aws/data_source_aws_acm_certificate.go deleted file mode 100644 index 5b69ed93d..000000000 --- a/builtin/providers/aws/data_source_aws_acm_certificate.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/acm" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAcmCertificate() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAcmCertificateRead, - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Required: true, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "statuses": { - Type: 
schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "types": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).acmconn - params := &acm.ListCertificatesInput{} - - target := d.Get("domain") - - statuses, ok := d.GetOk("statuses") - if ok { - statusStrings := statuses.([]interface{}) - params.CertificateStatuses = expandStringList(statusStrings) - } else { - params.CertificateStatuses = []*string{aws.String("ISSUED")} - } - - var arns []string - err := conn.ListCertificatesPages(params, func(page *acm.ListCertificatesOutput, lastPage bool) bool { - for _, cert := range page.CertificateSummaryList { - if *cert.DomainName == target { - arns = append(arns, *cert.CertificateArn) - } - } - - return true - }) - if err != nil { - return errwrap.Wrapf("Error describing certificates: {{err}}", err) - } - - // filter based on certificate type (imported or aws-issued) - types, ok := d.GetOk("types") - if ok { - typesStrings := expandStringList(types.([]interface{})) - var matchedArns []string - for _, arn := range arns { - params := &acm.DescribeCertificateInput{} - params.CertificateArn = &arn - - description, err := conn.DescribeCertificate(params) - if err != nil { - return errwrap.Wrapf("Error describing certificates: {{err}}", err) - } - - for _, certType := range typesStrings { - if *description.Certificate.Type == *certType { - matchedArns = append(matchedArns, arn) - break - } - } - } - - arns = matchedArns - } - - if len(arns) == 0 { - return fmt.Errorf("No certificate for domain %q found in this region.", target) - } - if len(arns) > 1 { - return fmt.Errorf("Multiple certificates for domain %q found in this region.", target) - } - - d.SetId(time.Now().UTC().String()) - d.Set("arn", arns[0]) - - return nil -} diff --git 
a/builtin/providers/aws/data_source_aws_acm_certificate_test.go b/builtin/providers/aws/data_source_aws_acm_certificate_test.go deleted file mode 100644 index a862b12e7..000000000 --- a/builtin/providers/aws/data_source_aws_acm_certificate_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAwsAcmCertificateDataSource_noMatchReturnsError(t *testing.T) { - domain := "hashicorp.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAcmCertificateDataSourceConfig(domain), - ExpectError: regexp.MustCompile(`No certificate for domain`), - }, - { - Config: testAccCheckAwsAcmCertificateDataSourceConfigWithStatus(domain), - ExpectError: regexp.MustCompile(`No certificate for domain`), - }, - { - Config: testAccCheckAwsAcmCertificateDataSourceConfigWithTypes(domain), - ExpectError: regexp.MustCompile(`No certificate for domain`), - }, - }, - }) -} - -func testAccCheckAwsAcmCertificateDataSourceConfig(domain string) string { - return fmt.Sprintf(` -data "aws_acm_certificate" "test" { - domain = "%s" -} -`, domain) -} - -func testAccCheckAwsAcmCertificateDataSourceConfigWithStatus(domain string) string { - return fmt.Sprintf(` -data "aws_acm_certificate" "test" { - domain = "%s" - statuses = ["ISSUED"] -} -`, domain) -} - -func testAccCheckAwsAcmCertificateDataSourceConfigWithTypes(domain string) string { - return fmt.Sprintf(` -data "aws_acm_certificate" "test" { - domain = "%s" - types = ["IMPORTED"] -} -`, domain) -} diff --git a/builtin/providers/aws/data_source_aws_alb.go b/builtin/providers/aws/data_source_aws_alb.go deleted file mode 100644 index d314e0ed7..000000000 --- a/builtin/providers/aws/data_source_aws_alb.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAlb() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAlbRead, - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "arn_suffix": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - - "security_groups": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Set: schema.HashString, - }, - - "subnets": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Set: schema.HashString, - }, - - "access_logs": { - Type: schema.TypeList, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Computed: true, - }, - "prefix": { - Type: schema.TypeString, - Computed: true, - }, - "enabled": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, - - "enable_deletion_protection": { - Type: schema.TypeBool, - Computed: true, - }, - - "idle_timeout": { - Type: schema.TypeInt, - Computed: true, - }, - - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - - "zone_id": { - Type: schema.TypeString, - Computed: true, - }, - - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - albArn := d.Get("arn").(string) - albName := d.Get("name").(string) - - describeAlbOpts := &elbv2.DescribeLoadBalancersInput{} - switch { - case albArn != "": - describeAlbOpts.LoadBalancerArns = []*string{aws.String(albArn)} - case albName != "": - describeAlbOpts.Names = 
[]*string{aws.String(albName)} - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts) - if err != nil { - return errwrap.Wrapf("Error retrieving ALB: {{err}}", err) - } - if len(describeResp.LoadBalancers) != 1 { - return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.LoadBalancers)) - } - d.SetId(*describeResp.LoadBalancers[0].LoadBalancerArn) - - return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0]) -} diff --git a/builtin/providers/aws/data_source_aws_alb_listener.go b/builtin/providers/aws/data_source_aws_alb_listener.go deleted file mode 100644 index 63ec4ed1a..000000000 --- a/builtin/providers/aws/data_source_aws_alb_listener.go +++ /dev/null @@ -1,62 +0,0 @@ -package aws - -import "github.com/hashicorp/terraform/helper/schema" - -func dataSourceAwsAlbListener() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAlbListenerRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Required: true, - }, - - "load_balancer_arn": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - - "protocol": { - Type: schema.TypeString, - Computed: true, - }, - - "ssl_policy": { - Type: schema.TypeString, - Computed: true, - }, - - "certificate_arn": { - Type: schema.TypeString, - Computed: true, - }, - - "default_action": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_group_arn": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error { - d.SetId(d.Get("arn").(string)) - return resourceAwsAlbListenerRead(d, meta) -} diff --git a/builtin/providers/aws/data_source_aws_alb_listener_test.go b/builtin/providers/aws/data_source_aws_alb_listener_test.go deleted 
file mode 100644 index 5eea80e24..000000000 --- a/builtin/providers/aws/data_source_aws_alb_listener_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package aws - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAWSALBListener_basic(t *testing.T) { - albName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAWSALBListenerConfigBasic(albName, targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "load_balancer_arn"), - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "arn"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "protocol", "HTTP"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "port", "80"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "default_action.#", "1"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "default_action.0.type", "forward"), - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "default_action.0.target_group_arn"), - ), - }, - }, - }) -} - -func TestAccDataSourceAWSALBListener_https(t *testing.T) { - albName := fmt.Sprintf("testlistener-https-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: 
testAccDataSourceAWSALBListenerConfigHTTPS(albName, targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "load_balancer_arn"), - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "arn"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "protocol", "HTTPS"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "port", "443"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "default_action.#", "1"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "default_action.0.type", "forward"), - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "default_action.0.target_group_arn"), - resource.TestCheckResourceAttrSet("data.aws_alb_listener.front_end", "certificate_arn"), - resource.TestCheckResourceAttr("data.aws_alb_listener.front_end", "ssl_policy", "ELBSecurityPolicy-2015-05"), - ), - }, - }, - }) -} - -func testAccDataSourceAWSALBListenerConfigBasic(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - 
type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -data "aws_alb_listener" "front_end" { - arn = "${aws_alb_listener.front_end.arn}" -}`, albName, targetGroupName) -} - -func testAccDataSourceAWSALBListenerConfigHTTPS(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTPS" - port = "443" - ssl_policy = "ELBSecurityPolicy-2015-05" - certificate_arn = "${aws_iam_server_certificate.test_cert.arn}" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = false - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 
3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.alb_test.id}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_iam_server_certificate" "test_cert" { - name = "terraform-test-cert-%d" - certificate_body = < 0 { - params.Owners = o - } - } - - resp, err := conn.DescribeImages(params) - if err != nil { - return err - } - - var filteredImages []*ec2.Image - if nameRegexOk { - r := regexp.MustCompile(nameRegex.(string)) - for _, image := range resp.Images { - // Check for a very rare case where the response would include no - // image name. No name means nothing to attempt a match against, - // therefore we are skipping such image. 
- if image.Name == nil || *image.Name == "" { - log.Printf("[WARN] Unable to find AMI name to match against "+ - "for image ID %q owned by %q, nothing to do.", - *image.ImageId, *image.OwnerId) - continue - } - if r.MatchString(*image.Name) { - filteredImages = append(filteredImages, image) - } - } - } else { - filteredImages = resp.Images[:] - } - - var image *ec2.Image - if len(filteredImages) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - if len(filteredImages) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] aws_ami - multiple results found and `most_recent` is set to: %t", recent) - if recent { - image = mostRecentAmi(filteredImages) - } else { - return fmt.Errorf("Your query returned more than one result. Please try a more " + - "specific search criteria, or set `most_recent` attribute to true.") - } - } else { - // Query returned single result. - image = filteredImages[0] - } - - log.Printf("[DEBUG] aws_ami - Single AMI found: %s", *image.ImageId) - return amiDescriptionAttributes(d, image) -} - -// Returns the most recent AMI out of a slice of images. -func mostRecentAmi(images []*ec2.Image) *ec2.Image { - return sortImages(images)[0] -} - -// populate the numerous fields that the image description returns. 
-func amiDescriptionAttributes(d *schema.ResourceData, image *ec2.Image) error { - // Simple attributes first - d.SetId(*image.ImageId) - d.Set("architecture", image.Architecture) - d.Set("creation_date", image.CreationDate) - if image.Description != nil { - d.Set("description", image.Description) - } - d.Set("hypervisor", image.Hypervisor) - d.Set("image_id", image.ImageId) - d.Set("image_location", image.ImageLocation) - if image.ImageOwnerAlias != nil { - d.Set("image_owner_alias", image.ImageOwnerAlias) - } - d.Set("image_type", image.ImageType) - if image.KernelId != nil { - d.Set("kernel_id", image.KernelId) - } - d.Set("name", image.Name) - d.Set("owner_id", image.OwnerId) - if image.Platform != nil { - d.Set("platform", image.Platform) - } - d.Set("public", image.Public) - if image.RamdiskId != nil { - d.Set("ramdisk_id", image.RamdiskId) - } - if image.RootDeviceName != nil { - d.Set("root_device_name", image.RootDeviceName) - } - d.Set("root_device_type", image.RootDeviceType) - if image.SriovNetSupport != nil { - d.Set("sriov_net_support", image.SriovNetSupport) - } - d.Set("state", image.State) - d.Set("virtualization_type", image.VirtualizationType) - // Complex types get their own functions - if err := d.Set("block_device_mappings", amiBlockDeviceMappings(image.BlockDeviceMappings)); err != nil { - return err - } - if err := d.Set("product_codes", amiProductCodes(image.ProductCodes)); err != nil { - return err - } - if err := d.Set("state_reason", amiStateReason(image.StateReason)); err != nil { - return err - } - if err := d.Set("tags", dataSourceTags(image.Tags)); err != nil { - return err - } - return nil -} - -// Returns a set of block device mappings. 
-func amiBlockDeviceMappings(m []*ec2.BlockDeviceMapping) *schema.Set { - s := &schema.Set{ - F: amiBlockDeviceMappingHash, - } - for _, v := range m { - mapping := map[string]interface{}{ - "device_name": *v.DeviceName, - } - if v.Ebs != nil { - ebs := map[string]interface{}{ - "delete_on_termination": fmt.Sprintf("%t", *v.Ebs.DeleteOnTermination), - "encrypted": fmt.Sprintf("%t", *v.Ebs.Encrypted), - "volume_size": fmt.Sprintf("%d", *v.Ebs.VolumeSize), - "volume_type": *v.Ebs.VolumeType, - } - // Iops is not always set - if v.Ebs.Iops != nil { - ebs["iops"] = fmt.Sprintf("%d", *v.Ebs.Iops) - } else { - ebs["iops"] = "0" - } - // snapshot id may not be set - if v.Ebs.SnapshotId != nil { - ebs["snapshot_id"] = *v.Ebs.SnapshotId - } - - mapping["ebs"] = ebs - } - if v.VirtualName != nil { - mapping["virtual_name"] = *v.VirtualName - } - log.Printf("[DEBUG] aws_ami - adding block device mapping: %v", mapping) - s.Add(mapping) - } - return s -} - -// Returns a set of product codes. -func amiProductCodes(m []*ec2.ProductCode) *schema.Set { - s := &schema.Set{ - F: amiProductCodesHash, - } - for _, v := range m { - code := map[string]interface{}{ - "product_code_id": *v.ProductCodeId, - "product_code_type": *v.ProductCodeType, - } - s.Add(code) - } - return s -} - -// Returns the state reason. -func amiStateReason(m *ec2.StateReason) map[string]interface{} { - s := make(map[string]interface{}) - if m != nil { - s["code"] = *m.Code - s["message"] = *m.Message - } else { - s["code"] = "UNSET" - s["message"] = "UNSET" - } - return s -} - -// Generates a hash for the set hash function used by the block_device_mappings -// attribute. -func amiBlockDeviceMappingHash(v interface{}) int { - var buf bytes.Buffer - // All keys added in alphabetical order. 
- m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - if d, ok := m["ebs"]; ok { - if len(d.(map[string]interface{})) > 0 { - e := d.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", e["delete_on_termination"].(string))) - buf.WriteString(fmt.Sprintf("%s-", e["encrypted"].(string))) - buf.WriteString(fmt.Sprintf("%s-", e["iops"].(string))) - buf.WriteString(fmt.Sprintf("%s-", e["volume_size"].(string))) - buf.WriteString(fmt.Sprintf("%s-", e["volume_type"].(string))) - } - } - if d, ok := m["no_device"]; ok { - buf.WriteString(fmt.Sprintf("%s-", d.(string))) - } - if d, ok := m["virtual_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", d.(string))) - } - if d, ok := m["snapshot_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", d.(string))) - } - return hashcode.String(buf.String()) -} - -// Generates a hash for the set hash function used by the product_codes -// attribute. -func amiProductCodesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - // All keys added in alphabetical order. 
- buf.WriteString(fmt.Sprintf("%s-", m["product_code_id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["product_code_type"].(string))) - return hashcode.String(buf.String()) -} - -func validateNameRegex(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if _, err := regexp.Compile(value); err != nil { - errors = append(errors, fmt.Errorf( - "%q contains an invalid regular expression: %s", - k, err)) - } - return -} diff --git a/builtin/providers/aws/data_source_aws_ami_ids.go b/builtin/providers/aws/data_source_aws_ami_ids.go deleted file mode 100644 index 20df34ac3..000000000 --- a/builtin/providers/aws/data_source_aws_ami_ids.go +++ /dev/null @@ -1,111 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAmiIds() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAmiIdsRead, - - Schema: map[string]*schema.Schema{ - "filter": dataSourceFiltersSchema(), - "executable_users": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name_regex": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateNameRegex, - }, - "owners": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": dataSourceTagsSchema(), - "ids": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceAwsAmiIdsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - executableUsers, executableUsersOk := d.GetOk("executable_users") - filters, filtersOk := d.GetOk("filter") - nameRegex, nameRegexOk := d.GetOk("name_regex") - owners, ownersOk := d.GetOk("owners") - - if executableUsersOk == 
false && filtersOk == false && nameRegexOk == false && ownersOk == false { - return fmt.Errorf("One of executable_users, filters, name_regex, or owners must be assigned") - } - - params := &ec2.DescribeImagesInput{} - - if executableUsersOk { - params.ExecutableUsers = expandStringList(executableUsers.([]interface{})) - } - if filtersOk { - params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) - } - if ownersOk { - o := expandStringList(owners.([]interface{})) - - if len(o) > 0 { - params.Owners = o - } - } - - resp, err := conn.DescribeImages(params) - if err != nil { - return err - } - - var filteredImages []*ec2.Image - imageIds := make([]string, 0) - - if nameRegexOk { - r := regexp.MustCompile(nameRegex.(string)) - for _, image := range resp.Images { - // Check for a very rare case where the response would include no - // image name. No name means nothing to attempt a match against, - // therefore we are skipping such image. - if image.Name == nil || *image.Name == "" { - log.Printf("[WARN] Unable to find AMI name to match against "+ - "for image ID %q owned by %q, nothing to do.", - *image.ImageId, *image.OwnerId) - continue - } - if r.MatchString(*image.Name) { - filteredImages = append(filteredImages, image) - } - } - } else { - filteredImages = resp.Images[:] - } - - for _, image := range sortImages(filteredImages) { - imageIds = append(imageIds, *image.ImageId) - } - - d.SetId(fmt.Sprintf("%d", hashcode.String(params.String()))) - d.Set("ids", imageIds) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_ami_ids_test.go b/builtin/providers/aws/data_source_aws_ami_ids_test.go deleted file mode 100644 index 52582eaba..000000000 --- a/builtin/providers/aws/data_source_aws_ami_ids_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/satori/uuid" -) - -func TestAccDataSourceAwsAmiIds_basic(t *testing.T) { - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsAmiIdsConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami_ids.ubuntu"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsAmiIds_sorted(t *testing.T) { - uuid := uuid.NewV4().String() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsAmiIdsConfig_sorted1(uuid), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("aws_ami_from_instance.a", "id"), - resource.TestCheckResourceAttrSet("aws_ami_from_instance.b", "id"), - ), - }, - { - Config: testAccDataSourceAwsAmiIdsConfig_sorted2(uuid), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ami_ids.test"), - resource.TestCheckResourceAttr("data.aws_ami_ids.test", "ids.#", "2"), - resource.TestCheckResourceAttrPair( - "data.aws_ami_ids.test", "ids.0", - "aws_ami_from_instance.b", "id"), - resource.TestCheckResourceAttrPair( - "data.aws_ami_ids.test", "ids.1", - "aws_ami_from_instance.a", "id"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsAmiIds_empty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsAmiIdsConfig_empty, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami_ids.empty"), - resource.TestCheckResourceAttr("data.aws_ami_ids.empty", "ids.#", "0"), - ), - }, - }, - }) -} - -const testAccDataSourceAwsAmiIdsConfig_basic = ` -data "aws_ami_ids" "ubuntu" { - owners = ["099720109477"] - - filter { - name = "name" - values = ["ubuntu/images/ubuntu-*-*-amd64-server-*"] - } -} -` - -func testAccDataSourceAwsAmiIdsConfig_sorted1(uuid string) string { - 
return fmt.Sprintf(` -resource "aws_instance" "test" { - ami = "ami-efd0428f" - instance_type = "m3.medium" - - count = 2 -} - -resource "aws_ami_from_instance" "a" { - name = "tf-test-%s-a" - source_instance_id = "${aws_instance.test.*.id[0]}" - snapshot_without_reboot = true -} - -resource "aws_ami_from_instance" "b" { - name = "tf-test-%s-b" - source_instance_id = "${aws_instance.test.*.id[1]}" - snapshot_without_reboot = true - - // We want to ensure that 'aws_ami_from_instance.a.creation_date' is less - // than 'aws_ami_from_instance.b.creation_date' so that we can ensure that - // the images are being sorted correctly. - depends_on = ["aws_ami_from_instance.a"] -} -`, uuid, uuid) -} - -func testAccDataSourceAwsAmiIdsConfig_sorted2(uuid string) string { - return testAccDataSourceAwsAmiIdsConfig_sorted1(uuid) + fmt.Sprintf(` -data "aws_ami_ids" "test" { - owners = ["self"] - name_regex = "^tf-test-%s-" -} -`, uuid) -} - -const testAccDataSourceAwsAmiIdsConfig_empty = ` -data "aws_ami_ids" "empty" { - filter { - name = "name" - values = [] - } -} -` diff --git a/builtin/providers/aws/data_source_aws_ami_test.go b/builtin/providers/aws/data_source_aws_ami_test.go deleted file mode 100644 index 2375e6569..000000000 --- a/builtin/providers/aws/data_source_aws_ami_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAmiDataSource_natInstance(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.nat_ami"), - // Check attributes. Some attributes are tough to test - any not contained here should not be considered - // stable and should not be used in interpolation. 
Exception to block_device_mappings which should both - // show up consistently and break if certain references are not available. However modification of the - // snapshot ID which is bound to happen on the NAT AMIs will cause testing to break consistently, so - // deep inspection is not included, simply the count is checked. - // Tags and product codes may need more testing, but I'm having a hard time finding images with - // these attributes set. - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "architecture", "x86_64"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "block_device_mappings.#", "1"), - resource.TestMatchResourceAttr("data.aws_ami.nat_ami", "creation_date", regexp.MustCompile("^20[0-9]{2}-")), - resource.TestMatchResourceAttr("data.aws_ami.nat_ami", "description", regexp.MustCompile("^Amazon Linux AMI")), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "hypervisor", "xen"), - resource.TestMatchResourceAttr("data.aws_ami.nat_ami", "image_id", regexp.MustCompile("^ami-")), - resource.TestMatchResourceAttr("data.aws_ami.nat_ami", "image_location", regexp.MustCompile("^amazon/")), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "image_owner_alias", "amazon"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "image_type", "machine"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "most_recent", "true"), - resource.TestMatchResourceAttr("data.aws_ami.nat_ami", "name", regexp.MustCompile("^amzn-ami-vpc-nat")), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "owner_id", "137112412989"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "public", "true"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "product_codes.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "root_device_name", "/dev/xvda"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "root_device_type", "ebs"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "sriov_net_support", 
"simple"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "state", "available"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "state_reason.code", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "state_reason.message", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "tags.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.nat_ami", "virtualization_type", "hvm"), - ), - }, - }, - }) -} -func TestAccAWSAmiDataSource_windowsInstance(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceWindowsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.windows_ami"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "architecture", "x86_64"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "block_device_mappings.#", "27"), - resource.TestMatchResourceAttr("data.aws_ami.windows_ami", "creation_date", regexp.MustCompile("^20[0-9]{2}-")), - resource.TestMatchResourceAttr("data.aws_ami.windows_ami", "description", regexp.MustCompile("^Microsoft Windows Server 2012")), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "hypervisor", "xen"), - resource.TestMatchResourceAttr("data.aws_ami.windows_ami", "image_id", regexp.MustCompile("^ami-")), - resource.TestMatchResourceAttr("data.aws_ami.windows_ami", "image_location", regexp.MustCompile("^amazon/")), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "image_owner_alias", "amazon"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "image_type", "machine"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "most_recent", "true"), - resource.TestMatchResourceAttr("data.aws_ami.windows_ami", "name", regexp.MustCompile("^Windows_Server-2012-R2")), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "owner_id", 
"801119661308"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "platform", "windows"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "public", "true"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "product_codes.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "root_device_name", "/dev/sda1"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "root_device_type", "ebs"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "sriov_net_support", "simple"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "state", "available"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "state_reason.code", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "state_reason.message", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "tags.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.windows_ami", "virtualization_type", "hvm"), - ), - }, - }, - }) -} - -func TestAccAWSAmiDataSource_instanceStore(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceInstanceStoreConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.instance_store_ami"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "architecture", "x86_64"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "block_device_mappings.#", "0"), - resource.TestMatchResourceAttr("data.aws_ami.instance_store_ami", "creation_date", regexp.MustCompile("^20[0-9]{2}-")), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "hypervisor", "xen"), - resource.TestMatchResourceAttr("data.aws_ami.instance_store_ami", "image_id", regexp.MustCompile("^ami-")), - resource.TestMatchResourceAttr("data.aws_ami.instance_store_ami", "image_location", 
regexp.MustCompile("images/hvm-instance/ubuntu-trusty-14.04-amd64-server")), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "image_type", "machine"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "most_recent", "true"), - resource.TestMatchResourceAttr("data.aws_ami.instance_store_ami", "name", regexp.MustCompile("^ubuntu/images/hvm-instance/ubuntu-trusty-14.04-amd64-server")), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "owner_id", "099720109477"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "public", "true"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "product_codes.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "root_device_type", "instance-store"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "sriov_net_support", "simple"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "state", "available"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "state_reason.code", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "state_reason.message", "UNSET"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "tags.#", "0"), - resource.TestCheckResourceAttr("data.aws_ami.instance_store_ami", "virtualization_type", "hvm"), - ), - }, - }, - }) -} - -func TestAccAWSAmiDataSource_owners(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceOwnersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.amazon_ami"), - ), - }, - }, - }) -} - -// Acceptance test for: https://github.com/hashicorp/terraform/issues/10758 -func TestAccAWSAmiDataSource_ownersEmpty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) 
}, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceEmptyOwnersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.amazon_ami"), - ), - }, - }, - }) -} - -func TestAccAWSAmiDataSource_localNameFilter(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAmiDataSourceNameRegexConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAmiDataSourceID("data.aws_ami.name_regex_filtered_ami"), - resource.TestMatchResourceAttr("data.aws_ami.name_regex_filtered_ami", "image_id", regexp.MustCompile("^ami-")), - ), - }, - }, - }) -} - -func TestResourceValidateNameRegex(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: `\`, - ErrCount: 1, - }, - { - Value: `**`, - ErrCount: 1, - }, - { - Value: `(.+`, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateNameRegex(tc.Value, "name_regex") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: `\/`, - ErrCount: 0, - }, - { - Value: `.*`, - ErrCount: 0, - }, - { - Value: `\b(?:\d{1,3}\.){3}\d{1,3}\b`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateNameRegex(tc.Value, "name_regex") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func testAccCheckAwsAmiDataSourceDestroy(s *terraform.State) error { - return nil -} - -func testAccCheckAwsAmiDataSourceID(n string) resource.TestCheckFunc { - // Wait for IAM role - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find AMI data source: %s", n) - } - - if rs.Primary.ID == "" { - 
return fmt.Errorf("AMI data source ID not set") - } - return nil - } -} - -// Using NAT AMIs for testing - I would expect with NAT gateways now a thing, -// that this will possibly be deprecated at some point in time. Other candidates -// for testing this after that may be Ubuntu's AMI's, or Amazon's regular -// Amazon Linux AMIs. -const testAccCheckAwsAmiDataSourceConfig = ` -data "aws_ami" "nat_ami" { - most_recent = true - filter { - name = "owner-alias" - values = ["amazon"] - } - filter { - name = "name" - values = ["amzn-ami-vpc-nat*"] - } - filter { - name = "virtualization-type" - values = ["hvm"] - } - filter { - name = "root-device-type" - values = ["ebs"] - } - filter { - name = "block-device-mapping.volume-type" - values = ["standard"] - } -} -` - -// Windows image test. -const testAccCheckAwsAmiDataSourceWindowsConfig = ` -data "aws_ami" "windows_ami" { - most_recent = true - filter { - name = "owner-alias" - values = ["amazon"] - } - filter { - name = "name" - values = ["Windows_Server-2012-R2*"] - } - filter { - name = "virtualization-type" - values = ["hvm"] - } - filter { - name = "root-device-type" - values = ["ebs"] - } - filter { - name = "block-device-mapping.volume-type" - values = ["gp2"] - } -} -` - -// Instance store test - using Ubuntu images -const testAccCheckAwsAmiDataSourceInstanceStoreConfig = ` -data "aws_ami" "instance_store_ami" { - most_recent = true - filter { - name = "owner-id" - values = ["099720109477"] - } - filter { - name = "name" - values = ["ubuntu/images/hvm-instance/ubuntu-trusty-14.04-amd64-server*"] - } - filter { - name = "virtualization-type" - values = ["hvm"] - } - filter { - name = "root-device-type" - values = ["instance-store"] - } -} -` - -// Testing owner parameter -const testAccCheckAwsAmiDataSourceOwnersConfig = ` -data "aws_ami" "amazon_ami" { - most_recent = true - owners = ["amazon"] -} -` - -const testAccCheckAwsAmiDataSourceEmptyOwnersConfig = ` -data "aws_ami" "amazon_ami" { - most_recent = true - 
owners = [""] -} -` - -// Testing name_regex parameter -const testAccCheckAwsAmiDataSourceNameRegexConfig = ` -data "aws_ami" "name_regex_filtered_ami" { - most_recent = true - owners = ["amazon"] - filter { - name = "name" - values = ["amzn-ami-*"] - } - name_regex = "^amzn-ami-\\d{3}[5].*-ecs-optimized" -} -` diff --git a/builtin/providers/aws/data_source_aws_autoscaling_groups.go b/builtin/providers/aws/data_source_aws_autoscaling_groups.go deleted file mode 100644 index f43f21d4e..000000000 --- a/builtin/providers/aws/data_source_aws_autoscaling_groups.go +++ /dev/null @@ -1,102 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "sort" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAutoscalingGroups() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAutoscalingGroupsRead, - - Schema: map[string]*schema.Schema{ - "names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "filter": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "values": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - }, - } -} - -func dataSourceAwsAutoscalingGroupsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - - log.Printf("[DEBUG] Reading Autoscaling Groups.") - d.SetId(time.Now().UTC().String()) - - var raw []string - - tf := d.Get("filter").(*schema.Set) - if tf.Len() > 0 { - out, err := conn.DescribeTags(&autoscaling.DescribeTagsInput{ - Filters: expandAsgTagFilters(tf.List()), - }) - if err != nil { - return err - } - - raw = make([]string, len(out.Tags)) - for i, v := range out.Tags { - raw[i] = *v.ResourceId 
- } - } else { - - resp, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{}) - if err != nil { - return fmt.Errorf("Error fetching Autoscaling Groups: %s", err) - } - - raw = make([]string, len(resp.AutoScalingGroups)) - for i, v := range resp.AutoScalingGroups { - raw[i] = *v.AutoScalingGroupName - } - } - - sort.Strings(raw) - - if err := d.Set("names", raw); err != nil { - return fmt.Errorf("[WARN] Error setting Autoscaling Group Names: %s", err) - } - - return nil - -} - -func expandAsgTagFilters(in []interface{}) []*autoscaling.Filter { - out := make([]*autoscaling.Filter, len(in), len(in)) - for i, filter := range in { - m := filter.(map[string]interface{}) - values := expandStringList(m["values"].(*schema.Set).List()) - - out[i] = &autoscaling.Filter{ - Name: aws.String(m["name"].(string)), - Values: values, - } - } - return out -} diff --git a/builtin/providers/aws/data_source_aws_autoscaling_groups_test.go b/builtin/providers/aws/data_source_aws_autoscaling_groups_test.go deleted file mode 100644 index 3a6ba7644..000000000 --- a/builtin/providers/aws/data_source_aws_autoscaling_groups_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAutoscalingGroups_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAutoscalingGroupsConfig(acctest.RandInt(), acctest.RandInt(), acctest.RandInt()), - }, - { - Config: testAccCheckAwsAutoscalingGroupsConfigWithDataSource(acctest.RandInt(), acctest.RandInt(), acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAutoscalingGroups("data.aws_autoscaling_groups.group_list"), - 
resource.TestCheckResourceAttr("data.aws_autoscaling_groups.group_list", "names.#", "3"), - ), - }, - }, - }) -} - -func testAccCheckAwsAutoscalingGroups(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find ASG resource: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("AZ resource ID not set.") - } - - actual, err := testAccCheckAwsAutoscalingGroupsAvailable(rs.Primary.Attributes) - if err != nil { - return err - } - - expected := actual - sort.Strings(expected) - if reflect.DeepEqual(expected, actual) != true { - return fmt.Errorf("ASG not sorted - expected %v, got %v", expected, actual) - } - return nil - } -} - -func testAccCheckAwsAutoscalingGroupsAvailable(attrs map[string]string) ([]string, error) { - v, ok := attrs["names.#"] - if !ok { - return nil, fmt.Errorf("Available ASG list is missing.") - } - qty, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - if qty < 1 { - return nil, fmt.Errorf("No ASG found in region, this is probably a bug.") - } - zones := make([]string, qty) - for n := range zones { - zone, ok := attrs["names."+strconv.Itoa(n)] - if !ok { - return nil, fmt.Errorf("ASG list corrupt, this is definitely a bug.") - } - zones[n] = zone - } - return zones, nil -} - -func testAccCheckAwsAutoscalingGroupsConfig(rInt1, rInt2, rInt3 int) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} - -resource "aws_autoscaling_group" "foo" { - availability_zones = ["us-west-2b"] - name = 
"test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} - -resource "aws_autoscaling_group" "barbaz" { - availability_zones = ["us-west-2c"] - name = "test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -}`, rInt1, rInt2, rInt3) -} - -func testAccCheckAwsAutoscalingGroupsConfigWithDataSource(rInt1, rInt2, rInt3 int) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} - -resource "aws_autoscaling_group" "foo" { - availability_zones = ["us-west-2b"] - name = "test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} - -resource "aws_autoscaling_group" "barbaz" { - availability_zones = ["us-west-2c"] - name = "test-asg-%d" - max_size = 1 - min_size = 0 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} - -data "aws_autoscaling_groups" "group_list" { 
- filter { - name = "key" - values = ["Foo"] - } - - filter { - name = "value" - values = ["foo-bar"] - } -} -`, rInt1, rInt2, rInt3) -} diff --git a/builtin/providers/aws/data_source_aws_availability_zone.go b/builtin/providers/aws/data_source_aws_availability_zone.go deleted file mode 100644 index edab7c926..000000000 --- a/builtin/providers/aws/data_source_aws_availability_zone.go +++ /dev/null @@ -1,89 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAvailabilityZone() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAvailabilityZoneRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name_suffix": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "state": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeAvailabilityZonesInput{} - - if name := d.Get("name"); name != "" { - req.ZoneNames = []*string{aws.String(name.(string))} - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "state": d.Get("state").(string), - }, - ) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil - } - - log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req) - resp, err := conn.DescribeAvailabilityZones(req) - if err != nil { - return err - } - if resp == nil || len(resp.AvailabilityZones) == 0 { - return fmt.Errorf("no matching AZ found") - } - if len(resp.AvailabilityZones) > 1 { - return fmt.Errorf("multiple AZs matched; use additional constraints to reduce matches to a single AZ") - } - - az := resp.AvailabilityZones[0] - - // As a convenience when working with AZs generically, we expose - // the AZ suffix alone, without the region name. - // This can be used e.g. to create lookup tables by AZ letter that - // work regardless of region. - nameSuffix := (*az.ZoneName)[len(*az.RegionName):] - - d.SetId(*az.ZoneName) - d.Set("id", az.ZoneName) - d.Set("name", az.ZoneName) - d.Set("name_suffix", nameSuffix) - d.Set("region", az.RegionName) - d.Set("state", az.State) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_availability_zone_test.go b/builtin/providers/aws/data_source_aws_availability_zone_test.go deleted file mode 100644 index 8808011db..000000000 --- a/builtin/providers/aws/data_source_aws_availability_zone_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsAvailabilityZone(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsAvailabilityZoneConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsAvailabilityZoneCheck("data.aws_availability_zone.by_name"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsAvailabilityZoneCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return 
fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - if attr["name"] != "us-west-2a" { - return fmt.Errorf("bad name %s", attr["name"]) - } - if attr["name_suffix"] != "a" { - return fmt.Errorf("bad name_suffix %s", attr["name_suffix"]) - } - if attr["region"] != "us-west-2" { - return fmt.Errorf("bad region %s", attr["region"]) - } - - return nil - } -} - -const testAccDataSourceAwsAvailabilityZoneConfig = ` -provider "aws" { - region = "us-west-2" -} - -data "aws_availability_zone" "by_name" { - name = "us-west-2a" -} -` diff --git a/builtin/providers/aws/data_source_aws_availability_zones.go b/builtin/providers/aws/data_source_aws_availability_zones.go deleted file mode 100644 index dcc09438f..000000000 --- a/builtin/providers/aws/data_source_aws_availability_zones.go +++ /dev/null @@ -1,87 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "sort" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsAvailabilityZones() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsAvailabilityZonesRead, - - Schema: map[string]*schema.Schema{ - "names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "state": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateStateType, - }, - }, - } -} - -func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Reading Availability Zones.") - d.SetId(time.Now().UTC().String()) - - request := &ec2.DescribeAvailabilityZonesInput{} - - if v, ok := d.GetOk("state"); ok { - request.Filters = []*ec2.Filter{ - { - Name: aws.String("state"), - Values: []*string{aws.String(v.(string))}, - }, - } - } - - log.Printf("[DEBUG] Availability Zones request options: %#v", *request) - - resp, err := 
conn.DescribeAvailabilityZones(request) - if err != nil { - return fmt.Errorf("Error fetching Availability Zones: %s", err) - } - - raw := make([]string, len(resp.AvailabilityZones)) - for i, v := range resp.AvailabilityZones { - raw[i] = *v.ZoneName - } - - sort.Strings(raw) - - if err := d.Set("names", raw); err != nil { - return fmt.Errorf("[WARN] Error setting Availability Zones: %s", err) - } - - return nil -} - -func validateStateType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - validState := map[string]bool{ - "available": true, - "information": true, - "impaired": true, - "unavailable": true, - } - - if !validState[value] { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Availability Zone state %q. Valid states are: %q, %q, %q and %q.", - k, value, "available", "information", "impaired", "unavailable")) - } - return -} diff --git a/builtin/providers/aws/data_source_aws_availability_zones_test.go b/builtin/providers/aws/data_source_aws_availability_zones_test.go deleted file mode 100644 index a65ec511f..000000000 --- a/builtin/providers/aws/data_source_aws_availability_zones_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAvailabilityZones_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsAvailabilityZonesConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAvailabilityZonesMeta("data.aws_availability_zones.availability_zones"), - ), - }, - }, - }) -} - -func TestAccAWSAvailabilityZones_stateFilter(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: 
testAccCheckAwsAvailabilityZonesStateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAvailabilityZoneState("data.aws_availability_zones.state_filter"), - ), - }, - }, - }) -} - -func TestResourceCheckAwsAvailabilityZones_validateStateType(t *testing.T) { - _, errors := validateStateType("incorrect", "state") - if len(errors) == 0 { - t.Fatalf("Expected to trigger a validation error") - } - - var testCases = []struct { - Value string - ErrCount int - }{ - { - Value: "available", - ErrCount: 0, - }, - { - Value: "unavailable", - ErrCount: 0, - }, - } - - for _, tc := range testCases { - _, errors := validateStateType(tc.Value, "state") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find AZ resource: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("AZ resource ID not set.") - } - - actual, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes) - if err != nil { - return err - } - - expected := actual - sort.Strings(expected) - if reflect.DeepEqual(expected, actual) != true { - return fmt.Errorf("AZs not sorted - expected %v, got %v", expected, actual) - } - return nil - } -} - -func testAccCheckAwsAvailabilityZoneState(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find AZ resource: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("AZ resource ID not set.") - } - - if _, ok := rs.Primary.Attributes["state"]; !ok { - return fmt.Errorf("AZs state filter is missing, should be set.") - } - - _, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes) - if err != nil { - return err - } - return nil - } -} - -func 
testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) { - v, ok := attrs["names.#"] - if !ok { - return nil, fmt.Errorf("Available AZ list is missing.") - } - qty, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - if qty < 1 { - return nil, fmt.Errorf("No AZs found in region, this is probably a bug.") - } - zones := make([]string, qty) - for n := range zones { - zone, ok := attrs["names."+strconv.Itoa(n)] - if !ok { - return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug.") - } - zones[n] = zone - } - return zones, nil -} - -const testAccCheckAwsAvailabilityZonesConfig = ` -data "aws_availability_zones" "availability_zones" { } -` - -const testAccCheckAwsAvailabilityZonesStateConfig = ` -data "aws_availability_zones" "state_filter" { - state = "available" -} -` diff --git a/builtin/providers/aws/data_source_aws_billing_service_account.go b/builtin/providers/aws/data_source_aws_billing_service_account.go deleted file mode 100644 index 23ec40843..000000000 --- a/builtin/providers/aws/data_source_aws_billing_service_account.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -// See http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2 -var billingAccountId = "386209384616" - -func dataSourceAwsBillingServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsBillingServiceAccountRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsBillingServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - d.SetId(billingAccountId) - - d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, billingAccountId)) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_billing_service_account_test.go 
b/builtin/providers/aws/data_source_aws_billing_service_account_test.go deleted file mode 100644 index 53f9c2df8..000000000 --- a/builtin/providers/aws/data_source_aws_billing_service_account_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSBillingServiceAccount_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsBillingServiceAccountConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_billing_service_account.main", "id", "386209384616"), - resource.TestCheckResourceAttr("data.aws_billing_service_account.main", "arn", "arn:aws:iam::386209384616:root"), - ), - }, - }, - }) -} - -const testAccCheckAwsBillingServiceAccountConfig = ` -data "aws_billing_service_account" "main" { } -` diff --git a/builtin/providers/aws/data_source_aws_caller_identity.go b/builtin/providers/aws/data_source_aws_caller_identity.go deleted file mode 100644 index a2adcef34..000000000 --- a/builtin/providers/aws/data_source_aws_caller_identity.go +++ /dev/null @@ -1,50 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/service/sts" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsCallerIdentity() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsCallerIdentityRead, - - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeString, - Computed: true, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "user_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).stsconn - - res, err := client.GetCallerIdentity(&sts.GetCallerIdentityInput{}) - if err != nil { - return 
fmt.Errorf("Error getting Caller Identity: %v", err) - } - - log.Printf("[DEBUG] Received Caller Identity: %s", res) - - d.SetId(time.Now().UTC().String()) - d.Set("account_id", res.Account) - d.Set("arn", res.Arn) - d.Set("user_id", res.UserId) - return nil -} diff --git a/builtin/providers/aws/data_source_aws_caller_identity_test.go b/builtin/providers/aws/data_source_aws_caller_identity_test.go deleted file mode 100644 index 100bb4db8..000000000 --- a/builtin/providers/aws/data_source_aws_caller_identity_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCallerIdentity_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsCallerIdentityConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsCallerIdentityAccountId("data.aws_caller_identity.current"), - ), - }, - }, - }) -} - -func testAccCheckAwsCallerIdentityAccountId(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find AccountID resource: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Account Id resource ID not set.") - } - - expected := testAccProvider.Meta().(*AWSClient).accountid - if rs.Primary.Attributes["account_id"] != expected { - return fmt.Errorf("Incorrect Account ID: expected %q, got %q", expected, rs.Primary.Attributes["account_id"]) - } - - if rs.Primary.Attributes["user_id"] == "" { - return fmt.Errorf("UserID expected to not be nil") - } - - if rs.Primary.Attributes["arn"] == "" { - return fmt.Errorf("ARN expected to not be nil") - } - - return nil - } -} - -const testAccCheckAwsCallerIdentityConfig_basic = ` -data "aws_caller_identity" "current" { } -` diff --git 
a/builtin/providers/aws/data_source_aws_canonical_user_id.go b/builtin/providers/aws/data_source_aws_canonical_user_id.go deleted file mode 100644 index ba6a0b098..000000000 --- a/builtin/providers/aws/data_source_aws_canonical_user_id.go +++ /dev/null @@ -1,48 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsCanonicalUserId() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsCanonicalUserIdRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "display_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsCanonicalUserIdRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).s3conn - - log.Printf("[DEBUG] Listing S3 buckets.") - - req := &s3.ListBucketsInput{} - resp, err := conn.ListBuckets(req) - if err != nil { - return err - } - if resp == nil || resp.Owner == nil { - return fmt.Errorf("no canonical user ID found") - } - - d.SetId(aws.StringValue(resp.Owner.ID)) - d.Set("id", resp.Owner.ID) - d.Set("display_name", resp.Owner.DisplayName) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_canonical_user_id_test.go b/builtin/providers/aws/data_source_aws_canonical_user_id_test.go deleted file mode 100644 index c1bd6e598..000000000 --- a/builtin/providers/aws/data_source_aws_canonical_user_id_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAwsCanonicalUserId_' - -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsCanonicalUserId_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: 
[]resource.TestStep{ - { - Config: testAccDataSourceAwsCanonicalUserIdConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsCanonicalUserIdCheckExists("data.aws_canonical_user_id.current"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsCanonicalUserIdCheckExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Can't find Canonical User ID resource: %s", name) - } - - if rs.Primary.Attributes["id"] == "" { - return fmt.Errorf("Missing Canonical User ID") - } - if rs.Primary.Attributes["display_name"] == "" { - return fmt.Errorf("Missing Display Name") - } - - return nil - } -} - -const testAccDataSourceAwsCanonicalUserIdConfig = ` -provider "aws" { - region = "us-west-2" -} - -data "aws_canonical_user_id" "current" { } -` diff --git a/builtin/providers/aws/data_source_aws_cloudformation_stack.go b/builtin/providers/aws/data_source_aws_cloudformation_stack.go deleted file mode 100644 index b834e0a29..000000000 --- a/builtin/providers/aws/data_source_aws_cloudformation_stack.go +++ /dev/null @@ -1,122 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsCloudFormationStack() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsCloudFormationStackRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "template_body": { - Type: schema.TypeString, - Computed: true, - StateFunc: func(v interface{}) string { - template, _ := normalizeCloudFormationTemplate(v) - return template - }, - }, - "capabilities": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - 
"disable_rollback": { - Type: schema.TypeBool, - Computed: true, - }, - "notification_arns": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "parameters": { - Type: schema.TypeMap, - Computed: true, - }, - "outputs": { - Type: schema.TypeMap, - Computed: true, - }, - "timeout_in_minutes": { - Type: schema.TypeInt, - Computed: true, - }, - "iam_role_arn": { - Type: schema.TypeString, - Computed: true, - }, - "tags": { - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cfconn - name := d.Get("name").(string) - input := cloudformation.DescribeStacksInput{ - StackName: aws.String(name), - } - - out, err := conn.DescribeStacks(&input) - if err != nil { - return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err) - } - if l := len(out.Stacks); l != 1 { - return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l) - } - stack := out.Stacks[0] - d.SetId(*stack.StackId) - - d.Set("description", stack.Description) - d.Set("disable_rollback", stack.DisableRollback) - d.Set("timeout_in_minutes", stack.TimeoutInMinutes) - d.Set("iam_role_arn", stack.RoleARN) - - if len(stack.NotificationARNs) > 0 { - d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) - } - - d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters)) - d.Set("tags", flattenCloudFormationTags(stack.Tags)) - d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) - - if len(stack.Capabilities) > 0 { - d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) - } - - tInput := cloudformation.GetTemplateInput{ - StackName: aws.String(name), - } - tOut, err := conn.GetTemplate(&tInput) - if err != nil { - return err - } - - template, err := 
normalizeCloudFormationTemplate(*tOut.TemplateBody) - if err != nil { - return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) - } - d.Set("template_body", template) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_cloudformation_stack_test.go b/builtin/providers/aws/data_source_aws_cloudformation_stack_test.go deleted file mode 100644 index 4a4cde785..000000000 --- a/builtin/providers/aws/data_source_aws_cloudformation_stack_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package aws - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudFormationStack_dataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckAwsCloudFormationStackDataSourceConfig_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "outputs.%", "1"), - resource.TestMatchResourceAttr("data.aws_cloudformation_stack.network", "outputs.VPCId", - regexp.MustCompile("^vpc-[a-z0-9]{8}$")), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "capabilities.#", "0"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "disable_rollback", "false"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "notification_arns.#", "0"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "parameters.%", "1"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "parameters.CIDR", "10.10.10.0/24"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "timeout_in_minutes", "6"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "tags.%", "2"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "tags.Name", "Form the 
Cloud"), - resource.TestCheckResourceAttr("data.aws_cloudformation_stack.network", "tags.Second", "meh"), - ), - }, - }, - }) -} - -const testAccCheckAwsCloudFormationStackDataSourceConfig_basic = ` -resource "aws_cloudformation_stack" "cfs" { - name = "tf-acc-ds-networking-stack" - parameters { - CIDR = "10.10.10.0/24" - } - timeout_in_minutes = 6 - template_body = < 1 { - return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.") - } - - dbInstance := *resp.DBInstances[0] - - d.SetId(d.Get("db_instance_identifier").(string)) - - d.Set("allocated_storage", dbInstance.AllocatedStorage) - d.Set("auto_minor_upgrade_enabled", dbInstance.AutoMinorVersionUpgrade) - d.Set("availability_zone", dbInstance.AvailabilityZone) - d.Set("backup_retention_period", dbInstance.BackupRetentionPeriod) - d.Set("db_cluster_identifier", dbInstance.DBClusterIdentifier) - d.Set("db_instance_arn", dbInstance.DBClusterIdentifier) - d.Set("db_instance_class", dbInstance.DBInstanceClass) - d.Set("db_name", dbInstance.DBName) - - var parameterGroups []string - for _, v := range dbInstance.DBParameterGroups { - parameterGroups = append(parameterGroups, *v.DBParameterGroupName) - } - if err := d.Set("db_parameter_groups", parameterGroups); err != nil { - return fmt.Errorf("[DEBUG] Error setting db_parameter_groups attribute: %#v, error: %#v", parameterGroups, err) - } - - var dbSecurityGroups []string - for _, v := range dbInstance.DBSecurityGroups { - dbSecurityGroups = append(dbSecurityGroups, *v.DBSecurityGroupName) - } - if err := d.Set("db_security_groups", dbSecurityGroups); err != nil { - return fmt.Errorf("[DEBUG] Error setting db_security_groups attribute: %#v, error: %#v", dbSecurityGroups, err) - } - - d.Set("db_subnet_group", dbInstance.DBSubnetGroup) - d.Set("db_instance_port", dbInstance.DbInstancePort) - d.Set("engine", dbInstance.Engine) - d.Set("engine_version", dbInstance.EngineVersion) - d.Set("iops", dbInstance.Iops) - 
d.Set("kms_key_id", dbInstance.KmsKeyId) - d.Set("license_model", dbInstance.LicenseModel) - d.Set("master_username", dbInstance.MasterUsername) - d.Set("monitoring_interval", dbInstance.MonitoringInterval) - d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn) - d.Set("address", dbInstance.Endpoint.Address) - d.Set("port", dbInstance.Endpoint.Port) - d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId) - d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port)) - - var optionGroups []string - for _, v := range dbInstance.OptionGroupMemberships { - optionGroups = append(optionGroups, *v.OptionGroupName) - } - if err := d.Set("option_group_memberships", optionGroups); err != nil { - return fmt.Errorf("[DEBUG] Error setting option_group_memberships attribute: %#v, error: %#v", optionGroups, err) - } - - d.Set("preferred_backup_window", dbInstance.PreferredBackupWindow) - d.Set("preferred_maintenance_window", dbInstance.PreferredMaintenanceWindow) - d.Set("publicly_accessible", dbInstance.PubliclyAccessible) - d.Set("storage_encrypted", dbInstance.StorageEncrypted) - d.Set("storage_type", dbInstance.StorageType) - d.Set("timezone", dbInstance.Timezone) - d.Set("replicate_source_db", dbInstance.ReadReplicaSourceDBInstanceIdentifier) - - var vpcSecurityGroups []string - for _, v := range dbInstance.VpcSecurityGroups { - vpcSecurityGroups = append(vpcSecurityGroups, *v.VpcSecurityGroupId) - } - if err := d.Set("vpc_security_groups", vpcSecurityGroups); err != nil { - return fmt.Errorf("[DEBUG] Error setting vpc_security_groups attribute: %#v, error: %#v", vpcSecurityGroups, err) - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_db_instance_test.go b/builtin/providers/aws/data_source_aws_db_instance_test.go deleted file mode 100644 index 5d3a200ec..000000000 --- a/builtin/providers/aws/data_source_aws_db_instance_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDataDbInstance_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigWithDataSource(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "allocated_storage"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "engine"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "db_instance_class"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "db_name"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "master_username"), - ), - }, - }, - }) -} - -func TestAccAWSDataDbInstance_endpoint(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigWithDataSource(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "address"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "port"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "hosted_zone_id"), - resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "endpoint"), - ), - }, - }, - }) -} - -func testAccAWSDBInstanceConfigWithDataSource(rInt int) string { - return fmt.Sprintf(` -resource "aws_db_instance" "bar" { - identifier = "datasource-test-terraform-%d" - - allocated_storage = 10 - engine = "MySQL" - instance_class = "db.m1.small" - name = "baz" - password = "barbarbarbar" - username = "foo" - - backup_retention_period = 0 - skip_final_snapshot = true -} - -data "aws_db_instance" "bar" { - db_instance_identifier = 
"${aws_db_instance.bar.identifier}" -} - -`, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_db_snapshot.go b/builtin/providers/aws/data_source_aws_db_snapshot.go deleted file mode 100644 index 1f381e405..000000000 --- a/builtin/providers/aws/data_source_aws_db_snapshot.go +++ /dev/null @@ -1,217 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "sort" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsDbSnapshot() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsDbSnapshotRead, - - Schema: map[string]*schema.Schema{ - //selection criteria - "db_instance_identifier": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "db_snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "snapshot_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "include_shared": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "include_public": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - //Computed values returned - "allocated_storage": { - Type: schema.TypeInt, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - "db_snapshot_arn": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - "engine": { - Type: schema.TypeString, - Computed: true, - }, - "engine_version": { - Type: schema.TypeString, - Computed: true, - }, - "iops": { - Type: schema.TypeInt, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "license_model": { - Type: schema.TypeString, - Computed: true, - }, - "option_group_name": { - Type: schema.TypeString, - 
Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - "source_db_snapshot_identifier": { - Type: schema.TypeString, - Computed: true, - }, - "source_region": { - Type: schema.TypeString, - Computed: true, - }, - "snapshot_create_time": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "storage_type": { - Type: schema.TypeString, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - instanceIdentifier, instanceIdentifierOk := d.GetOk("db_instance_identifier") - snapshotIdentifier, snapshotIdentifierOk := d.GetOk("db_snapshot_identifier") - - if !instanceIdentifierOk && !snapshotIdentifierOk { - return fmt.Errorf("One of db_snapshot_indentifier or db_instance_identifier must be assigned") - } - - params := &rds.DescribeDBSnapshotsInput{ - IncludePublic: aws.Bool(d.Get("include_public").(bool)), - IncludeShared: aws.Bool(d.Get("include_shared").(bool)), - } - if v, ok := d.GetOk("snapshot_type"); ok { - params.SnapshotType = aws.String(v.(string)) - } - if instanceIdentifierOk { - params.DBInstanceIdentifier = aws.String(instanceIdentifier.(string)) - } - if snapshotIdentifierOk { - params.DBSnapshotIdentifier = aws.String(snapshotIdentifier.(string)) - } - - resp, err := conn.DescribeDBSnapshots(params) - if err != nil { - return err - } - - if len(resp.DBSnapshots) < 1 { - return fmt.Errorf("Your query returned no results. 
Please change your search criteria and try again.") - } - - var snapshot *rds.DBSnapshot - if len(resp.DBSnapshots) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] aws_db_snapshot - multiple results found and `most_recent` is set to: %t", recent) - if recent { - snapshot = mostRecentDbSnapshot(resp.DBSnapshots) - } else { - return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.") - } - } else { - snapshot = resp.DBSnapshots[0] - } - - return dbSnapshotDescriptionAttributes(d, snapshot) -} - -type rdsSnapshotSort []*rds.DBSnapshot - -func (a rdsSnapshotSort) Len() int { return len(a) } -func (a rdsSnapshotSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a rdsSnapshotSort) Less(i, j int) bool { - return (*a[i].SnapshotCreateTime).Before(*a[j].SnapshotCreateTime) -} - -func mostRecentDbSnapshot(snapshots []*rds.DBSnapshot) *rds.DBSnapshot { - sortedSnapshots := snapshots - sort.Sort(rdsSnapshotSort(sortedSnapshots)) - return sortedSnapshots[len(sortedSnapshots)-1] -} - -func dbSnapshotDescriptionAttributes(d *schema.ResourceData, snapshot *rds.DBSnapshot) error { - d.SetId(*snapshot.DBInstanceIdentifier) - d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier) - d.Set("db_snapshot_identifier", snapshot.DBSnapshotIdentifier) - d.Set("snapshot_type", snapshot.SnapshotType) - d.Set("allocated_storage", snapshot.AllocatedStorage) - d.Set("availability_zone", snapshot.AvailabilityZone) - d.Set("db_snapshot_arn", snapshot.DBSnapshotArn) - d.Set("encrypted", snapshot.Encrypted) - d.Set("engine", snapshot.Engine) - d.Set("engine_version", snapshot.EngineVersion) - d.Set("iops", snapshot.Iops) - d.Set("kms_key_id", snapshot.KmsKeyId) - d.Set("license_model", snapshot.LicenseModel) - d.Set("option_group_name", snapshot.OptionGroupName) - d.Set("port", snapshot.Port) - d.Set("source_db_snapshot_identifier", snapshot.SourceDBSnapshotIdentifier) - d.Set("source_region", snapshot.SourceRegion) - 
d.Set("status", snapshot.Status) - d.Set("vpc_id", snapshot.VpcId) - d.Set("snapshot_create_time", snapshot.SnapshotCreateTime.Format(time.RFC3339)) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_db_snapshot_test.go b/builtin/providers/aws/data_source_aws_db_snapshot_test.go deleted file mode 100644 index c222136cd..000000000 --- a/builtin/providers/aws/data_source_aws_db_snapshot_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDbSnapshotDataSource_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsDbSnapshotDataSourceConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDbSnapshotDataSourceID("data.aws_db_snapshot.snapshot"), - ), - }, - }, - }) -} - -func testAccCheckAwsDbSnapshotDataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find Volume data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Snapshot data source ID not set") - } - return nil - } -} - -func testAccCheckAwsDbSnapshotDataSourceConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_db_instance" "bar" { - allocated_storage = 10 - engine = "MySQL" - engine_version = "5.6.21" - instance_class = "db.t1.micro" - name = "baz" - password = "barbarbarbar" - username = "foo" - skip_final_snapshot = true - - # Maintenance Window is stored in lower case in the API, though not strictly - # documented. Terraform will downcase this to match (as opposed to throw a - # validation error). 
- maintenance_window = "Fri:09:00-Fri:09:30" - - backup_retention_period = 0 - - parameter_group_name = "default.mysql5.6" -} - -data "aws_db_snapshot" "snapshot" { - most_recent = "true" - db_snapshot_identifier = "${aws_db_snapshot.test.id}" -} - - -resource "aws_db_snapshot" "test" { - db_instance_identifier = "${aws_db_instance.bar.id}" - db_snapshot_identifier = "testsnapshot%d" -}`, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_ebs_snapshot.go b/builtin/providers/aws/data_source_aws_ebs_snapshot.go deleted file mode 100644 index c0e386643..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_snapshot.go +++ /dev/null @@ -1,162 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsEbsSnapshot() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsEbsSnapshotRead, - - Schema: map[string]*schema.Schema{ - //selection criteria - "filter": dataSourceFiltersSchema(), - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "owners": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "snapshot_ids": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "restorable_by_user_ids": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - //Computed values returned - "snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - "volume_id": { - Type: schema.TypeString, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Computed: true, - }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, - "owner_alias": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - "description": { - Type: 
schema.TypeString, - Computed: true, - }, - "volume_size": { - Type: schema.TypeInt, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_encryption_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "tags": dataSourceTagsSchema(), - }, - } -} - -func dataSourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids") - filters, filtersOk := d.GetOk("filter") - snapshotIds, snapshotIdsOk := d.GetOk("snapshot_ids") - owners, ownersOk := d.GetOk("owners") - - if !restorableUsersOk && !filtersOk && !snapshotIdsOk && !ownersOk { - return fmt.Errorf("One of snapshot_ids, filters, restorable_by_user_ids, or owners must be assigned") - } - - params := &ec2.DescribeSnapshotsInput{} - if restorableUsersOk { - params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{})) - } - if filtersOk { - params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) - } - if ownersOk { - params.OwnerIds = expandStringList(owners.([]interface{})) - } - if snapshotIdsOk { - params.SnapshotIds = expandStringList(snapshotIds.([]interface{})) - } - - resp, err := conn.DescribeSnapshots(params) - if err != nil { - return err - } - - var snapshot *ec2.Snapshot - if len(resp.Snapshots) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - if len(resp.Snapshots) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] aws_ebs_snapshot - multiple results found and `most_recent` is set to: %t", recent) - if recent { - snapshot = mostRecentSnapshot(resp.Snapshots) - } else { - return fmt.Errorf("Your query returned more than one result. 
Please try a more specific search criteria.") - } - } else { - snapshot = resp.Snapshots[0] - } - - //Single Snapshot found so set to state - return snapshotDescriptionAttributes(d, snapshot) -} - -func mostRecentSnapshot(snapshots []*ec2.Snapshot) *ec2.Snapshot { - return sortSnapshots(snapshots)[0] -} - -func snapshotDescriptionAttributes(d *schema.ResourceData, snapshot *ec2.Snapshot) error { - d.SetId(*snapshot.SnapshotId) - d.Set("snapshot_id", snapshot.SnapshotId) - d.Set("volume_id", snapshot.VolumeId) - d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId) - d.Set("description", snapshot.Description) - d.Set("encrypted", snapshot.Encrypted) - d.Set("kms_key_id", snapshot.KmsKeyId) - d.Set("volume_size", snapshot.VolumeSize) - d.Set("state", snapshot.State) - d.Set("owner_id", snapshot.OwnerId) - d.Set("owner_alias", snapshot.OwnerAlias) - - if err := d.Set("tags", dataSourceTags(snapshot.Tags)); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_ebs_snapshot_ids.go b/builtin/providers/aws/data_source_aws_ebs_snapshot_ids.go deleted file mode 100644 index bd4f2ad8b..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_snapshot_ids.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsEbsSnapshotIds() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsEbsSnapshotIdsRead, - - Schema: map[string]*schema.Schema{ - "filter": dataSourceFiltersSchema(), - "owners": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "restorable_by_user_ids": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": dataSourceTagsSchema(), - "ids": &schema.Schema{ - Type: schema.TypeList, - 
Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceAwsEbsSnapshotIdsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids") - filters, filtersOk := d.GetOk("filter") - owners, ownersOk := d.GetOk("owners") - - if restorableUsers == false && filtersOk == false && ownersOk == false { - return fmt.Errorf("One of filters, restorable_by_user_ids, or owners must be assigned") - } - - params := &ec2.DescribeSnapshotsInput{} - - if restorableUsersOk { - params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{})) - } - if filtersOk { - params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) - } - if ownersOk { - params.OwnerIds = expandStringList(owners.([]interface{})) - } - - resp, err := conn.DescribeSnapshots(params) - if err != nil { - return err - } - - snapshotIds := make([]string, 0) - - for _, snapshot := range sortSnapshots(resp.Snapshots) { - snapshotIds = append(snapshotIds, *snapshot.SnapshotId) - } - - d.SetId(fmt.Sprintf("%d", hashcode.String(params.String()))) - d.Set("ids", snapshotIds) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_ebs_snapshot_ids_test.go b/builtin/providers/aws/data_source_aws_ebs_snapshot_ids_test.go deleted file mode 100644 index 0c5f3ec4d..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_snapshot_ids_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/satori/uuid" -) - -func TestAccDataSourceAwsEbsSnapshotIds_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsEbsSnapshotIdsConfig_basic, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ebs_snapshot_ids.test"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsEbsSnapshotIds_sorted(t *testing.T) { - uuid := uuid.NewV4().String() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsEbsSnapshotIdsConfig_sorted1(uuid), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("aws_ebs_snapshot.a", "id"), - resource.TestCheckResourceAttrSet("aws_ebs_snapshot.b", "id"), - ), - }, - { - Config: testAccDataSourceAwsEbsSnapshotIdsConfig_sorted2(uuid), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ebs_snapshot_ids.test"), - resource.TestCheckResourceAttr("data.aws_ebs_snapshot_ids.test", "ids.#", "2"), - resource.TestCheckResourceAttrPair( - "data.aws_ebs_snapshot_ids.test", "ids.0", - "aws_ebs_snapshot.b", "id"), - resource.TestCheckResourceAttrPair( - "data.aws_ebs_snapshot_ids.test", "ids.1", - "aws_ebs_snapshot.a", "id"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsEbsSnapshotIds_empty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsEbsSnapshotIdsConfig_empty, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ebs_snapshot_ids.empty"), - resource.TestCheckResourceAttr("data.aws_ebs_snapshot_ids.empty", "ids.#", "0"), - ), - }, - }, - }) -} - -const testAccDataSourceAwsEbsSnapshotIdsConfig_basic = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - size = 1 -} - -resource "aws_ebs_snapshot" "test" { - volume_id = "${aws_ebs_volume.test.id}" -} - -data "aws_ebs_snapshot_ids" "test" { - owners = ["self"] -} -` - -func testAccDataSourceAwsEbsSnapshotIdsConfig_sorted1(uuid string) string { - return fmt.Sprintf(` 
-resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - size = 1 - - count = 2 -} - -resource "aws_ebs_snapshot" "a" { - volume_id = "${aws_ebs_volume.test.*.id[0]}" - description = "tf-test-%s" -} - -resource "aws_ebs_snapshot" "b" { - volume_id = "${aws_ebs_volume.test.*.id[1]}" - description = "tf-test-%s" - - // We want to ensure that 'aws_ebs_snapshot.a.creation_date' is less than - // 'aws_ebs_snapshot.b.creation_date'/ so that we can ensure that the - // snapshots are being sorted correctly. - depends_on = ["aws_ebs_snapshot.a"] -} -`, uuid, uuid) -} - -func testAccDataSourceAwsEbsSnapshotIdsConfig_sorted2(uuid string) string { - return testAccDataSourceAwsEbsSnapshotIdsConfig_sorted1(uuid) + fmt.Sprintf(` -data "aws_ebs_snapshot_ids" "test" { - owners = ["self"] - - filter { - name = "description" - values = ["tf-test-%s"] - } -} -`, uuid) -} - -const testAccDataSourceAwsEbsSnapshotIdsConfig_empty = ` -data "aws_ebs_snapshot_ids" "empty" { - owners = ["000000000000"] -} -` diff --git a/builtin/providers/aws/data_source_aws_ebs_snapshot_test.go b/builtin/providers/aws/data_source_aws_ebs_snapshot_test.go deleted file mode 100644 index 58a20165a..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_snapshot_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEbsSnapshotDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsEbsSnapshotDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ebs_snapshot.snapshot"), - resource.TestCheckResourceAttr("data.aws_ebs_snapshot.snapshot", "volume_size", "40"), - ), - }, - }, - }) -} - -func TestAccAWSEbsSnapshotDataSource_multipleFilters(t *testing.T) 
{ - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsEbsSnapshotDataSourceConfigWithMultipleFilters, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsSnapshotDataSourceID("data.aws_ebs_snapshot.snapshot"), - resource.TestCheckResourceAttr("data.aws_ebs_snapshot.snapshot", "volume_size", "10"), - ), - }, - }, - }) -} - -func testAccCheckAwsEbsSnapshotDataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find snapshot data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Snapshot data source ID not set") - } - return nil - } -} - -const testAccCheckAwsEbsSnapshotDataSourceConfig = ` -resource "aws_ebs_volume" "example" { - availability_zone = "us-west-2a" - type = "gp2" - size = 40 - tags { - Name = "External Volume" - } -} - -resource "aws_ebs_snapshot" "snapshot" { - volume_id = "${aws_ebs_volume.example.id}" -} - -data "aws_ebs_snapshot" "snapshot" { - most_recent = true - snapshot_ids = ["${aws_ebs_snapshot.snapshot.id}"] -} -` - -const testAccCheckAwsEbsSnapshotDataSourceConfigWithMultipleFilters = ` -resource "aws_ebs_volume" "external1" { - availability_zone = "us-west-2a" - type = "gp2" - size = 10 - tags { - Name = "External Volume 1" - } -} - -resource "aws_ebs_snapshot" "snapshot" { - volume_id = "${aws_ebs_volume.external1.id}" -} - -data "aws_ebs_snapshot" "snapshot" { - most_recent = true - snapshot_ids = ["${aws_ebs_snapshot.snapshot.id}"] - filter { - name = "volume-size" - values = ["10"] - } -} -` diff --git a/builtin/providers/aws/data_source_aws_ebs_volume.go b/builtin/providers/aws/data_source_aws_ebs_volume.go deleted file mode 100644 index 7794ecf28..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_volume.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "fmt" - 
"log" - "sort" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsEbsVolume() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsEbsVolumeRead, - - Schema: map[string]*schema.Schema{ - "filter": dataSourceFiltersSchema(), - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - "iops": { - Type: schema.TypeInt, - Computed: true, - }, - "volume_type": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - }, - "snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "volume_id": { - Type: schema.TypeString, - Computed: true, - }, - "tags": dataSourceTagsSchema(), - }, - } -} - -func dataSourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - filters, filtersOk := d.GetOk("filter") - - params := &ec2.DescribeVolumesInput{} - if filtersOk { - params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) - } - - resp, err := conn.DescribeVolumes(params) - if err != nil { - return err - } - - log.Printf("Found These Volumes %s", spew.Sdump(resp.Volumes)) - - filteredVolumes := resp.Volumes[:] - - var volume *ec2.Volume - if len(filteredVolumes) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - if len(filteredVolumes) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] aws_ebs_volume - multiple results found and `most_recent` is set to: %t", recent) - if recent { - volume = mostRecentVolume(filteredVolumes) - } else { - return fmt.Errorf("Your query returned more than one result. 
Please try a more " + - "specific search criteria, or set `most_recent` attribute to true.") - } - } else { - // Query returned single result. - volume = filteredVolumes[0] - } - - log.Printf("[DEBUG] aws_ebs_volume - Single Volume found: %s", *volume.VolumeId) - return volumeDescriptionAttributes(d, volume) -} - -type volumeSort []*ec2.Volume - -func (a volumeSort) Len() int { return len(a) } -func (a volumeSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a volumeSort) Less(i, j int) bool { - itime := *a[i].CreateTime - jtime := *a[j].CreateTime - return itime.Unix() < jtime.Unix() -} - -func mostRecentVolume(volumes []*ec2.Volume) *ec2.Volume { - sortedVolumes := volumes - sort.Sort(volumeSort(sortedVolumes)) - return sortedVolumes[len(sortedVolumes)-1] -} - -func volumeDescriptionAttributes(d *schema.ResourceData, volume *ec2.Volume) error { - d.SetId(*volume.VolumeId) - d.Set("volume_id", volume.VolumeId) - d.Set("availability_zone", volume.AvailabilityZone) - d.Set("encrypted", volume.Encrypted) - d.Set("iops", volume.Iops) - d.Set("kms_key_id", volume.KmsKeyId) - d.Set("size", volume.Size) - d.Set("snapshot_id", volume.SnapshotId) - d.Set("volume_type", volume.VolumeType) - - if err := d.Set("tags", dataSourceTags(volume.Tags)); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_ebs_volume_test.go b/builtin/providers/aws/data_source_aws_ebs_volume_test.go deleted file mode 100644 index 8be4455fe..000000000 --- a/builtin/providers/aws/data_source_aws_ebs_volume_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEbsVolumeDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsEbsVolumeDataSourceConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAwsEbsVolumeDataSourceID("data.aws_ebs_volume.ebs_volume"), - resource.TestCheckResourceAttr("data.aws_ebs_volume.ebs_volume", "size", "40"), - ), - }, - }, - }) -} - -func TestAccAWSEbsVolumeDataSource_multipleFilters(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsEbsVolumeDataSourceConfigWithMultipleFilters, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsEbsVolumeDataSourceID("data.aws_ebs_volume.ebs_volume"), - resource.TestCheckResourceAttr("data.aws_ebs_volume.ebs_volume", "size", "10"), - resource.TestCheckResourceAttr("data.aws_ebs_volume.ebs_volume", "volume_type", "gp2"), - ), - }, - }, - }) -} - -func testAccCheckAwsEbsVolumeDataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find Volume data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Volume data source ID not set") - } - return nil - } -} - -const testAccCheckAwsEbsVolumeDataSourceConfig = ` -resource "aws_ebs_volume" "example" { - availability_zone = "us-west-2a" - type = "gp2" - size = 40 - tags { - Name = "External Volume" - } -} - -data "aws_ebs_volume" "ebs_volume" { - most_recent = true - filter { - name = "tag:Name" - values = ["External Volume"] - } - filter { - name = "volume-type" - values = ["${aws_ebs_volume.example.type}"] - } -} -` - -const testAccCheckAwsEbsVolumeDataSourceConfigWithMultipleFilters = ` -resource "aws_ebs_volume" "external1" { - availability_zone = "us-west-2a" - type = "gp2" - size = 10 - tags { - Name = "External Volume 1" - } -} - -data "aws_ebs_volume" "ebs_volume" { - most_recent = true - filter { - name = "tag:Name" - values = ["External Volume 1"] - } - filter { - name = "size" - values = ["${aws_ebs_volume.external1.size}"] - } - 
filter { - name = "volume-type" - values = ["${aws_ebs_volume.external1.type}"] - } -} -` diff --git a/builtin/providers/aws/data_source_aws_ecs_cluster.go b/builtin/providers/aws/data_source_aws_ecs_cluster.go deleted file mode 100644 index 2d8afeeea..000000000 --- a/builtin/providers/aws/data_source_aws_ecs_cluster.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsEcsCluster() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsEcsClusterRead, - - Schema: map[string]*schema.Schema{ - "cluster_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - }, - - "pending_tasks_count": { - Type: schema.TypeInt, - Computed: true, - }, - - "running_tasks_count": { - Type: schema.TypeInt, - Computed: true, - }, - - "registered_container_instances_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, - } -} - -func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - desc, err := conn.DescribeClusters(&ecs.DescribeClustersInput{ - Clusters: []*string{aws.String(d.Get("cluster_name").(string))}, - }) - - if err != nil { - return err - } - - for _, cluster := range desc.Clusters { - if aws.StringValue(cluster.ClusterName) != d.Get("cluster_name").(string) { - continue - } - d.SetId(aws.StringValue(cluster.ClusterArn)) - d.Set("status", cluster.Status) - d.Set("pending_tasks_count", cluster.PendingTasksCount) - d.Set("running_tasks_count", cluster.RunningTasksCount) - d.Set("registered_container_instances_count", cluster.RegisteredContainerInstancesCount) - } - - if d.Id() == "" { - return fmt.Errorf("cluster with name %q not found", d.Get("cluster_name").(string)) - } - - return nil 
-} diff --git a/builtin/providers/aws/data_source_aws_ecs_cluster_test.go b/builtin/providers/aws/data_source_aws_ecs_cluster_test.go deleted file mode 100644 index 131c4656e..000000000 --- a/builtin/providers/aws/data_source_aws_ecs_cluster_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEcsDataSource_ecsCluster(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsEcsClusterDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_ecs_cluster.default", "status", "ACTIVE"), - resource.TestCheckResourceAttr("data.aws_ecs_cluster.default", "pending_tasks_count", "0"), - resource.TestCheckResourceAttr("data.aws_ecs_cluster.default", "running_tasks_count", "0"), - resource.TestCheckResourceAttr("data.aws_ecs_cluster.default", "registered_container_instances_count", "0"), - ), - }, - }, - }) -} - -var testAccCheckAwsEcsClusterDataSourceConfig = fmt.Sprintf(` -resource "aws_ecs_cluster" "default" { - name = "default-%d" -} - -resource "aws_ecs_task_definition" "mongo" { - family = "mongodb" - container_definitions = < 1 { - return fmt.Errorf("multiple Elastic IPs matched; use additional constraints to reduce matches to a single Elastic IP") - } - - eip := resp.Addresses[0] - - d.SetId(*eip.AllocationId) - d.Set("id", eip.AllocationId) - d.Set("public_ip", eip.PublicIp) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_eip_test.go b/builtin/providers/aws/data_source_aws_eip_test.go deleted file mode 100644 index e19db7924..000000000 --- a/builtin/providers/aws/data_source_aws_eip_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsEip(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsEipConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsEipCheck("data.aws_eip.by_id"), - testAccDataSourceAwsEipCheck("data.aws_eip.by_public_ip"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsEipCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - eipRs, ok := s.RootModule().Resources["aws_eip.test"] - if !ok { - return fmt.Errorf("can't find aws_eip.test in state") - } - - attr := rs.Primary.Attributes - - if attr["id"] != eipRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - eipRs.Primary.Attributes["id"], - ) - } - - if attr["public_ip"] != eipRs.Primary.Attributes["public_ip"] { - return fmt.Errorf( - "public_ip is %s; want %s", - attr["public_ip"], - eipRs.Primary.Attributes["public_ip"], - ) - } - - return nil - } -} - -const testAccDataSourceAwsEipConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_eip" "wrong1" {} -resource "aws_eip" "test" {} -resource "aws_eip" "wrong2" {} - -data "aws_eip" "by_id" { - id = "${aws_eip.test.id}" -} - -data "aws_eip" "by_public_ip" { - public_ip = "${aws_eip.test.public_ip}" -} -` diff --git a/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack.go b/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack.go deleted file mode 100644 index f9bec5bce..000000000 --- a/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - 
"github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsElasticBeanstalkSolutionStack() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsElasticBeanstalkSolutionStackRead, - - Schema: map[string]*schema.Schema{ - "name_regex": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateSolutionStackNameRegex, - }, - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - // Computed values. - "name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// dataSourceAwsElasticBeanstalkSolutionStackRead performs the API lookup. -func dataSourceAwsElasticBeanstalkSolutionStackRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - nameRegex := d.Get("name_regex") - - var params *elasticbeanstalk.ListAvailableSolutionStacksInput - - resp, err := conn.ListAvailableSolutionStacks(params) - if err != nil { - return err - } - - var filteredSolutionStacks []*string - - r := regexp.MustCompile(nameRegex.(string)) - for _, solutionStack := range resp.SolutionStacks { - if r.MatchString(*solutionStack) { - filteredSolutionStacks = append(filteredSolutionStacks, solutionStack) - } - } - - var solutionStack *string - if len(filteredSolutionStacks) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - if len(filteredSolutionStacks) == 1 { - // Query returned single result. - solutionStack = filteredSolutionStacks[0] - } else { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - multiple results found and `most_recent` is set to: %t", recent) - if recent { - solutionStack = mostRecentSolutionStack(filteredSolutionStacks) - } else { - return fmt.Errorf("Your query returned more than one result. 
Please try a more " + - "specific search criteria, or set `most_recent` attribute to true.") - } - } - - log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - Single solution stack found: %s", *solutionStack) - return solutionStackDescriptionAttributes(d, solutionStack) -} - -// Returns the most recent solution stack out of a slice of stacks. -func mostRecentSolutionStack(solutionStacks []*string) *string { - return solutionStacks[0] -} - -// populate the numerous fields that the image description returns. -func solutionStackDescriptionAttributes(d *schema.ResourceData, solutionStack *string) error { - // Simple attributes first - d.SetId(*solutionStack) - d.Set("name", solutionStack) - return nil -} - -func validateSolutionStackNameRegex(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if _, err := regexp.Compile(value); err != nil { - errors = append(errors, fmt.Errorf( - "%q contains an invalid regular expression: %s", - k, err)) - } - return -} diff --git a/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack_test.go b/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack_test.go deleted file mode 100644 index 99ca8f7c8..000000000 --- a/builtin/providers/aws/data_source_aws_elastic_beanstalk_solution_stack_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticBeanstalkSolutionStackDataSource(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsElasticBeanstalkSolutionStackDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsElasticBeanstalkSolutionStackDataSourceID("data.aws_elastic_beanstalk_solution_stack.multi_docker"), - 
resource.TestMatchResourceAttr("data.aws_elastic_beanstalk_solution_stack.multi_docker", "name", regexp.MustCompile("^64bit Amazon Linux (.*) Multi-container Docker (.*)$")), - ), - }, - }, - }) -} - -func TestResourceValidateSolutionStackNameRegex(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: `\`, - ErrCount: 1, - }, - { - Value: `**`, - ErrCount: 1, - }, - { - Value: `(.+`, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateSolutionStackNameRegex(tc.Value, "name_regex") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: `\/`, - ErrCount: 0, - }, - { - Value: `.*`, - ErrCount: 0, - }, - { - Value: `\b(?:\d{1,3}\.){3}\d{1,3}\b`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateSolutionStackNameRegex(tc.Value, "name_regex") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func testAccCheckAwsElasticBeanstalkSolutionStackDataSourceDestroy(s *terraform.State) error { - return nil -} - -func testAccCheckAwsElasticBeanstalkSolutionStackDataSourceID(n string) resource.TestCheckFunc { - // Wait for solution stacks - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find solution stack data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Solution stack data source ID not set") - } - return nil - } -} - -const testAccCheckAwsElasticBeanstalkSolutionStackDataSourceConfig = ` -data "aws_elastic_beanstalk_solution_stack" "multi_docker" { - most_recent = true - name_regex = "^64bit Amazon Linux (.*) Multi-container Docker (.*)$" -} -` diff --git a/builtin/providers/aws/data_source_aws_elasticache_cluster.go b/builtin/providers/aws/data_source_aws_elasticache_cluster.go deleted 
file mode 100644 index eaa539d3a..000000000 --- a/builtin/providers/aws/data_source_aws_elasticache_cluster.go +++ /dev/null @@ -1,236 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsElastiCacheCluster() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsElastiCacheClusterRead, - - Schema: map[string]*schema.Schema{ - "cluster_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - value := v.(string) - return strings.ToLower(value) - }, - }, - - "node_type": { - Type: schema.TypeString, - Computed: true, - }, - - "num_cache_nodes": { - Type: schema.TypeInt, - Computed: true, - }, - - "subnet_group_name": { - Type: schema.TypeString, - Computed: true, - }, - - "engine": { - Type: schema.TypeString, - Computed: true, - }, - - "engine_version": { - Type: schema.TypeString, - Computed: true, - }, - - "parameter_group_name": { - Type: schema.TypeString, - Computed: true, - }, - - "replication_group_id": { - Type: schema.TypeString, - Computed: true, - }, - - "security_group_names": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "security_group_ids": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "maintenance_window": { - Type: schema.TypeString, - Computed: true, - }, - - "snapshot_window": { - Type: schema.TypeString, - Computed: true, - }, - - "snapshot_retention_limit": { - Type: schema.TypeInt, - Computed: true, - }, - - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - - "notification_topic_arn": { - Type: schema.TypeString, - Computed: true, - }, - - "port": { - Type: schema.TypeInt, - Computed: true, - }, - - "configuration_endpoint": { 
- Type: schema.TypeString, - Computed: true, - }, - - "cluster_address": { - Type: schema.TypeString, - Computed: true, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "cache_nodes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "address": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - req := &elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(d.Get("cluster_id").(string)), - ShowCacheNodeInfo: aws.Bool(true), - } - - resp, err := conn.DescribeCacheClusters(req) - if err != nil { - return err - } - - if len(resp.CacheClusters) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - if len(resp.CacheClusters) > 1 { - return fmt.Errorf("Your query returned more than one result. 
Please try a more specific search criteria.") - } - - cluster := resp.CacheClusters[0] - - d.SetId(*cluster.CacheClusterId) - - d.Set("cluster_id", cluster.CacheClusterId) - d.Set("node_type", cluster.CacheNodeType) - d.Set("num_cache_nodes", cluster.NumCacheNodes) - d.Set("subnet_group_name", cluster.CacheSubnetGroupName) - d.Set("engine", cluster.Engine) - d.Set("engine_version", cluster.EngineVersion) - d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(cluster.CacheSecurityGroups)) - d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(cluster.SecurityGroups)) - - if cluster.CacheParameterGroup != nil { - d.Set("parameter_group_name", cluster.CacheParameterGroup.CacheParameterGroupName) - } - - if cluster.ReplicationGroupId != nil { - d.Set("replication_group_id", cluster.ReplicationGroupId) - } - - d.Set("maintenance_window", cluster.PreferredMaintenanceWindow) - d.Set("snapshot_window", cluster.SnapshotWindow) - d.Set("snapshot_retention_limit", cluster.SnapshotRetentionLimit) - d.Set("availability_zone", cluster.PreferredAvailabilityZone) - - if cluster.NotificationConfiguration != nil { - if *cluster.NotificationConfiguration.TopicStatus == "active" { - d.Set("notification_topic_arn", cluster.NotificationConfiguration.TopicArn) - } - } - - if cluster.ConfigurationEndpoint != nil { - d.Set("port", cluster.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *cluster.ConfigurationEndpoint.Address, *cluster.ConfigurationEndpoint.Port))) - d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *cluster.ConfigurationEndpoint.Address))) - } - - if err := setCacheNodeData(d, cluster); err != nil { - return err - } - - arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster %s", *cluster.CacheClusterId) - } - d.Set("arn", arn) - - tagResp, err := 
conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var et []*elasticache.Tag - if len(tagResp.TagList) > 0 { - et = tagResp.TagList - } - d.Set("tags", tagsToMapEC(et)) - - return nil - -} diff --git a/builtin/providers/aws/data_source_aws_elasticache_cluster_test.go b/builtin/providers/aws/data_source_aws_elasticache_cluster_test.go deleted file mode 100644 index 57791cf05..000000000 --- a/builtin/providers/aws/data_source_aws_elasticache_cluster_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDataElasticacheCluster_basic(t *testing.T) { - rInt := acctest.RandInt() - rString := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSElastiCacheClusterConfigWithDataSource(rString, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elasticache_cluster.bar", "engine", "memcached"), - resource.TestCheckResourceAttr("data.aws_elasticache_cluster.bar", "node_type", "cache.m1.small"), - resource.TestCheckResourceAttr("data.aws_elasticache_cluster.bar", "port", "11211"), - resource.TestCheckResourceAttr("data.aws_elasticache_cluster.bar", "num_cache_nodes", "1"), - resource.TestCheckResourceAttrSet("data.aws_elasticache_cluster.bar", "configuration_endpoint"), - resource.TestCheckResourceAttrSet("data.aws_elasticache_cluster.bar", "cluster_address"), - resource.TestCheckResourceAttrSet("data.aws_elasticache_cluster.bar", "availability_zone"), - ), - }, - }, - }) -} - -func testAccAWSElastiCacheClusterConfigWithDataSource(rString string, rInt int) string { - return fmt.Sprintf(` -provider "aws" { - 
region = "us-east-1" -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%d" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 11211 - parameter_group_name = "default.memcached1.4" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] -} - -data "aws_elasticache_cluster" "bar" { - cluster_id = "${aws_elasticache_cluster.bar.cluster_id}" -} - -`, rInt, rInt, rString) -} diff --git a/builtin/providers/aws/data_source_aws_elb_hosted_zone_id.go b/builtin/providers/aws/data_source_aws_elb_hosted_zone_id.go deleted file mode 100644 index ee75a27bf..000000000 --- a/builtin/providers/aws/data_source_aws_elb_hosted_zone_id.go +++ /dev/null @@ -1,56 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -// See https://github.com/fog/fog-aws/pull/332/files -// This list isn't exposed by AWS - it's been found through -// trouble solving -var elbHostedZoneIdPerRegionMap = map[string]string{ - "ap-northeast-1": "Z14GRHDCWA56QT", - "ap-northeast-2": "ZWKZPGTI48KDX", - "ap-south-1": "ZP97RAFLXTNZK", - "ap-southeast-1": "Z1LMS91P8CMLE5", - "ap-southeast-2": "Z1GM3OXH4ZPM65", - "ca-central-1": "ZQSVJUPU6J1EY", - "eu-central-1": "Z215JYRZR1TBD5", - "eu-west-1": "Z32O12XQLNTSW2", - "eu-west-2": "ZHURV8PSTC4K8", - "us-east-1": "Z35SXDOTRQ7X7K", - "us-east-2": "Z3AADJGX6KTTL2", - "us-west-1": "Z368ELLRRE2KJ0", - "us-west-2": "Z1H1FL5HABSF5", - "sa-east-1": "Z2P70J7HTTTPLU", - "us-gov-west-1": "048591011584", - "cn-north-1": "638102146993", -} - -func 
dataSourceAwsElbHostedZoneId() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsElbHostedZoneIdRead, - - Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceAwsElbHostedZoneIdRead(d *schema.ResourceData, meta interface{}) error { - region := meta.(*AWSClient).region - if v, ok := d.GetOk("region"); ok { - region = v.(string) - } - - if zoneId, ok := elbHostedZoneIdPerRegionMap[region]; ok { - d.SetId(zoneId) - return nil - } - - return fmt.Errorf("Unknown region (%q)", region) -} diff --git a/builtin/providers/aws/data_source_aws_elb_hosted_zone_id_test.go b/builtin/providers/aws/data_source_aws_elb_hosted_zone_id_test.go deleted file mode 100644 index e7fe326a0..000000000 --- a/builtin/providers/aws/data_source_aws_elb_hosted_zone_id_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElbHostedZoneId_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsElbHostedZoneIdConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.main", "id", "Z1H1FL5HABSF5"), - ), - }, - { - Config: testAccCheckAwsElbHostedZoneIdExplicitRegionConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.regional", "id", "Z32O12XQLNTSW2"), - ), - }, - }, - }) -} - -const testAccCheckAwsElbHostedZoneIdConfig = ` -data "aws_elb_hosted_zone_id" "main" { } -` - -const testAccCheckAwsElbHostedZoneIdExplicitRegionConfig = ` -data "aws_elb_hosted_zone_id" "regional" { - region = "eu-west-1" -} -` diff --git a/builtin/providers/aws/data_source_aws_elb_service_account.go b/builtin/providers/aws/data_source_aws_elb_service_account.go deleted file mode 100644 index 
a3d6cdd71..000000000 --- a/builtin/providers/aws/data_source_aws_elb_service_account.go +++ /dev/null @@ -1,61 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -// See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy -var elbAccountIdPerRegionMap = map[string]string{ - "ap-northeast-1": "582318560864", - "ap-northeast-2": "600734575887", - "ap-south-1": "718504428378", - "ap-southeast-1": "114774131450", - "ap-southeast-2": "783225319266", - "ca-central-1": "985666609251", - "cn-north-1": "638102146993", - "eu-central-1": "054676820928", - "eu-west-1": "156460612806", - "eu-west-2": "652711504416", - "sa-east-1": "507241528517", - "us-east-1": "127311923021", - "us-east-2": "033677994240", - "us-gov-west": "048591011584", - "us-west-1": "027434742980", - "us-west-2": "797873946194", -} - -func dataSourceAwsElbServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsElbServiceAccountRead, - - Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Optional: true, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsElbServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - region := meta.(*AWSClient).region - if v, ok := d.GetOk("region"); ok { - region = v.(string) - } - - if accid, ok := elbAccountIdPerRegionMap[region]; ok { - d.SetId(accid) - - d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, accid)) - - return nil - } - - return fmt.Errorf("Unknown region (%q)", region) -} diff --git a/builtin/providers/aws/data_source_aws_elb_service_account_test.go b/builtin/providers/aws/data_source_aws_elb_service_account_test.go deleted file mode 100644 index 551d7df46..000000000 --- a/builtin/providers/aws/data_source_aws_elb_service_account_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package aws - -import ( - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElbServiceAccount_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsElbServiceAccountConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elb_service_account.main", "id", "797873946194"), - resource.TestCheckResourceAttr("data.aws_elb_service_account.main", "arn", "arn:aws:iam::797873946194:root"), - ), - }, - { - Config: testAccCheckAwsElbServiceAccountExplicitRegionConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elb_service_account.regional", "id", "156460612806"), - resource.TestCheckResourceAttr("data.aws_elb_service_account.regional", "arn", "arn:aws:iam::156460612806:root"), - ), - }, - }, - }) -} - -const testAccCheckAwsElbServiceAccountConfig = ` -data "aws_elb_service_account" "main" { } -` - -const testAccCheckAwsElbServiceAccountExplicitRegionConfig = ` -data "aws_elb_service_account" "regional" { - region = "eu-west-1" -} -` diff --git a/builtin/providers/aws/data_source_aws_iam_account_alias.go b/builtin/providers/aws/data_source_aws_iam_account_alias.go deleted file mode 100644 index f93897373..000000000 --- a/builtin/providers/aws/data_source_aws_iam_account_alias.go +++ /dev/null @@ -1,48 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsIamAccountAlias() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsIamAccountAliasRead, - - Schema: map[string]*schema.Schema{ - "account_alias": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - 
log.Printf("[DEBUG] Reading IAM Account Aliases.") - d.SetId(time.Now().UTC().String()) - - req := &iam.ListAccountAliasesInput{} - resp, err := conn.ListAccountAliases(req) - if err != nil { - return err - } - - // 'AccountAliases': [] if there is no alias. - if resp == nil || len(resp.AccountAliases) == 0 { - return fmt.Errorf("no IAM account alias found") - } - - alias := aws.StringValue(resp.AccountAliases[0]) - log.Printf("[DEBUG] Setting AWS IAM Account Alias to %s.", alias) - d.Set("account_alias", alias) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document.go b/builtin/providers/aws/data_source_aws_iam_policy_document.go deleted file mode 100644 index 2366ae4bc..000000000 --- a/builtin/providers/aws/data_source_aws_iam_policy_document.go +++ /dev/null @@ -1,232 +0,0 @@ -package aws - -import ( - "fmt" - - "encoding/json" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "strconv" -) - -var dataSourceAwsIamPolicyDocumentVarReplacer = strings.NewReplacer("&{", "${") - -func dataSourceAwsIamPolicyDocument() *schema.Resource { - setOfString := &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - } - - return &schema.Resource{ - Read: dataSourceAwsIamPolicyDocumentRead, - - Schema: map[string]*schema.Schema{ - "policy_id": { - Type: schema.TypeString, - Optional: true, - }, - "statement": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sid": { - Type: schema.TypeString, - Optional: true, - }, - "effect": { - Type: schema.TypeString, - Optional: true, - Default: "Allow", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - switch v.(string) { - case "Allow", "Deny": - return - default: - es = append(es, fmt.Errorf("%q must be either \"Allow\" or \"Deny\"", k)) - return - } - }, - }, - "actions": setOfString, - 
"not_actions": setOfString, - "resources": setOfString, - "not_resources": setOfString, - "principals": dataSourceAwsIamPolicyPrincipalSchema(), - "not_principals": dataSourceAwsIamPolicyPrincipalSchema(), - "condition": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "test": { - Type: schema.TypeString, - Required: true, - }, - "variable": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}) error { - doc := &IAMPolicyDoc{ - Version: "2012-10-17", - } - - if policyId, hasPolicyId := d.GetOk("policy_id"); hasPolicyId { - doc.Id = policyId.(string) - } - - var cfgStmts = d.Get("statement").([]interface{}) - stmts := make([]*IAMPolicyStatement, len(cfgStmts)) - doc.Statements = stmts - for i, stmtI := range cfgStmts { - cfgStmt := stmtI.(map[string]interface{}) - stmt := &IAMPolicyStatement{ - Effect: cfgStmt["effect"].(string), - } - - if sid, ok := cfgStmt["sid"]; ok { - stmt.Sid = sid.(string) - } - - if actions := cfgStmt["actions"].(*schema.Set).List(); len(actions) > 0 { - stmt.Actions = iamPolicyDecodeConfigStringList(actions) - } - if actions := cfgStmt["not_actions"].(*schema.Set).List(); len(actions) > 0 { - stmt.NotActions = iamPolicyDecodeConfigStringList(actions) - } - - if resources := cfgStmt["resources"].(*schema.Set).List(); len(resources) > 0 { - stmt.Resources = dataSourceAwsIamPolicyDocumentReplaceVarsInList( - iamPolicyDecodeConfigStringList(resources), - ) - } - if resources := cfgStmt["not_resources"].(*schema.Set).List(); len(resources) > 0 { - stmt.NotResources = dataSourceAwsIamPolicyDocumentReplaceVarsInList( - iamPolicyDecodeConfigStringList(resources), - ) - } - - if principals 
:= cfgStmt["principals"].(*schema.Set).List(); len(principals) > 0 { - stmt.Principals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals) - } - - if principals := cfgStmt["not_principals"].(*schema.Set).List(); len(principals) > 0 { - stmt.NotPrincipals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals) - } - - if conditions := cfgStmt["condition"].(*schema.Set).List(); len(conditions) > 0 { - stmt.Conditions = dataSourceAwsIamPolicyDocumentMakeConditions(conditions) - } - - stmts[i] = stmt - } - - jsonDoc, err := json.MarshalIndent(doc, "", " ") - if err != nil { - // should never happen if the above code is correct - return err - } - jsonString := string(jsonDoc) - - d.Set("json", jsonString) - d.SetId(strconv.Itoa(hashcode.String(jsonString))) - - return nil -} - -func dataSourceAwsIamPolicyDocumentReplaceVarsInList(in interface{}) interface{} { - switch v := in.(type) { - case string: - return dataSourceAwsIamPolicyDocumentVarReplacer.Replace(v) - case []string: - out := make([]string, len(v)) - for i, item := range v { - out[i] = dataSourceAwsIamPolicyDocumentVarReplacer.Replace(item) - } - return out - default: - panic("dataSourceAwsIamPolicyDocumentReplaceVarsInList: input not string nor []string") - } -} - -func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}) IAMPolicyStatementConditionSet { - out := make([]IAMPolicyStatementCondition, len(in)) - for i, itemI := range in { - item := itemI.(map[string]interface{}) - out[i] = IAMPolicyStatementCondition{ - Test: item["test"].(string), - Variable: item["variable"].(string), - Values: dataSourceAwsIamPolicyDocumentReplaceVarsInList( - iamPolicyDecodeConfigStringList( - item["values"].(*schema.Set).List(), - ), - ), - } - } - return IAMPolicyStatementConditionSet(out) -} - -func dataSourceAwsIamPolicyDocumentMakePrincipals(in []interface{}) IAMPolicyStatementPrincipalSet { - out := make([]IAMPolicyStatementPrincipal, len(in)) - for i, itemI := range in { - item := 
itemI.(map[string]interface{}) - out[i] = IAMPolicyStatementPrincipal{ - Type: item["type"].(string), - Identifiers: dataSourceAwsIamPolicyDocumentReplaceVarsInList( - iamPolicyDecodeConfigStringList( - item["identifiers"].(*schema.Set).List(), - ), - ), - } - } - return IAMPolicyStatementPrincipalSet(out) -} - -func dataSourceAwsIamPolicyPrincipalSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "identifiers": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - } -} diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go b/builtin/providers/aws/data_source_aws_iam_policy_document_test.go deleted file mode 100644 index a720d181a..000000000 --- a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package aws - -import ( - "testing" - - "fmt" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIAMPolicyDocument(t *testing.T) { - // This really ought to be able to be a unit test rather than an - // acceptance test, but just instantiating the AWS provider requires - // some AWS API calls, and so this needs valid AWS credentials to work. 
- resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSIAMPolicyDocumentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue( - "data.aws_iam_policy_document.test", - "json", - testAccAWSIAMPolicyDocumentExpectedJSON, - ), - ), - }, - }, - }) -} - -func testAccCheckStateValue(id, name, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not found: %s", id) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - v := rs.Primary.Attributes[name] - if v != value { - return fmt.Errorf( - "Value for %s is %s, not %s", name, v, value) - } - - return nil - } -} - -var testAccAWSIAMPolicyDocumentConfig = ` -data "aws_iam_policy_document" "test" { - policy_id = "policy_id" - statement { - sid = "1" - actions = [ - "s3:ListAllMyBuckets", - "s3:GetBucketLocation", - ] - resources = [ - "arn:aws:s3:::*", - ] - } - - statement { - actions = [ - "s3:ListBucket", - ] - resources = [ - "arn:aws:s3:::foo", - ] - condition { - test = "StringLike" - variable = "s3:prefix" - values = [ - "home/", - "home/&{aws:username}/", - ] - } - - not_principals { - type = "AWS" - identifiers = ["arn:blahblah:example"] - } - } - - statement { - actions = [ - "s3:*", - ] - resources = [ - "arn:aws:s3:::foo/home/&{aws:username}", - "arn:aws:s3:::foo/home/&{aws:username}/*", - ] - principals { - type = "AWS" - identifiers = ["arn:blahblah:example"] - } - } - - statement { - effect = "Deny" - not_actions = ["s3:*"] - not_resources = ["arn:aws:s3:::*"] - } - - # Normalization of wildcard principals - statement { - effect = "Allow" - actions = ["kinesis:*"] - principals { - type = "AWS" - identifiers = ["*"] - } - } - statement { - effect = "Allow" - actions = ["firehose:*"] - principals { - type = "*" - identifiers = ["*"] - } - } - -} -` - -var 
testAccAWSIAMPolicyDocumentExpectedJSON = `{ - "Version": "2012-10-17", - "Id": "policy_id", - "Statement": [ - { - "Sid": "1", - "Effect": "Allow", - "Action": [ - "s3:ListAllMyBuckets", - "s3:GetBucketLocation" - ], - "Resource": "arn:aws:s3:::*" - }, - { - "Sid": "", - "Effect": "Allow", - "Action": "s3:ListBucket", - "Resource": "arn:aws:s3:::foo", - "NotPrincipal": { - "AWS": "arn:blahblah:example" - }, - "Condition": { - "StringLike": { - "s3:prefix": [ - "home/${aws:username}/", - "home/" - ] - } - } - }, - { - "Sid": "", - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::foo/home/${aws:username}/*", - "arn:aws:s3:::foo/home/${aws:username}" - ], - "Principal": { - "AWS": "arn:blahblah:example" - } - }, - { - "Sid": "", - "Effect": "Deny", - "NotAction": "s3:*", - "NotResource": "arn:aws:s3:::*" - }, - { - "Sid": "", - "Effect": "Allow", - "Action": "kinesis:*", - "Principal": "*" - }, - { - "Sid": "", - "Effect": "Allow", - "Action": "firehose:*", - "Principal": "*" - } - ] -}` diff --git a/builtin/providers/aws/data_source_aws_iam_role.go b/builtin/providers/aws/data_source_aws_iam_role.go deleted file mode 100644 index f681268b9..000000000 --- a/builtin/providers/aws/data_source_aws_iam_role.go +++ /dev/null @@ -1,67 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsIAMRole() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsIAMRoleRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "assume_role_policy_document": { - Type: schema.TypeString, - Computed: true, - }, - "path": { - Type: schema.TypeString, - Computed: true, - }, - "role_id": { - Type: schema.TypeString, - Computed: true, - }, - "role_name": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func 
dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - roleName := d.Get("role_name").(string) - - req := &iam.GetRoleInput{ - RoleName: aws.String(roleName), - } - - resp, err := iamconn.GetRole(req) - if err != nil { - return errwrap.Wrapf("Error getting roles: {{err}}", err) - } - if resp == nil { - return fmt.Errorf("no IAM role found") - } - - role := resp.Role - - d.SetId(*role.RoleId) - d.Set("arn", role.Arn) - d.Set("assume_role_policy_document", role.AssumeRolePolicyDocument) - d.Set("path", role.Path) - d.Set("role_id", role.RoleId) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_iam_role_test.go b/builtin/providers/aws/data_source_aws_iam_role_test.go deleted file mode 100644 index 160e5d49b..000000000 --- a/builtin/providers/aws/data_source_aws_iam_role_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package aws - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDataSourceIAMRole_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsIAMRoleConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.aws_iam_role.test", "role_id"), - resource.TestCheckResourceAttr("data.aws_iam_role.test", "assume_role_policy_document", "%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Sid%22%3A%22%22%2C%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D"), - resource.TestCheckResourceAttr("data.aws_iam_role.test", "path", "/testpath/"), - resource.TestCheckResourceAttr("data.aws_iam_role.test", "role_name", "TestRole"), - resource.TestMatchResourceAttr("data.aws_iam_role.test", "arn", regexp.MustCompile("^arn:aws:iam::[0-9]{12}:role/testpath/TestRole$")), - ), - }, - }, - }) 
-} - -const testAccAwsIAMRoleConfig = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_iam_role" "test_role" { - name = "TestRole" - - assume_role_policy = < 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 30 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 30 characters, name is limited to 128", k)) - } - return - }, - }, - - "latest": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "id": { - Type: schema.TypeString, - Computed: true, - }, - - "path": { - Type: schema.TypeString, - Computed: true, - }, - - "expiration_date": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -type certificateByExpiration []*iam.ServerCertificateMetadata - -func (m certificateByExpiration) Len() int { - return len(m) -} - -func (m certificateByExpiration) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -func (m certificateByExpiration) Less(i, j int) bool { - return m[i].Expiration.After(*m[j].Expiration) -} - -func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - var matcher = func(cert *iam.ServerCertificateMetadata) bool { - return strings.HasPrefix(aws.StringValue(cert.ServerCertificateName), d.Get("name_prefix").(string)) - } - if v, ok := d.GetOk("name"); ok { - matcher = func(cert *iam.ServerCertificateMetadata) bool { - return aws.StringValue(cert.ServerCertificateName) == v.(string) - } - } - - var metadatas = []*iam.ServerCertificateMetadata{} - err := iamconn.ListServerCertificatesPages(&iam.ListServerCertificatesInput{}, func(p *iam.ListServerCertificatesOutput, lastPage 
bool) bool { - for _, cert := range p.ServerCertificateMetadataList { - if matcher(cert) { - metadatas = append(metadatas, cert) - } - } - return true - }) - if err != nil { - return errwrap.Wrapf("Error describing certificates: {{err}}", err) - } - - if len(metadatas) == 0 { - return fmt.Errorf("Search for AWS IAM server certificate returned no results") - } - if len(metadatas) > 1 { - if !d.Get("latest").(bool) { - return fmt.Errorf("Search for AWS IAM server certificate returned too many results") - } - - sort.Sort(certificateByExpiration(metadatas)) - } - - metadata := metadatas[0] - d.SetId(*metadata.ServerCertificateId) - d.Set("arn", *metadata.Arn) - d.Set("path", *metadata.Path) - d.Set("id", *metadata.ServerCertificateId) - d.Set("name", *metadata.ServerCertificateName) - if metadata.Expiration != nil { - d.Set("expiration_date", metadata.Expiration.Format("2006-01-02T15:04:05")) - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_iam_server_certificate_test.go b/builtin/providers/aws/data_source_aws_iam_server_certificate_test.go deleted file mode 100644 index b840ac115..000000000 --- a/builtin/providers/aws/data_source_aws_iam_server_certificate_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "sort" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func timePtr(t time.Time) *time.Time { - return &t -} - -func TestResourceSortByExpirationDate(t *testing.T) { - certs := []*iam.ServerCertificateMetadata{ - { - ServerCertificateName: aws.String("oldest"), - Expiration: timePtr(time.Now()), - }, - { - ServerCertificateName: aws.String("latest"), - Expiration: timePtr(time.Now().Add(3 * time.Hour)), - }, - { - ServerCertificateName: aws.String("in between"), - Expiration: timePtr(time.Now().Add(2 * time.Hour)), - }, - } - 
sort.Sort(certificateByExpiration(certs)) - if *certs[0].ServerCertificateName != "latest" { - t.Fatalf("Expected first item to be %q, but was %q", "latest", *certs[0].ServerCertificateName) - } -} - -func TestAccAWSDataSourceIAMServerCertificate_basic(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccIAMServerCertConfig(rInt), - }, - { - Config: testAccAwsDataIAMServerCertConfig(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("aws_iam_server_certificate.test_cert", "arn"), - resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "arn"), - resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "id"), - resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "name"), - resource.TestCheckResourceAttrSet("data.aws_iam_server_certificate.test", "path"), - ), - }, - }, - }) -} - -func TestAccAWSDataSourceIAMServerCertificate_matchNamePrefix(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsDataIAMServerCertConfigMatchNamePrefix, - ExpectError: regexp.MustCompile(`Search for AWS IAM server certificate returned no results`), - }, - }, - }) -} - -func testAccAwsDataIAMServerCertConfig(rInt int) string { - return fmt.Sprintf(` -%s - -data "aws_iam_server_certificate" "test" { - name = "${aws_iam_server_certificate.test_cert.name}" - latest = true -} -`, testAccIAMServerCertConfig(rInt)) -} - -var testAccAwsDataIAMServerCertConfigMatchNamePrefix = ` -data "aws_iam_server_certificate" "test" { - name_prefix = "MyCert" - latest = true -} -` diff --git 
a/builtin/providers/aws/data_source_aws_instance.go b/builtin/providers/aws/data_source_aws_instance.go deleted file mode 100644 index 617a5c257..000000000 --- a/builtin/providers/aws/data_source_aws_instance.go +++ /dev/null @@ -1,356 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsInstance() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsInstanceRead, - - Schema: map[string]*schema.Schema{ - "filter": dataSourceFiltersSchema(), - "tags": dataSourceTagsSchema(), - "instance_tags": tagsSchemaComputed(), - "instance_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "ami": { - Type: schema.TypeString, - Computed: true, - }, - "instance_type": { - Type: schema.TypeString, - Computed: true, - }, - "instance_state": { - Type: schema.TypeString, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - "tenancy": { - Type: schema.TypeString, - Computed: true, - }, - "key_name": { - Type: schema.TypeString, - Computed: true, - }, - "public_dns": { - Type: schema.TypeString, - Computed: true, - }, - "public_ip": { - Type: schema.TypeString, - Computed: true, - }, - "private_dns": { - Type: schema.TypeString, - Computed: true, - }, - "private_ip": { - Type: schema.TypeString, - Computed: true, - }, - "iam_instance_profile": { - Type: schema.TypeString, - Computed: true, - }, - "subnet_id": { - Type: schema.TypeString, - Computed: true, - }, - "network_interface_id": { - Type: schema.TypeString, - Computed: true, - }, - "associate_public_ip_address": { - Type: schema.TypeBool, - Computed: true, - }, - "ebs_optimized": { - Type: schema.TypeBool, - Computed: true, - }, - "source_dest_check": { - Type: schema.TypeBool, - Computed: true, - }, - "monitoring": { - Type: schema.TypeBool, - Computed: true, - }, - "user_data": { - Type: 
schema.TypeString, - Computed: true, - }, - "security_groups": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "vpc_security_group_ids": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "ephemeral_block_device": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - }, - - "virtual_name": { - Type: schema.TypeString, - Optional: true, - }, - - "no_device": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "ebs_block_device": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Computed: true, - }, - - "device_name": { - Type: schema.TypeString, - Computed: true, - }, - - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - - "iops": { - Type: schema.TypeInt, - Computed: true, - }, - - "snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Computed: true, - }, - - "volume_type": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "root_block_device": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Computed: true, - }, - - "iops": { - Type: schema.TypeInt, - Computed: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Computed: true, - }, - - "volume_type": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -// dataSourceAwsInstanceRead performs the instanceID lookup -func dataSourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - filters, filtersOk := d.GetOk("filter") - instanceID, instanceIDOk := 
d.GetOk("instance_id") - tags, tagsOk := d.GetOk("instance_tags") - - if filtersOk == false && instanceIDOk == false && tagsOk == false { - return fmt.Errorf("One of filters, instance_tags, or instance_id must be assigned") - } - - // Build up search parameters - params := &ec2.DescribeInstancesInput{} - if filtersOk { - params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) - } - if instanceIDOk { - params.InstanceIds = []*string{aws.String(instanceID.(string))} - } - if tagsOk { - params.Filters = append(params.Filters, buildEC2TagFilterList( - tagsFromMap(tags.(map[string]interface{})), - )...) - } - - // Perform the lookup - resp, err := conn.DescribeInstances(params) - if err != nil { - return err - } - - // If no instances were returned, return - if len(resp.Reservations) == 0 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - var filteredInstances []*ec2.Instance - - // loop through reservations, and remove terminated instances, populate instance slice - for _, res := range resp.Reservations { - for _, instance := range res.Instances { - if instance.State != nil && *instance.State.Name != "terminated" { - filteredInstances = append(filteredInstances, instance) - } - } - } - - var instance *ec2.Instance - if len(filteredInstances) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - // (TODO: Support a list of instances to be returned) - // Possibly with a different data source that returns a list of individual instance data sources - if len(filteredInstances) > 1 { - return fmt.Errorf("Your query returned more than one result. 
Please try a more " + - "specific search criteria.") - } else { - instance = filteredInstances[0] - } - - log.Printf("[DEBUG] aws_instance - Single Instance ID found: %s", *instance.InstanceId) - return instanceDescriptionAttributes(d, instance, conn) -} - -// Populate instance attribute fields with the returned instance -func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error { - d.SetId(*instance.InstanceId) - // Set the easy attributes - d.Set("instance_state", instance.State.Name) - if instance.Placement != nil { - d.Set("availability_zone", instance.Placement.AvailabilityZone) - } - if instance.Placement.Tenancy != nil { - d.Set("tenancy", instance.Placement.Tenancy) - } - d.Set("ami", instance.ImageId) - d.Set("instance_type", instance.InstanceType) - d.Set("key_name", instance.KeyName) - d.Set("public_dns", instance.PublicDnsName) - d.Set("public_ip", instance.PublicIpAddress) - d.Set("private_dns", instance.PrivateDnsName) - d.Set("private_ip", instance.PrivateIpAddress) - d.Set("iam_instance_profile", iamInstanceProfileArnToName(instance.IamInstanceProfile)) - - // iterate through network interfaces, and set subnet, network_interface, public_addr - if len(instance.NetworkInterfaces) > 0 { - for _, ni := range instance.NetworkInterfaces { - if *ni.Attachment.DeviceIndex == 0 { - d.Set("subnet_id", ni.SubnetId) - d.Set("network_interface_id", ni.NetworkInterfaceId) - d.Set("associate_public_ip_address", ni.Association != nil) - } - } - } else { - d.Set("subnet_id", instance.SubnetId) - d.Set("network_interface_id", "") - } - - d.Set("ebs_optimized", instance.EbsOptimized) - if instance.SubnetId != nil && *instance.SubnetId != "" { - d.Set("source_dest_check", instance.SourceDestCheck) - } - - if instance.Monitoring != nil && instance.Monitoring.State != nil { - monitoringState := *instance.Monitoring.State - d.Set("monitoring", monitoringState == "enabled" || monitoringState == "pending") - } - - d.Set("tags", 
dataSourceTags(instance.Tags)) - - // Security Groups - if err := readSecurityGroups(d, instance); err != nil { - return err - } - - // Block devices - if err := readBlockDevices(d, instance, conn); err != nil { - return err - } - if _, ok := d.GetOk("ephemeral_block_device"); !ok { - d.Set("ephemeral_block_device", []interface{}{}) - } - - // Lookup and Set Instance Attributes - { - attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ - Attribute: aws.String("disableApiTermination"), - InstanceId: aws.String(d.Id()), - }) - if err != nil { - return err - } - d.Set("disable_api_termination", attr.DisableApiTermination.Value) - } - { - attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ - Attribute: aws.String(ec2.InstanceAttributeNameUserData), - InstanceId: aws.String(d.Id()), - }) - if err != nil { - return err - } - if attr.UserData.Value != nil { - d.Set("user_data", userDataHashSum(*attr.UserData.Value)) - } - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_instance_test.go b/builtin/providers/aws/data_source_aws_instance_test.go deleted file mode 100644 index 04eee4bed..000000000 --- a/builtin/providers/aws/data_source_aws_instance_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package aws - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSInstanceDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "ami", "ami-4fccb37f"), - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "tags.#", "1"), - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "instance_type", "m1.small"), - 
), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_tags(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_Tags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "ami", "ami-4fccb37f"), - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "tags.#", "2"), - resource.TestCheckResourceAttr( - "data.aws_instance.web-instance", "instance_type", "m1.small"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_AzUserData(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_AzUserData, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-4fccb37f"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "tags.#", "1"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m1.small"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "availability_zone", "us-west-2a"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_gp2IopsDevice(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_gp2IopsDevice, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-55a7ea65"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m3.medium"), - resource.TestCheckResourceAttr( - "aws_instance.foo", 
"root_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.iops", "100"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_blockDevices(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_blockDevices, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-55a7ea65"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m3.medium"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "3"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_rootInstanceStore(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_rootInstanceStore, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-44c36524"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m3.medium"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_optimized", "false"), - 
resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_privateIP(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_privateIP, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-c5eabbf5"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "t2.micro"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "private_ip", "10.1.1.42"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_keyPair(t *testing.T) { - rName := fmt.Sprintf("tf-test-key-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_keyPair(rName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-408c7f28"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "t1.micro"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "tags.#", "1"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "key_name", rName), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_VPC(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_VPC, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-4fccb37f"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m1.small"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "user_data", 
"562a3e32810edf6ff09994f050f12e799452379d"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "associate_public_ip_address", "true"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "tenancy", "dedicated"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_SecurityGroups(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_SecurityGroups(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-408c7f28"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "m1.small"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "vpc_security_group_ids.#", "0"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "security_groups.#", "1"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), - ), - }, - }, - }) -} - -func TestAccAWSInstanceDataSource_VPCSecurityGroups(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccInstanceDataSourceConfig_VPCSecurityGroups, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "ami", "ami-21f78e11"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "instance_type", "t1.micro"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "security_groups.#", "0"), - resource.TestCheckResourceAttr( - "data.aws_instance.foo", "vpc_security_group_ids.#", "1"), - ), - }, - }, - }) -} - -// Lookup based on InstanceID -const testAccInstanceDataSourceConfig = ` -resource "aws_instance" "web" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - tags { - Name = 
"HelloWorld" - } -} - -data "aws_instance" "web-instance" { - filter { - name = "instance-id" - values = ["${aws_instance.web.id}"] - } -} -` - -// Use the tags attribute to filter -func testAccInstanceDataSourceConfig_Tags(rInt int) string { - return fmt.Sprintf(` -resource "aws_instance" "web" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - tags { - Name = "HelloWorld" - TestSeed = "%d" - } -} - -data "aws_instance" "web-instance" { - instance_tags { - Name = "${aws_instance.web.tags["Name"]}" - TestSeed = "%d" - } -} -`, rInt, rInt) -} - -// filter on tag, populate more attributes -const testAccInstanceDataSourceConfig_AzUserData = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - availability_zone = "us-west-2a" - - instance_type = "m1.small" - user_data = "foo:-with-character's" - tags { - TFAccTest = "YesThisIsATest" - } -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -` - -// GP2IopsDevice -const testAccInstanceDataSourceConfig_gp2IopsDevice = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - instance_type = "m3.medium" - root_block_device { - volume_type = "gp2" - volume_size = 11 - } -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -` - -// Block Device -const testAccInstanceDataSourceConfig_blockDevices = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - - # Encrypted ebs block device - ebs_block_device { - device_name = "/dev/sdd" - volume_size = 12 - encrypted = true - } - - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" 
-} -` - -const testAccInstanceDataSourceConfig_rootInstanceStore = ` -resource "aws_instance" "foo" { - ami = "ami-44c36524" - instance_type = "m3.medium" -} -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -` - -const testAccInstanceDataSourceConfig_privateIP = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceDataSourceConfig_privateIP" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - private_ip = "10.1.1.42" -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -` - -func testAccInstanceDataSourceConfig_keyPair(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_key_pair" "debugging" { - key_name = "%s" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} - -resource "aws_instance" "foo" { - ami = "ami-408c7f28" - instance_type = "t1.micro" - key_name = "${aws_key_pair.debugging.key_name}" - tags { - Name = "testAccInstanceDataSourceConfigKeyPair_TestAMI" - } -} - -data "aws_instance" "foo" { - filter { - name = "tag:Name" - values = ["testAccInstanceDataSourceConfigKeyPair_TestAMI"] - } - filter { - name = "key-name" - values = ["${aws_instance.foo.key_name}"] - } -}`, rName) -} - -const testAccInstanceDataSourceConfig_VPC = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceDataSourceConfig_VPC" - } -} - -resource "aws_subnet" "foo" 
{ - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" - associate_public_ip_address = true - tenancy = "dedicated" - # pre-encoded base64 data - user_data = "3dc39dda39be1205215e776bad998da361a5955d" -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -` - -func testAccInstanceDataSourceConfig_SecurityGroups(rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo-%d" - description = "foo" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_instance" "foo" { - ami = "ami-408c7f28" - instance_type = "m1.small" - security_groups = ["${aws_security_group.tf_test_foo.name}"] - user_data = "foo:-with-character's" -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo.id}" -} -`, rInt) -} - -const testAccInstanceDataSourceConfig_VPCSecurityGroups = ` -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-network-test" - } -} - -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id="${aws_vpc.foo.id}" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo_instance" { - ami = "ami-21f78e11" - instance_type = "t1.micro" - vpc_security_group_ids = ["${aws_security_group.tf_test_foo.id}"] - subnet_id = "${aws_subnet.foo.id}" - depends_on = ["aws_internet_gateway.gw"] -} - -data "aws_instance" "foo" { - instance_id = "${aws_instance.foo_instance.id}" -} -` diff --git 
a/builtin/providers/aws/data_source_aws_ip_ranges.go b/builtin/providers/aws/data_source_aws_ip_ranges.go deleted file mode 100644 index 32e9d8988..000000000 --- a/builtin/providers/aws/data_source_aws_ip_ranges.go +++ /dev/null @@ -1,151 +0,0 @@ -package aws - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/schema" -) - -type dataSourceAwsIPRangesResult struct { - CreateDate string - Prefixes []dataSourceAwsIPRangesPrefix - SyncToken string -} - -type dataSourceAwsIPRangesPrefix struct { - IpPrefix string `json:"ip_prefix"` - Region string - Service string -} - -func dataSourceAwsIPRanges() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsIPRangesRead, - - Schema: map[string]*schema.Schema{ - "cidr_blocks": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "create_date": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "regions": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "services": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "sync_token": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - }, - } -} - -func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { - - conn := cleanhttp.DefaultClient() - - log.Printf("[DEBUG] Reading IP ranges") - - res, err := conn.Get("https://ip-ranges.amazonaws.com/ip-ranges.json") - - if err != nil { - return fmt.Errorf("Error listing IP ranges: %s", err) - } - - defer res.Body.Close() - - data, err := ioutil.ReadAll(res.Body) - - if err != nil { - return fmt.Errorf("Error reading response body: %s", err) - } - - result := new(dataSourceAwsIPRangesResult) - - if err := json.Unmarshal(data, result); err != nil { - return 
fmt.Errorf("Error parsing result: %s", err) - } - - if err := d.Set("create_date", result.CreateDate); err != nil { - return fmt.Errorf("Error setting create date: %s", err) - } - - syncToken, err := strconv.Atoi(result.SyncToken) - - if err != nil { - return fmt.Errorf("Error while converting sync token: %s", err) - } - - d.SetId(result.SyncToken) - - if err := d.Set("sync_token", syncToken); err != nil { - return fmt.Errorf("Error setting sync token: %s", err) - } - - get := func(key string) *schema.Set { - - set := d.Get(key).(*schema.Set) - - for _, e := range set.List() { - - s := e.(string) - - set.Remove(s) - set.Add(strings.ToLower(s)) - - } - - return set - - } - - var ( - regions = get("regions") - services = get("services") - noRegionFilter = regions.Len() == 0 - prefixes []string - ) - - for _, e := range result.Prefixes { - - var ( - matchRegion = noRegionFilter || regions.Contains(strings.ToLower(e.Region)) - matchService = services.Contains(strings.ToLower(e.Service)) - ) - - if matchRegion && matchService { - prefixes = append(prefixes, e.IpPrefix) - } - - } - - if len(prefixes) == 0 { - return fmt.Errorf(" No IP ranges result from filters") - } - - sort.Strings(prefixes) - - if err := d.Set("cidr_blocks", prefixes); err != nil { - return fmt.Errorf("Error setting ip ranges: %s", err) - } - - return nil - -} diff --git a/builtin/providers/aws/data_source_aws_ip_ranges_test.go b/builtin/providers/aws/data_source_aws_ip_ranges_test.go deleted file mode 100644 index 5e8f4b13d..000000000 --- a/builtin/providers/aws/data_source_aws_ip_ranges_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package aws - -import ( - "fmt" - "net" - "regexp" - "sort" - "strconv" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIPRanges(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ 
- resource.TestStep{ - Config: testAccAWSIPRangesConfig, - Check: resource.ComposeTestCheckFunc( - testAccAWSIPRanges("data.aws_ip_ranges.some"), - ), - }, - }, - }) -} - -func testAccAWSIPRanges(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - var ( - cidrBlockSize int - createDate time.Time - err error - syncToken int - ) - - if cidrBlockSize, err = strconv.Atoi(a["cidr_blocks.#"]); err != nil { - return err - } - - if cidrBlockSize < 10 { - return fmt.Errorf("cidr_blocks for eu-west-1 seem suspiciously low: %d", cidrBlockSize) - } - - if createDate, err = time.Parse("2006-01-02-15-04-05", a["create_date"]); err != nil { - return err - } - - if syncToken, err = strconv.Atoi(a["sync_token"]); err != nil { - return err - } - - if syncToken != int(createDate.Unix()) { - return fmt.Errorf("sync_token %d does not match create_date %s", syncToken, createDate) - } - - var cidrBlocks sort.StringSlice = make([]string, cidrBlockSize) - - for i := range make([]string, cidrBlockSize) { - - block := a[fmt.Sprintf("cidr_blocks.%d", i)] - - if _, _, err := net.ParseCIDR(block); err != nil { - return fmt.Errorf("malformed CIDR block %s: %s", block, err) - } - - cidrBlocks[i] = block - - } - - if !sort.IsSorted(cidrBlocks) { - return fmt.Errorf("unexpected order of cidr_blocks: %s", cidrBlocks) - } - - var ( - regionMember = regexp.MustCompile(`regions\.\d+`) - regions, services int - serviceMember = regexp.MustCompile(`services\.\d+`) - ) - - for k, v := range a { - - if regionMember.MatchString(k) { - - if !(v == "eu-west-1" || v == "EU-central-1") { - return fmt.Errorf("unexpected region %s", v) - } - - regions = regions + 1 - - } - - if serviceMember.MatchString(k) { - - if v != "EC2" { - return fmt.Errorf("unexpected service %s", v) - } - - services = services + 1 - } - - } - - if regions != 2 { - return fmt.Errorf("unexpected number of regions: %d", regions) - } - - if services 
!= 1 { - return fmt.Errorf("unexpected number of services: %d", services) - } - - return nil - } -} - -const testAccAWSIPRangesConfig = ` -data "aws_ip_ranges" "some" { - regions = [ "eu-west-1", "EU-central-1" ] - services = [ "EC2" ] -} -` diff --git a/builtin/providers/aws/data_source_aws_kinesis_stream.go b/builtin/providers/aws/data_source_aws_kinesis_stream.go deleted file mode 100644 index ebc843d11..000000000 --- a/builtin/providers/aws/data_source_aws_kinesis_stream.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsKinesisStream() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsKinesisStreamRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "creation_timestamp": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "retention_period": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "open_shards": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "closed_shards": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "shard_level_metrics": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func dataSourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kinesisconn - sn := d.Get("name").(string) - - state, err := 
readKinesisStreamState(conn, sn) - if err != nil { - return err - } - d.SetId(state.arn) - d.Set("arn", state.arn) - d.Set("name", sn) - d.Set("open_shards", state.openShards) - d.Set("closed_shards", state.closedShards) - d.Set("status", state.status) - d.Set("creation_timestamp", state.creationTimestamp) - d.Set("retention_period", state.retentionPeriod) - d.Set("shard_level_metrics", state.shardLevelMetrics) - - tags, err := conn.ListTagsForStream(&kinesis.ListTagsForStreamInput{ - StreamName: aws.String(sn), - }) - if err != nil { - return err - } - d.Set("tags", tagsToMapKinesis(tags.Tags)) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_kinesis_stream_test.go b/builtin/providers/aws/data_source_aws_kinesis_stream_test.go deleted file mode 100644 index 815724ae8..000000000 --- a/builtin/providers/aws/data_source_aws_kinesis_stream_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSKinesisStreamDataSource(t *testing.T) { - var stream kinesis.StreamDescription - - sn := fmt.Sprintf("terraform-kinesis-test-%d", acctest.RandInt()) - config := fmt.Sprintf(testAccCheckAwsKinesisStreamDataSourceConfig, sn) - - updateShardCount := func() { - conn := testAccProvider.Meta().(*AWSClient).kinesisconn - _, err := conn.UpdateShardCount(&kinesis.UpdateShardCountInput{ - ScalingType: aws.String(kinesis.ScalingTypeUniformScaling), - StreamName: aws.String(sn), - TargetShardCount: aws.Int64(3), - }) - if err != nil { - t.Fatalf("Error calling UpdateShardCount: %s", err) - } - if err := waitForKinesisToBeActive(conn, sn); err != nil { - t.Fatal(err) - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - 
Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - resource.TestCheckResourceAttrSet("data.aws_kinesis_stream.test_stream", "arn"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "name", sn), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "status", "ACTIVE"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "open_shards.#", "2"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "closed_shards.#", "0"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "shard_level_metrics.#", "2"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "retention_period", "72"), - resource.TestCheckResourceAttrSet("data.aws_kinesis_stream.test_stream", "creation_timestamp"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "tags.Name", "tf-test"), - ), - }, - { - Config: config, - PreConfig: updateShardCount, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - resource.TestCheckResourceAttrSet("data.aws_kinesis_stream.test_stream", "arn"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "name", sn), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "status", "ACTIVE"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "open_shards.#", "3"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "closed_shards.#", "4"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "shard_level_metrics.#", "2"), - resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "retention_period", "72"), - resource.TestCheckResourceAttrSet("data.aws_kinesis_stream.test_stream", "creation_timestamp"), - 
resource.TestCheckResourceAttr("data.aws_kinesis_stream.test_stream", "tags.Name", "tf-test"), - ), - }, - }, - }) -} - -var testAccCheckAwsKinesisStreamDataSourceConfig = ` -resource "aws_kinesis_stream" "test_stream" { - name = "%s" - shard_count = 2 - retention_period = 72 - tags { - Name = "tf-test" - } - shard_level_metrics = [ - "IncomingBytes", - "OutgoingBytes" - ] - lifecycle { - ignore_changes = ["shard_count"] - } -} - -data "aws_kinesis_stream" "test_stream" { - name = "${aws_kinesis_stream.test_stream.name}" -} -` diff --git a/builtin/providers/aws/data_source_aws_kms_alias.go b/builtin/providers/aws/data_source_aws_kms_alias.go deleted file mode 100644 index 41c33b680..000000000 --- a/builtin/providers/aws/data_source_aws_kms_alias.go +++ /dev/null @@ -1,62 +0,0 @@ -package aws - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsKmsAlias() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsKmsAliasRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsKmsName, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "target_key_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - params := &kms.ListAliasesInput{} - - target := d.Get("name") - var alias *kms.AliasListEntry - err := conn.ListAliasesPages(params, func(page *kms.ListAliasesOutput, lastPage bool) bool { - for _, entity := range page.Aliases { - if *entity.AliasName == target { - alias = entity - return false - } - } - - return true - }) - if err != nil { - return errwrap.Wrapf("Error fetch KMS alias list: {{err}}", err) - } - - if alias == nil { - return fmt.Errorf("No alias with name %q found in this region.", target) - } - - 
d.SetId(time.Now().UTC().String()) - d.Set("arn", alias.AliasArn) - d.Set("target_key_id", alias.TargetKeyId) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_kms_alias_test.go b/builtin/providers/aws/data_source_aws_kms_alias_test.go deleted file mode 100644 index c498d5168..000000000 --- a/builtin/providers/aws/data_source_aws_kms_alias_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsKmsAlias(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsKmsAlias(rInt), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsKmsAliasCheck("data.aws_kms_alias.by_name"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsKmsAliasCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - kmsKeyRs, ok := s.RootModule().Resources["aws_kms_alias.single"] - if !ok { - return fmt.Errorf("can't find aws_kms_alias.single in state") - } - - attr := rs.Primary.Attributes - - if attr["arn"] != kmsKeyRs.Primary.Attributes["arn"] { - return fmt.Errorf( - "arn is %s; want %s", - attr["arn"], - kmsKeyRs.Primary.Attributes["arn"], - ) - } - - if attr["target_key_id"] != kmsKeyRs.Primary.Attributes["target_key_id"] { - return fmt.Errorf( - "target_key_id is %s; want %s", - attr["target_key_id"], - kmsKeyRs.Primary.Attributes["target_key_id"], - ) - } - - return nil - } -} - -func testAccDataSourceAwsKmsAlias(rInt int) string { - return fmt.Sprintf(` -resource "aws_kms_key" "one" { - description = "Terraform acc test" - 
deletion_window_in_days = 7 -} - -resource "aws_kms_alias" "single" { - name = "alias/tf-acc-key-alias-%d" - target_key_id = "${aws_kms_key.one.key_id}" -} - -data "aws_kms_alias" "by_name" { - name = "${aws_kms_alias.single.name}" -}`, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_kms_ciphertext.go b/builtin/providers/aws/data_source_aws_kms_ciphertext.go deleted file mode 100644 index 3f15965ca..000000000 --- a/builtin/providers/aws/data_source_aws_kms_ciphertext.go +++ /dev/null @@ -1,66 +0,0 @@ -package aws - -import ( - "encoding/base64" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsKmsCiphetext() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsKmsCiphetextRead, - - Schema: map[string]*schema.Schema{ - "plaintext": { - Type: schema.TypeString, - Required: true, - }, - - "key_id": { - Type: schema.TypeString, - Required: true, - }, - - "context": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "ciphertext_blob": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsKmsCiphetextRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - d.SetId(time.Now().UTC().String()) - - req := &kms.EncryptInput{ - KeyId: aws.String(d.Get("key_id").(string)), - Plaintext: []byte(d.Get("plaintext").(string)), - } - - if ec := d.Get("context"); ec != nil { - req.EncryptionContext = stringMapToPointers(ec.(map[string]interface{})) - } - - log.Printf("[DEBUG] KMS encrypt for key: %s", d.Get("key_id").(string)) - - resp, err := conn.Encrypt(req) - if err != nil { - return err - } - - d.Set("ciphertext_blob", base64.StdEncoding.EncodeToString(resp.CiphertextBlob)) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_kms_ciphertext_test.go 
b/builtin/providers/aws/data_source_aws_kms_ciphertext_test.go deleted file mode 100644 index f871acc03..000000000 --- a/builtin/providers/aws/data_source_aws_kms_ciphertext_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAwsKmsCiphertext_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsKmsCiphertextConfig_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet( - "data.aws_kms_ciphertext.foo", "ciphertext_blob"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsKmsCiphertext_validate(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsKmsCiphertextConfig_validate, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet( - "data.aws_kms_ciphertext.foo", "ciphertext_blob"), - resource.TestCheckResourceAttrSet( - "data.aws_kms_secret.foo", "plaintext"), - resource.TestCheckResourceAttr( - "data.aws_kms_secret.foo", "plaintext", "Super secret data"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsKmsCiphertext_validate_withContext(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsKmsCiphertextConfig_validate_withContext, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet( - "data.aws_kms_ciphertext.foo", "ciphertext_blob"), - resource.TestCheckResourceAttrSet( - "data.aws_kms_secret.foo", "plaintext"), - resource.TestCheckResourceAttr( - "data.aws_kms_secret.foo", "plaintext", "Super secret data"), - ), - }, - }, - }) -} - -const 
testAccDataSourceAwsKmsCiphertextConfig_basic = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_kms_key" "foo" { - description = "tf-test-acc-data-source-aws-kms-ciphertext-basic" - is_enabled = true -} - -data "aws_kms_ciphertext" "foo" { - key_id = "${aws_kms_key.foo.key_id}" - - plaintext = "Super secret data" -} -` - -const testAccDataSourceAwsKmsCiphertextConfig_validate = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_kms_key" "foo" { - description = "tf-test-acc-data-source-aws-kms-ciphertext-validate" - is_enabled = true -} - -data "aws_kms_ciphertext" "foo" { - key_id = "${aws_kms_key.foo.key_id}" - - plaintext = "Super secret data" -} - -data "aws_kms_secret" "foo" { - secret { - name = "plaintext" - payload = "${data.aws_kms_ciphertext.foo.ciphertext_blob}" - } -} -` - -const testAccDataSourceAwsKmsCiphertextConfig_validate_withContext = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_kms_key" "foo" { - description = "tf-test-acc-data-source-aws-kms-ciphertext-validate-with-context" - is_enabled = true -} - -data "aws_kms_ciphertext" "foo" { - key_id = "${aws_kms_key.foo.key_id}" - - plaintext = "Super secret data" - - context { - name = "value" - } -} - -data "aws_kms_secret" "foo" { - secret { - name = "plaintext" - payload = "${data.aws_kms_ciphertext.foo.ciphertext_blob}" - - context { - name = "value" - } - } -} -` diff --git a/builtin/providers/aws/data_source_aws_kms_secret.go b/builtin/providers/aws/data_source_aws_kms_secret.go deleted file mode 100644 index 92d5134fd..000000000 --- a/builtin/providers/aws/data_source_aws_kms_secret.go +++ /dev/null @@ -1,99 +0,0 @@ -package aws - -import ( - "encoding/base64" - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsKmsSecret() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsKmsSecretRead, - - Schema: 
map[string]*schema.Schema{ - "secret": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "payload": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "context": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "grant_tokens": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "__has_dynamic_attributes": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -// dataSourceAwsKmsSecretRead decrypts the specified secrets -func dataSourceAwsKmsSecretRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - secrets := d.Get("secret").(*schema.Set) - - d.SetId(time.Now().UTC().String()) - - for _, v := range secrets.List() { - secret := v.(map[string]interface{}) - - // base64 decode the payload - payload, err := base64.StdEncoding.DecodeString(secret["payload"].(string)) - if err != nil { - return fmt.Errorf("Invalid base64 value for secret '%s': %v", secret["name"].(string), err) - } - - // build the kms decrypt params - params := &kms.DecryptInput{ - CiphertextBlob: []byte(payload), - } - if context, exists := secret["context"]; exists { - params.EncryptionContext = make(map[string]*string) - for k, v := range context.(map[string]interface{}) { - params.EncryptionContext[k] = aws.String(v.(string)) - } - } - if grant_tokens, exists := secret["grant_tokens"]; exists { - params.GrantTokens = make([]*string, 0) - for _, v := range grant_tokens.([]interface{}) { - params.GrantTokens = append(params.GrantTokens, aws.String(v.(string))) - } - } - - // decrypt - resp, err := conn.Decrypt(params) - if err != nil { - return fmt.Errorf("Failed to decrypt '%s': %s", secret["name"].(string), err) - } - - // Set 
the secret via the name - log.Printf("[DEBUG] aws_kms_secret - successfully decrypted secret: %s", secret["name"].(string)) - d.UnsafeSetFieldRaw(secret["name"].(string), string(resp.Plaintext)) - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_kms_secret_test.go b/builtin/providers/aws/data_source_aws_kms_secret_test.go deleted file mode 100644 index 4d0bf139e..000000000 --- a/builtin/providers/aws/data_source_aws_kms_secret_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package aws - -import ( - "encoding/base64" - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSKmsSecretDataSource_basic(t *testing.T) { - // Run a resource test to setup our KMS key - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsKmsSecretDataSourceKey, - Check: func(s *terraform.State) error { - encryptedPayload, err := testAccCheckAwsKmsSecretDataSourceCheckKeySetup(s) - if err != nil { - return err - } - - // We run the actual test on our data source nested in the - // Check function of the KMS key so we can access the - // encrypted output, above, and so that the key will be - // deleted at the end of the test - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckAwsKmsSecretDataSourceSecret, encryptedPayload), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_kms_secret.testing", "secret_name", "PAYLOAD"), - ), - }, - }, - }) - - return nil - }, - }, - }, - }) - -} - -func testAccCheckAwsKmsSecretDataSourceCheckKeySetup(s *terraform.State) (string, error) { - rs, ok := 
s.RootModule().Resources["aws_kms_key.terraform_data_source_testing"] - if !ok { - return "", fmt.Errorf("Failed to setup a KMS key for data source testing!") - } - - // Now that the key is setup encrypt a string using it - // XXX TODO: Set up and test with grants - params := &kms.EncryptInput{ - KeyId: aws.String(rs.Primary.Attributes["arn"]), - Plaintext: []byte("PAYLOAD"), - EncryptionContext: map[string]*string{ - "name": aws.String("value"), - }, - } - - kmsconn := testAccProvider.Meta().(*AWSClient).kmsconn - resp, err := kmsconn.Encrypt(params) - if err != nil { - return "", fmt.Errorf("Failed encrypting string with KMS for data source testing: %s", err) - } - - return base64.StdEncoding.EncodeToString(resp.CiphertextBlob), nil -} - -const testAccCheckAwsKmsSecretDataSourceKey = ` -resource "aws_kms_key" "terraform_data_source_testing" { - description = "Testing the Terraform AWS KMS Secret data_source" -} -` - -const testAccCheckAwsKmsSecretDataSourceSecret = ` -data "aws_kms_secret" "testing" { - secret { - name = "secret_name" - payload = "%s" - - context { - name = "value" - } - } -} -` diff --git a/builtin/providers/aws/data_source_aws_partition.go b/builtin/providers/aws/data_source_aws_partition.go deleted file mode 100644 index d52f7ee47..000000000 --- a/builtin/providers/aws/data_source_aws_partition.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "log" - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsPartition() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsPartitionRead, - - Schema: map[string]*schema.Schema{ - "partition": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient) - - log.Printf("[DEBUG] Reading Partition.") - d.SetId(time.Now().UTC().String()) - - log.Printf("[DEBUG] Setting AWS Partition to %s.", client.partition) - d.Set("partition", 
meta.(*AWSClient).partition) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_partition_test.go b/builtin/providers/aws/data_source_aws_partition_test.go deleted file mode 100644 index 5610a0b96..000000000 --- a/builtin/providers/aws/data_source_aws_partition_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSPartition_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsPartitionConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsPartition("data.aws_partition.current"), - ), - }, - }, - }) -} - -func testAccCheckAwsPartition(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find resource: %s", n) - } - - expected := testAccProvider.Meta().(*AWSClient).partition - if rs.Primary.Attributes["partition"] != expected { - return fmt.Errorf("Incorrect Partition: expected %q, got %q", expected, rs.Primary.Attributes["partition"]) - } - - return nil - } -} - -const testAccCheckAwsPartitionConfig_basic = ` -data "aws_partition" "current" { } -` diff --git a/builtin/providers/aws/data_source_aws_prefix_list.go b/builtin/providers/aws/data_source_aws_prefix_list.go deleted file mode 100644 index 8bed85506..000000000 --- a/builtin/providers/aws/data_source_aws_prefix_list.go +++ /dev/null @@ -1,76 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsPrefixList() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsPrefixListRead, - - Schema: map[string]*schema.Schema{ - "prefix_list_id": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - // Computed values. - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "cidr_blocks": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribePrefixListsInput{} - - if prefixListID := d.Get("prefix_list_id"); prefixListID != "" { - req.PrefixListIds = aws.StringSlice([]string{prefixListID.(string)}) - } - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "prefix-list-name": d.Get("name").(string), - }, - ) - - log.Printf("[DEBUG] DescribePrefixLists %s\n", req) - resp, err := conn.DescribePrefixLists(req) - if err != nil { - return err - } - if resp == nil || len(resp.PrefixLists) == 0 { - return fmt.Errorf("no matching prefix list found; the prefix list ID or name may be invalid or not exist in the current region") - } - - pl := resp.PrefixLists[0] - - d.SetId(*pl.PrefixListId) - d.Set("id", pl.PrefixListId) - d.Set("name", pl.PrefixListName) - - cidrs := make([]string, len(pl.Cidrs)) - for i, v := range pl.Cidrs { - cidrs[i] = *v - } - d.Set("cidr_blocks", cidrs) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_prefix_list_test.go b/builtin/providers/aws/data_source_aws_prefix_list_test.go deleted file mode 100644 index c9ad308d0..000000000 --- a/builtin/providers/aws/data_source_aws_prefix_list_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package aws - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsPrefixList(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsPrefixListConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsPrefixListCheck("data.aws_prefix_list.s3_by_id"), - testAccDataSourceAwsPrefixListCheck("data.aws_prefix_list.s3_by_name"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsPrefixListCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - if attr["name"] != "com.amazonaws.us-west-2.s3" { - return fmt.Errorf("bad name %s", attr["name"]) - } - if attr["id"] != "pl-68a54001" { - return fmt.Errorf("bad id %s", attr["id"]) - } - - var ( - cidrBlockSize int - err error - ) - - if cidrBlockSize, err = strconv.Atoi(attr["cidr_blocks.#"]); err != nil { - return err - } - if cidrBlockSize < 1 { - return fmt.Errorf("cidr_blocks seem suspiciously low: %d", cidrBlockSize) - } - - return nil - } -} - -const testAccDataSourceAwsPrefixListConfig = ` -provider "aws" { - region = "us-west-2" -} - -data "aws_prefix_list" "s3_by_id" { - prefix_list_id = "pl-68a54001" -} - -data "aws_prefix_list" "s3_by_name" { - name = "com.amazonaws.us-west-2.s3" -} -` diff --git a/builtin/providers/aws/data_source_aws_redshift_service_account.go b/builtin/providers/aws/data_source_aws_redshift_service_account.go deleted file mode 100644 index faa210fff..000000000 --- a/builtin/providers/aws/data_source_aws_redshift_service_account.go +++ /dev/null @@ -1,50 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging -var redshiftServiceAccountPerRegionMap = map[string]string{ - "us-east-1": "193672423079", - "us-east-2": "391106570357", - "us-west-1": "262260360010", - "us-west-2": 
"902366379725", - "ap-south-1": "865932855811", - "ap-northeast-2": "760740231472", - "ap-southeast-1": "361669875840", - "ap-southeast-2": "762762565011", - "ap-northeast-1": "404641285394", - "ca-central-1": "907379612154", - "eu-central-1": "053454850223", - "eu-west-1": "210876761215", -} - -func dataSourceAwsRedshiftServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsRedshiftServiceAccountRead, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceAwsRedshiftServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - region := meta.(*AWSClient).region - if v, ok := d.GetOk("region"); ok { - region = v.(string) - } - - if accid, ok := redshiftServiceAccountPerRegionMap[region]; ok { - d.SetId(accid) - return nil - } - - return fmt.Errorf("Unknown region (%q)", region) -} diff --git a/builtin/providers/aws/data_source_aws_redshift_service_account_test.go b/builtin/providers/aws/data_source_aws_redshift_service_account_test.go deleted file mode 100644 index 347de6814..000000000 --- a/builtin/providers/aws/data_source_aws_redshift_service_account_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSRedshiftServiceAccount_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckAwsRedshiftServiceAccountConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_redshift_service_account.main", "id", "902366379725"), - ), - }, - resource.TestStep{ - Config: testAccCheckAwsRedshiftServiceAccountExplicitRegionConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_redshift_service_account.regional", "id", "210876761215"), - ), - 
}, - }, - }) -} - -const testAccCheckAwsRedshiftServiceAccountConfig = ` -data "aws_redshift_service_account" "main" { } -` - -const testAccCheckAwsRedshiftServiceAccountExplicitRegionConfig = ` -data "aws_redshift_service_account" "regional" { - region = "eu-west-1" -} -` diff --git a/builtin/providers/aws/data_source_aws_region.go b/builtin/providers/aws/data_source_aws_region.go deleted file mode 100644 index ed75f7056..000000000 --- a/builtin/providers/aws/data_source_aws_region.go +++ /dev/null @@ -1,84 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsRegion() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsRegionRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "current": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - currentRegion := meta.(*AWSClient).region - - req := &ec2.DescribeRegionsInput{} - - req.RegionNames = make([]*string, 0, 2) - if name := d.Get("name").(string); name != "" { - req.RegionNames = append(req.RegionNames, aws.String(name)) - } - - if d.Get("current").(bool) { - req.RegionNames = append(req.RegionNames, aws.String(currentRegion)) - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "endpoint": d.Get("endpoint").(string), - }, - ) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil - } - - log.Printf("[DEBUG] DescribeRegions %s\n", req) - resp, err := conn.DescribeRegions(req) - if err != nil { - return err - } - if resp == nil || len(resp.Regions) == 0 { - return fmt.Errorf("no matching regions found") - } - if len(resp.Regions) > 1 { - return fmt.Errorf("multiple regions matched; use additional constraints to reduce matches to a single region") - } - - region := resp.Regions[0] - - d.SetId(*region.RegionName) - d.Set("id", region.RegionName) - d.Set("name", region.RegionName) - d.Set("endpoint", region.Endpoint) - d.Set("current", *region.RegionName == currentRegion) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_region_test.go b/builtin/providers/aws/data_source_aws_region_test.go deleted file mode 100644 index 370c4b2b0..000000000 --- a/builtin/providers/aws/data_source_aws_region_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsRegion(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsRegionConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsRegionCheck("data.aws_region.by_name_current", "us-west-2", "true"), - testAccDataSourceAwsRegionCheck("data.aws_region.by_name_other", "us-west-1", "false"), - testAccDataSourceAwsRegionCheck("data.aws_region.by_current", "us-west-2", "true"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsRegionCheck(name, region, current string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - if attr["name"] != region { - return fmt.Errorf("bad name %s", 
attr["name"]) - } - if attr["current"] != current { - return fmt.Errorf("bad current %s; want %s", attr["current"], current) - } - - return nil - } -} - -const testAccDataSourceAwsRegionConfig = ` -provider "aws" { - region = "us-west-2" -} - -data "aws_region" "by_name_current" { - name = "us-west-2" -} - -data "aws_region" "by_name_other" { - name = "us-west-1" -} - -data "aws_region" "by_current" { - current = true -} -` diff --git a/builtin/providers/aws/data_source_aws_route53_zone.go b/builtin/providers/aws/data_source_aws_route53_zone.go deleted file mode 100644 index b3de4eed4..000000000 --- a/builtin/providers/aws/data_source_aws_route53_zone.go +++ /dev/null @@ -1,176 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsRoute53Zone() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsRoute53ZoneRead, - - Schema: map[string]*schema.Schema{ - "zone_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "private_zone": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "comment": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "caller_reference": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tags": tagsSchemaComputed(), - "resource_record_set_count": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - name, nameExists := d.GetOk("name") - name = hostedZoneName(name.(string)) - id, idExists := d.GetOk("zone_id") - vpcId, vpcIdExists := d.GetOk("vpc_id") - tags := 
tagsFromMap(d.Get("tags").(map[string]interface{})) - if nameExists && idExists { - return fmt.Errorf("zone_id and name arguments can't be used together") - } else if !nameExists && !idExists { - return fmt.Errorf("Either name or zone_id must be set") - } - - var nextMarker *string - - var hostedZoneFound *route53.HostedZone - // We loop through all hostedzone - for allHostedZoneListed := false; !allHostedZoneListed; { - req := &route53.ListHostedZonesInput{} - if nextMarker != nil { - req.Marker = nextMarker - } - resp, err := conn.ListHostedZones(req) - - if err != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err) - } - for _, hostedZone := range resp.HostedZones { - hostedZoneId := cleanZoneID(*hostedZone.Id) - if idExists && hostedZoneId == id.(string) { - hostedZoneFound = hostedZone - break - // we check if the name is the same as requested and if private zone field is the same as requested or if there is a vpc_id - } else if *hostedZone.Name == name && (*hostedZone.Config.PrivateZone == d.Get("private_zone").(bool) || (*hostedZone.Config.PrivateZone == true && vpcIdExists)) { - matchingVPC := false - if vpcIdExists { - reqHostedZone := &route53.GetHostedZoneInput{} - reqHostedZone.Id = aws.String(hostedZoneId) - - respHostedZone, errHostedZone := conn.GetHostedZone(reqHostedZone) - if errHostedZone != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errHostedZone) - } - // we go through all VPCs - for _, vpc := range respHostedZone.VPCs { - if *vpc.VPCId == vpcId.(string) { - matchingVPC = true - break - } - } - } else { - matchingVPC = true - } - // we check if tags match - matchingTags := true - if len(tags) > 0 { - reqListTags := &route53.ListTagsForResourceInput{} - reqListTags.ResourceId = aws.String(hostedZoneId) - reqListTags.ResourceType = aws.String("hostedzone") - respListTags, errListTags := conn.ListTagsForResource(reqListTags) - - if errListTags != nil { - return fmt.Errorf("Error finding Route 53 Hosted 
Zone: %v", errListTags) - } - for _, tag := range tags { - found := false - for _, tagRequested := range respListTags.ResourceTagSet.Tags { - if *tag.Key == *tagRequested.Key && *tag.Value == *tagRequested.Value { - found = true - } - } - - if !found { - matchingTags = false - break - } - } - - } - - if matchingTags && matchingVPC { - if hostedZoneFound != nil { - return fmt.Errorf("multiple Route53Zone found please use vpc_id option to filter") - } else { - hostedZoneFound = hostedZone - } - } - } - - } - if *resp.IsTruncated { - - nextMarker = resp.NextMarker - } else { - allHostedZoneListed = true - } - } - if hostedZoneFound == nil { - return fmt.Errorf("no matching Route53Zone found") - } - - idHostedZone := cleanZoneID(*hostedZoneFound.Id) - d.SetId(idHostedZone) - d.Set("zone_id", idHostedZone) - d.Set("name", hostedZoneFound.Name) - d.Set("comment", hostedZoneFound.Config.Comment) - d.Set("private_zone", hostedZoneFound.Config.PrivateZone) - d.Set("caller_reference", hostedZoneFound.CallerReference) - d.Set("resource_record_set_count", hostedZoneFound.ResourceRecordSetCount) - return nil -} - -// used to manage trailing . -func hostedZoneName(name string) string { - if strings.HasSuffix(name, ".") { - return name - } else { - return name + "." 
- } -} diff --git a/builtin/providers/aws/data_source_aws_route53_zone_test.go b/builtin/providers/aws/data_source_aws_route53_zone_test.go deleted file mode 100644 index 4be9cae42..000000000 --- a/builtin/providers/aws/data_source_aws_route53_zone_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsRoute53Zone(t *testing.T) { - rInt := acctest.RandInt() - publicResourceName := "aws_route53_zone.test" - publicDomain := fmt.Sprintf("terraformtestacchz-%d.com.", rInt) - privateResourceName := "aws_route53_zone.test_private" - privateDomain := fmt.Sprintf("test.acc-%d.", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsRoute53ZoneConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsRoute53ZoneCheck( - publicResourceName, "data.aws_route53_zone.by_zone_id", publicDomain), - testAccDataSourceAwsRoute53ZoneCheck( - publicResourceName, "data.aws_route53_zone.by_name", publicDomain), - testAccDataSourceAwsRoute53ZoneCheck( - privateResourceName, "data.aws_route53_zone.by_vpc", privateDomain), - testAccDataSourceAwsRoute53ZoneCheck( - privateResourceName, "data.aws_route53_zone.by_tag", privateDomain), - ), - }, - }, - }) -} - -// rsName for the name of the created resource -// dsName for the name of the created data source -// zName for the name of the domain -func testAccDataSourceAwsRoute53ZoneCheck(rsName, dsName, zName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rsName] - if !ok { - return fmt.Errorf("root module has no resource called %s", rsName) - } - - hostedZone, ok := s.RootModule().Resources[dsName] - if !ok { - return 
fmt.Errorf("can't find zone %q in state", dsName) - } - - attr := rs.Primary.Attributes - if attr["id"] != hostedZone.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - hostedZone.Primary.Attributes["id"], - ) - } - - if attr["name"] != zName { - return fmt.Errorf("Route53 Zone name is %q; want %q", attr["name"], zName) - } - - return nil - } -} - -func testAccDataSourceAwsRoute53ZoneConfig(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_vpc" "test" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccDataSourceAwsRoute53ZoneConfig" - } - } - - resource "aws_route53_zone" "test_private" { - name = "test.acc-%d." - vpc_id = "${aws_vpc.test.id}" - tags { - Environment = "dev-%d" - } - } - - data "aws_route53_zone" "by_vpc" { - name = "${aws_route53_zone.test_private.name}" - vpc_id = "${aws_vpc.test.id}" - } - - data "aws_route53_zone" "by_tag" { - name = "${aws_route53_zone.test_private.name}" - private_zone = true - tags { - Environment = "dev-%d" - } - } - - resource "aws_route53_zone" "test" { - name = "terraformtestacchz-%d.com." 
- } - - data "aws_route53_zone" "by_zone_id" { - zone_id = "${aws_route53_zone.test.zone_id}" - } - - data "aws_route53_zone" "by_name" { - name = "${data.aws_route53_zone.by_zone_id.name}" - }`, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_route_table.go b/builtin/providers/aws/data_source_aws_route_table.go deleted file mode 100644 index c332bdd91..000000000 --- a/builtin/providers/aws/data_source_aws_route_table.go +++ /dev/null @@ -1,233 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsRouteTable() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsRouteTableRead, - - Schema: map[string]*schema.Schema{ - "subnet_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "route_table_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "filter": ec2CustomFiltersSchema(), - "tags": tagsSchemaComputed(), - "routes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Computed: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Computed: true, - }, - - "egress_only_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - - "gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_id": { - Type: schema.TypeString, - Computed: true, - }, - - "nat_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - - "vpc_peering_connection_id": { - Type: schema.TypeString, - Computed: true, - }, - - "network_interface_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "associations": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "route_table_association_id": { - Type: schema.TypeString, - Computed: true, - }, - - "route_table_id": { - Type: schema.TypeString, - Computed: true, - }, - - "subnet_id": { - Type: schema.TypeString, - Computed: true, - }, - - "main": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - req := &ec2.DescribeRouteTablesInput{} - vpcId, vpcIdOk := d.GetOk("vpc_id") - subnetId, subnetIdOk := d.GetOk("subnet_id") - rtbId, rtbOk := d.GetOk("route_table_id") - tags, tagsOk := d.GetOk("tags") - filter, filterOk := d.GetOk("filter") - - if !vpcIdOk && !subnetIdOk && !tagsOk && !filterOk && !rtbOk { - return fmt.Errorf("One of route_table_id, vpc_id, subnet_id, filters, or tags must be assigned") - } - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "route-table-id": rtbId.(string), - "vpc-id": vpcId.(string), - "association.subnet-id": subnetId.(string), - }, - ) - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(tags.(map[string]interface{})), - )...) - req.Filters = append(req.Filters, buildEC2CustomFilterList( - filter.(*schema.Set), - )...) - - log.Printf("[DEBUG] Describe Route Tables %v\n", req) - resp, err := conn.DescribeRouteTables(req) - if err != nil { - return err - } - if resp == nil || len(resp.RouteTables) == 0 { - return fmt.Errorf("Your query returned no results. 
Please change your search criteria and try again.") - } - if len(resp.RouteTables) > 1 { - return fmt.Errorf("Multiple Route Table matched; use additional constraints to reduce matches to a single Route Table") - } - - rt := resp.RouteTables[0] - - d.SetId(aws.StringValue(rt.RouteTableId)) - d.Set("route_table_id", rt.RouteTableId) - d.Set("vpc_id", rt.VpcId) - d.Set("tags", tagsToMap(rt.Tags)) - if err := d.Set("routes", dataSourceRoutesRead(rt.Routes)); err != nil { - return err - } - - if err := d.Set("associations", dataSourceAssociationsRead(rt.Associations)); err != nil { - return err - } - - return nil -} - -func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} { - routes := make([]map[string]interface{}, 0, len(ec2Routes)) - // Loop through the routes and add them to the set - for _, r := range ec2Routes { - if r.GatewayId != nil && *r.GatewayId == "local" { - continue - } - - if r.Origin != nil && *r.Origin == "EnableVgwRoutePropagation" { - continue - } - - if r.DestinationPrefixListId != nil { - // Skipping because VPC endpoint routes are handled separately - // See aws_vpc_endpoint - continue - } - - m := make(map[string]interface{}) - - if r.DestinationCidrBlock != nil { - m["cidr_block"] = *r.DestinationCidrBlock - } - if r.DestinationIpv6CidrBlock != nil { - m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock - } - if r.EgressOnlyInternetGatewayId != nil { - m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId - } - if r.GatewayId != nil { - m["gateway_id"] = *r.GatewayId - } - if r.NatGatewayId != nil { - m["nat_gateway_id"] = *r.NatGatewayId - } - if r.InstanceId != nil { - m["instance_id"] = *r.InstanceId - } - if r.VpcPeeringConnectionId != nil { - m["vpc_peering_connection_id"] = *r.VpcPeeringConnectionId - } - if r.NetworkInterfaceId != nil { - m["network_interface_id"] = *r.NetworkInterfaceId - } - - routes = append(routes, m) - } - return routes -} - -func dataSourceAssociationsRead(ec2Assocations 
[]*ec2.RouteTableAssociation) []map[string]interface{} { - associations := make([]map[string]interface{}, 0, len(ec2Assocations)) - // Loop through the routes and add them to the set - for _, a := range ec2Assocations { - - m := make(map[string]interface{}) - m["route_table_id"] = *a.RouteTableId - m["route_table_association_id"] = *a.RouteTableAssociationId - // GH[11134] - if a.SubnetId != nil { - m["subnet_id"] = *a.SubnetId - } - m["main"] = *a.Main - associations = append(associations, m) - } - return associations -} diff --git a/builtin/providers/aws/data_source_aws_route_table_test.go b/builtin/providers/aws/data_source_aws_route_table_test.go deleted file mode 100644 index 71957541f..000000000 --- a/builtin/providers/aws/data_source_aws_route_table_test.go +++ /dev/null @@ -1,215 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAwsRouteTable_' -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsRouteTable_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsRouteTableGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"), - testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_filter"), - testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_subnet"), - testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_id"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccDataSourceAwsRouteTable_main(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsRouteTableMainRoute, - Check: resource.ComposeTestCheckFunc( - 
testAccDataSourceAwsRouteTableCheckMain("data.aws_route_table.by_filter"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsRouteTableCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - rts, ok := s.RootModule().Resources["aws_route_table.test"] - if !ok { - return fmt.Errorf("can't find aws_route_table.test in state") - } - vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc.test in state") - } - subnetRs, ok := s.RootModule().Resources["aws_subnet.test"] - if !ok { - return fmt.Errorf("can't find aws_subnet.test in state") - } - attr := rs.Primary.Attributes - - if attr["id"] != rts.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - rts.Primary.Attributes["id"], - ) - } - - if attr["route_table_id"] != rts.Primary.Attributes["id"] { - return fmt.Errorf( - "route_table_id is %s; want %s", - attr["route_table_id"], - rts.Primary.Attributes["id"], - ) - } - - if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] { - return fmt.Errorf( - "vpc_id is %s; want %s", - attr["vpc_id"], - vpcRs.Primary.Attributes["id"], - ) - } - - if attr["tags.Name"] != "terraform-testacc-routetable-data-source" { - return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) - } - if attr["associations.0.subnet_id"] != subnetRs.Primary.Attributes["id"] { - return fmt.Errorf( - "subnet_id is %v; want %s", - attr["associations.0.subnet_id"], - subnetRs.Primary.Attributes["id"], - ) - } - - return nil - } -} - -func testAccDataSourceAwsRouteTableCheckMain(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - // Verify attributes are set - if _, ok := 
attr["id"]; !ok { - return fmt.Errorf("id not set for main route table") - } - if _, ok := attr["vpc_id"]; !ok { - return fmt.Errorf("vpc_id not set for main route table") - } - // Verify it's actually the main route table that's returned - if attr["associations.0.main"] != "true" { - return fmt.Errorf("main route table not found") - } - - return nil - } -} - -const testAccDataSourceAwsRouteTableGroupConfig = ` -provider "aws" { - region = "eu-central-1" -} -resource "aws_vpc" "test" { - cidr_block = "172.16.0.0/16" - - tags { - Name = "terraform-testacc-data-source" - } -} - -resource "aws_subnet" "test" { - cidr_block = "172.16.0.0/24" - vpc_id = "${aws_vpc.test.id}" - tags { - Name = "terraform-testacc-data-source" - } -} - -resource "aws_route_table" "test" { - vpc_id = "${aws_vpc.test.id}" - tags { - Name = "terraform-testacc-routetable-data-source" - } -} - -resource "aws_route_table_association" "a" { - subnet_id = "${aws_subnet.test.id}" - route_table_id = "${aws_route_table.test.id}" -} - -data "aws_route_table" "by_filter" { - filter { - name = "association.route-table-association-id" - values = ["${aws_route_table_association.a.id}"] - } - depends_on = ["aws_route_table_association.a"] -} - -data "aws_route_table" "by_tag" { - tags { - Name = "${aws_route_table.test.tags["Name"]}" - } - depends_on = ["aws_route_table_association.a"] -} - -data "aws_route_table" "by_subnet" { - subnet_id = "${aws_subnet.test.id}" - depends_on = ["aws_route_table_association.a"] -} - -data "aws_route_table" "by_id" { - route_table_id = "${aws_route_table.test.id}" - depends_on = ["aws_route_table_association.a"] -} -` - -// Uses us-east-2, as region only has a single main route table -const testAccDataSourceAwsRouteTableMainRoute = ` -provider "aws" { - region = "us-east-2" -} - -resource "aws_vpc" "test" { - cidr_block = "172.16.0.0/16" - - tags { - Name = "terraform-testacc-data-source" - } -} - -data "aws_route_table" "by_filter" { - filter { - name = "association.main" 
- values = ["true"] - } - filter { - name = "vpc-id" - values = ["${aws_vpc.test.id}"] - } -} -` diff --git a/builtin/providers/aws/data_source_aws_s3_bucket_object.go b/builtin/providers/aws/data_source_aws_s3_bucket_object.go deleted file mode 100644 index 2eff5e6da..000000000 --- a/builtin/providers/aws/data_source_aws_s3_bucket_object.go +++ /dev/null @@ -1,239 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsS3BucketObject() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsS3BucketObjectRead, - - Schema: map[string]*schema.Schema{ - "body": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cache_control": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "content_disposition": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "content_encoding": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "content_language": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "content_length": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - "content_type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "etag": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "expiration": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "expires": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "last_modified": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - "range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - 
"server_side_encryption": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "sse_kms_key_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "storage_class": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "version_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "website_redirect_location": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).s3conn - - bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - - input := s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - if v, ok := d.GetOk("range"); ok { - input.Range = aws.String(v.(string)) - } - if v, ok := d.GetOk("version_id"); ok { - input.VersionId = aws.String(v.(string)) - } - - versionText := "" - uniqueId := bucket + "/" + key - if v, ok := d.GetOk("version_id"); ok { - versionText = fmt.Sprintf(" of version %q", v.(string)) - uniqueId += "@" + v.(string) - } - - log.Printf("[DEBUG] Reading S3 object: %s", input) - out, err := conn.HeadObject(&input) - if err != nil { - return fmt.Errorf("Failed getting S3 object: %s Bucket: %q Object: %q", err, bucket, key) - } - if out.DeleteMarker != nil && *out.DeleteMarker == true { - return fmt.Errorf("Requested S3 object %q%s has been deleted", - bucket+key, versionText) - } - - log.Printf("[DEBUG] Received S3 object: %s", out) - - d.SetId(uniqueId) - - d.Set("cache_control", out.CacheControl) - d.Set("content_disposition", out.ContentDisposition) - d.Set("content_encoding", out.ContentEncoding) - d.Set("content_language", out.ContentLanguage) - d.Set("content_length", out.ContentLength) - d.Set("content_type", out.ContentType) - // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 - d.Set("etag", strings.Trim(*out.ETag, `"`)) - 
d.Set("expiration", out.Expiration) - d.Set("expires", out.Expires) - d.Set("last_modified", out.LastModified.Format(time.RFC1123)) - d.Set("metadata", pointersMapToStringList(out.Metadata)) - d.Set("server_side_encryption", out.ServerSideEncryption) - d.Set("sse_kms_key_id", out.SSEKMSKeyId) - d.Set("version_id", out.VersionId) - d.Set("website_redirect_location", out.WebsiteRedirectLocation) - - // The "STANDARD" (which is also the default) storage - // class when set would not be included in the results. - d.Set("storage_class", s3.StorageClassStandard) - if out.StorageClass != nil { - d.Set("storage_class", out.StorageClass) - } - - if isContentTypeAllowed(out.ContentType) { - input := s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - if v, ok := d.GetOk("range"); ok { - input.Range = aws.String(v.(string)) - } - if out.VersionId != nil { - input.VersionId = out.VersionId - } - out, err := conn.GetObject(&input) - if err != nil { - return fmt.Errorf("Failed getting S3 object: %s", err) - } - - buf := new(bytes.Buffer) - bytesRead, err := buf.ReadFrom(out.Body) - if err != nil { - return fmt.Errorf("Failed reading content of S3 object (%s): %s", - uniqueId, err) - } - log.Printf("[INFO] Saving %d bytes from S3 object %s", bytesRead, uniqueId) - d.Set("body", buf.String()) - } else { - contentType := "" - if out.ContentType == nil { - contentType = "" - } else { - contentType = *out.ContentType - } - - log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q", - uniqueId, contentType) - } - - tagResp, err := conn.GetObjectTagging( - &s3.GetObjectTaggingInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - return err - } - d.Set("tags", tagsToMapS3(tagResp.TagSet)) - - return nil -} - -// This is to prevent potential issues w/ binary files -// and generally unprintable characters -// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738 -func 
isContentTypeAllowed(contentType *string) bool { - if contentType == nil { - return false - } - - allowedContentTypes := []*regexp.Regexp{ - regexp.MustCompile("^text/.+"), - regexp.MustCompile("^application/json$"), - } - - for _, r := range allowedContentTypes { - if r.MatchString(*contentType) { - return true - } - } - - return false -} diff --git a/builtin/providers/aws/data_source_aws_s3_bucket_object_test.go b/builtin/providers/aws/data_source_aws_s3_bucket_object_test.go deleted file mode 100644 index b4693da54..000000000 --- a/builtin/providers/aws/data_source_aws_s3_bucket_object_test.go +++ /dev/null @@ -1,302 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAWSS3BucketObject_' -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAWSS3BucketObject_basic(t *testing.T) { - rInt := acctest.RandInt() - resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_basic(rInt) - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), - ), - }, - resource.TestStep{ - Config: conf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"), - 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", - regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), - resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), - ), - }, - }, - }) -} - -func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) { - rInt := acctest.RandInt() - resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_readableBody(rInt) - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), - ), - }, - resource.TestStep{ - Config: conf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "3"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "text/plain"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "a6105c0a611b41b08f1209506350279e"), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", - regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "yes"), - ), - }, - }, - }) -} - -func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) { - rInt := acctest.RandInt() - resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_kmsEncrypted(rInt) - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), - ), - }, - resource.TestStep{ - Config: conf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "22"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "text/plain"), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "etag", regexp.MustCompile("^[a-f0-9]{32}$")), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "server_side_encryption", "aws:kms"), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "sse_kms_key_id", - regexp.MustCompile("^arn:aws:kms:[a-z]{2}-[a-z]+-\\d{1}:[0-9]{12}:key/[a-z0-9-]{36}$")), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", - regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "Keep Calm and Carry On"), - ), - }, - }, - }) -} - -func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) { - rInt := acctest.RandInt() - resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_allParams(rInt) - - var rObj s3.GetObjectOutput - var dsObj s3.GetObjectOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: resourceOnlyConf, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), - ), - }, - resource.TestStep{ - Config: conf, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "21"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "application/unknown"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "723f7a6ac0c57b445790914668f98640"), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", - regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), - resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "version_id", regexp.MustCompile("^.{32}$")), - resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "cache_control", "no-cache"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_disposition", "attachment"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_encoding", "identity"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_language", "en-GB"), - // Encryption is off - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "server_side_encryption", ""), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "sse_kms_key_id", ""), - // Supported, but difficult to reproduce in short testing time - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "storage_class", "STANDARD"), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expiration", ""), - // Currently unsupported in aws_s3_bucket_object resource - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expires", ""), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"), - 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "tags.%", "1"), - ), - }, - }, - }) -} - -func testAccCheckAwsS3ObjectDataSourceExists(n string, obj *s3.GetObjectOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find S3 object data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("S3 object data source ID not set") - } - - s3conn := testAccProvider.Meta().(*AWSClient).s3conn - out, err := s3conn.GetObject( - &s3.GetObjectInput{ - Bucket: aws.String(rs.Primary.Attributes["bucket"]), - Key: aws.String(rs.Primary.Attributes["key"]), - }) - if err != nil { - return fmt.Errorf("Failed getting S3 Object from %s: %s", - rs.Primary.Attributes["bucket"]+"/"+rs.Primary.Attributes["key"], err) - } - - *obj = *out - - return nil - } -} - -func testAccAWSDataSourceS3ObjectConfig_basic(randInt int) (string, string) { - resources := fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" -} -resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "tf-testing-obj-%d" - content = "Hello World" -} -`, randInt, randInt) - - both := fmt.Sprintf(`%s -data "aws_s3_bucket_object" "obj" { - bucket = "tf-object-test-bucket-%d" - key = "tf-testing-obj-%d" -}`, resources, randInt, randInt) - - return resources, both -} - -func testAccAWSDataSourceS3ObjectConfig_readableBody(randInt int) (string, string) { - resources := fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" -} -resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "tf-testing-obj-%d-readable" - content = "yes" - content_type = "text/plain" -} -`, randInt, randInt) - - both := fmt.Sprintf(`%s -data "aws_s3_bucket_object" "obj" { - bucket = "tf-object-test-bucket-%d" - key = "tf-testing-obj-%d-readable" -}`, resources, 
randInt, randInt) - - return resources, both -} - -func testAccAWSDataSourceS3ObjectConfig_kmsEncrypted(randInt int) (string, string) { - resources := fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" -} -resource "aws_kms_key" "example" { - description = "TF Acceptance Test KMS key" - deletion_window_in_days = 7 -} -resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "tf-testing-obj-%d-encrypted" - content = "Keep Calm and Carry On" - content_type = "text/plain" - kms_key_id = "${aws_kms_key.example.arn}" -} -`, randInt, randInt) - - both := fmt.Sprintf(`%s -data "aws_s3_bucket_object" "obj" { - bucket = "tf-object-test-bucket-%d" - key = "tf-testing-obj-%d-encrypted" -}`, resources, randInt, randInt) - - return resources, both -} - -func testAccAWSDataSourceS3ObjectConfig_allParams(randInt int) (string, string) { - resources := fmt.Sprintf(` -resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" - versioning { - enabled = true - } -} - -resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "tf-testing-obj-%d-all-params" - content = < 1 { - return fmt.Errorf("multiple Security Groups matched; use additional constraints to reduce matches to a single Security Group") - } - - sg := resp.SecurityGroups[0] - - d.SetId(*sg.GroupId) - d.Set("id", sg.VpcId) - d.Set("name", sg.GroupName) - d.Set("description", sg.Description) - d.Set("vpc_id", sg.VpcId) - d.Set("tags", tagsToMap(sg.Tags)) - d.Set("arn", fmt.Sprintf("arn:%s:ec2:%s:%s/security-group/%s", - meta.(*AWSClient).partition, meta.(*AWSClient).region, *sg.OwnerId, *sg.GroupId)) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_security_group_test.go b/builtin/providers/aws/data_source_aws_security_group_test.go deleted file mode 100644 index 6e1f1664a..000000000 --- a/builtin/providers/aws/data_source_aws_security_group_test.go 
+++ /dev/null @@ -1,151 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "strings" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsSecurityGroup(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSecurityGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_id"), - testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_tag"), - testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_filter"), - testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_name"), - testAccDataSourceAwsSecurityGroupCheckDefault("data.aws_security_group.default_by_name"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSecurityGroupCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - SGRs, ok := s.RootModule().Resources["aws_security_group.test"] - if !ok { - return fmt.Errorf("can't find aws_security_group.test in state") - } - vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc.test in state") - } - attr := rs.Primary.Attributes - - if attr["id"] != SGRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - SGRs.Primary.Attributes["id"], - ) - } - - if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] { - return fmt.Errorf( - "vpc_id is %s; want %s", - attr["vpc_id"], - vpcRs.Primary.Attributes["id"], - ) - } - - if attr["tags.Name"] != "tf-acctest" { - return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) - } - - if 
!strings.Contains(attr["arn"], attr["id"]) { - return fmt.Errorf("bad ARN %s", attr["arn"]) - } - - return nil - } -} - -func testAccDataSourceAwsSecurityGroupCheckDefault(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc.test in state") - } - attr := rs.Primary.Attributes - - if attr["id"] != vpcRs.Primary.Attributes["default_security_group_id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - vpcRs.Primary.Attributes["default_security_group_id"], - ) - } - - return nil - } -} - -func testAccDataSourceAwsSecurityGroupConfig(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "eu-west-1" - } - resource "aws_vpc" "test" { - cidr_block = "172.16.0.0/16" - - tags { - Name = "terraform-testacc-subnet-data-source" - } - } - - resource "aws_security_group" "test" { - vpc_id = "${aws_vpc.test.id}" - name = "test-%d" - tags { - Name = "tf-acctest" - Seed = "%d" - } - } - - data "aws_security_group" "by_id" { - id = "${aws_security_group.test.id}" - } - - data "aws_security_group" "by_name" { - name = "${aws_security_group.test.name}" - } - - data "aws_security_group" "default_by_name" { - vpc_id = "${aws_vpc.test.id}" - name = "default" - } - - data "aws_security_group" "by_tag" { - tags { - Seed = "${aws_security_group.test.tags["Seed"]}" - } - } - - data "aws_security_group" "by_filter" { - filter { - name = "group-name" - values = ["${aws_security_group.test.name}"] - } - }`, rInt, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_sns.go b/builtin/providers/aws/data_source_aws_sns.go deleted file mode 100644 index c02ec328a..000000000 --- a/builtin/providers/aws/data_source_aws_sns.go +++ /dev/null @@ -1,71 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "time" - 
- "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsSnsTopic() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSnsTopicsRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validNamePattern := "^[A-Za-z0-9_-]+$" - validName, nameMatchErr := regexp.MatchString(validNamePattern, value) - if !validName || nameMatchErr != nil { - errors = append(errors, fmt.Errorf( - "%q must match regex '%v'", k, validNamePattern)) - } - return - }, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsSnsTopicsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).snsconn - params := &sns.ListTopicsInput{} - - target := d.Get("name") - var arns []string - err := conn.ListTopicsPages(params, func(page *sns.ListTopicsOutput, lastPage bool) bool { - for _, topic := range page.Topics { - topicPattern := fmt.Sprintf(".*:%v$", target) - matched, regexpErr := regexp.MatchString(topicPattern, *topic.TopicArn) - if matched && regexpErr == nil { - arns = append(arns, *topic.TopicArn) - } - } - - return true - }) - if err != nil { - return errwrap.Wrapf("Error describing topics: {{err}}", err) - } - - if len(arns) == 0 { - return fmt.Errorf("No topic with name %q found in this region.", target) - } - if len(arns) > 1 { - return fmt.Errorf("Multiple topics with name %q found in this region.", target) - } - - d.SetId(time.Now().UTC().String()) - d.Set("arn", arns[0]) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_sns_test.go b/builtin/providers/aws/data_source_aws_sns_test.go deleted file mode 100644 index f4742dcb7..000000000 --- a/builtin/providers/aws/data_source_aws_sns_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package aws - -import ( - "fmt" 
- "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsSnsTopic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsSnsTopicConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsSnsTopicCheck("data.aws_sns_topic.by_name"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSnsTopicCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - snsTopicRs, ok := s.RootModule().Resources["aws_sns_topic.tf_test"] - if !ok { - return fmt.Errorf("can't find aws_sns_topic.tf_test in state") - } - - attr := rs.Primary.Attributes - - if attr["name"] != snsTopicRs.Primary.Attributes["name"] { - return fmt.Errorf( - "name is %s; want %s", - attr["name"], - snsTopicRs.Primary.Attributes["name"], - ) - } - - return nil - } -} - -const testAccDataSourceAwsSnsTopicConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_sns_topic" "tf_wrong1" { - name = "wrong1" -} -resource "aws_sns_topic" "tf_test" { - name = "tf_test" -} -resource "aws_sns_topic" "tf_wrong2" { - name = "wrong2" -} - -data "aws_sns_topic" "by_name" { - name = "${aws_sns_topic.tf_test.name}" -} -` diff --git a/builtin/providers/aws/data_source_aws_ssm_parameter.go b/builtin/providers/aws/data_source_aws_ssm_parameter.go deleted file mode 100644 index 388366686..000000000 --- a/builtin/providers/aws/data_source_aws_ssm_parameter.go +++ /dev/null @@ -1,63 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func 
dataSourceAwsSsmParameter() *schema.Resource { - return &schema.Resource{ - Read: dataAwsSsmParameterRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "value": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - }, - } -} - -func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id()) - - paramInput := &ssm.GetParametersInput{ - Names: []*string{ - aws.String(d.Get("name").(string)), - }, - WithDecryption: aws.Bool(true), - } - - resp, err := ssmconn.GetParameters(paramInput) - - if err != nil { - return errwrap.Wrapf("[ERROR] Error describing SSM parameter: {{err}}", err) - } - - if len(resp.InvalidParameters) > 0 { - return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Get("name").(string)) - } - - param := resp.Parameters[0] - d.SetId(*param.Name) - d.Set("name", param.Name) - d.Set("type", param.Type) - d.Set("value", param.Value) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_ssm_parameter_test.go b/builtin/providers/aws/data_source_aws_ssm_parameter_test.go deleted file mode 100644 index 3b9c0d0f4..000000000 --- a/builtin/providers/aws/data_source_aws_ssm_parameter_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAwsSsmParameterDataSource_basic(t *testing.T) { - name := "test.parameter" - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckAwsSsmParameterDataSourceConfig(name), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_ssm_parameter.test", "name", name), - 
resource.TestCheckResourceAttr("data.aws_ssm_parameter.test", "type", "String"), - resource.TestCheckResourceAttr("data.aws_ssm_parameter.test", "value", "TestValue"), - ), - }, - }, - }) -} - -func testAccCheckAwsSsmParameterDataSourceConfig(name string) string { - return fmt.Sprintf(` -resource "aws_ssm_parameter" "test" { - name = "%s" - type = "String" - value = "TestValue" -} - -data "aws_ssm_parameter" "test" { - name = "${aws_ssm_parameter.test.name}" -} -`, name) -} diff --git a/builtin/providers/aws/data_source_aws_subnet.go b/builtin/providers/aws/data_source_aws_subnet.go deleted file mode 100644 index 188a09dd2..000000000 --- a/builtin/providers/aws/data_source_aws_subnet.go +++ /dev/null @@ -1,160 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsSubnet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSubnetRead, - - Schema: map[string]*schema.Schema{ - "availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default_for_az": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "filter": ec2CustomFiltersSchema(), - - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "state": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "tags": tagsSchemaComputed(), - - "vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "assign_ipv6_address_on_creation": { - Type: schema.TypeBool, - Computed: true, - }, - - "map_public_ip_on_launch": { - Type: schema.TypeBool, - Computed: true, - }, - - "ipv6_cidr_block_association_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - 
} -} - -func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeSubnetsInput{} - - if id := d.Get("id"); id != "" { - req.SubnetIds = []*string{aws.String(id.(string))} - } - - // We specify default_for_az as boolean, but EC2 filters want - // it to be serialized as a string. Note that setting it to - // "false" here does not actually filter by it *not* being - // the default, because Terraform can't distinguish between - // "false" and "not set". - defaultForAzStr := "" - if d.Get("default_for_az").(bool) { - defaultForAzStr = "true" - } - - filters := map[string]string{ - "availabilityZone": d.Get("availability_zone").(string), - "defaultForAz": defaultForAzStr, - "state": d.Get("state").(string), - "vpc-id": d.Get("vpc_id").(string), - } - - if v, ok := d.GetOk("cidr_block"); ok { - filters["cidrBlock"] = v.(string) - } - - if v, ok := d.GetOk("ipv6_cidr_block"); ok { - filters["ipv6-cidr-block-association.ipv6-cidr-block"] = v.(string) - } - - req.Filters = buildEC2AttributeFilterList(filters) - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(d.Get("tags").(map[string]interface{})), - )...) - req.Filters = append(req.Filters, buildEC2CustomFilterList( - d.Get("filter").(*schema.Set), - )...) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil - } - - log.Printf("[DEBUG] DescribeSubnets %s\n", req) - resp, err := conn.DescribeSubnets(req) - if err != nil { - return err - } - if resp == nil || len(resp.Subnets) == 0 { - return fmt.Errorf("no matching subnet found") - } - if len(resp.Subnets) > 1 { - return fmt.Errorf("multiple subnets matched; use additional constraints to reduce matches to a single subnet") - } - - subnet := resp.Subnets[0] - - d.SetId(*subnet.SubnetId) - d.Set("id", subnet.SubnetId) - d.Set("vpc_id", subnet.VpcId) - d.Set("availability_zone", subnet.AvailabilityZone) - d.Set("cidr_block", subnet.CidrBlock) - d.Set("default_for_az", subnet.DefaultForAz) - d.Set("state", subnet.State) - d.Set("tags", tagsToMap(subnet.Tags)) - d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) - d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) - - for _, a := range subnet.Ipv6CidrBlockAssociationSet { - if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once - d.Set("ipv6_cidr_block_association_id", a.AssociationId) - d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) - } - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_subnet_ids.go b/builtin/providers/aws/data_source_aws_subnet_ids.go deleted file mode 100644 index c1a495aa1..000000000 --- a/builtin/providers/aws/data_source_aws_subnet_ids.go +++ /dev/null @@ -1,68 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsSubnetIDs() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSubnetIDsRead, - Schema: map[string]*schema.Schema{ - - "tags": tagsSchemaComputed(), - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ids": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func 
dataSourceAwsSubnetIDsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeSubnetsInput{} - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "vpc-id": d.Get("vpc_id").(string), - }, - ) - - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(d.Get("tags").(map[string]interface{})), - )...) - - log.Printf("[DEBUG] DescribeSubnets %s\n", req) - resp, err := conn.DescribeSubnets(req) - if err != nil { - return err - } - - if resp == nil || len(resp.Subnets) == 0 { - return fmt.Errorf("no matching subnet found for vpc with id %s", d.Get("vpc_id").(string)) - } - - subnets := make([]string, 0) - - for _, subnet := range resp.Subnets { - subnets = append(subnets, *subnet.SubnetId) - } - - d.SetId(d.Get("vpc_id").(string)) - d.Set("ids", subnets) - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_subnet_ids_test.go b/builtin/providers/aws/data_source_aws_subnet_ids_test.go deleted file mode 100644 index 5d752a25e..000000000 --- a/builtin/providers/aws/data_source_aws_subnet_ids_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAwsSubnetIDs(t *testing.T) { - rInt := acctest.RandIntRange(0, 256) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSubnetIDsConfig(rInt), - }, - { - Config: testAccDataSourceAwsSubnetIDsConfigWithDataSource(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_subnet_ids.selected", "ids.#", "3"), - resource.TestCheckResourceAttr("data.aws_subnet_ids.private", "ids.#", "2"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSubnetIDsConfigWithDataSource(rInt int) string 
{ - return fmt.Sprintf( - ` - resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - - tags { - Name = "terraform-testacc-subnet-ids-data-source" - } - } - - resource "aws_subnet" "test_public_a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "terraform-testacc-subnet-ids-data-source-public-a" - Tier = "Public" - } - } - - resource "aws_subnet" "test_private_a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.125.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "terraform-testacc-subnet-ids-data-source-private-a" - Tier = "Private" - } - } - - resource "aws_subnet" "test_private_b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.126.0/24" - availability_zone = "us-west-2b" - - tags { - Name = "terraform-testacc-subnet-ids-data-source-private-b" - Tier = "Private" - } - } - - data "aws_subnet_ids" "selected" { - vpc_id = "${aws_vpc.test.id}" - } - - data "aws_subnet_ids" "private" { - vpc_id = "${aws_vpc.test.id}" - tags { - Tier = "Private" - } - } - `, rInt, rInt, rInt, rInt) -} - -func testAccDataSourceAwsSubnetIDsConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - - tags { - Name = "terraform-testacc-subnet-ids-data-source" - } - } - - resource "aws_subnet" "test_public_a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "terraform-testacc-subnet-ids-data-source-public-a" - Tier = "Public" - } - } - - resource "aws_subnet" "test_private_a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.125.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "terraform-testacc-subnet-ids-data-source-private-a" - Tier = "Private" - } - } - - resource "aws_subnet" "test_private_b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.126.0/24" - availability_zone = "us-west-2b" - - tags { - Name = 
"terraform-testacc-subnet-ids-data-source-private-b" - Tier = "Private" - } - } - `, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_subnet_test.go b/builtin/providers/aws/data_source_aws_subnet_test.go deleted file mode 100644 index 3c9c5ed6f..000000000 --- a/builtin/providers/aws/data_source_aws_subnet_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsSubnet(t *testing.T) { - rInt := acctest.RandIntRange(0, 256) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSubnetConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_id", rInt), - testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_cidr", rInt), - testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_tag", rInt), - testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_vpc", rInt), - testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_filter", rInt), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsSubnetIpv6ByIpv6Filter(t *testing.T) { - rInt := acctest.RandIntRange(0, 256) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSubnetConfigIpv6(rInt), - }, - { - Config: testAccDataSourceAwsSubnetConfigIpv6WithDataSourceFilter(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet( - "data.aws_subnet.by_ipv6_cidr", "ipv6_cidr_block_association_id"), - resource.TestCheckResourceAttrSet( - "data.aws_subnet.by_ipv6_cidr", "ipv6_cidr_block"), - ), - }, - }, - }) -} - -func 
TestAccDataSourceAwsSubnetIpv6ByIpv6CidrBlock(t *testing.T) { - rInt := acctest.RandIntRange(0, 256) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSubnetConfigIpv6(rInt), - }, - { - Config: testAccDataSourceAwsSubnetConfigIpv6WithDataSourceIpv6CidrBlock(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet( - "data.aws_subnet.by_ipv6_cidr", "ipv6_cidr_block_association_id"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSubnetCheck(name string, rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc.test in state") - } - subnetRs, ok := s.RootModule().Resources["aws_subnet.test"] - if !ok { - return fmt.Errorf("can't find aws_subnet.test in state") - } - - attr := rs.Primary.Attributes - - if attr["id"] != subnetRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - subnetRs.Primary.Attributes["id"], - ) - } - - if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] { - return fmt.Errorf( - "vpc_id is %s; want %s", - attr["vpc_id"], - vpcRs.Primary.Attributes["id"], - ) - } - - if attr["cidr_block"] != fmt.Sprintf("172.%d.123.0/24", rInt) { - return fmt.Errorf("bad cidr_block %s", attr["cidr_block"]) - } - if attr["availability_zone"] != "us-west-2a" { - return fmt.Errorf("bad availability_zone %s", attr["availability_zone"]) - } - if attr["tags.Name"] != fmt.Sprintf("terraform-testacc-subnet-data-source-%d", rInt) { - return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) - } - - return nil - } -} - -func testAccDataSourceAwsSubnetConfig(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = 
"us-west-2" - } - - resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - - tags { - Name = "terraform-testacc-subnet-data-source" - } - } - - resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "terraform-testacc-subnet-data-source-%d" - } - } - - - data "aws_subnet" "by_id" { - id = "${aws_subnet.test.id}" - } - - data "aws_subnet" "by_cidr" { - cidr_block = "${aws_subnet.test.cidr_block}" - } - - data "aws_subnet" "by_tag" { - tags { - Name = "${aws_subnet.test.tags["Name"]}" - } - } - - data "aws_subnet" "by_vpc" { - vpc_id = "${aws_subnet.test.vpc_id}" - } - - data "aws_subnet" "by_filter" { - filter { - name = "vpc-id" - values = ["${aws_subnet.test.vpc_id}"] - } - } - `, rInt, rInt, rInt) -} - -func testAccDataSourceAwsSubnetConfigIpv6(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "terraform-testacc-subnet-data-source-ipv6" - } -} - -resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" - - tags { - Name = "terraform-testacc-subnet-data-sourceipv6-%d" - } -} -`, rInt, rInt, rInt) -} - -func testAccDataSourceAwsSubnetConfigIpv6WithDataSourceFilter(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "terraform-testacc-subnet-data-source-ipv6" - } -} - -resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" - - tags { - Name = "terraform-testacc-subnet-data-sourceipv6-%d" - } -} - -data "aws_subnet" "by_ipv6_cidr" { - filter { - name = 
"ipv6-cidr-block-association.ipv6-cidr-block" - values = ["${aws_subnet.test.ipv6_cidr_block}"] - } -} -`, rInt, rInt, rInt) -} - -func testAccDataSourceAwsSubnetConfigIpv6WithDataSourceIpv6CidrBlock(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "172.%d.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "terraform-testacc-subnet-data-source-ipv6" - } -} - -resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "172.%d.123.0/24" - availability_zone = "us-west-2a" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" - - tags { - Name = "terraform-testacc-subnet-data-sourceipv6-%d" - } -} - -data "aws_subnet" "by_ipv6_cidr" { - ipv6_cidr_block = "${aws_subnet.test.ipv6_cidr_block}" -} -`, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/data_source_aws_vpc.go b/builtin/providers/aws/data_source_aws_vpc.go deleted file mode 100644 index 6e09e971d..000000000 --- a/builtin/providers/aws/data_source_aws_vpc.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsVpc() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsVpcRead, - - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "dhcp_options_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "filter": ec2CustomFiltersSchema(), - - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "instance_tenancy": { - Type: schema.TypeString, - Computed: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Computed: true, - }, - - "ipv6_association_id": { - Type: schema.TypeString, - Computed: true, - }, - - "state": { - 
Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeVpcsInput{} - - if id := d.Get("id"); id != "" { - req.VpcIds = []*string{aws.String(id.(string))} - } - - // We specify "default" as boolean, but EC2 filters want - // it to be serialized as a string. Note that setting it to - // "false" here does not actually filter by it *not* being - // the default, because Terraform can't distinguish between - // "false" and "not set". - isDefaultStr := "" - if d.Get("default").(bool) { - isDefaultStr = "true" - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "cidr": d.Get("cidr_block").(string), - "dhcp-options-id": d.Get("dhcp_options_id").(string), - "isDefault": isDefaultStr, - "state": d.Get("state").(string), - }, - ) - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(d.Get("tags").(map[string]interface{})), - )...) - req.Filters = append(req.Filters, buildEC2CustomFilterList( - d.Get("filter").(*schema.Set), - )...) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil - } - - log.Printf("[DEBUG] DescribeVpcs %s\n", req) - resp, err := conn.DescribeVpcs(req) - if err != nil { - return err - } - if resp == nil || len(resp.Vpcs) == 0 { - return fmt.Errorf("no matching VPC found") - } - if len(resp.Vpcs) > 1 { - return fmt.Errorf("multiple VPCs matched; use additional constraints to reduce matches to a single VPC") - } - - vpc := resp.Vpcs[0] - - d.SetId(*vpc.VpcId) - d.Set("id", vpc.VpcId) - d.Set("cidr_block", vpc.CidrBlock) - d.Set("dhcp_options_id", vpc.DhcpOptionsId) - d.Set("instance_tenancy", vpc.InstanceTenancy) - d.Set("default", vpc.IsDefault) - d.Set("state", vpc.State) - d.Set("tags", tagsToMap(vpc.Tags)) - - if vpc.Ipv6CidrBlockAssociationSet != nil { - d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId) - d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock) - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_vpc_endpoint.go b/builtin/providers/aws/data_source_aws_vpc_endpoint.go deleted file mode 100644 index c15933129..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_endpoint.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsVpcEndpoint() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsVpcEndpointRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "service_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "policy": { - Type: schema.TypeString, - Computed: true, - }, - "route_table_ids": &schema.Schema{ - Type: schema.TypeSet, - 
Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Reading VPC Endpoints.") - - req := &ec2.DescribeVpcEndpointsInput{} - - if id, ok := d.GetOk("id"); ok { - req.VpcEndpointIds = aws.StringSlice([]string{id.(string)}) - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "vpc-endpoint-state": d.Get("state").(string), - "vpc-id": d.Get("vpc_id").(string), - "service-name": d.Get("service_name").(string), - }, - ) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. - req.Filters = nil - } - - resp, err := conn.DescribeVpcEndpoints(req) - if err != nil { - return err - } - if resp == nil || len(resp.VpcEndpoints) == 0 { - return fmt.Errorf("no matching VPC endpoint found") - } - if len(resp.VpcEndpoints) > 1 { - return fmt.Errorf("multiple VPC endpoints matched; use additional constraints to reduce matches to a single VPC endpoint") - } - - vpce := resp.VpcEndpoints[0] - policy, err := normalizeJsonString(*vpce.PolicyDocument) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - - d.SetId(aws.StringValue(vpce.VpcEndpointId)) - d.Set("id", vpce.VpcEndpointId) - d.Set("state", vpce.State) - d.Set("vpc_id", vpce.VpcId) - d.Set("service_name", vpce.ServiceName) - d.Set("policy", policy) - if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_vpc_endpoint_service.go b/builtin/providers/aws/data_source_aws_vpc_endpoint_service.go deleted file mode 100644 index 8860b39a7..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_endpoint_service.go +++ /dev/null @@ -1,56 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - "strings" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsVpcEndpointService() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsVpcEndpointServiceRead, - - Schema: map[string]*schema.Schema{ - "service": { - Type: schema.TypeString, - Required: true, - }, - "service_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - service := d.Get("service").(string) - - log.Printf("[DEBUG] Reading VPC Endpoint Services.") - - request := &ec2.DescribeVpcEndpointServicesInput{} - - resp, err := conn.DescribeVpcEndpointServices(request) - if err != nil { - return fmt.Errorf("Error fetching VPC Endpoint Services: %s", err) - } - - names := aws.StringValueSlice(resp.ServiceNames) - for _, name := range names { - if strings.HasSuffix(name, "."+service) { - d.SetId(strconv.Itoa(hashcode.String(name))) - d.Set("service_name", name) - return nil - } - } - - return fmt.Errorf("VPC Endpoint Service (%s) not found", service) -} diff --git a/builtin/providers/aws/data_source_aws_vpc_endpoint_service_test.go b/builtin/providers/aws/data_source_aws_vpc_endpoint_service_test.go deleted file mode 100644 index ce82b8e2d..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_endpoint_service_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsVpcEndpointService(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsVpcEndpointServiceConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcEndpointServiceCheck("data.aws_vpc_endpoint_service.s3"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsVpcEndpointServiceCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - name := attr["service_name"] - if name != "com.amazonaws.us-west-2.s3" { - return fmt.Errorf("bad service name %s", name) - } - - return nil - } -} - -const testAccDataSourceAwsVpcEndpointServiceConfig = ` -provider "aws" { - region = "us-west-2" -} - -data "aws_vpc_endpoint_service" "s3" { - service = "s3" -} -` diff --git a/builtin/providers/aws/data_source_aws_vpc_endpoint_test.go b/builtin/providers/aws/data_source_aws_vpc_endpoint_test.go deleted file mode 100644 index e73d0be56..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_endpoint_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAwsVpcEndpoint_' -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsVpcEndpoint_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsVpcEndpointConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcEndpointCheckExists("data.aws_vpc_endpoint.s3"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccDataSourceAwsVpcEndpoint_withRouteTable(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccDataSourceAwsVpcEndpointWithRouteTableConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcEndpointCheckExists("data.aws_vpc_endpoint.s3"), - resource.TestCheckResourceAttr( - "data.aws_vpc_endpoint.s3", "route_table_ids.#", "1"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccDataSourceAwsVpcEndpointCheckExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - vpceRs, ok := s.RootModule().Resources["aws_vpc_endpoint.s3"] - if !ok { - return fmt.Errorf("can't find aws_vpc_endpoint.s3 in state") - } - - attr := rs.Primary.Attributes - - if attr["id"] != vpceRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - vpceRs.Primary.Attributes["id"], - ) - } - - return nil - } -} - -const testAccDataSourceAwsVpcEndpointConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "terraform-testacc-vpc-endpoint-data-source-foo" - } -} - -resource "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" -} - -data "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" - state = "available" - - depends_on = ["aws_vpc_endpoint.s3"] -} -` - -const testAccDataSourceAwsVpcEndpointWithRouteTableConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "terraform-testacc-vpc-endpoint-data-source-foo" - } -} - -resource "aws_route_table" "rt" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = ["${aws_route_table.rt.id}"] -} - -data "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.foo.id}" - 
service_name = "com.amazonaws.us-west-2.s3" - state = "available" - - depends_on = ["aws_vpc_endpoint.s3"] -} -` diff --git a/builtin/providers/aws/data_source_aws_vpc_peering_connection.go b/builtin/providers/aws/data_source_aws_vpc_peering_connection.go deleted file mode 100644 index 8d800751f..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_peering_connection.go +++ /dev/null @@ -1,143 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsVpcPeeringConnection() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsVpcPeeringConnectionRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "owner_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "peer_vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "peer_owner_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "peer_cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "accepter": { - Type: schema.TypeMap, - Computed: true, - Elem: schema.TypeBool, - }, - "requester": { - Type: schema.TypeMap, - Computed: true, - Elem: schema.TypeBool, - }, - "filter": ec2CustomFiltersSchema(), - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Reading VPC Peering Connections.") - - req := &ec2.DescribeVpcPeeringConnectionsInput{} - - if id, ok := d.GetOk("id"); ok { - 
req.VpcPeeringConnectionIds = aws.StringSlice([]string{id.(string)}) - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "status-code": d.Get("status").(string), - "requester-vpc-info.vpc-id": d.Get("vpc_id").(string), - "requester-vpc-info.owner-id": d.Get("owner_id").(string), - "requester-vpc-info.cidr-block": d.Get("cidr_block").(string), - "accepter-vpc-info.vpc-id": d.Get("peer_vpc_id").(string), - "accepter-vpc-info.owner-id": d.Get("peer_owner_id").(string), - "accepter-vpc-info.cidr-block": d.Get("peer_cidr_block").(string), - }, - ) - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(d.Get("tags").(map[string]interface{})), - )...) - req.Filters = append(req.Filters, buildEC2CustomFilterList( - d.Get("filter").(*schema.Set), - )...) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. - req.Filters = nil - } - - resp, err := conn.DescribeVpcPeeringConnections(req) - if err != nil { - return err - } - if resp == nil || len(resp.VpcPeeringConnections) == 0 { - return fmt.Errorf("no matching VPC peering connection found") - } - if len(resp.VpcPeeringConnections) > 1 { - return fmt.Errorf("multiple VPC peering connections matched; use additional constraints to reduce matches to a single VPC peering connection") - } - - pcx := resp.VpcPeeringConnections[0] - - d.SetId(aws.StringValue(pcx.VpcPeeringConnectionId)) - d.Set("id", pcx.VpcPeeringConnectionId) - d.Set("status", pcx.Status.Code) - d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId) - d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId) - d.Set("cidr_block", pcx.RequesterVpcInfo.CidrBlock) - d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) - d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId) - d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock) - d.Set("tags", tagsToMap(pcx.Tags)) - - if pcx.AccepterVpcInfo.PeeringOptions != nil { - if err := d.Set("accepter", flattenPeeringOptions(pcx.AccepterVpcInfo.PeeringOptions)[0]); err 
!= nil { - return err - } - } - - if pcx.RequesterVpcInfo.PeeringOptions != nil { - if err := d.Set("requester", flattenPeeringOptions(pcx.RequesterVpcInfo.PeeringOptions)[0]); err != nil { - return err - } - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_vpc_peering_connection_test.go b/builtin/providers/aws/data_source_aws_vpc_peering_connection_test.go deleted file mode 100644 index 366921a72..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_peering_connection_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAwsVpcPeeringConnection_' -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsVpcPeeringConnectionConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_vpc_id"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_vpc_id"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_owner_ids"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccDataSourceAwsVpcPeeringConnectionCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok 
{ - return fmt.Errorf("root module has no resource called %s", name) - } - - pcxRs, ok := s.RootModule().Resources["aws_vpc_peering_connection.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc_peering_connection.test in state") - } - - attr := rs.Primary.Attributes - - if attr["id"] != pcxRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - pcxRs.Primary.Attributes["id"], - ) - } - - return nil - } -} - -const testAccDataSourceAwsVpcPeeringConnectionConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" - - tags { - Name = "terraform-testacc-vpc-peering-connection-data-source-bar" - } -} - -resource "aws_vpc_peering_connection" "test" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" - auto_accept = true - - tags { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo-to-bar" - } -} - -data "aws_caller_identity" "current" {} - -data "aws_vpc_peering_connection" "test_by_id" { - id = "${aws_vpc_peering_connection.test.id}" -} - -data "aws_vpc_peering_connection" "test_by_requester_vpc_id" { - vpc_id = "${aws_vpc.foo.id}" - - depends_on = ["aws_vpc_peering_connection.test"] -} - -data "aws_vpc_peering_connection" "test_by_accepter_vpc_id" { - peer_vpc_id = "${aws_vpc.bar.id}" - - depends_on = ["aws_vpc_peering_connection.test"] -} - -data "aws_vpc_peering_connection" "test_by_requester_cidr_block" { - cidr_block = "10.1.0.0/16" - status = "active" - - depends_on = ["aws_vpc_peering_connection.test"] -} - -data "aws_vpc_peering_connection" "test_by_accepter_cidr_block" { - peer_cidr_block = "10.2.0.0/16" - status = "active" - - depends_on = ["aws_vpc_peering_connection.test"] -} - -data "aws_vpc_peering_connection" "test_by_owner_ids" { - owner_id = 
"${data.aws_caller_identity.current.account_id}" - peer_owner_id = "${data.aws_caller_identity.current.account_id}" - status = "active" - - depends_on = ["aws_vpc_peering_connection.test"] -} -` diff --git a/builtin/providers/aws/data_source_aws_vpc_test.go b/builtin/providers/aws/data_source_aws_vpc_test.go deleted file mode 100644 index e8344db98..000000000 --- a/builtin/providers/aws/data_source_aws_vpc_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package aws - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceAwsVpc_basic(t *testing.T) { - rand.Seed(time.Now().UTC().UnixNano()) - rInt := rand.Intn(16) - cidr := fmt.Sprintf("172.%d.0.0/16", rInt) - tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsVpcConfig(cidr, tag), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag), - testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr", cidr, tag), - testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag", cidr, tag), - testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter", cidr, tag), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) { - rand.Seed(time.Now().UTC().UnixNano()) - rInt := rand.Intn(16) - cidr := fmt.Sprintf("172.%d.0.0/16", rInt) - tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsVpcConfigIpv6(cidr, tag), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag), - resource.TestCheckResourceAttrSet( - "data.aws_vpc.by_id", 
"ipv6_association_id"), - resource.TestCheckResourceAttrSet( - "data.aws_vpc.by_id", "ipv6_cidr_block"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsVpcCheck(name, cidr, tag string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc.test in state") - } - - attr := rs.Primary.Attributes - - if attr["id"] != vpcRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - vpcRs.Primary.Attributes["id"], - ) - } - - if attr["cidr_block"] != cidr { - return fmt.Errorf("bad cidr_block %s, expected: %s", attr["cidr_block"], cidr) - } - if attr["tags.Name"] != tag { - return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) - } - - return nil - } -} - -func testAccDataSourceAwsVpcConfigIpv6(cidr, tag string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "test" { - cidr_block = "%s" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "%s" - } -} - -data "aws_vpc" "by_id" { - id = "${aws_vpc.test.id}" -}`, cidr, tag) -} - -func testAccDataSourceAwsVpcConfig(cidr, tag string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "test" { - cidr_block = "%s" - - tags { - Name = "%s" - } -} - -data "aws_vpc" "by_id" { - id = "${aws_vpc.test.id}" -} - -data "aws_vpc" "by_cidr" { - cidr_block = "${aws_vpc.test.cidr_block}" -} - -data "aws_vpc" "by_tag" { - tags { - Name = "${aws_vpc.test.tags["Name"]}" - } -} - -data "aws_vpc" "by_filter" { - filter { - name = "cidr" - values = ["${aws_vpc.test.cidr_block}"] - } -}`, cidr, tag) -} diff --git a/builtin/providers/aws/data_source_aws_vpn_gateway.go b/builtin/providers/aws/data_source_aws_vpn_gateway.go deleted file mode 100644 index 
5d088e548..000000000 --- a/builtin/providers/aws/data_source_aws_vpn_gateway.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceAwsVpnGateway() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsVpnGatewayRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "attached_vpc_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "filter": ec2CustomFiltersSchema(), - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Reading VPN Gateways.") - - req := &ec2.DescribeVpnGatewaysInput{} - - if id, ok := d.GetOk("id"); ok { - req.VpnGatewayIds = aws.StringSlice([]string{id.(string)}) - } - - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "state": d.Get("state").(string), - "availability-zone": d.Get("availability_zone").(string), - }, - ) - if id, ok := d.GetOk("attached_vpc_id"); ok { - req.Filters = append(req.Filters, buildEC2AttributeFilterList( - map[string]string{ - "attachment.state": "attached", - "attachment.vpc-id": id.(string), - }, - )...) - } - req.Filters = append(req.Filters, buildEC2TagFilterList( - tagsFromMap(d.Get("tags").(map[string]interface{})), - )...) - req.Filters = append(req.Filters, buildEC2CustomFilterList( - d.Get("filter").(*schema.Set), - )...) - if len(req.Filters) == 0 { - // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil - } - - resp, err := conn.DescribeVpnGateways(req) - if err != nil { - return err - } - if resp == nil || len(resp.VpnGateways) == 0 { - return fmt.Errorf("no matching VPN gateway found: %#v", req) - } - if len(resp.VpnGateways) > 1 { - return fmt.Errorf("multiple VPN gateways matched; use additional constraints to reduce matches to a single VPN gateway") - } - - vgw := resp.VpnGateways[0] - - d.SetId(aws.StringValue(vgw.VpnGatewayId)) - d.Set("state", vgw.State) - d.Set("availability_zone", vgw.AvailabilityZone) - d.Set("tags", tagsToMap(vgw.Tags)) - - for _, attachment := range vgw.VpcAttachments { - if *attachment.State == "attached" { - d.Set("attached_vpc_id", attachment.VpcId) - break - } - } - - return nil -} diff --git a/builtin/providers/aws/data_source_aws_vpn_gateway_test.go b/builtin/providers/aws/data_source_aws_vpn_gateway_test.go deleted file mode 100644 index e082e844e..000000000 --- a/builtin/providers/aws/data_source_aws_vpn_gateway_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccDataSourceAwsVpnGateway_' -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAwsVpnGateway_unattached(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsVpnGatewayUnattachedConfig(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair( - "data.aws_vpn_gateway.test_by_id", "id", - "aws_vpn_gateway.unattached", "id"), - resource.TestCheckResourceAttrPair( - "data.aws_vpn_gateway.test_by_tags", "id", - "aws_vpn_gateway.unattached", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpn_gateway.test_by_id", "state"), - 
resource.TestCheckResourceAttr("data.aws_vpn_gateway.test_by_tags", "tags.%", "3"), - resource.TestCheckNoResourceAttr("data.aws_vpn_gateway.test_by_id", "attached_vpc_id"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsVpnGateway_attached(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceAwsVpnGatewayAttachedConfig(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair( - "data.aws_vpn_gateway.test_by_attached_vpc_id", "id", - "aws_vpn_gateway.attached", "id"), - resource.TestCheckResourceAttrPair( - "data.aws_vpn_gateway.test_by_attached_vpc_id", "attached_vpc_id", - "aws_vpc.foo", "id"), - resource.TestMatchResourceAttr("data.aws_vpn_gateway.test_by_attached_vpc_id", "state", regexp.MustCompile("(?i)available")), - ), - }, - }, - }) -} - -func testAccDataSourceAwsVpnGatewayUnattachedConfig(rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpn_gateway" "unattached" { - tags { - Name = "terraform-testacc-vpn-gateway-data-source-unattached-%d" - ABC = "testacc-%d" - XYZ = "testacc-%d" - } -} - -data "aws_vpn_gateway" "test_by_id" { - id = "${aws_vpn_gateway.unattached.id}" -} - -data "aws_vpn_gateway" "test_by_tags" { - tags = "${aws_vpn_gateway.unattached.tags}" -} -`, rInt, rInt+1, rInt-1) -} - -func testAccDataSourceAwsVpnGatewayAttachedConfig(rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "terraform-testacc-vpn-gateway-data-source-foo-%d" - } -} - -resource "aws_vpn_gateway" "attached" { - tags { - Name = "terraform-testacc-vpn-gateway-data-source-attached-%d" - } -} - -resource "aws_vpn_gateway_attachment" "vpn_attachment" { - vpc_id = "${aws_vpc.foo.id}" - vpn_gateway_id = 
"${aws_vpn_gateway.attached.id}" -} - -data "aws_vpn_gateway" "test_by_attached_vpc_id" { - attached_vpc_id = "${aws_vpn_gateway_attachment.vpn_attachment.vpc_id}" -} -`, rInt, rInt) -} diff --git a/builtin/providers/aws/diff_aws_policy_test.go b/builtin/providers/aws/diff_aws_policy_test.go deleted file mode 100644 index ae06c26aa..000000000 --- a/builtin/providers/aws/diff_aws_policy_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/awspolicyequivalence" -) - -func testAccCheckAwsPolicyMatch(resource, attr, expectedPolicy string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resource] - if !ok { - return fmt.Errorf("Not found: %s", resource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - given, ok := rs.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute %q not found for %q", attr, resource) - } - - areEquivalent, err := awspolicy.PoliciesAreEquivalent(given, expectedPolicy) - if err != nil { - return fmt.Errorf("Comparing AWS Policies failed: %s", err) - } - - if !areEquivalent { - return fmt.Errorf("AWS policies differ.\nGiven: %s\nExpected: %s", given, expectedPolicy) - } - - return nil - } -} diff --git a/builtin/providers/aws/diff_suppress_funcs.go b/builtin/providers/aws/diff_suppress_funcs.go deleted file mode 100644 index e8c58b813..000000000 --- a/builtin/providers/aws/diff_suppress_funcs.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "bytes" - "encoding/json" - "log" - "net/url" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/awspolicyequivalence" -) - -func suppressEquivalentAwsPolicyDiffs(k, old, new string, d *schema.ResourceData) bool { - equivalent, err := awspolicy.PoliciesAreEquivalent(old, new) - if err != nil { - return false - } - - return equivalent -} - 
-// Suppresses minor version changes to the db_instance engine_version attribute -func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData) bool { - // First check if the old/new values are nil. - // If both are nil, we have no state to compare the values with, so register a diff. - // This populates the attribute field during a plan/apply with fresh state, allowing - // the attribute to still be used in future resources. - // See https://github.com/hashicorp/terraform/issues/11881 - if old == "" && new == "" { - return false - } - - if v, ok := d.GetOk("auto_minor_version_upgrade"); ok { - if v.(bool) { - // If we're set to auto upgrade minor versions - // ignore a minor version diff between versions - if strings.HasPrefix(old, new) { - log.Printf("[DEBUG] Ignoring minor version diff") - return true - } - } - } - - // Throw a diff by default - return false -} - -func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool { - ob := bytes.NewBufferString("") - if err := json.Compact(ob, []byte(old)); err != nil { - return false - } - - nb := bytes.NewBufferString("") - if err := json.Compact(nb, []byte(new)); err != nil { - return false - } - - return jsonBytesEqual(ob.Bytes(), nb.Bytes()) -} - -func suppressOpenIdURL(k, old, new string, d *schema.ResourceData) bool { - oldUrl, err := url.Parse(old) - if err != nil { - return false - } - - newUrl, err := url.Parse(new) - if err != nil { - return false - } - - oldUrl.Scheme = "https" - - return oldUrl.String() == newUrl.String() -} diff --git a/builtin/providers/aws/diff_suppress_funcs_test.go b/builtin/providers/aws/diff_suppress_funcs_test.go deleted file mode 100644 index 0727a1042..000000000 --- a/builtin/providers/aws/diff_suppress_funcs_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" -) - -func TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace(t *testing.T) { - d := 
new(schema.ResourceData) - - noWhitespace := `{"test":"test"}` - whitespace := ` -{ - "test": "test" -}` - - if !suppressEquivalentJsonDiffs("", noWhitespace, whitespace, d) { - t.Errorf("Expected suppressEquivalentJsonDiffs to return true for %s == %s", noWhitespace, whitespace) - } - - noWhitespaceDiff := `{"test":"test"}` - whitespaceDiff := ` -{ - "test": "tested" -}` - - if suppressEquivalentJsonDiffs("", noWhitespaceDiff, whitespaceDiff, d) { - t.Errorf("Expected suppressEquivalentJsonDiffs to return false for %s == %s", noWhitespaceDiff, whitespaceDiff) - } -} diff --git a/builtin/providers/aws/ec2_filters.go b/builtin/providers/aws/ec2_filters.go deleted file mode 100644 index 743d28224..000000000 --- a/builtin/providers/aws/ec2_filters.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "fmt" - "sort" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/schema" -) - -// buildEC2AttributeFilterList takes a flat map of scalar attributes (most -// likely values extracted from a *schema.ResourceData on an EC2-querying -// data source) and produces a []*ec2.Filter representing an exact match -// for each of the given non-empty attributes. -// -// The keys of the given attributes map are the attribute names expected -// by the EC2 API, which are usually either in camelcase or with dash-separated -// words. We conventionally map these to underscore-separated identifiers -// with the same words when presenting these as data source query attributes -// in Terraform. -// -// It's the callers responsibility to transform any non-string values into -// the appropriate string serialization required by the AWS API when -// encoding the given filter. Any attributes given with empty string values -// are ignored, assuming that the user wishes to leave that attribute -// unconstrained while filtering. 
-// -// The purpose of this function is to create values to pass in -// for the "Filters" attribute on most of the "Describe..." API functions in -// the EC2 API, to aid in the implementation of Terraform data sources that -// retrieve data about EC2 objects. -func buildEC2AttributeFilterList(attrs map[string]string) []*ec2.Filter { - var filters []*ec2.Filter - - // sort the filters by name to make the output deterministic - var names []string - for filterName := range attrs { - names = append(names, filterName) - } - - sort.Strings(names) - - for _, filterName := range names { - value := attrs[filterName] - if value == "" { - continue - } - - filters = append(filters, &ec2.Filter{ - Name: aws.String(filterName), - Values: []*string{aws.String(value)}, - }) - } - - return filters -} - -// buildEC2TagFilterList takes a []*ec2.Tag and produces a []*ec2.Filter that -// represents exact matches for all of the tag key/value pairs given in -// the tag set. -// -// The purpose of this function is to create values to pass in for -// the "Filters" attribute on most of the "Describe..." API functions -// in the EC2 API, to implement filtering by tag values e.g. in Terraform -// data sources that retrieve data about EC2 objects. -// -// It is conventional for an EC2 data source to include an attribute called -// "tags" which conforms to the schema returned by the tagsSchema() function. -// The value of this can then be converted to a tags slice using tagsFromMap, -// and the result finally passed in to this function. 
-// -// In Terraform configuration this would then look like this, to constrain -// results by name: -// -// tags { -// Name = "my-awesome-subnet" -// } -func buildEC2TagFilterList(tags []*ec2.Tag) []*ec2.Filter { - filters := make([]*ec2.Filter, len(tags)) - - for i, tag := range tags { - filters[i] = &ec2.Filter{ - Name: aws.String(fmt.Sprintf("tag:%s", *tag.Key)), - Values: []*string{tag.Value}, - } - } - - return filters -} - -// ec2CustomFiltersSchema returns a *schema.Schema that represents -// a set of custom filtering criteria that a user can specify as input -// to a data source that wraps one of the many "Describe..." API calls -// in the EC2 API. -// -// It is conventional for an attribute of this type to be included -// as a top-level attribute called "filter". This is the "catch all" for -// filter combinations that are not possible to express using scalar -// attributes or tags. In Terraform configuration, the custom filter blocks -// then look like this: -// -// filter { -// name = "availabilityZone" -// values = ["us-west-2a", "us-west-2b"] -// } -func ec2CustomFiltersSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - } -} - -// buildEC2CustomFilterList takes the set value extracted from a schema -// attribute conforming to the schema returned by ec2CustomFiltersSchema, -// and transforms it into a []*ec2.Filter representing the same filter -// expressions which is ready to pass into the "Filters" attribute on most -// of the "Describe..." functions in the EC2 API. -// -// This function is intended only to be used in conjunction with -// ec2CustomFitlersSchema. 
See the docs on that function for more details -// on the configuration pattern this is intended to support. -func buildEC2CustomFilterList(filterSet *schema.Set) []*ec2.Filter { - if filterSet == nil { - return []*ec2.Filter{} - } - - customFilters := filterSet.List() - filters := make([]*ec2.Filter, len(customFilters)) - - for filterIdx, customFilterI := range customFilters { - customFilterMapI := customFilterI.(map[string]interface{}) - name := customFilterMapI["name"].(string) - valuesI := customFilterMapI["values"].(*schema.Set).List() - values := make([]*string, len(valuesI)) - for valueIdx, valueI := range valuesI { - values[valueIdx] = aws.String(valueI.(string)) - } - - filters[filterIdx] = &ec2.Filter{ - Name: &name, - Values: values, - } - } - - return filters -} diff --git a/builtin/providers/aws/ec2_filters_test.go b/builtin/providers/aws/ec2_filters_test.go deleted file mode 100644 index 267faa957..000000000 --- a/builtin/providers/aws/ec2_filters_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/schema" -) - -func TestBuildEC2AttributeFilterList(t *testing.T) { - type TestCase struct { - Attrs map[string]string - Expected []*ec2.Filter - } - testCases := []TestCase{ - { - map[string]string{ - "foo": "bar", - "baz": "boo", - }, - []*ec2.Filter{ - { - Name: aws.String("baz"), - Values: []*string{aws.String("boo")}, - }, - { - Name: aws.String("foo"), - Values: []*string{aws.String("bar")}, - }, - }, - }, - { - map[string]string{ - "foo": "bar", - "baz": "", - }, - []*ec2.Filter{ - { - Name: aws.String("foo"), - Values: []*string{aws.String("bar")}, - }, - }, - }, - } - - for i, testCase := range testCases { - result := buildEC2AttributeFilterList(testCase.Attrs) - - if !reflect.DeepEqual(result, testCase.Expected) { - t.Errorf( - "test case %d: got %#v, but want %#v", - i, result, 
testCase.Expected, - ) - } - } -} - -func TestBuildEC2TagFilterList(t *testing.T) { - type TestCase struct { - Tags []*ec2.Tag - Expected []*ec2.Filter - } - testCases := []TestCase{ - { - []*ec2.Tag{ - { - Key: aws.String("foo"), - Value: aws.String("bar"), - }, - { - Key: aws.String("baz"), - Value: aws.String("boo"), - }, - }, - []*ec2.Filter{ - { - Name: aws.String("tag:foo"), - Values: []*string{aws.String("bar")}, - }, - { - Name: aws.String("tag:baz"), - Values: []*string{aws.String("boo")}, - }, - }, - }, - } - - for i, testCase := range testCases { - result := buildEC2TagFilterList(testCase.Tags) - - if !reflect.DeepEqual(result, testCase.Expected) { - t.Errorf( - "test case %d: got %#v, but want %#v", - i, result, testCase.Expected, - ) - } - } -} - -func TestBuildEC2CustomFilterList(t *testing.T) { - - // We need to get a set with the appropriate hash function, - // so we'll use the schema to help us produce what would - // be produced in the normal case. - filtersSchema := ec2CustomFiltersSchema() - - // The zero value of this schema will be an interface{} - // referring to a new, empty *schema.Set with the - // appropriate hash function configured. - filters := filtersSchema.ZeroValue().(*schema.Set) - - // We also need an appropriately-configured set for - // the list of values. - valuesSchema := filtersSchema.Elem.(*schema.Resource).Schema["values"] - valuesSet := func(vals ...string) *schema.Set { - ret := valuesSchema.ZeroValue().(*schema.Set) - for _, val := range vals { - ret.Add(val) - } - return ret - } - - filters.Add(map[string]interface{}{ - "name": "foo", - "values": valuesSet("bar", "baz"), - }) - filters.Add(map[string]interface{}{ - "name": "pizza", - "values": valuesSet("cheese"), - }) - - expected := []*ec2.Filter{ - // These are produced in the deterministic order guaranteed - // by schema.Set.List(), which happens to produce them in - // the following order for our current input. 
If this test - // evolves with different input data in future then they - // will likely be emitted in a different order, which is fine. - { - Name: aws.String("pizza"), - Values: []*string{aws.String("cheese")}, - }, - { - Name: aws.String("foo"), - Values: []*string{aws.String("bar"), aws.String("baz")}, - }, - } - result := buildEC2CustomFilterList(filters) - - if !reflect.DeepEqual(result, expected) { - t.Errorf( - "got %#v, but want %#v", - result, expected, - ) - } -} diff --git a/builtin/providers/aws/hosted_zones.go b/builtin/providers/aws/hosted_zones.go deleted file mode 100644 index 131f03ebd..000000000 --- a/builtin/providers/aws/hosted_zones.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -// This list is copied from -// http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints -// It currently cannot be generated from the API json. -var hostedZoneIDsMap = map[string]string{ - "us-east-1": "Z3AQBSTGFYJSTF", - "us-east-2": "Z2O1EMRO9K5GLX", - "us-west-2": "Z3BJ6K6RIION7M", - "us-west-1": "Z2F56UZL2M1ACD", - "eu-west-1": "Z1BKCTXD74EZPE", - "eu-west-2": "Z3GKZC51ZF0DB4", - "eu-central-1": "Z21DNDUVLTQW6Q", - "ap-south-1": "Z11RGJOFQNVJUP", - "ap-southeast-1": "Z3O0J2DXBE1FTB", - "ap-southeast-2": "Z1WCIGYICN2BYD", - "ap-northeast-1": "Z2M4EHUR26P7ZW", - "ap-northeast-2": "Z3W03O7B5YMIYP", - "ca-central-1": "Z1QDHH18159H29", - "sa-east-1": "Z7KQH4QJS55SO", - "us-gov-west-1": "Z31GFT0UA1I2HV", -} - -// Returns the hosted zone ID for an S3 website endpoint region. This can be -// used as input to the aws_route53_record resource's zone_id argument. 
-func HostedZoneIDForRegion(region string) string { - return hostedZoneIDsMap[region] -} diff --git a/builtin/providers/aws/hosted_zones_test.go b/builtin/providers/aws/hosted_zones_test.go deleted file mode 100644 index d331a7b8f..000000000 --- a/builtin/providers/aws/hosted_zones_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package aws - -import ( - "testing" -) - -func TestHostedZoneIDForRegion(t *testing.T) { - if r := HostedZoneIDForRegion("us-east-1"); r != "Z3AQBSTGFYJSTF" { - t.Fatalf("bad: %s", r) - } - if r := HostedZoneIDForRegion("ap-southeast-2"); r != "Z1WCIGYICN2BYD" { - t.Fatalf("bad: %s", r) - } - - // Bad input should be empty string - if r := HostedZoneIDForRegion("not-a-region"); r != "" { - t.Fatalf("bad: %s", r) - } -} diff --git a/builtin/providers/aws/iam_policy_model.go b/builtin/providers/aws/iam_policy_model.go deleted file mode 100644 index 81306971d..000000000 --- a/builtin/providers/aws/iam_policy_model.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "encoding/json" - "sort" -) - -type IAMPolicyDoc struct { - Version string `json:",omitempty"` - Id string `json:",omitempty"` - Statements []*IAMPolicyStatement `json:"Statement"` -} - -type IAMPolicyStatement struct { - Sid string - Effect string `json:",omitempty"` - Actions interface{} `json:"Action,omitempty"` - NotActions interface{} `json:"NotAction,omitempty"` - Resources interface{} `json:"Resource,omitempty"` - NotResources interface{} `json:"NotResource,omitempty"` - Principals IAMPolicyStatementPrincipalSet `json:"Principal,omitempty"` - NotPrincipals IAMPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"` - Conditions IAMPolicyStatementConditionSet `json:"Condition,omitempty"` -} - -type IAMPolicyStatementPrincipal struct { - Type string - Identifiers interface{} -} - -type IAMPolicyStatementCondition struct { - Test string - Variable string - Values interface{} -} - -type IAMPolicyStatementPrincipalSet []IAMPolicyStatementPrincipal -type 
IAMPolicyStatementConditionSet []IAMPolicyStatementCondition - -func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { - raw := map[string]interface{}{} - - // As a special case, IAM considers the string value "*" to be - // equivalent to "AWS": "*", and normalizes policies as such. - // We'll follow their lead and do the same normalization here. - // IAM also considers {"*": "*"} to be equivalent to this. - if len(ps) == 1 { - p := ps[0] - if p.Type == "AWS" || p.Type == "*" { - if sv, ok := p.Identifiers.(string); ok && sv == "*" { - return []byte(`"*"`), nil - } - - if av, ok := p.Identifiers.([]string); ok && len(av) == 1 && av[0] == "*" { - return []byte(`"*"`), nil - } - } - } - - for _, p := range ps { - switch i := p.Identifiers.(type) { - case []string: - if _, ok := raw[p.Type]; !ok { - raw[p.Type] = make([]string, 0, len(i)) - } - sort.Sort(sort.Reverse(sort.StringSlice(i))) - raw[p.Type] = append(raw[p.Type].([]string), i...) - case string: - raw[p.Type] = i - default: - panic("Unsupported data type for IAMPolicyStatementPrincipalSet") - } - } - - return json.Marshal(&raw) -} - -func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { - raw := map[string]map[string]interface{}{} - - for _, c := range cs { - if _, ok := raw[c.Test]; !ok { - raw[c.Test] = map[string]interface{}{} - } - switch i := c.Values.(type) { - case []string: - if _, ok := raw[c.Test][c.Variable]; !ok { - raw[c.Test][c.Variable] = make([]string, 0, len(i)) - } - sort.Sort(sort.Reverse(sort.StringSlice(i))) - raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable].([]string), i...) 
- case string: - raw[c.Test][c.Variable] = i - default: - panic("Unsupported data type for IAMPolicyStatementConditionSet") - } - } - - return json.Marshal(&raw) -} - -func iamPolicyDecodeConfigStringList(lI []interface{}) interface{} { - if len(lI) == 1 { - return lI[0].(string) - } - ret := make([]string, len(lI)) - for i, vI := range lI { - ret[i] = vI.(string) - } - sort.Sort(sort.Reverse(sort.StringSlice(ret))) - return ret -} diff --git a/builtin/providers/aws/import_aws_api_gateway_account_test.go b/builtin/providers/aws/import_aws_api_gateway_account_test.go deleted file mode 100644 index cb60a4929..000000000 --- a/builtin/providers/aws/import_aws_api_gateway_account_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSAPIGatewayAccount_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_account.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAPIGatewayAccountConfig_empty, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_api_gateway_key_test.go b/builtin/providers/aws/import_aws_api_gateway_key_test.go deleted file mode 100644 index 2fd3d4cef..000000000 --- a/builtin/providers/aws/import_aws_api_gateway_key_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSAPIGatewayApiKey_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_api_key.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayApiKeyDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAPIGatewayApiKeyConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_api_gateway_usage_plan_test.go b/builtin/providers/aws/import_aws_api_gateway_usage_plan_test.go deleted file mode 100644 index 76a58e0c5..000000000 --- a/builtin/providers/aws/import_aws_api_gateway_usage_plan_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSAPIGatewayUsagePlan_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_usage_plan.main" - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_autoscaling_group_test.go b/builtin/providers/aws/import_aws_autoscaling_group_test.go deleted file mode 100644 index 666563b50..000000000 --- a/builtin/providers/aws/import_aws_autoscaling_group_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSAutoScalingGroup_importBasic(t *testing.T) { - resourceName := "aws_autoscaling_group.bar" - randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccAWSAutoScalingGroupImport(randName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_delete", "metrics_granularity", "wait_for_capacity_timeout"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudfront_distribution.go b/builtin/providers/aws/import_aws_cloudfront_distribution.go deleted file mode 100644 index acfc836dc..000000000 --- a/builtin/providers/aws/import_aws_cloudfront_distribution.go +++ /dev/null @@ -1,32 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // This is a non API attribute - // We are merely setting this to the same value as the Default setting in the schema - d.Set("retain_on_delete", false) - - conn := meta.(*AWSClient).cloudfrontconn - id := d.Id() - resp, err := conn.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{ - Id: aws.String(id), - }) - - if err != nil { - return nil, err - } - - distConfig := resp.DistributionConfig - results := make([]*schema.ResourceData, 1) - err = flattenDistributionConfig(d, distConfig) - if err != nil { - return nil, err - } - results[0] = d - return results, nil -} diff --git a/builtin/providers/aws/import_aws_cloudfront_distribution_test.go b/builtin/providers/aws/import_aws_cloudfront_distribution_test.go deleted file mode 100644 index 787d913a5..000000000 --- a/builtin/providers/aws/import_aws_cloudfront_distribution_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudFrontDistribution_importBasic(t *testing.T) { - ri := 
acctest.RandInt() - testConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3Config, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig()) - - resourceName := "aws_cloudfront_distribution.s3_distribution" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFrontDistributionDestroy, - Steps: []resource.TestStep{ - { - Config: testConfig, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go b/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go deleted file mode 100644 index dd45cc786..000000000 --- a/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudFrontOriginAccessIdentity_importBasic(t *testing.T) { - resourceName := "aws_cloudfront_origin_access_identity.origin_access_identity" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFrontOriginAccessIdentityDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudFrontOriginAccessIdentityConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudtrail_test.go b/builtin/providers/aws/import_aws_cloudtrail_test.go deleted file mode 100644 index b5b3aba3b..000000000 --- a/builtin/providers/aws/import_aws_cloudtrail_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccAWSCloudTrail_importBasic(t *testing.T) { - resourceName := "aws_cloudtrail.foobar" - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"enable_log_file_validation", "is_multi_region_trail", "include_global_service_events", "enable_logging"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudwatch_event_rule_test.go b/builtin/providers/aws/import_aws_cloudwatch_event_rule_test.go deleted file mode 100644 index ac200dddf..000000000 --- a/builtin/providers/aws/import_aws_cloudwatch_event_rule_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudWatchEventRule_importBasic(t *testing.T) { - resourceName := "aws_cloudwatch_event_rule.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"is_enabled"}, //this has a default value - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudwatch_log_destination_policy_test.go b/builtin/providers/aws/import_aws_cloudwatch_log_destination_policy_test.go deleted file mode 100644 index f7c4a7f35..000000000 --- a/builtin/providers/aws/import_aws_cloudwatch_log_destination_policy_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( 
- "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudwatchLogDestinationPolicy_importBasic(t *testing.T) { - resourceName := "aws_cloudwatch_log_destination_policy.test" - - rstring := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudwatchLogDestinationPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudwatchLogDestinationPolicyConfig(rstring), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudwatch_log_destination_test.go b/builtin/providers/aws/import_aws_cloudwatch_log_destination_test.go deleted file mode 100644 index b0c1d2535..000000000 --- a/builtin/providers/aws/import_aws_cloudwatch_log_destination_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudwatchLogDestination_importBasic(t *testing.T) { - resourceName := "aws_cloudwatch_log_destination.test" - - rstring := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudwatchLogDestinationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudwatchLogDestinationConfig(rstring), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudwatch_log_group_test.go b/builtin/providers/aws/import_aws_cloudwatch_log_group_test.go deleted file mode 100644 index b218ab286..000000000 --- 
a/builtin/providers/aws/import_aws_cloudwatch_log_group_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudWatchLogGroup_importBasic(t *testing.T) { - resourceName := "aws_cloudwatch_log_group.foobar" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudWatchLogGroupConfig(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"retention_in_days"}, //this has a default value - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go b/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go deleted file mode 100644 index 1cb30254c..000000000 --- a/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCloudWatchMetricAlarm_importBasic(t *testing.T) { - rInt := acctest.RandInt() - resourceName := "aws_cloudwatch_metric_alarm.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchMetricAlarmConfig(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_codecommit_repository_test.go b/builtin/providers/aws/import_aws_codecommit_repository_test.go deleted file mode 100644 
index ea203c9c1..000000000 --- a/builtin/providers/aws/import_aws_codecommit_repository_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCodeCommitRepository_importBasic(t *testing.T) { - resName := "aws_codecommit_repository.test" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCodeCommitRepository_basic(rInt), - }, - { - ResourceName: resName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_codepipeline_test.go b/builtin/providers/aws/import_aws_codepipeline_test.go deleted file mode 100644 index 5025fcddc..000000000 --- a/builtin/providers/aws/import_aws_codepipeline_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package aws - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCodePipeline_Import_basic(t *testing.T) { - if os.Getenv("GITHUB_TOKEN") == "" { - t.Skip("Environment variable GITHUB_TOKEN is not set") - } - - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodePipelineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodePipelineConfig_basic(name), - }, - - resource.TestStep{ - ResourceName: "aws_codepipeline.bar", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_cognito_identity_pool_test.go b/builtin/providers/aws/import_aws_cognito_identity_pool_test.go deleted file mode 100644 index bdd2caec8..000000000 --- 
a/builtin/providers/aws/import_aws_cognito_identity_pool_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCognitoIdentityPool_importBasic(t *testing.T) { - resourceName := "aws_cognito_identity_pool.main" - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCognitoIdentityPoolConfig_basic(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_customer_gateway_test.go b/builtin/providers/aws/import_aws_customer_gateway_test.go deleted file mode 100644 index 96e791ce8..000000000 --- a/builtin/providers/aws/import_aws_customer_gateway_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSCustomerGateway_importBasic(t *testing.T) { - resourceName := "aws_customer_gateway.foo" - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCustomerGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_event_subscription.go b/builtin/providers/aws/import_aws_db_event_subscription.go deleted file mode 100644 index 82e5317ea..000000000 --- 
a/builtin/providers/aws/import_aws_db_event_subscription.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/hashicorp/terraform/helper/schema" - -func resourceAwsDbEventSubscriptionImport( - d *schema.ResourceData, - meta interface{}) ([]*schema.ResourceData, error) { - - // The db event subscription Read function only needs the "name" of the event subscription - // in order to populate the necessary values. This takes the "id" from the supplied StateFunc - // and sets it as the "name" attribute, as described in the import documentation. This allows - // the Read function to actually succeed and set the ID of the resource - results := make([]*schema.ResourceData, 1, 1) - d.Set("name", d.Id()) - results[0] = d - return results, nil -} diff --git a/builtin/providers/aws/import_aws_db_event_subscription_test.go b/builtin/providers/aws/import_aws_db_event_subscription_test.go deleted file mode 100644 index 2aa85073f..000000000 --- a/builtin/providers/aws/import_aws_db_event_subscription_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBEventSubscription_importBasic(t *testing.T) { - resourceName := "aws_db_event_subscription.bar" - rInt := acctest.RandInt() - subscriptionName := fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBEventSubscriptionConfig(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateId: subscriptionName, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_instance_test.go b/builtin/providers/aws/import_aws_db_instance_test.go deleted file mode 100644 index 5fea3c2e0..000000000 
--- a/builtin/providers/aws/import_aws_db_instance_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBInstance_importBasic(t *testing.T) { - resourceName := "aws_db_instance.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfig, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "password", - "skip_final_snapshot", - "final_snapshot_identifier", - }, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_option_group_test.go b/builtin/providers/aws/import_aws_db_option_group_test.go deleted file mode 100644 index 3025ff9e8..000000000 --- a/builtin/providers/aws/import_aws_db_option_group_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBOptionGroup_importBasic(t *testing.T) { - resourceName := "aws_db_option_group.bar" - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBOptionGroupBasicConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_parameter_group_group_test.go b/builtin/providers/aws/import_aws_db_parameter_group_group_test.go deleted file mode 100644 index d9806e5cf..000000000 --- 
a/builtin/providers/aws/import_aws_db_parameter_group_group_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBParameterGroup_importBasic(t *testing.T) { - resourceName := "aws_db_parameter_group.bar" - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBParameterGroupConfig(groupName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_security_group_test.go b/builtin/providers/aws/import_aws_db_security_group_test.go deleted file mode 100644 index 57447c5a5..000000000 --- a/builtin/providers/aws/import_aws_db_security_group_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBSecurityGroup_importBasic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - resourceName := "aws_db_security_group.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBSecurityGroupConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_db_subnet_group_test.go b/builtin/providers/aws/import_aws_db_subnet_group_test.go deleted 
file mode 100644 index e9ab51b82..000000000 --- a/builtin/providers/aws/import_aws_db_subnet_group_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDBSubnetGroup_importBasic(t *testing.T) { - resourceName := "aws_db_subnet_group.foo" - - rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "description"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_dynamodb_table_test.go b/builtin/providers/aws/import_aws_dynamodb_table_test.go deleted file mode 100644 index 00fa2169d..000000000 --- a/builtin/providers/aws/import_aws_dynamodb_table_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDynamoDbTable_importBasic(t *testing.T) { - resourceName := "aws_dynamodb_table.basic-dynamodb-table" - - rName := acctest.RandomWithPrefix("TerraformTestTable-") - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigInitialState(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSDynamoDbTable_importTags(t *testing.T) { - resourceName := "aws_dynamodb_table.basic-dynamodb-table" - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigTags(), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_ebs_volume_test.go b/builtin/providers/aws/import_aws_ebs_volume_test.go deleted file mode 100644 index fd15bc241..000000000 --- a/builtin/providers/aws/import_aws_ebs_volume_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEBSVolume_importBasic(t *testing.T) { - resourceName := "aws_ebs_volume.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAwsEbsVolumeConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_ecr_repository_test.go b/builtin/providers/aws/import_aws_ecr_repository_test.go deleted file mode 100644 index cd7b14439..000000000 --- a/builtin/providers/aws/import_aws_ecr_repository_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEcrRepository_importBasic(t *testing.T) { - resourceName := "aws_ecr_repository.default" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcrRepositoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEcrRepository, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/aws/import_aws_efs_file_system_test.go b/builtin/providers/aws/import_aws_efs_file_system_test.go deleted file mode 100644 index 885ee9ddd..000000000 --- a/builtin/providers/aws/import_aws_efs_file_system_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEFSFileSystem_importBasic(t *testing.T) { - resourceName := "aws_efs_file_system.foo-with-tags" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEfsFileSystemDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEFSFileSystemConfigWithTags(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"reference_name", "creation_token"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_efs_mount_target_test.go b/builtin/providers/aws/import_aws_efs_mount_target_test.go deleted file mode 100644 index 607938e43..000000000 --- a/builtin/providers/aws/import_aws_efs_mount_target_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEFSMountTarget_importBasic(t *testing.T) { - resourceName := "aws_efs_mount_target.alpha" - - ct := fmt.Sprintf("createtoken-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEfsMountTargetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEFSMountTargetConfig(ct), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} 
diff --git a/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go b/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go deleted file mode 100644 index 8d322abb4..000000000 --- a/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAWSElasticBeanstalkApplication_importBasic(t *testing.T) { - resourceName := "aws_elastic_beanstalk_application.tftest" - config := fmt.Sprintf("tf-test-name-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkAppImportConfig(config), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccBeanstalkAppImportConfig(name string) string { - return fmt.Sprintf(`resource "aws_elastic_beanstalk_application" "tftest" { - name = "%s" - description = "tf-test-desc" - }`, name) -} diff --git a/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go deleted file mode 100644 index 559df2e3e..000000000 --- a/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAWSElasticBeanstalkEnvironment_importBasic(t *testing.T) { - resourceName := "aws_elastic_beanstalk_application.tftest" - - applicationName := fmt.Sprintf("tf-test-name-%d", acctest.RandInt()) - environmentName := fmt.Sprintf("tf-test-env-name-%d", acctest.RandInt()) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnvImportConfig(applicationName, environmentName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccBeanstalkEnvImportConfig(appName, envName string) string { - return fmt.Sprintf(`resource "aws_elastic_beanstalk_application" "tftest" { - name = "%s" - description = "tf-test-desc" - } - - resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "%s" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - }`, appName, envName) -} diff --git a/builtin/providers/aws/import_aws_elasticache_cluster_test.go b/builtin/providers/aws/import_aws_elasticache_cluster_test.go deleted file mode 100644 index 6128ddf95..000000000 --- a/builtin/providers/aws/import_aws_elasticache_cluster_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package aws - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElasticacheCluster_importBasic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - name := acctest.RandString(10) - - resourceName := "aws_elasticache_cluster.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheClusterConfigBasic(name), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/aws/import_aws_elasticache_parameter_group_test.go b/builtin/providers/aws/import_aws_elasticache_parameter_group_test.go deleted file mode 100644 index 11c9334ed..000000000 --- a/builtin/providers/aws/import_aws_elasticache_parameter_group_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElasticacheParameterGroup_importBasic(t *testing.T) { - resourceName := "aws_elasticache_parameter_group.bar" - rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheParameterGroupConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_elasticache_replication_group_test.go b/builtin/providers/aws/import_aws_elasticache_replication_group_test.go deleted file mode 100644 index 372eff849..000000000 --- a/builtin/providers/aws/import_aws_elasticache_replication_group_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package aws - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElasticacheReplicationGroup_importBasic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - name := acctest.RandString(10) - - resourceName := "aws_elasticache_replication_group.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheReplicationGroupConfig(name), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately"}, //not in the API - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_elasticache_subnet_group_test.go b/builtin/providers/aws/import_aws_elasticache_subnet_group_test.go deleted file mode 100644 index 2ce156110..000000000 --- a/builtin/providers/aws/import_aws_elasticache_subnet_group_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package aws - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSElasticacheSubnetGroup_importBasic(t *testing.T) { - resourceName := "aws_elasticache_subnet_group.bar" - config := fmt.Sprintf(testAccAWSElasticacheSubnetGroupConfig, acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "description"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_elb_test.go b/builtin/providers/aws/import_aws_elb_test.go deleted file mode 100644 index f4d90dcef..000000000 --- a/builtin/providers/aws/import_aws_elb_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSELB_importBasic(t *testing.T) { - resourceName := "aws_elb.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSELBConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_emr_security_configuration_test.go b/builtin/providers/aws/import_aws_emr_security_configuration_test.go deleted file mode 100644 index 72ddddf51..000000000 --- a/builtin/providers/aws/import_aws_emr_security_configuration_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSEmrSecurityConfiguration_importBasic(t *testing.T) { - resourceName := "aws_emr_security_configuration.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccEmrSecurityConfigurationConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_flow_log_test.go b/builtin/providers/aws/import_aws_flow_log_test.go deleted file mode 100644 index 97ccebb68..000000000 --- a/builtin/providers/aws/import_aws_flow_log_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSFlowLog_importBasic(t *testing.T) { - resourceName := "aws_flow_log.test_flow_log" - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFlowLogDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFlowLogConfig_basic(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - 
ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_glacier_vault_test.go b/builtin/providers/aws/import_aws_glacier_vault_test.go deleted file mode 100644 index f7c20666e..000000000 --- a/builtin/providers/aws/import_aws_glacier_vault_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSGlacierVault_importBasic(t *testing.T) { - resourceName := "aws_glacier_vault.full" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGlacierVaultDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGlacierVault_full(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_iam_account_alias_test.go b/builtin/providers/aws/import_aws_iam_account_alias_test.go deleted file mode 100644 index 28829a419..000000000 --- a/builtin/providers/aws/import_aws_iam_account_alias_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func testAccAWSIAMAccountAlias_importBasic(t *testing.T) { - resourceName := "aws_iam_account_alias.test" - - rstring := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSIAMAccountAliasConfig(rstring), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/aws/import_aws_iam_account_password_policy_test.go b/builtin/providers/aws/import_aws_iam_account_password_policy_test.go deleted file mode 100644 index b5fec9eca..000000000 --- a/builtin/providers/aws/import_aws_iam_account_password_policy_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSIAMAccountPasswordPolicy_importBasic(t *testing.T) { - resourceName := "aws_iam_account_password_policy.default" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIAMAccountPasswordPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSIAMAccountPasswordPolicy, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_iam_group_test.go b/builtin/providers/aws/import_aws_iam_group_test.go deleted file mode 100644 index 7e94f116f..000000000 --- a/builtin/providers/aws/import_aws_iam_group_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSIAMGroup_importBasic(t *testing.T) { - rInt := acctest.RandInt() - resourceName := "aws_iam_group.group" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSGroupConfig(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_iam_policy_test.go b/builtin/providers/aws/import_aws_iam_policy_test.go deleted file mode 100644 index d40145b58..000000000 --- 
a/builtin/providers/aws/import_aws_iam_policy_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccAwsIamPolicyConfig(suffix string) string { - return fmt.Sprintf(` -resource "aws_iam_policy" "test_%[1]s" { - name = "test_policy_%[1]s" - path = "/" - description = "My test policy" - policy = < 0 { - for _, pair := range perm.UserIdGroupPairs { - p := &ec2.IpPermission{ - FromPort: perm.FromPort, - IpProtocol: perm.IpProtocol, - PrefixListIds: perm.PrefixListIds, - ToPort: perm.ToPort, - UserIdGroupPairs: []*ec2.UserIdGroupPair{pair}, - } - - r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) - if err != nil { - return nil, err - } - result = append(result, r) - } - } - - if len(result) == 0 && len(perm.PrefixListIds) > 0 { - p := &ec2.IpPermission{ - FromPort: perm.FromPort, - IpProtocol: perm.IpProtocol, - PrefixListIds: perm.PrefixListIds, - ToPort: perm.ToPort, - } - - r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) - if err != nil { - return nil, err - } - result = append(result, r) - } - - return result, nil -} - -func resourceAwsSecurityGroupImportStatePermPair(sg *ec2.SecurityGroup, ruleType string, perm *ec2.IpPermission) (*schema.ResourceData, error) { - // Construct the rule. We do this by populating the absolute - // minimum necessary for Refresh on the rule to work. This - // happens to be a lot of fields since they're almost all needed - // for de-dupping. - sgId := sg.GroupId - id := ipPermissionIDHash(*sgId, ruleType, perm) - ruleResource := resourceAwsSecurityGroupRule() - d := ruleResource.Data(nil) - d.SetId(id) - d.SetType("aws_security_group_rule") - d.Set("security_group_id", sgId) - d.Set("type", ruleType) - - // 'self' is false by default. 
Below, we range over the group ids and set true - // if the parent sg id is found - d.Set("self", false) - - if len(perm.UserIdGroupPairs) > 0 { - s := perm.UserIdGroupPairs[0] - - // Check for Pair that is the same as the Security Group, to denote self. - // Otherwise, mark the group id in source_security_group_id - isVPC := sg.VpcId != nil && *sg.VpcId != "" - if isVPC { - if *s.GroupId == *sg.GroupId { - d.Set("self", true) - // prune the self reference from the UserIdGroupPairs, so we don't - // have duplicate sg ids (both self and in source_security_group_id) - perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) - } - } else { - if *s.GroupName == *sg.GroupName { - d.Set("self", true) - // prune the self reference from the UserIdGroupPairs, so we don't - // have duplicate sg ids (both self and in source_security_group_id) - perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) - } - } - } - - if err := setFromIPPerm(d, sg, perm); err != nil { - return nil, errwrap.Wrapf("Error importing AWS Security Group: {{err}}", err) - } - - return d, nil -} diff --git a/builtin/providers/aws/import_aws_security_group_test.go b/builtin/providers/aws/import_aws_security_group_test.go deleted file mode 100644 index a57313ae5..000000000 --- a/builtin/providers/aws/import_aws_security_group_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSecurityGroup_importBasic(t *testing.T) { - checkFn := func(s []*terraform.InstanceState) error { - // Expect 3: group, 2 rules - if len(s) != 3 { - return fmt.Errorf("expected 3 states: %#v", s) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - 
Config: testAccAWSSecurityGroupConfig, - }, - - { - ResourceName: "aws_security_group.web", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importIpv6(t *testing.T) { - checkFn := func(s []*terraform.InstanceState) error { - // Expect 3: group, 2 rules - if len(s) != 3 { - return fmt.Errorf("expected 3 states: %#v", s) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfigIpv6, - }, - - { - ResourceName: "aws_security_group.web", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importSelf(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_importSelf, - }, - - { - ResourceName: "aws_security_group.allow_all", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importSourceSecurityGroup(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_importSourceSecurityGroup, - }, - - { - ResourceName: "aws_security_group.test_group_1", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importIPRangeAndSecurityGroupWithSameRules(t *testing.T) { - checkFn := func(s []*terraform.InstanceState) error { - // Expect 4: group, 3 rules - if len(s) != 4 { - return fmt.Errorf("expected 4 states: %#v", s) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_importIPRangeAndSecurityGroupWithSameRules, - }, - - { - ResourceName: "aws_security_group.test_group_1", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importIPRangesWithSameRules(t *testing.T) { - checkFn := func(s []*terraform.InstanceState) error { - // Expect 4: group, 2 rules - if len(s) != 3 { - return fmt.Errorf("expected 3 states: %#v", s) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_importIPRangesWithSameRules, - }, - - { - ResourceName: "aws_security_group.test_group_1", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) -} - -func TestAccAWSSecurityGroup_importPrefixList(t *testing.T) { - checkFn := func(s []*terraform.InstanceState) error { - // Expect 2: group, 1 rule - if len(s) != 2 { - return fmt.Errorf("expected 2 states: %#v", s) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfigPrefixListEgress, - }, - - { - ResourceName: "aws_security_group.egress", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_ses_receipt_filter_test.go b/builtin/providers/aws/import_aws_ses_receipt_filter_test.go deleted file mode 100644 index ecc962b85..000000000 --- a/builtin/providers/aws/import_aws_ses_receipt_filter_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSESReceiptFilter_importBasic(t *testing.T) { - resourceName := "aws_ses_receipt_filter.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSESReceiptFilterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSESReceiptFilterConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go b/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go deleted file mode 100644 index c5294bcb4..000000000 --- a/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSESReceiptRuleSet_importBasic(t *testing.T) { - resourceName := "aws_ses_receipt_rule_set.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSESReceiptRuleSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSESReceiptRuleSetConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_sfn_activity_test.go b/builtin/providers/aws/import_aws_sfn_activity_test.go deleted file mode 100644 index 01a91143e..000000000 --- a/builtin/providers/aws/import_aws_sfn_activity_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSfnActivity_importBasic(t *testing.T) { - resourceName := "aws_sfn_activity.foo" - rName := acctest.RandString(10) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSfnActivityDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSfnActivityBasicConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_simpledb_domain_test.go b/builtin/providers/aws/import_aws_simpledb_domain_test.go deleted file mode 100644 index d79de8925..000000000 --- a/builtin/providers/aws/import_aws_simpledb_domain_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSimpleDBDomain_importBasic(t *testing.T) { - resourceName := "aws_simpledb_domain.test_domain" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSimpleDBDomainDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSimpleDBDomainConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_sns_topic_subscription_test.go b/builtin/providers/aws/import_aws_sns_topic_subscription_test.go deleted file mode 100644 index 9a0b60bbc..000000000 --- a/builtin/providers/aws/import_aws_sns_topic_subscription_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSNSTopicSubscription_importBasic(t *testing.T) { - resourceName := "aws_sns_topic.test_topic" - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicSubscriptionDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccAWSSNSTopicSubscriptionConfig(ri), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_sns_topic_test.go b/builtin/providers/aws/import_aws_sns_topic_test.go deleted file mode 100644 index 593610330..000000000 --- a/builtin/providers/aws/import_aws_sns_topic_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSNSTopic_importBasic(t *testing.T) { - resourceName := "aws_sns_topic.test_topic" - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSNSTopicConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go b/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go deleted file mode 100644 index 24c7acc59..000000000 --- a/builtin/providers/aws/import_aws_spot_datafeed_subscription_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func testAccAWSSpotDatafeedSubscription_importBasic(t *testing.T) { - resourceName := "aws_spot_datafeed_subscription.default" - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotDatafeedSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotDatafeedSubscription(ri), - }, - - { - 
ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_sqs_queue_test.go b/builtin/providers/aws/import_aws_sqs_queue_test.go deleted file mode 100644 index 437b949e8..000000000 --- a/builtin/providers/aws/import_aws_sqs_queue_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package aws - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSQSQueue_importBasic(t *testing.T) { - resourceName := "aws_sqs_queue.queue" - queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSQSQueueDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSQSConfigWithDefaults(queueName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_sqs_queue.queue", "fifo_queue", "false"), - ), - }, - }, - }) -} - -func TestAccAWSSQSQueue_importFifo(t *testing.T) { - resourceName := "aws_sqs_queue.queue" - queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSQSQueueDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSQSFifoConfigWithDefaults(queueName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_sqs_queue.queue", "fifo_queue", "true"), - ), - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_subnet_test.go b/builtin/providers/aws/import_aws_subnet_test.go deleted file mode 100644 index c08e4f7ed..000000000 --- 
a/builtin/providers/aws/import_aws_subnet_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSubnet_importBasic(t *testing.T) { - resourceName := "aws_subnet.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSubnetConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpc_dhcp_options_test.go b/builtin/providers/aws/import_aws_vpc_dhcp_options_test.go deleted file mode 100644 index e0f605f28..000000000 --- a/builtin/providers/aws/import_aws_vpc_dhcp_options_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSDHCPOptions_importBasic(t *testing.T) { - resourceName := "aws_vpc_dhcp_options.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDHCPOptionsConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpc_endpoint_test.go b/builtin/providers/aws/import_aws_vpc_endpoint_test.go deleted file mode 100644 index d44c35493..000000000 --- a/builtin/providers/aws/import_aws_vpc_endpoint_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVpcEndpoint_importBasic(t *testing.T) { - resourceName := "aws_vpc_endpoint.second-private-s3" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckVpcEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointWithRouteTableAndPolicyConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpc_peering_connection_test.go b/builtin/providers/aws/import_aws_vpc_peering_connection_test.go deleted file mode 100644 index c64f84a36..000000000 --- a/builtin/providers/aws/import_aws_vpc_peering_connection_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVPCPeeringConnection_importBasic(t *testing.T) { - resourceName := "aws_vpc_peering_connection.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "auto_accept"}, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpc_test.go b/builtin/providers/aws/import_aws_vpc_test.go deleted file mode 100644 index e940b3ddc..000000000 --- a/builtin/providers/aws/import_aws_vpc_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVpc_importBasic(t *testing.T) { - resourceName := "aws_vpc.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, 
- }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpn_connection_test.go b/builtin/providers/aws/import_aws_vpn_connection_test.go deleted file mode 100644 index a7297a220..000000000 --- a/builtin/providers/aws/import_aws_vpn_connection_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVpnConnection_importBasic(t *testing.T) { - resourceName := "aws_vpn_connection.foo" - rBgpAsn := acctest.RandIntRange(64512, 65534) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAwsVpnConnectionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsVpnConnectionConfig(rBgpAsn), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/import_aws_vpn_gateway_test.go b/builtin/providers/aws/import_aws_vpn_gateway_test.go deleted file mode 100644 index e911ff1ab..000000000 --- a/builtin/providers/aws/import_aws_vpn_gateway_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSVpnGateway_importBasic(t *testing.T) { - resourceName := "aws_vpn_gateway.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/network_acl_entry.go b/builtin/providers/aws/network_acl_entry.go deleted file mode 100644 index c57f82222..000000000 --- a/builtin/providers/aws/network_acl_entry.go +++ /dev/null @@ -1,141 +0,0 @@ 
-package aws - -import ( - "fmt" - "net" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2.NetworkAclEntry, error) { - entries := make([]*ec2.NetworkAclEntry, 0, len(configured)) - for _, eRaw := range configured { - data := eRaw.(map[string]interface{}) - protocol := data["protocol"].(string) - p, err := strconv.Atoi(protocol) - if err != nil { - var ok bool - p, ok = protocolIntegers()[protocol] - if !ok { - return nil, fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, data) - } - } - - e := &ec2.NetworkAclEntry{ - Protocol: aws.String(strconv.Itoa(p)), - PortRange: &ec2.PortRange{ - From: aws.Int64(int64(data["from_port"].(int))), - To: aws.Int64(int64(data["to_port"].(int))), - }, - Egress: aws.Bool(entryType == "egress"), - RuleAction: aws.String(data["action"].(string)), - RuleNumber: aws.Int64(int64(data["rule_no"].(int))), - } - - if v, ok := data["ipv6_cidr_block"]; ok { - e.Ipv6CidrBlock = aws.String(v.(string)) - } - - if v, ok := data["cidr_block"]; ok { - e.CidrBlock = aws.String(v.(string)) - } - - // Specify additional required fields for ICMP - if p == 1 { - e.IcmpTypeCode = &ec2.IcmpTypeCode{} - if v, ok := data["icmp_code"]; ok { - e.IcmpTypeCode.Code = aws.Int64(int64(v.(int))) - } - if v, ok := data["icmp_type"]; ok { - e.IcmpTypeCode.Type = aws.Int64(int64(v.(int))) - } - } - - entries = append(entries, e) - } - return entries, nil -} - -func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interface{} { - entries := make([]map[string]interface{}, 0, len(list)) - - for _, entry := range list { - - newEntry := map[string]interface{}{ - "from_port": *entry.PortRange.From, - "to_port": *entry.PortRange.To, - "action": *entry.RuleAction, - "rule_no": *entry.RuleNumber, - "protocol": *entry.Protocol, - } - - if entry.CidrBlock != nil { - newEntry["cidr_block"] = *entry.CidrBlock - } - - if 
entry.Ipv6CidrBlock != nil { - newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock - } - - entries = append(entries, newEntry) - } - - return entries - -} - -func protocolStrings(protocolIntegers map[string]int) map[int]string { - protocolStrings := make(map[int]string, len(protocolIntegers)) - for k, v := range protocolIntegers { - protocolStrings[v] = k - } - - return protocolStrings -} - -func protocolIntegers() map[string]int { - var protocolIntegers = make(map[string]int) - protocolIntegers = map[string]int{ - // defined at https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - "ah": 51, - "esp": 50, - "udp": 17, - "tcp": 6, - "icmp": 1, - "all": -1, - "vrrp": 112, - } - return protocolIntegers -} - -// expectedPortPair stores a pair of ports we expect to see together. -type expectedPortPair struct { - to_port int64 - from_port int64 -} - -// validatePorts ensures the ports and protocol match expected -// values. -func validatePorts(to int64, from int64, expected expectedPortPair) bool { - if to != expected.to_port || from != expected.from_port { - return false - } - - return true -} - -// validateCIDRBlock ensures the passed CIDR block represents an implied -// network, and not an overly-specified IP address. 
-func validateCIDRBlock(cidr string) error { - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return err - } - if ipnet.String() != cidr { - return fmt.Errorf("%s is not a valid mask; did you mean %s?", cidr, ipnet) - } - - return nil -} diff --git a/builtin/providers/aws/network_acl_entry_test.go b/builtin/providers/aws/network_acl_entry_test.go deleted file mode 100644 index 46b288e4c..000000000 --- a/builtin/providers/aws/network_acl_entry_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func Test_expandNetworkACLEntry(t *testing.T) { - input := []interface{}{ - map[string]interface{}{ - "protocol": "tcp", - "from_port": 22, - "to_port": 22, - "cidr_block": "0.0.0.0/0", - "action": "deny", - "rule_no": 1, - }, - map[string]interface{}{ - "protocol": "tcp", - "from_port": 443, - "to_port": 443, - "cidr_block": "0.0.0.0/0", - "action": "deny", - "rule_no": 2, - }, - map[string]interface{}{ - "protocol": "-1", - "from_port": 443, - "to_port": 443, - "cidr_block": "0.0.0.0/0", - "action": "deny", - "rule_no": 2, - }, - } - expanded, _ := expandNetworkAclEntries(input, "egress") - - expected := []*ec2.NetworkAclEntry{ - &ec2.NetworkAclEntry{ - Protocol: aws.String("6"), - PortRange: &ec2.PortRange{ - From: aws.Int64(22), - To: aws.Int64(22), - }, - RuleAction: aws.String("deny"), - RuleNumber: aws.Int64(1), - CidrBlock: aws.String("0.0.0.0/0"), - Egress: aws.Bool(true), - }, - &ec2.NetworkAclEntry{ - Protocol: aws.String("6"), - PortRange: &ec2.PortRange{ - From: aws.Int64(443), - To: aws.Int64(443), - }, - RuleAction: aws.String("deny"), - RuleNumber: aws.Int64(2), - CidrBlock: aws.String("0.0.0.0/0"), - Egress: aws.Bool(true), - }, - &ec2.NetworkAclEntry{ - Protocol: aws.String("-1"), - PortRange: &ec2.PortRange{ - From: aws.Int64(443), - To: aws.Int64(443), - }, - RuleAction: aws.String("deny"), - RuleNumber: aws.Int64(2), - 
CidrBlock: aws.String("0.0.0.0/0"), - Egress: aws.Bool(true), - }, - } - - if !reflect.DeepEqual(expanded, expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - expanded, - expected) - } - -} - -func Test_flattenNetworkACLEntry(t *testing.T) { - - apiInput := []*ec2.NetworkAclEntry{ - &ec2.NetworkAclEntry{ - Protocol: aws.String("tcp"), - PortRange: &ec2.PortRange{ - From: aws.Int64(22), - To: aws.Int64(22), - }, - RuleAction: aws.String("deny"), - RuleNumber: aws.Int64(1), - CidrBlock: aws.String("0.0.0.0/0"), - }, - &ec2.NetworkAclEntry{ - Protocol: aws.String("tcp"), - PortRange: &ec2.PortRange{ - From: aws.Int64(443), - To: aws.Int64(443), - }, - RuleAction: aws.String("deny"), - RuleNumber: aws.Int64(2), - CidrBlock: aws.String("0.0.0.0/0"), - }, - } - flattened := flattenNetworkAclEntries(apiInput) - - expected := []map[string]interface{}{ - map[string]interface{}{ - "protocol": "tcp", - "from_port": int64(22), - "to_port": int64(22), - "cidr_block": "0.0.0.0/0", - "action": "deny", - "rule_no": int64(1), - }, - map[string]interface{}{ - "protocol": "tcp", - "from_port": int64(443), - "to_port": int64(443), - "cidr_block": "0.0.0.0/0", - "action": "deny", - "rule_no": int64(2), - }, - } - - if !reflect.DeepEqual(flattened, expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - flattened, - expected) - } - -} - -func Test_validatePorts(t *testing.T) { - for _, ts := range []struct { - to int64 - from int64 - expected *expectedPortPair - wanted bool - }{ - {0, 0, &expectedPortPair{0, 0}, true}, - {0, 1, &expectedPortPair{0, 0}, false}, - } { - got := validatePorts(ts.to, ts.from, *ts.expected) - if got != ts.wanted { - t.Fatalf("Got: %t; Expected: %t\n", got, ts.wanted) - } - } -} - -func Test_validateCIDRBlock(t *testing.T) { - for _, ts := range []struct { - cidr string - shouldErr bool - }{ - {"10.2.2.0/24", false}, - {"10.2.2.0/1234", true}, - {"10/24", true}, - {"10.2.2.2/24", true}, - } { - err := validateCIDRBlock(ts.cidr) - if 
ts.shouldErr && err == nil { - t.Fatalf("Input '%s' should error but didn't!", ts.cidr) - } - if !ts.shouldErr && err != nil { - t.Fatalf("Got unexpected error for '%s' input: %s", ts.cidr, err) - } - } -} diff --git a/builtin/providers/aws/opsworks_layers.go b/builtin/providers/aws/opsworks_layers.go deleted file mode 100644 index c4bfeb6b2..000000000 --- a/builtin/providers/aws/opsworks_layers.go +++ /dev/null @@ -1,645 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" -) - -// OpsWorks has a single concept of "layer" which represents several different -// layer types. The differences between these are in some extra properties that -// get packed into an "Attributes" map, but in the OpsWorks UI these are presented -// as first-class options, and so Terraform prefers to expose them this way and -// hide the implementation detail that they are all packed into a single type -// in the underlying API. -// -// This file contains utilities that are shared between all of the concrete -// layer resource types, which have names matching aws_opsworks_*_layer . 
- -type opsworksLayerTypeAttribute struct { - AttrName string - Type schema.ValueType - Default interface{} - Required bool - WriteOnly bool -} - -type opsworksLayerType struct { - TypeName string - DefaultLayerName string - Attributes map[string]*opsworksLayerTypeAttribute - CustomShortName bool -} - -var ( - opsworksTrueString = "true" - opsworksFalseString = "false" -) - -func (lt *opsworksLayerType) SchemaResource() *schema.Resource { - resourceSchema := map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "auto_assign_elastic_ips": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "auto_assign_public_ips": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "custom_instance_profile_arn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "elastic_load_balancer": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "custom_setup_recipes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "custom_configure_recipes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "custom_deploy_recipes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "custom_undeploy_recipes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "custom_shutdown_recipes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "custom_security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "custom_json": &schema.Schema{ - Type: schema.TypeString, - StateFunc: normalizeJson, - Optional: true, - }, - - 
"auto_healing": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "install_updates_on_boot": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "instance_shutdown_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 120, - }, - - "drain_elb_on_shutdown": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "system_packages": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "stack_id": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "use_ebs_optimized_instances": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "ebs_volume": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - - "iops": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - - "mount_point": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "number_of_disks": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "raid_level": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "standard", - }, - }, - }, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - return hashcode.String(m["mount_point"].(string)) - }, - }, - } - - if lt.CustomShortName { - resourceSchema["short_name"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - } - - if lt.DefaultLayerName != "" { - resourceSchema["name"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: lt.DefaultLayerName, - } - } else { - resourceSchema["name"] = &schema.Schema{ - Type: schema.TypeString, - 
Required: true, - } - } - - for key, def := range lt.Attributes { - resourceSchema[key] = &schema.Schema{ - Type: def.Type, - Default: def.Default, - Required: def.Required, - Optional: !def.Required, - } - } - - return &schema.Resource{ - Read: func(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - return lt.Read(d, client) - }, - Create: func(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - return lt.Create(d, client) - }, - Update: func(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - return lt.Update(d, client) - }, - Delete: func(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - return lt.Delete(d, client) - }, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: resourceSchema, - } -} - -func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWorks) error { - - req := &opsworks.DescribeLayersInput{ - LayerIds: []*string{ - aws.String(d.Id()), - }, - } - - log.Printf("[DEBUG] Reading OpsWorks layer: %s", d.Id()) - - resp, err := client.DescribeLayers(req) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - d.SetId("") - return nil - } - } - return err - } - - layer := resp.Layers[0] - d.Set("id", layer.LayerId) - d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps) - d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps) - d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn) - d.Set("custom_security_group_ids", flattenStringList(layer.CustomSecurityGroupIds)) - d.Set("auto_healing", layer.EnableAutoHealing) - d.Set("install_updates_on_boot", layer.InstallUpdatesOnBoot) - d.Set("name", layer.Name) - d.Set("system_packages", flattenStringList(layer.Packages)) - d.Set("stack_id", layer.StackId) - 
d.Set("use_ebs_optimized_instances", layer.UseEbsOptimizedInstances) - - if lt.CustomShortName { - d.Set("short_name", layer.Shortname) - } - - if v := layer.CustomJson; v == nil { - if err := d.Set("custom_json", ""); err != nil { - return err - } - } else if err := d.Set("custom_json", normalizeJson(*v)); err != nil { - return err - } - - lt.SetAttributeMap(d, layer.Attributes) - lt.SetLifecycleEventConfiguration(d, layer.LifecycleEventConfiguration) - lt.SetCustomRecipes(d, layer.CustomRecipes) - lt.SetVolumeConfigurations(d, layer.VolumeConfigurations) - - /* get ELB */ - ebsRequest := &opsworks.DescribeElasticLoadBalancersInput{ - LayerIds: []*string{ - aws.String(d.Id()), - }, - } - loadBalancers, err := client.DescribeElasticLoadBalancers(ebsRequest) - if err != nil { - return err - } - - if loadBalancers.ElasticLoadBalancers == nil || len(loadBalancers.ElasticLoadBalancers) == 0 { - d.Set("elastic_load_balancer", "") - } else { - loadBalancer := loadBalancers.ElasticLoadBalancers[0] - if loadBalancer != nil { - d.Set("elastic_load_balancer", loadBalancer.ElasticLoadBalancerName) - } - } - - return nil -} - -func (lt *opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.OpsWorks) error { - - req := &opsworks.CreateLayerInput{ - AutoAssignElasticIps: aws.Bool(d.Get("auto_assign_elastic_ips").(bool)), - AutoAssignPublicIps: aws.Bool(d.Get("auto_assign_public_ips").(bool)), - CustomInstanceProfileArn: aws.String(d.Get("custom_instance_profile_arn").(string)), - CustomRecipes: lt.CustomRecipes(d), - CustomSecurityGroupIds: expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)), - EnableAutoHealing: aws.Bool(d.Get("auto_healing").(bool)), - InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), - LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d), - Name: aws.String(d.Get("name").(string)), - Packages: expandStringSet(d.Get("system_packages").(*schema.Set)), - Type: aws.String(lt.TypeName), - StackId: 
aws.String(d.Get("stack_id").(string)), - UseEbsOptimizedInstances: aws.Bool(d.Get("use_ebs_optimized_instances").(bool)), - Attributes: lt.AttributeMap(d), - VolumeConfigurations: lt.VolumeConfigurations(d), - } - - if lt.CustomShortName { - req.Shortname = aws.String(d.Get("short_name").(string)) - } else { - req.Shortname = aws.String(lt.TypeName) - } - - req.CustomJson = aws.String(d.Get("custom_json").(string)) - - log.Printf("[DEBUG] Creating OpsWorks layer: %s", d.Id()) - - resp, err := client.CreateLayer(req) - if err != nil { - return err - } - - layerId := *resp.LayerId - d.SetId(layerId) - d.Set("id", layerId) - - loadBalancer := aws.String(d.Get("elastic_load_balancer").(string)) - if loadBalancer != nil && *loadBalancer != "" { - log.Printf("[DEBUG] Attaching load balancer: %s", *loadBalancer) - _, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{ - ElasticLoadBalancerName: loadBalancer, - LayerId: &layerId, - }) - if err != nil { - return err - } - } - - return lt.Read(d, client) -} - -func (lt *opsworksLayerType) Update(d *schema.ResourceData, client *opsworks.OpsWorks) error { - - req := &opsworks.UpdateLayerInput{ - LayerId: aws.String(d.Id()), - AutoAssignElasticIps: aws.Bool(d.Get("auto_assign_elastic_ips").(bool)), - AutoAssignPublicIps: aws.Bool(d.Get("auto_assign_public_ips").(bool)), - CustomInstanceProfileArn: aws.String(d.Get("custom_instance_profile_arn").(string)), - CustomRecipes: lt.CustomRecipes(d), - CustomSecurityGroupIds: expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)), - EnableAutoHealing: aws.Bool(d.Get("auto_healing").(bool)), - InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), - LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d), - Name: aws.String(d.Get("name").(string)), - Packages: expandStringSet(d.Get("system_packages").(*schema.Set)), - UseEbsOptimizedInstances: aws.Bool(d.Get("use_ebs_optimized_instances").(bool)), - Attributes: 
lt.AttributeMap(d), - VolumeConfigurations: lt.VolumeConfigurations(d), - } - - if lt.CustomShortName { - req.Shortname = aws.String(d.Get("short_name").(string)) - } else { - req.Shortname = aws.String(lt.TypeName) - } - - req.CustomJson = aws.String(d.Get("custom_json").(string)) - - log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id()) - - if d.HasChange("elastic_load_balancer") { - lbo, lbn := d.GetChange("elastic_load_balancer") - - loadBalancerOld := aws.String(lbo.(string)) - loadBalancerNew := aws.String(lbn.(string)) - - if loadBalancerOld != nil && *loadBalancerOld != "" { - log.Printf("[DEBUG] Dettaching load balancer: %s", *loadBalancerOld) - _, err := client.DetachElasticLoadBalancer(&opsworks.DetachElasticLoadBalancerInput{ - ElasticLoadBalancerName: loadBalancerOld, - LayerId: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - - if loadBalancerNew != nil && *loadBalancerNew != "" { - log.Printf("[DEBUG] Attaching load balancer: %s", *loadBalancerNew) - _, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{ - ElasticLoadBalancerName: loadBalancerNew, - LayerId: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - } - - _, err := client.UpdateLayer(req) - if err != nil { - return err - } - - return lt.Read(d, client) -} - -func (lt *opsworksLayerType) Delete(d *schema.ResourceData, client *opsworks.OpsWorks) error { - req := &opsworks.DeleteLayerInput{ - LayerId: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting OpsWorks layer: %s", d.Id()) - - _, err := client.DeleteLayer(req) - return err -} - -func (lt *opsworksLayerType) AttributeMap(d *schema.ResourceData) map[string]*string { - attrs := map[string]*string{} - - for key, def := range lt.Attributes { - value := d.Get(key) - switch def.Type { - case schema.TypeString: - strValue := value.(string) - attrs[def.AttrName] = &strValue - case schema.TypeInt: - intValue := value.(int) - strValue := strconv.Itoa(intValue) - 
attrs[def.AttrName] = &strValue - case schema.TypeBool: - boolValue := value.(bool) - if boolValue { - attrs[def.AttrName] = &opsworksTrueString - } else { - attrs[def.AttrName] = &opsworksFalseString - } - default: - // should never happen - panic(fmt.Errorf("Unsupported OpsWorks layer attribute type")) - } - } - - return attrs -} - -func (lt *opsworksLayerType) SetAttributeMap(d *schema.ResourceData, attrs map[string]*string) { - for key, def := range lt.Attributes { - // Ignore write-only attributes; we'll just keep what we already have stored. - // (The AWS API returns garbage placeholder values for these.) - if def.WriteOnly { - continue - } - - if strPtr, ok := attrs[def.AttrName]; ok && strPtr != nil { - strValue := *strPtr - - switch def.Type { - case schema.TypeString: - d.Set(key, strValue) - case schema.TypeInt: - intValue, err := strconv.Atoi(strValue) - if err == nil { - d.Set(key, intValue) - } else { - // Got garbage from the AWS API - d.Set(key, nil) - } - case schema.TypeBool: - boolValue := true - if strValue == opsworksFalseString { - boolValue = false - } - d.Set(key, boolValue) - default: - // should never happen - panic(fmt.Errorf("Unsupported OpsWorks layer attribute type")) - } - return - - } else { - d.Set(key, nil) - } - } -} - -func (lt *opsworksLayerType) LifecycleEventConfiguration(d *schema.ResourceData) *opsworks.LifecycleEventConfiguration { - return &opsworks.LifecycleEventConfiguration{ - Shutdown: &opsworks.ShutdownEventConfiguration{ - DelayUntilElbConnectionsDrained: aws.Bool(d.Get("drain_elb_on_shutdown").(bool)), - ExecutionTimeout: aws.Int64(int64(d.Get("instance_shutdown_timeout").(int))), - }, - } -} - -func (lt *opsworksLayerType) SetLifecycleEventConfiguration(d *schema.ResourceData, v *opsworks.LifecycleEventConfiguration) { - if v == nil || v.Shutdown == nil { - d.Set("drain_elb_on_shutdown", nil) - d.Set("instance_shutdown_timeout", nil) - } else { - d.Set("drain_elb_on_shutdown", 
v.Shutdown.DelayUntilElbConnectionsDrained) - d.Set("instance_shutdown_timeout", v.Shutdown.ExecutionTimeout) - } -} - -func (lt *opsworksLayerType) CustomRecipes(d *schema.ResourceData) *opsworks.Recipes { - return &opsworks.Recipes{ - Configure: expandStringList(d.Get("custom_configure_recipes").([]interface{})), - Deploy: expandStringList(d.Get("custom_deploy_recipes").([]interface{})), - Setup: expandStringList(d.Get("custom_setup_recipes").([]interface{})), - Shutdown: expandStringList(d.Get("custom_shutdown_recipes").([]interface{})), - Undeploy: expandStringList(d.Get("custom_undeploy_recipes").([]interface{})), - } -} - -func (lt *opsworksLayerType) SetCustomRecipes(d *schema.ResourceData, v *opsworks.Recipes) { - // Null out everything first, and then we'll consider what to put back. - d.Set("custom_configure_recipes", nil) - d.Set("custom_deploy_recipes", nil) - d.Set("custom_setup_recipes", nil) - d.Set("custom_shutdown_recipes", nil) - d.Set("custom_undeploy_recipes", nil) - - if v == nil { - return - } - - d.Set("custom_configure_recipes", flattenStringList(v.Configure)) - d.Set("custom_deploy_recipes", flattenStringList(v.Deploy)) - d.Set("custom_setup_recipes", flattenStringList(v.Setup)) - d.Set("custom_shutdown_recipes", flattenStringList(v.Shutdown)) - d.Set("custom_undeploy_recipes", flattenStringList(v.Undeploy)) -} - -func (lt *opsworksLayerType) VolumeConfigurations(d *schema.ResourceData) []*opsworks.VolumeConfiguration { - configuredVolumes := d.Get("ebs_volume").(*schema.Set).List() - result := make([]*opsworks.VolumeConfiguration, len(configuredVolumes)) - - for i := 0; i < len(configuredVolumes); i++ { - volumeData := configuredVolumes[i].(map[string]interface{}) - - result[i] = &opsworks.VolumeConfiguration{ - MountPoint: aws.String(volumeData["mount_point"].(string)), - NumberOfDisks: aws.Int64(int64(volumeData["number_of_disks"].(int))), - Size: aws.Int64(int64(volumeData["size"].(int))), - VolumeType: 
aws.String(volumeData["type"].(string)), - } - iops := int64(volumeData["iops"].(int)) - if iops != 0 { - result[i].Iops = aws.Int64(iops) - } - - raidLevelStr := volumeData["raid_level"].(string) - if raidLevelStr != "" { - raidLevel, err := strconv.Atoi(raidLevelStr) - if err == nil { - result[i].RaidLevel = aws.Int64(int64(raidLevel)) - } - } - } - - return result -} - -func (lt *opsworksLayerType) SetVolumeConfigurations(d *schema.ResourceData, v []*opsworks.VolumeConfiguration) { - newValue := make([]*map[string]interface{}, len(v)) - - for i := 0; i < len(v); i++ { - config := v[i] - data := make(map[string]interface{}) - newValue[i] = &data - - if config.Iops != nil { - data["iops"] = int(*config.Iops) - } else { - data["iops"] = 0 - } - if config.MountPoint != nil { - data["mount_point"] = *config.MountPoint - } - if config.NumberOfDisks != nil { - data["number_of_disks"] = int(*config.NumberOfDisks) - } - if config.RaidLevel != nil { - data["raid_level"] = strconv.Itoa(int(*config.RaidLevel)) - } - if config.Size != nil { - data["size"] = int(*config.Size) - } - if config.VolumeType != nil { - data["type"] = *config.VolumeType - } - } - - d.Set("ebs_volume", newValue) -} diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go deleted file mode 100644 index d5880d730..000000000 --- a/builtin/providers/aws/provider.go +++ /dev/null @@ -1,815 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - // TODO: Move the validation to this, requires conditional schemas - // TODO: Move the configuration to this, requires validation - - // The actual provider - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["access_key"], - }, - - "secret_key": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["secret_key"], - }, - - "profile": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["profile"], - }, - - "assume_role": assumeRoleSchema(), - - "shared_credentials_file": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["shared_credentials_file"], - }, - - "token": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["token"], - }, - - "region": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", - }, nil), - Description: descriptions["region"], - InputDefault: "us-east-1", - }, - - "max_retries": { - Type: schema.TypeInt, - Optional: true, - Default: 25, - Description: descriptions["max_retries"], - }, - - "allowed_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"forbidden_account_ids"}, - Set: schema.HashString, - }, - - "forbidden_account_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"allowed_account_ids"}, - Set: schema.HashString, - }, - - "dynamodb_endpoint": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["dynamodb_endpoint"], - Removed: "Use `dynamodb` inside `endpoints` block instead", - }, - - "kinesis_endpoint": { - Type: schema.TypeString, - Optional: 
true, - Default: "", - Description: descriptions["kinesis_endpoint"], - Removed: "Use `kinesis` inside `endpoints` block instead", - }, - - "endpoints": endpointsSchema(), - - "insecure": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["insecure"], - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_credentials_validation"], - }, - - "skip_get_ec2_platforms": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_get_ec2_platforms"], - }, - - "skip_region_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_region_validation"], - }, - - "skip_requesting_account_id": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_requesting_account_id"], - }, - - "skip_metadata_api_check": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["skip_metadata_api_check"], - }, - - "s3_force_path_style": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: descriptions["s3_force_path_style"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "aws_acm_certificate": dataSourceAwsAcmCertificate(), - "aws_alb": dataSourceAwsAlb(), - "aws_alb_listener": dataSourceAwsAlbListener(), - "aws_ami": dataSourceAwsAmi(), - "aws_ami_ids": dataSourceAwsAmiIds(), - "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), - "aws_availability_zone": dataSourceAwsAvailabilityZone(), - "aws_availability_zones": dataSourceAwsAvailabilityZones(), - "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), - "aws_caller_identity": dataSourceAwsCallerIdentity(), - "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), - "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), - "aws_db_instance": dataSourceAwsDbInstance(), - "aws_db_snapshot": 
dataSourceAwsDbSnapshot(), - "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), - "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), - "aws_ebs_volume": dataSourceAwsEbsVolume(), - "aws_ecs_cluster": dataSourceAwsEcsCluster(), - "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), - "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), - "aws_efs_file_system": dataSourceAwsEfsFileSystem(), - "aws_eip": dataSourceAwsEip(), - "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), - "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), - "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), - "aws_elb_service_account": dataSourceAwsElbServiceAccount(), - "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), - "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), - "aws_iam_role": dataSourceAwsIAMRole(), - "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), - "aws_instance": dataSourceAwsInstance(), - "aws_ip_ranges": dataSourceAwsIPRanges(), - "aws_kinesis_stream": dataSourceAwsKinesisStream(), - "aws_kms_alias": dataSourceAwsKmsAlias(), - "aws_kms_ciphertext": dataSourceAwsKmsCiphetext(), - "aws_kms_secret": dataSourceAwsKmsSecret(), - "aws_partition": dataSourceAwsPartition(), - "aws_prefix_list": dataSourceAwsPrefixList(), - "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), - "aws_region": dataSourceAwsRegion(), - "aws_route_table": dataSourceAwsRouteTable(), - "aws_route53_zone": dataSourceAwsRoute53Zone(), - "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), - "aws_sns_topic": dataSourceAwsSnsTopic(), - "aws_ssm_parameter": dataSourceAwsSsmParameter(), - "aws_subnet": dataSourceAwsSubnet(), - "aws_subnet_ids": dataSourceAwsSubnetIDs(), - "aws_security_group": dataSourceAwsSecurityGroup(), - "aws_vpc": dataSourceAwsVpc(), - "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), - "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), - 
"aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), - "aws_vpn_gateway": dataSourceAwsVpnGateway(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "aws_alb": resourceAwsAlb(), - "aws_alb_listener": resourceAwsAlbListener(), - "aws_alb_listener_rule": resourceAwsAlbListenerRule(), - "aws_alb_target_group": resourceAwsAlbTargetGroup(), - "aws_alb_target_group_attachment": resourceAwsAlbTargetGroupAttachment(), - "aws_ami": resourceAwsAmi(), - "aws_ami_copy": resourceAwsAmiCopy(), - "aws_ami_from_instance": resourceAwsAmiFromInstance(), - "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), - "aws_api_gateway_account": resourceAwsApiGatewayAccount(), - "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), - "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), - "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), - "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), - "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), - "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), - "aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), - "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), - "aws_api_gateway_method": resourceAwsApiGatewayMethod(), - "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), - "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), - "aws_api_gateway_model": resourceAwsApiGatewayModel(), - "aws_api_gateway_resource": resourceAwsApiGatewayResource(), - "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), - "aws_api_gateway_stage": resourceAwsApiGatewayStage(), - "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), - "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), - "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), - "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), 
- "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), - "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), - "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), - "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), - "aws_cloudformation_stack": resourceAwsCloudFormationStack(), - "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), - "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), - "aws_cloudtrail": resourceAwsCloudTrail(), - "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), - "aws_cloudwatch_event_target": resourceAwsCloudWatchEventTarget(), - "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), - "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), - "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), - "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), - "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), - "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), - "aws_config_config_rule": resourceAwsConfigConfigRule(), - "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), - "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), - "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), - "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), - "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), - "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), - "aws_codedeploy_app": resourceAwsCodeDeployApp(), - "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), - "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), - "aws_codecommit_repository": 
resourceAwsCodeCommitRepository(), - "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), - "aws_codebuild_project": resourceAwsCodeBuildProject(), - "aws_codepipeline": resourceAwsCodePipeline(), - "aws_customer_gateway": resourceAwsCustomerGateway(), - "aws_db_event_subscription": resourceAwsDbEventSubscription(), - "aws_db_instance": resourceAwsDbInstance(), - "aws_db_option_group": resourceAwsDbOptionGroup(), - "aws_db_parameter_group": resourceAwsDbParameterGroup(), - "aws_db_security_group": resourceAwsDbSecurityGroup(), - "aws_db_snapshot": resourceAwsDbSnapshot(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_devicefarm_project": resourceAwsDevicefarmProject(), - "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), - "aws_dms_certificate": resourceAwsDmsCertificate(), - "aws_dms_endpoint": resourceAwsDmsEndpoint(), - "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), - "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), - "aws_dms_replication_task": resourceAwsDmsReplicationTask(), - "aws_dynamodb_table": resourceAwsDynamoDbTable(), - "aws_ebs_snapshot": resourceAwsEbsSnapshot(), - "aws_ebs_volume": resourceAwsEbsVolume(), - "aws_ecr_repository": resourceAwsEcrRepository(), - "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), - "aws_ecs_cluster": resourceAwsEcsCluster(), - "aws_ecs_service": resourceAwsEcsService(), - "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), - "aws_efs_file_system": resourceAwsEfsFileSystem(), - "aws_efs_mount_target": resourceAwsEfsMountTarget(), - "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), - "aws_eip": resourceAwsEip(), - "aws_eip_association": resourceAwsEipAssociation(), - "aws_elasticache_cluster": resourceAwsElasticacheCluster(), - "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), - "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), - 
"aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), - "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), - "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), - "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), - "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), - "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), - "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), - "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), - "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), - "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), - "aws_elb": resourceAwsElb(), - "aws_elb_attachment": resourceAwsElbAttachment(), - "aws_emr_cluster": resourceAwsEMRCluster(), - "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), - "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), - "aws_flow_log": resourceAwsFlowLog(), - "aws_glacier_vault": resourceAwsGlacierVault(), - "aws_iam_access_key": resourceAwsIamAccessKey(), - "aws_iam_account_alias": resourceAwsIamAccountAlias(), - "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), - "aws_iam_group_policy": resourceAwsIamGroupPolicy(), - "aws_iam_group": resourceAwsIamGroup(), - "aws_iam_group_membership": resourceAwsIamGroupMembership(), - "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), - "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), - "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), - "aws_iam_policy": resourceAwsIamPolicy(), - "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), - "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), - "aws_iam_role_policy": resourceAwsIamRolePolicy(), - "aws_iam_role": resourceAwsIamRole(), - 
"aws_iam_saml_provider": resourceAwsIamSamlProvider(), - "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), - "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), - "aws_iam_user_policy": resourceAwsIamUserPolicy(), - "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), - "aws_iam_user": resourceAwsIamUser(), - "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), - "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), - "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), - "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), - "aws_instance": resourceAwsInstance(), - "aws_internet_gateway": resourceAwsInternetGateway(), - "aws_key_pair": resourceAwsKeyPair(), - "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), - "aws_kinesis_stream": resourceAwsKinesisStream(), - "aws_kms_alias": resourceAwsKmsAlias(), - "aws_kms_key": resourceAwsKmsKey(), - "aws_lambda_function": resourceAwsLambdaFunction(), - "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), - "aws_lambda_alias": resourceAwsLambdaAlias(), - "aws_lambda_permission": resourceAwsLambdaPermission(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_lightsail_domain": resourceAwsLightsailDomain(), - "aws_lightsail_instance": resourceAwsLightsailInstance(), - "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), - "aws_lightsail_static_ip": resourceAwsLightsailStaticIp(), - "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), - "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), - "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), - "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), - "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), - "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), - 
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), - "aws_nat_gateway": resourceAwsNatGateway(), - "aws_network_acl": resourceAwsNetworkAcl(), - "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), - "aws_network_acl_rule": resourceAwsNetworkAclRule(), - "aws_network_interface": resourceAwsNetworkInterface(), - "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), - "aws_opsworks_application": resourceAwsOpsworksApplication(), - "aws_opsworks_stack": resourceAwsOpsworksStack(), - "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), - "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), - "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), - "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), - "aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), - "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), - "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), - "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), - "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), - "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), - "aws_opsworks_instance": resourceAwsOpsworksInstance(), - "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), - "aws_opsworks_permission": resourceAwsOpsworksPermission(), - "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), - "aws_placement_group": resourceAwsPlacementGroup(), - "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), - "aws_rds_cluster": resourceAwsRDSCluster(), - "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), - "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), - "aws_redshift_cluster": resourceAwsRedshiftCluster(), - "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), - "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), - 
"aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), - "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), - "aws_route53_record": resourceAwsRoute53Record(), - "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), - "aws_route53_zone": resourceAwsRoute53Zone(), - "aws_route53_health_check": resourceAwsRoute53HealthCheck(), - "aws_route": resourceAwsRoute(), - "aws_route_table": resourceAwsRouteTable(), - "aws_default_route_table": resourceAwsDefaultRouteTable(), - "aws_route_table_association": resourceAwsRouteTableAssociation(), - "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), - "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), - "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), - "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), - "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), - "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), - "aws_ses_event_destination": resourceAwsSesEventDestination(), - "aws_s3_bucket": resourceAwsS3Bucket(), - "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), - "aws_s3_bucket_object": resourceAwsS3BucketObject(), - "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_default_security_group": resourceAwsDefaultSecurityGroup(), - "aws_security_group_rule": resourceAwsSecurityGroupRule(), - "aws_simpledb_domain": resourceAwsSimpleDBDomain(), - "aws_ssm_activation": resourceAwsSsmActivation(), - "aws_ssm_association": resourceAwsSsmAssociation(), - "aws_ssm_document": resourceAwsSsmDocument(), - "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), - "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), - "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), - "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), - "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), - "aws_ssm_parameter": 
resourceAwsSsmParameter(), - "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), - "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), - "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), - "aws_sqs_queue": resourceAwsSqsQueue(), - "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), - "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), - "aws_sns_topic": resourceAwsSnsTopic(), - "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), - "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), - "aws_sfn_activity": resourceAwsSfnActivity(), - "aws_sfn_state_machine": resourceAwsSfnStateMachine(), - "aws_default_subnet": resourceAwsDefaultSubnet(), - "aws_subnet": resourceAwsSubnet(), - "aws_volume_attachment": resourceAwsVolumeAttachment(), - "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), - "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), - "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), - "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), - "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), - "aws_default_vpc": resourceAwsDefaultVpc(), - "aws_vpc": resourceAwsVpc(), - "aws_vpc_endpoint": resourceAwsVpcEndpoint(), - "aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(), - "aws_vpn_connection": resourceAwsVpnConnection(), - "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), - "aws_vpn_gateway": resourceAwsVpnGateway(), - "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), - "aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(), - "aws_waf_byte_match_set": resourceAwsWafByteMatchSet(), - "aws_waf_ipset": resourceAwsWafIPSet(), - "aws_waf_rule": resourceAwsWafRule(), - "aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(), - "aws_waf_web_acl": resourceAwsWafWebAcl(), - "aws_waf_xss_match_set": 
resourceAwsWafXssMatchSet(), - "aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(), - "aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(), - "aws_wafregional_ipset": resourceAwsWafRegionalIPSet(), - }, - ConfigureFunc: providerConfigure, - } -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "region": "The region where AWS operations will take place. Examples\n" + - "are us-east-1, us-west-2, etc.", - - "access_key": "The access key for API operations. You can retrieve this\n" + - "from the 'Security & Credentials' section of the AWS console.", - - "secret_key": "The secret key for API operations. You can retrieve this\n" + - "from the 'Security & Credentials' section of the AWS console.", - - "profile": "The profile for API operations. If not set, the default profile\n" + - "created with `aws configure` will be used.", - - "shared_credentials_file": "The path to the shared credentials file. If not set\n" + - "this defaults to ~/.aws/credentials.", - - "token": "session token. A session token is only required if you are\n" + - "using temporary security credentials.", - - "max_retries": "The maximum number of times an AWS API request is\n" + - "being executed. 
If the API request still fails, an error is\n" + - "thrown.", - - "cloudformation_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "cloudwatch_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "cloudwatchevents_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "cloudwatchlogs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "devicefarm_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "dynamodb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" + - "It's typically used to connect to dynamodb-local.", - - "kinesis_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" + - "It's typically used to connect to kinesalite.", - - "kms_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "iam_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "ec2_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "elb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "rds_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "s3_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "sns_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "sqs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", - - "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + - "default value is `false`", - - "skip_credentials_validation": "Skip the credentials validation via STS API. 
" + - "Used for AWS API implementations that do not have STS available/implemented.", - - "skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " + - "Used by users that don't have ec2:DescribeAccountAttributes permissions.", - - "skip_region_validation": "Skip static validation of region name. " + - "Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).", - - "skip_requesting_account_id": "Skip requesting the account ID. " + - "Used for AWS API implementations that do not have IAM/STS API and/or metadata API.", - - "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + - "Used for AWS API implementations that do not have a metadata api endpoint.", - - "s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" + - "i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" + - "use virtual hosted bucket addressing when possible\n" + - "(http://BUCKET.s3.amazonaws.com/KEY). Specific to the Amazon S3 service.", - - "assume_role_role_arn": "The ARN of an IAM role to assume prior to making API calls.", - - "assume_role_session_name": "The session name to use when assuming the role. If omitted," + - " no session name is passed to the AssumeRole call.", - - "assume_role_external_id": "The external ID to use when assuming the role. If omitted," + - " no external ID is passed to the AssumeRole call.", - - "assume_role_policy": "The permissions applied when assuming a role. 
You cannot use," + - " this policy to grant further permissions that are in excess to those of the, " + - " role that is being assumed.", - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - AccessKey: d.Get("access_key").(string), - SecretKey: d.Get("secret_key").(string), - Profile: d.Get("profile").(string), - CredsFilename: d.Get("shared_credentials_file").(string), - Token: d.Get("token").(string), - Region: d.Get("region").(string), - MaxRetries: d.Get("max_retries").(int), - Insecure: d.Get("insecure").(bool), - SkipCredsValidation: d.Get("skip_credentials_validation").(bool), - SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool), - SkipRegionValidation: d.Get("skip_region_validation").(bool), - SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), - SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), - S3ForcePathStyle: d.Get("s3_force_path_style").(bool), - } - - assumeRoleList := d.Get("assume_role").(*schema.Set).List() - if len(assumeRoleList) == 1 { - assumeRole := assumeRoleList[0].(map[string]interface{}) - config.AssumeRoleARN = assumeRole["role_arn"].(string) - config.AssumeRoleSessionName = assumeRole["session_name"].(string) - config.AssumeRoleExternalID = assumeRole["external_id"].(string) - - if v := assumeRole["policy"].(string); v != "" { - config.AssumeRolePolicy = v - } - - log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q, Policy: %q)", - config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID, config.AssumeRolePolicy) - } else { - log.Printf("[INFO] No assume_role block read from configuration") - } - - endpointsSet := d.Get("endpoints").(*schema.Set) - - for _, endpointsSetI := range endpointsSet.List() { - endpoints := endpointsSetI.(map[string]interface{}) - config.CloudFormationEndpoint = endpoints["cloudformation"].(string) - config.CloudWatchEndpoint = endpoints["cloudwatch"].(string) - 
config.CloudWatchEventsEndpoint = endpoints["cloudwatchevents"].(string) - config.CloudWatchLogsEndpoint = endpoints["cloudwatchlogs"].(string) - config.DeviceFarmEndpoint = endpoints["devicefarm"].(string) - config.DynamoDBEndpoint = endpoints["dynamodb"].(string) - config.Ec2Endpoint = endpoints["ec2"].(string) - config.ElbEndpoint = endpoints["elb"].(string) - config.IamEndpoint = endpoints["iam"].(string) - config.KinesisEndpoint = endpoints["kinesis"].(string) - config.KmsEndpoint = endpoints["kms"].(string) - config.RdsEndpoint = endpoints["rds"].(string) - config.S3Endpoint = endpoints["s3"].(string) - config.SnsEndpoint = endpoints["sns"].(string) - config.SqsEndpoint = endpoints["sqs"].(string) - } - - if v, ok := d.GetOk("allowed_account_ids"); ok { - config.AllowedAccountIds = v.(*schema.Set).List() - } - - if v, ok := d.GetOk("forbidden_account_ids"); ok { - config.ForbiddenAccountIds = v.(*schema.Set).List() - } - - return config.Client() -} - -// This is a global MutexKV for use within this plugin. 
-var awsMutexKV = mutexkv.NewMutexKV() - -func assumeRoleSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: descriptions["assume_role_role_arn"], - }, - - "session_name": { - Type: schema.TypeString, - Optional: true, - Description: descriptions["assume_role_session_name"], - }, - - "external_id": { - Type: schema.TypeString, - Optional: true, - Description: descriptions["assume_role_external_id"], - }, - - "policy": { - Type: schema.TypeString, - Optional: true, - Description: descriptions["assume_role_policy"], - }, - }, - }, - Set: assumeRoleToHash, - } -} - -func assumeRoleToHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["role_arn"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["session_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["external_id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["policy"].(string))) - return hashcode.String(buf.String()) -} - -func endpointsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloudwatch": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["cloudwatch_endpoint"], - }, - "cloudwatchevents": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["cloudwatchevents_endpoint"], - }, - "cloudwatchlogs": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["cloudwatchlogs_endpoint"], - }, - "cloudformation": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["cloudformation_endpoint"], - }, - "devicefarm": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: 
descriptions["devicefarm_endpoint"], - }, - "dynamodb": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["dynamodb_endpoint"], - }, - "iam": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["iam_endpoint"], - }, - - "ec2": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["ec2_endpoint"], - }, - - "elb": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["elb_endpoint"], - }, - "kinesis": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["kinesis_endpoint"], - }, - "kms": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["kms_endpoint"], - }, - "rds": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["rds_endpoint"], - }, - "s3": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["s3_endpoint"], - }, - "sns": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["sns_endpoint"], - }, - "sqs": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: descriptions["sqs_endpoint"], - }, - }, - }, - Set: endpointsToHash, - } -} - -func endpointsToHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["cloudwatch"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchevents"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchlogs"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["cloudformation"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["devicefarm"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["dynamodb"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["iam"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["ec2"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["elb"].(string))) - 
buf.WriteString(fmt.Sprintf("%s-", m["kinesis"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["kms"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["rds"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["s3"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["sns"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["sqs"].(string))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/provider_test.go b/builtin/providers/aws/provider_test.go deleted file mode 100644 index fee6cb0e5..000000000 --- a/builtin/providers/aws/provider_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package aws - -import ( - "log" - "os" - "testing" - - "github.com/hashicorp/terraform/builtin/providers/template" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider -var testAccTemplateProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccTemplateProvider = template.Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "aws": testAccProvider, - "template": testAccTemplateProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("AWS_PROFILE"); v == "" { - if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" { - t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests") - } - if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" { - t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests") - } - } - if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { - log.Println("[INFO] Test: Using us-west-2 as test region") - os.Setenv("AWS_DEFAULT_REGION", "us-west-2") - } - err := 
testAccProvider.Configure(terraform.NewResourceConfig(nil)) - if err != nil { - t.Fatal(err) - } -} diff --git a/builtin/providers/aws/resource_aws_alb.go b/builtin/providers/aws/resource_aws_alb.go deleted file mode 100644 index d1652c680..000000000 --- a/builtin/providers/aws/resource_aws_alb.go +++ /dev/null @@ -1,497 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAlb() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAlbCreate, - Read: resourceAwsAlbRead, - Update: resourceAwsAlbUpdate, - Delete: resourceAwsAlbDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "arn_suffix": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateElbName, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateElbNamePrefix, - }, - - "internal": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "security_groups": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - Optional: true, - Set: schema.HashString, - }, - - "subnets": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - }, - - "access_logs": { - 
Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - }, - "prefix": { - Type: schema.TypeString, - Optional: true, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - - "enable_deletion_protection": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "idle_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - }, - - "ip_address_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - - "zone_id": { - Type: schema.TypeString, - Computed: true, - }, - - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsAlbCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.PrefixedUniqueId("tf-lb-") - } - d.Set("name", name) - - elbOpts := &elbv2.CreateLoadBalancerInput{ - Name: aws.String(name), - Tags: tagsFromMapELBv2(d.Get("tags").(map[string]interface{})), - } - - if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) { - elbOpts.Scheme = aws.String("internal") - } - - if v, ok := d.GetOk("security_groups"); ok { - elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("subnets"); ok { - elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("ip_address_type"); ok { - elbOpts.IpAddressType = aws.String(v.(string)) - } - - log.Printf("[DEBUG] ALB create configuration: %#v", elbOpts) - - resp, err := elbconn.CreateLoadBalancer(elbOpts) - if err != nil { - return errwrap.Wrapf("Error creating 
Application Load Balancer: {{err}}", err) - } - - if len(resp.LoadBalancers) != 1 { - return fmt.Errorf("No load balancers returned following creation of %s", d.Get("name").(string)) - } - - lb := resp.LoadBalancers[0] - d.SetId(*lb.LoadBalancerArn) - log.Printf("[INFO] ALB ID: %s", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"provisioning", "failed"}, - Target: []string{"active"}, - Refresh: func() (interface{}, string, error) { - describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: []*string{lb.LoadBalancerArn}, - }) - if err != nil { - return nil, "", err - } - - if len(describeResp.LoadBalancers) != 1 { - return nil, "", fmt.Errorf("No load balancers returned for %s", *lb.LoadBalancerArn) - } - dLb := describeResp.LoadBalancers[0] - - log.Printf("[INFO] ALB state: %s", *dLb.State.Code) - - return describeResp, *dLb.State.Code, nil - }, - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsAlbUpdate(d, meta) -} - -func resourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - albArn := d.Id() - - describeAlbOpts := &elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: []*string{aws.String(albArn)}, - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ALB is gone now, so just remove it from the state - log.Printf("[WARN] ALB %s not found in AWS, removing from state", d.Id()) - d.SetId("") - return nil - } - - return errwrap.Wrapf("Error retrieving ALB: {{err}}", err) - } - if len(describeResp.LoadBalancers) != 1 { - return fmt.Errorf("Unable to find ALB: %#v", describeResp.LoadBalancers) - } - - return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0]) -} - 
-func resourceAwsAlbUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - if !d.IsNewResource() { - if err := setElbV2Tags(elbconn, d); err != nil { - return errwrap.Wrapf("Error Modifying Tags on ALB: {{err}}", err) - } - } - - attributes := make([]*elbv2.LoadBalancerAttribute, 0) - - if d.HasChange("access_logs") { - logs := d.Get("access_logs").([]interface{}) - if len(logs) == 1 { - log := logs[0].(map[string]interface{}) - - attributes = append(attributes, - &elbv2.LoadBalancerAttribute{ - Key: aws.String("access_logs.s3.enabled"), - Value: aws.String(strconv.FormatBool(log["enabled"].(bool))), - }, - &elbv2.LoadBalancerAttribute{ - Key: aws.String("access_logs.s3.bucket"), - Value: aws.String(log["bucket"].(string)), - }) - - if prefix, ok := log["prefix"]; ok { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ - Key: aws.String("access_logs.s3.prefix"), - Value: aws.String(prefix.(string)), - }) - } - } else if len(logs) == 0 { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ - Key: aws.String("access_logs.s3.enabled"), - Value: aws.String("false"), - }) - } - } - - if d.HasChange("enable_deletion_protection") { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ - Key: aws.String("deletion_protection.enabled"), - Value: aws.String(fmt.Sprintf("%t", d.Get("enable_deletion_protection").(bool))), - }) - } - - if d.HasChange("idle_timeout") { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ - Key: aws.String("idle_timeout.timeout_seconds"), - Value: aws.String(fmt.Sprintf("%d", d.Get("idle_timeout").(int))), - }) - } - - if len(attributes) != 0 { - input := &elbv2.ModifyLoadBalancerAttributesInput{ - LoadBalancerArn: aws.String(d.Id()), - Attributes: attributes, - } - - log.Printf("[DEBUG] ALB Modify Load Balancer Attributes Request: %#v", input) - _, err := elbconn.ModifyLoadBalancerAttributes(input) - if err != nil { - return fmt.Errorf("Failure configuring 
ALB attributes: %s", err) - } - } - - if d.HasChange("security_groups") { - sgs := expandStringList(d.Get("security_groups").(*schema.Set).List()) - - params := &elbv2.SetSecurityGroupsInput{ - LoadBalancerArn: aws.String(d.Id()), - SecurityGroups: sgs, - } - _, err := elbconn.SetSecurityGroups(params) - if err != nil { - return fmt.Errorf("Failure Setting ALB Security Groups: %s", err) - } - - } - - if d.HasChange("subnets") { - subnets := expandStringList(d.Get("subnets").(*schema.Set).List()) - - params := &elbv2.SetSubnetsInput{ - LoadBalancerArn: aws.String(d.Id()), - Subnets: subnets, - } - - _, err := elbconn.SetSubnets(params) - if err != nil { - return fmt.Errorf("Failure Setting ALB Subnets: %s", err) - } - } - - if d.HasChange("ip_address_type") { - - params := &elbv2.SetIpAddressTypeInput{ - LoadBalancerArn: aws.String(d.Id()), - IpAddressType: aws.String(d.Get("ip_address_type").(string)), - } - - _, err := elbconn.SetIpAddressType(params) - if err != nil { - return fmt.Errorf("Failure Setting ALB IP Address Type: %s", err) - } - - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "provisioning", "failed"}, - Target: []string{"active"}, - Refresh: func() (interface{}, string, error) { - describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: []*string{aws.String(d.Id())}, - }) - if err != nil { - return nil, "", err - } - - if len(describeResp.LoadBalancers) != 1 { - return nil, "", fmt.Errorf("No load balancers returned for %s", d.Id()) - } - dLb := describeResp.LoadBalancers[0] - - log.Printf("[INFO] ALB state: %s", *dLb.State.Code) - - return describeResp, *dLb.State.Code, nil - }, - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsAlbRead(d, meta) -} - -func resourceAwsAlbDelete(d 
*schema.ResourceData, meta interface{}) error { - albconn := meta.(*AWSClient).elbv2conn - - log.Printf("[INFO] Deleting ALB: %s", d.Id()) - - // Destroy the load balancer - deleteElbOpts := elbv2.DeleteLoadBalancerInput{ - LoadBalancerArn: aws.String(d.Id()), - } - if _, err := albconn.DeleteLoadBalancer(&deleteElbOpts); err != nil { - return fmt.Errorf("Error deleting ALB: %s", err) - } - - return nil -} - -// flattenSubnetsFromAvailabilityZones creates a slice of strings containing the subnet IDs -// for the ALB based on the AvailabilityZones structure returned by the API. -func flattenSubnetsFromAvailabilityZones(availabilityZones []*elbv2.AvailabilityZone) []string { - var result []string - for _, az := range availabilityZones { - result = append(result, *az.SubnetId) - } - return result -} - -func albSuffixFromARN(arn *string) string { - if arn == nil { - return "" - } - - if arnComponents := regexp.MustCompile(`arn:.*:loadbalancer/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 { - if len(arnComponents[0]) == 2 { - return arnComponents[0][1] - } - } - - return "" -} - -// flattenAwsAlbResource takes a *elbv2.LoadBalancer and populates all respective resource fields. 
-func flattenAwsAlbResource(d *schema.ResourceData, meta interface{}, alb *elbv2.LoadBalancer) error { - elbconn := meta.(*AWSClient).elbv2conn - - d.Set("arn", alb.LoadBalancerArn) - d.Set("arn_suffix", albSuffixFromARN(alb.LoadBalancerArn)) - d.Set("name", alb.LoadBalancerName) - d.Set("internal", (alb.Scheme != nil && *alb.Scheme == "internal")) - d.Set("security_groups", flattenStringList(alb.SecurityGroups)) - d.Set("subnets", flattenSubnetsFromAvailabilityZones(alb.AvailabilityZones)) - d.Set("vpc_id", alb.VpcId) - d.Set("zone_id", alb.CanonicalHostedZoneId) - d.Set("dns_name", alb.DNSName) - d.Set("ip_address_type", alb.IpAddressType) - - respTags, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{ - ResourceArns: []*string{alb.LoadBalancerArn}, - }) - if err != nil { - return errwrap.Wrapf("Error retrieving ALB Tags: {{err}}", err) - } - - var et []*elbv2.Tag - if len(respTags.TagDescriptions) > 0 { - et = respTags.TagDescriptions[0].Tags - } - d.Set("tags", tagsToMapELBv2(et)) - - attributesResp, err := elbconn.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{ - LoadBalancerArn: aws.String(d.Id()), - }) - if err != nil { - return errwrap.Wrapf("Error retrieving ALB Attributes: {{err}}", err) - } - - accessLogMap := map[string]interface{}{} - for _, attr := range attributesResp.Attributes { - switch *attr.Key { - case "access_logs.s3.enabled": - accessLogMap["enabled"] = *attr.Value - case "access_logs.s3.bucket": - accessLogMap["bucket"] = *attr.Value - case "access_logs.s3.prefix": - accessLogMap["prefix"] = *attr.Value - case "idle_timeout.timeout_seconds": - timeout, err := strconv.Atoi(*attr.Value) - if err != nil { - return errwrap.Wrapf("Error parsing ALB timeout: {{err}}", err) - } - log.Printf("[DEBUG] Setting ALB Timeout Seconds: %d", timeout) - d.Set("idle_timeout", timeout) - case "deletion_protection.enabled": - protectionEnabled := (*attr.Value) == "true" - log.Printf("[DEBUG] Setting ALB Deletion Protection 
Enabled: %t", protectionEnabled) - d.Set("enable_deletion_protection", protectionEnabled) - } - } - - log.Printf("[DEBUG] Setting ALB Access Logs: %#v", accessLogMap) - if accessLogMap["bucket"] != "" || accessLogMap["prefix"] != "" { - d.Set("access_logs", []interface{}{accessLogMap}) - } else { - d.Set("access_logs", []interface{}{}) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_alb_listener.go b/builtin/providers/aws/resource_aws_alb_listener.go deleted file mode 100644 index f94e3b1a1..000000000 --- a/builtin/providers/aws/resource_aws_alb_listener.go +++ /dev/null @@ -1,284 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAlbListener() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAlbListenerCreate, - Read: resourceAwsAlbListenerRead, - Update: resourceAwsAlbListenerUpdate, - Delete: resourceAwsAlbListenerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "load_balancer_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "port": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateAwsAlbListenerPort, - }, - - "protocol": { - Type: schema.TypeString, - Optional: true, - Default: "HTTP", - StateFunc: func(v interface{}) string { - return strings.ToUpper(v.(string)) - }, - ValidateFunc: validateAwsAlbListenerProtocol, - }, - - "ssl_policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "certificate_arn": { - Type: schema.TypeString, - Optional: true, - }, - - "default_action": { - Type: schema.TypeList, - 
Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_group_arn": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsAlbListenerActionType, - }, - }, - }, - }, - }, - } -} - -func resourceAwsAlbListenerCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - albArn := d.Get("load_balancer_arn").(string) - - params := &elbv2.CreateListenerInput{ - LoadBalancerArn: aws.String(albArn), - Port: aws.Int64(int64(d.Get("port").(int))), - Protocol: aws.String(d.Get("protocol").(string)), - } - - if sslPolicy, ok := d.GetOk("ssl_policy"); ok { - params.SslPolicy = aws.String(sslPolicy.(string)) - } - - if certificateArn, ok := d.GetOk("certificate_arn"); ok { - params.Certificates = make([]*elbv2.Certificate, 1) - params.Certificates[0] = &elbv2.Certificate{ - CertificateArn: aws.String(certificateArn.(string)), - } - } - - if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 { - params.DefaultActions = make([]*elbv2.Action, len(defaultActions)) - - for i, defaultAction := range defaultActions { - defaultActionMap := defaultAction.(map[string]interface{}) - - params.DefaultActions[i] = &elbv2.Action{ - TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)), - Type: aws.String(defaultActionMap["type"].(string)), - } - } - } - - var resp *elbv2.CreateListenerOutput - - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - var err error - log.Printf("[DEBUG] Creating ALB listener for ARN: %s", d.Get("load_balancer_arn").(string)) - resp, err = elbconn.CreateListener(params) - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "CertificateNotFound" { - log.Printf("[WARN] Got an error while trying to create ALB listener for ARN: %s: %s", albArn, err) - return resource.RetryableError(err) - } - } - if err != nil { - return 
resource.NonRetryableError(err) - } - - return nil - }) - - if err != nil { - return errwrap.Wrapf("Error creating ALB Listener: {{err}}", err) - } - - if len(resp.Listeners) == 0 { - return errors.New("Error creating ALB Listener: no listeners returned in response") - } - - d.SetId(*resp.Listeners[0].ListenerArn) - - return resourceAwsAlbListenerRead(d, meta) -} - -func resourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - resp, err := elbconn.DescribeListeners(&elbv2.DescribeListenersInput{ - ListenerArns: []*string{aws.String(d.Id())}, - }) - if err != nil { - if isListenerNotFound(err) { - log.Printf("[WARN] DescribeListeners - removing %s from state", d.Id()) - d.SetId("") - return nil - } - return errwrap.Wrapf("Error retrieving Listener: {{err}}", err) - } - - if len(resp.Listeners) != 1 { - return fmt.Errorf("Error retrieving Listener %q", d.Id()) - } - - listener := resp.Listeners[0] - - d.Set("arn", listener.ListenerArn) - d.Set("load_balancer_arn", listener.LoadBalancerArn) - d.Set("port", listener.Port) - d.Set("protocol", listener.Protocol) - d.Set("ssl_policy", listener.SslPolicy) - - if listener.Certificates != nil && len(listener.Certificates) == 1 { - d.Set("certificate_arn", listener.Certificates[0].CertificateArn) - } - - defaultActions := make([]map[string]interface{}, 0) - if listener.DefaultActions != nil && len(listener.DefaultActions) > 0 { - for _, defaultAction := range listener.DefaultActions { - action := map[string]interface{}{ - "target_group_arn": *defaultAction.TargetGroupArn, - "type": *defaultAction.Type, - } - defaultActions = append(defaultActions, action) - } - } - d.Set("default_action", defaultActions) - - return nil -} - -func resourceAwsAlbListenerUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - params := &elbv2.ModifyListenerInput{ - ListenerArn: aws.String(d.Id()), - Port: 
aws.Int64(int64(d.Get("port").(int))), - Protocol: aws.String(d.Get("protocol").(string)), - } - - if sslPolicy, ok := d.GetOk("ssl_policy"); ok { - params.SslPolicy = aws.String(sslPolicy.(string)) - } - - if certificateArn, ok := d.GetOk("certificate_arn"); ok { - params.Certificates = make([]*elbv2.Certificate, 1) - params.Certificates[0] = &elbv2.Certificate{ - CertificateArn: aws.String(certificateArn.(string)), - } - } - - if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 { - params.DefaultActions = make([]*elbv2.Action, len(defaultActions)) - - for i, defaultAction := range defaultActions { - defaultActionMap := defaultAction.(map[string]interface{}) - - params.DefaultActions[i] = &elbv2.Action{ - TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)), - Type: aws.String(defaultActionMap["type"].(string)), - } - } - } - - _, err := elbconn.ModifyListener(params) - if err != nil { - return errwrap.Wrapf("Error modifying ALB Listener: {{err}}", err) - } - - return resourceAwsAlbListenerRead(d, meta) -} - -func resourceAwsAlbListenerDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - _, err := elbconn.DeleteListener(&elbv2.DeleteListenerInput{ - ListenerArn: aws.String(d.Id()), - }) - if err != nil { - return errwrap.Wrapf("Error deleting Listener: {{err}}", err) - } - - return nil -} - -func validateAwsAlbListenerPort(v interface{}, k string) (ws []string, errors []error) { - port := v.(int) - if port < 1 || port > 65536 { - errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536)", k)) - } - return -} - -func validateAwsAlbListenerProtocol(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - if value == "http" || value == "https" { - return - } - - errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS")) - return -} - -func validateAwsAlbListenerActionType(v 
interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - if value != "forward" { - errors = append(errors, fmt.Errorf("%q must have the value %q", k, "forward")) - } - return -} - -func isListenerNotFound(err error) bool { - elberr, ok := err.(awserr.Error) - return ok && elberr.Code() == "ListenerNotFound" -} diff --git a/builtin/providers/aws/resource_aws_alb_listener_rule.go b/builtin/providers/aws/resource_aws_alb_listener_rule.go deleted file mode 100644 index 21292753c..000000000 --- a/builtin/providers/aws/resource_aws_alb_listener_rule.go +++ /dev/null @@ -1,293 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAlbListenerRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAlbListenerRuleCreate, - Read: resourceAwsAlbListenerRuleRead, - Update: resourceAwsAlbListenerRuleUpdate, - Delete: resourceAwsAlbListenerRuleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "listener_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "priority": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: validateAwsAlbListenerRulePriority, - }, - "action": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_group_arn": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsAlbListenerActionType, - }, - }, - }, - }, - "condition": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"field": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateAwsListenerRuleField, - }, - "values": { - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsAlbListenerRuleCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - params := &elbv2.CreateRuleInput{ - ListenerArn: aws.String(d.Get("listener_arn").(string)), - Priority: aws.Int64(int64(d.Get("priority").(int))), - } - - actions := d.Get("action").([]interface{}) - params.Actions = make([]*elbv2.Action, len(actions)) - for i, action := range actions { - actionMap := action.(map[string]interface{}) - params.Actions[i] = &elbv2.Action{ - TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)), - Type: aws.String(actionMap["type"].(string)), - } - } - - conditions := d.Get("condition").([]interface{}) - params.Conditions = make([]*elbv2.RuleCondition, len(conditions)) - for i, condition := range conditions { - conditionMap := condition.(map[string]interface{}) - values := conditionMap["values"].([]interface{}) - params.Conditions[i] = &elbv2.RuleCondition{ - Field: aws.String(conditionMap["field"].(string)), - Values: make([]*string, len(values)), - } - for j, value := range values { - params.Conditions[i].Values[j] = aws.String(value.(string)) - } - } - - resp, err := elbconn.CreateRule(params) - if err != nil { - return errwrap.Wrapf("Error creating ALB Listener Rule: {{err}}", err) - } - - if len(resp.Rules) == 0 { - return errors.New("Error creating ALB Listener Rule: no rules returned in response") - } - - d.SetId(*resp.Rules[0].RuleArn) - - return resourceAwsAlbListenerRuleRead(d, meta) -} - -func resourceAwsAlbListenerRuleRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - resp, err := elbconn.DescribeRules(&elbv2.DescribeRulesInput{ - RuleArns: []*string{aws.String(d.Id())}, - 
}) - if err != nil { - if isRuleNotFound(err) { - log.Printf("[WARN] DescribeRules - removing %s from state", d.Id()) - d.SetId("") - return nil - } - return errwrap.Wrapf(fmt.Sprintf("Error retrieving Rules for listener %s: {{err}}", d.Id()), err) - } - - if len(resp.Rules) != 1 { - return fmt.Errorf("Error retrieving Rule %q", d.Id()) - } - - rule := resp.Rules[0] - - d.Set("arn", rule.RuleArn) - // Rules are evaluated in priority order, from the lowest value to the highest value. The default rule has the lowest priority. - if *rule.Priority == "default" { - d.Set("priority", 99999) - } else { - if priority, err := strconv.Atoi(*rule.Priority); err != nil { - return errwrap.Wrapf("Cannot convert rule priority %q to int: {{err}}", err) - } else { - d.Set("priority", priority) - } - } - - actions := make([]interface{}, len(rule.Actions)) - for i, action := range rule.Actions { - actionMap := make(map[string]interface{}) - actionMap["target_group_arn"] = *action.TargetGroupArn - actionMap["type"] = *action.Type - actions[i] = actionMap - } - d.Set("action", actions) - - conditions := make([]interface{}, len(rule.Conditions)) - for i, condition := range rule.Conditions { - conditionMap := make(map[string]interface{}) - conditionMap["field"] = *condition.Field - conditionValues := make([]string, len(condition.Values)) - for k, value := range condition.Values { - conditionValues[k] = *value - } - conditionMap["values"] = conditionValues - conditions[i] = conditionMap - } - d.Set("condition", conditions) - - return nil -} - -func resourceAwsAlbListenerRuleUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - d.Partial(true) - - if d.HasChange("priority") { - params := &elbv2.SetRulePrioritiesInput{ - RulePriorities: []*elbv2.RulePriorityPair{ - { - RuleArn: aws.String(d.Id()), - Priority: aws.Int64(int64(d.Get("priority").(int))), - }, - }, - } - - _, err := elbconn.SetRulePriorities(params) - if err != nil { - return err 
- } - - d.SetPartial("priority") - } - - requestUpdate := false - params := &elbv2.ModifyRuleInput{ - RuleArn: aws.String(d.Id()), - } - - if d.HasChange("action") { - actions := d.Get("action").([]interface{}) - params.Actions = make([]*elbv2.Action, len(actions)) - for i, action := range actions { - actionMap := action.(map[string]interface{}) - params.Actions[i] = &elbv2.Action{ - TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)), - Type: aws.String(actionMap["type"].(string)), - } - } - requestUpdate = true - d.SetPartial("action") - } - - if d.HasChange("condition") { - conditions := d.Get("condition").([]interface{}) - params.Conditions = make([]*elbv2.RuleCondition, len(conditions)) - for i, condition := range conditions { - conditionMap := condition.(map[string]interface{}) - values := conditionMap["values"].([]interface{}) - params.Conditions[i] = &elbv2.RuleCondition{ - Field: aws.String(conditionMap["field"].(string)), - Values: make([]*string, len(values)), - } - for j, value := range values { - params.Conditions[i].Values[j] = aws.String(value.(string)) - } - } - requestUpdate = true - d.SetPartial("condition") - } - - if requestUpdate { - resp, err := elbconn.ModifyRule(params) - if err != nil { - return errwrap.Wrapf("Error modifying ALB Listener Rule: {{err}}", err) - } - - if len(resp.Rules) == 0 { - return errors.New("Error modifying creating ALB Listener Rule: no rules returned in response") - } - } - - d.Partial(false) - - return resourceAwsAlbListenerRuleRead(d, meta) -} - -func resourceAwsAlbListenerRuleDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - _, err := elbconn.DeleteRule(&elbv2.DeleteRuleInput{ - RuleArn: aws.String(d.Id()), - }) - if err != nil && !isRuleNotFound(err) { - return errwrap.Wrapf("Error deleting ALB Listener Rule: {{err}}", err) - } - return nil -} - -func validateAwsAlbListenerRulePriority(v interface{}, k string) (ws []string, errors []error) { - 
value := v.(int) - if value < 1 || value > 99999 { - errors = append(errors, fmt.Errorf("%q must be in the range 1-99999", k)) - } - return -} - -func validateAwsListenerRuleField(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 64 { - errors = append(errors, fmt.Errorf("%q must be a maximum of 64 characters", k)) - } - return -} - -func isRuleNotFound(err error) bool { - elberr, ok := err.(awserr.Error) - return ok && elberr.Code() == "RuleNotFound" -} diff --git a/builtin/providers/aws/resource_aws_alb_listener_rule_test.go b/builtin/providers/aws/resource_aws_alb_listener_rule_test.go deleted file mode 100644 index 8ddc0ef9e..000000000 --- a/builtin/providers/aws/resource_aws_alb_listener_rule_test.go +++ /dev/null @@ -1,647 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSALBListenerRule_basic(t *testing.T) { - var conf elbv2.Rule - albName := fmt.Sprintf("testrule-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_listener_rule.static", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerRuleConfig_basic(albName, targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBListenerRuleExists("aws_alb_listener_rule.static", &conf), - resource.TestCheckResourceAttrSet("aws_alb_listener_rule.static", "arn"), - 
resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "priority", "100"), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "action.#", "1"), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "action.0.type", "forward"), - resource.TestCheckResourceAttrSet("aws_alb_listener_rule.static", "action.0.target_group_arn"), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "condition.#", "1"), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "condition.0.field", "path-pattern"), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "condition.0.values.#", "1"), - resource.TestCheckResourceAttrSet("aws_alb_listener_rule.static", "condition.0.values.0"), - ), - }, - }, - }) -} - -func TestAccAWSALBListenerRule_updateRulePriority(t *testing.T) { - var rule elbv2.Rule - albName := fmt.Sprintf("testrule-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_listener_rule.static", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerRuleConfig_basic(albName, targetGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBListenerRuleExists("aws_alb_listener_rule.static", &rule), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "priority", "100"), - ), - }, - { - Config: testAccAWSALBListenerRuleConfig_updateRulePriority(albName, targetGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBListenerRuleExists("aws_alb_listener_rule.static", &rule), - resource.TestCheckResourceAttr("aws_alb_listener_rule.static", "priority", "101"), - ), - }, - }, - }) -} - -func TestAccAWSALBListenerRule_changeListenerRuleArnForcesNew(t 
*testing.T) { - var before, after elbv2.Rule - albName := fmt.Sprintf("testrule-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_listener_rule.static", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerRuleConfig_basic(albName, targetGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBListenerRuleExists("aws_alb_listener_rule.static", &before), - ), - }, - { - Config: testAccAWSALBListenerRuleConfig_changeRuleArn(albName, targetGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBListenerRuleExists("aws_alb_listener_rule.static", &after), - testAccCheckAWSAlbListenerRuleRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func TestAccAWSALBListenerRule_multipleConditionThrowsError(t *testing.T) { - albName := fmt.Sprintf("testrule-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerRuleConfig_multipleConditions(albName, targetGroupName), - ExpectError: regexp.MustCompile(`attribute supports 1 item maximum`), - }, - }, - }) -} - -func testAccCheckAWSAlbListenerRuleRecreated(t *testing.T, - before, after *elbv2.Rule) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.RuleArn == *after.RuleArn { - t.Fatalf("Expected change of Listener Rule ARNs, but both were %v", before.RuleArn) - } - return nil 
- } -} - -func testAccCheckAWSALBListenerRuleExists(n string, res *elbv2.Rule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No Listener Rule ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - describe, err := conn.DescribeRules(&elbv2.DescribeRulesInput{ - RuleArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describe.Rules) != 1 || - *describe.Rules[0].RuleArn != rs.Primary.ID { - return errors.New("Listener Rule not found") - } - - *res = *describe.Rules[0] - return nil - } -} - -func testAccCheckAWSALBListenerRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_alb_listener_rule" { - continue - } - - describe, err := conn.DescribeRules(&elbv2.DescribeRulesInput{ - RuleArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.Rules) != 0 && - *describe.Rules[0].RuleArn == rs.Primary.ID { - return fmt.Errorf("Listener Rule %q still exists", rs.Primary.ID) - } - } - - // Verify the error - if isRuleNotFound(err) { - return nil - } else { - return errwrap.Wrapf("Unexpected error checking ALB Listener Rule destroyed: {{err}}", err) - } - } - - return nil -} - -func testAccAWSALBListenerRuleConfig_multipleConditions(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener_rule" "static" { - listener_arn = "${aws_alb_listener.front_end.arn}" - priority = 100 - - action { - type = "forward" - target_group_arn = "${aws_alb_target_group.test.arn}" - } - - condition { - field = "path-pattern" - values = ["/static/*", "static"] - } -} - -resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - 
default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, targetGroupName) -} - -func testAccAWSALBListenerRuleConfig_basic(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener_rule" "static" { - listener_arn = "${aws_alb_listener.front_end.arn}" - priority = 100 - - action { - type = "forward" - target_group_arn = "${aws_alb_target_group.test.arn}" - } 
- - condition { - field = "path-pattern" - values = ["/static/*"] - } -} - -resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, targetGroupName) -} - -func testAccAWSALBListenerRuleConfig_updateRulePriority(albName, targetGroupName string) string { - return 
fmt.Sprintf(` -resource "aws_alb_listener_rule" "static" { - listener_arn = "${aws_alb_listener.front_end.arn}" - priority = 101 - - action { - type = "forward" - target_group_arn = "${aws_alb_target_group.test.arn}" - } - - condition { - field = "path-pattern" - values = ["/static/*"] - } -} - -resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - 
cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, targetGroupName) -} - -func testAccAWSALBListenerRuleConfig_changeRuleArn(albName, targetGroupName string) string { - return fmt.Sprintf(` -resource "aws_alb_listener_rule" "static" { - listener_arn = "${aws_alb_listener.front_end_ruleupdate.arn}" - priority = 101 - - action { - type = "forward" - target_group_arn = "${aws_alb_target_group.test.arn}" - } - - condition { - field = "path-pattern" - values = ["/static/*"] - } -} - -resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb_listener" "front_end_ruleupdate" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "8080" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - 
map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, targetGroupName) -} diff --git a/builtin/providers/aws/resource_aws_alb_listener_test.go b/builtin/providers/aws/resource_aws_alb_listener_test.go deleted file mode 100644 index 6fdd84c28..000000000 --- a/builtin/providers/aws/resource_aws_alb_listener_test.go +++ /dev/null @@ -1,393 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSALBListener_basic(t *testing.T) { - var conf elbv2.Listener - albName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_listener.front_end", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerConfig_basic(albName, targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBListenerExists("aws_alb_listener.front_end", &conf), - 
resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "load_balancer_arn"), - resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "arn"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "protocol", "HTTP"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "port", "80"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "default_action.#", "1"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "default_action.0.type", "forward"), - resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "default_action.0.target_group_arn"), - ), - }, - }, - }) -} - -func TestAccAWSALBListener_https(t *testing.T) { - var conf elbv2.Listener - albName := fmt.Sprintf("testlistener-https-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_listener.front_end", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBListenerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBListenerConfig_https(albName, targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBListenerExists("aws_alb_listener.front_end", &conf), - resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "load_balancer_arn"), - resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "arn"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "protocol", "HTTPS"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "port", "443"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "default_action.#", "1"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "default_action.0.type", "forward"), - resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "default_action.0.target_group_arn"), 
- resource.TestCheckResourceAttrSet("aws_alb_listener.front_end", "certificate_arn"), - resource.TestCheckResourceAttr("aws_alb_listener.front_end", "ssl_policy", "ELBSecurityPolicy-2015-05"), - ), - }, - }, - }) -} - -func testAccCheckAWSALBListenerExists(n string, res *elbv2.Listener) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No Listener ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - describe, err := conn.DescribeListeners(&elbv2.DescribeListenersInput{ - ListenerArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describe.Listeners) != 1 || - *describe.Listeners[0].ListenerArn != rs.Primary.ID { - return errors.New("Listener not found") - } - - *res = *describe.Listeners[0] - return nil - } -} - -func testAccCheckAWSALBListenerDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_alb_listener" { - continue - } - - describe, err := conn.DescribeListeners(&elbv2.DescribeListenersInput{ - ListenerArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.Listeners) != 0 && - *describe.Listeners[0].ListenerArn == rs.Primary.ID { - return fmt.Errorf("Listener %q still exists", rs.Primary.ID) - } - } - - // Verify the error - if isListenerNotFound(err) { - return nil - } else { - return errwrap.Wrapf("Unexpected error checking ALB Listener destroyed: {{err}}", err) - } - } - - return nil -} - -func testAccAWSALBListenerConfig_basic(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - 
type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, targetGroupName) -} - -func testAccAWSALBListenerConfig_https(albName, targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_listener" "front_end" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTPS" - port = "443" - ssl_policy = "ELBSecurityPolicy-2015-05" - certificate_arn = "${aws_iam_server_certificate.test_cert.arn}" - - default_action { - target_group_arn = 
"${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb" "alb_test" { - name = "%s" - internal = false - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 8080 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.alb_test.id}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_iam_server_certificate" "test_cert" { - name = "terraform-test-cert-%d" - certificate_body = < 0 { - params := &elbv2.ModifyTargetGroupAttributesInput{ - TargetGroupArn: aws.String(d.Id()), - Attributes: attrs, - } - - _, err := 
elbconn.ModifyTargetGroupAttributes(params) - if err != nil { - return errwrap.Wrapf("Error modifying Target Group Attributes: {{err}}", err) - } - } - - return resourceAwsAlbTargetGroupRead(d, meta) -} - -func resourceAwsAlbTargetGroupDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - _, err := elbconn.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{ - TargetGroupArn: aws.String(d.Id()), - }) - if err != nil { - return errwrap.Wrapf("Error deleting Target Group: {{err}}", err) - } - - return nil -} - -func isTargetGroupNotFound(err error) bool { - elberr, ok := err.(awserr.Error) - return ok && elberr.Code() == "TargetGroupNotFound" -} - -func validateAwsAlbTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 1024 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 1024 characters: %q", k, value)) - } - return -} - -func validateAwsAlbTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if value == "traffic-port" { - return - } - - port, err := strconv.Atoi(value) - if err != nil { - errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536) or %q", k, "traffic-port")) - } - - if port < 1 || port > 65536 { - errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536) or %q", k, "traffic-port")) - } - - return -} - -func validateAwsAlbTargetGroupHealthCheckHealthyThreshold(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 2 || value > 10 { - errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 10", k)) - } - return -} - -func validateAwsAlbTargetGroupHealthCheckTimeout(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 2 || value > 60 { - errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 60", k)) - } - return -} - -func 
validateAwsAlbTargetGroupHealthCheckProtocol(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - if value == "http" || value == "https" { - return - } - - errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS")) - return -} - -func validateAwsAlbTargetGroupPort(v interface{}, k string) (ws []string, errors []error) { - port := v.(int) - if port < 1 || port > 65536 { - errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536)", k)) - } - return -} - -func validateAwsAlbTargetGroupProtocol(v interface{}, k string) (ws []string, errors []error) { - protocol := strings.ToLower(v.(string)) - if protocol == "http" || protocol == "https" { - return - } - - errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS")) - return -} - -func validateAwsAlbTargetGroupDeregistrationDelay(v interface{}, k string) (ws []string, errors []error) { - delay := v.(int) - if delay < 0 || delay > 3600 { - errors = append(errors, fmt.Errorf("%q must be in the range 0-3600 seconds", k)) - } - return -} - -func validateAwsAlbTargetGroupStickinessType(v interface{}, k string) (ws []string, errors []error) { - stickinessType := v.(string) - if stickinessType != "lb_cookie" { - errors = append(errors, fmt.Errorf("%q must have the value %q", k, "lb_cookie")) - } - return -} - -func validateAwsAlbTargetGroupStickinessCookieDuration(v interface{}, k string) (ws []string, errors []error) { - duration := v.(int) - if duration < 1 || duration > 604800 { - errors = append(errors, fmt.Errorf("%q must be a between 1 second and 1 week (1-604800 seconds))", k)) - } - return -} - -func albTargetGroupSuffixFromARN(arn *string) string { - if arn == nil { - return "" - } - - if arnComponents := regexp.MustCompile(`arn:.*:targetgroup/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 { - if len(arnComponents[0]) == 2 { - return fmt.Sprintf("targetgroup/%s", arnComponents[0][1]) - 
} - } - - return "" -} diff --git a/builtin/providers/aws/resource_aws_alb_target_group_attachment.go b/builtin/providers/aws/resource_aws_alb_target_group_attachment.go deleted file mode 100644 index 55a3b7392..000000000 --- a/builtin/providers/aws/resource_aws_alb_target_group_attachment.go +++ /dev/null @@ -1,141 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAlbTargetGroupAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAlbAttachmentCreate, - Read: resourceAwsAlbAttachmentRead, - Delete: resourceAwsAlbAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "target_group_arn": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "target_id": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "port": { - Type: schema.TypeInt, - ForceNew: true, - Optional: true, - }, - }, - } -} - -func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) - } - - params := &elbv2.RegisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - } - - log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string), - d.Get("target_group_arn").(string)) - - _, err := elbconn.RegisterTargets(params) - if err != nil { - return errwrap.Wrapf("Error registering targets with target group: {{err}}", err) - } - - d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn")))) - - return nil 
-} - -func resourceAwsAlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) - } - - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - } - - _, err := elbconn.DeregisterTargets(params) - if err != nil && !isTargetGroupNotFound(err) { - return errwrap.Wrapf("Error deregistering Targets: {{err}}", err) - } - - d.SetId("") - - return nil -} - -// resourceAwsAlbAttachmentRead requires all of the fields in order to describe the correct -// target, so there is no work to do beyond ensuring that the target and group still exist. -func resourceAwsAlbAttachmentRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbv2conn - - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) - } - - resp, err := elbconn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - }) - if err != nil { - if isTargetGroupNotFound(err) { - log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return nil - } - if isInvalidTarget(err) { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return nil - } - return errwrap.Wrapf("Error reading Target Health: {{err}}", err) - } - - if len(resp.TargetHealthDescriptions) != 1 { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return nil - } - - return nil -} - -func isInvalidTarget(err error) bool { - elberr, ok := 
err.(awserr.Error) - return ok && elberr.Code() == "InvalidTarget" -} diff --git a/builtin/providers/aws/resource_aws_alb_target_group_attachment_test.go b/builtin/providers/aws/resource_aws_alb_target_group_attachment_test.go deleted file mode 100644 index 6ab8cab73..000000000 --- a/builtin/providers/aws/resource_aws_alb_target_group_attachment_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "strconv" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSALBTargetGroupAttachment_basic(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupAttachmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupAttachmentConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupAttachmentExists("aws_alb_target_group_attachment.test"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroupAttachment_withoutPort(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupAttachmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupAttachmentConfigWithoutPort(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckAWSALBTargetGroupAttachmentExists("aws_alb_target_group_attachment.test"), - ), - }, - }, - }) -} - -func testAccCheckAWSALBTargetGroupAttachmentExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No Target Group Attachment ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn, _ := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - if hasPort == true { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) - } - - describe, err := conn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - }) - - if err != nil { - return err - } - - if len(describe.TargetHealthDescriptions) != 1 { - return errors.New("Target Group Attachment not found") - } - - return nil - } -} - -func testAccCheckAWSALBTargetGroupAttachmentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_alb_target_group_attachment" { - continue - } - - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn, _ := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - if hasPort == true { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) - } - - describe, err := conn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - }) - if err == nil { - if 
len(describe.TargetHealthDescriptions) != 0 { - return fmt.Errorf("Target Group Attachment %q still exists", rs.Primary.ID) - } - } - - // Verify the error - if isTargetGroupNotFound(err) || isInvalidTarget(err) { - return nil - } else { - return errwrap.Wrapf("Unexpected error checking ALB destroyed: {{err}}", err) - } - } - - return nil -} - -func testAccAWSALBTargetGroupAttachmentConfigWithoutPort(targetGroupName string) string { - return fmt.Sprintf(` -resource "aws_alb_target_group_attachment" "test" { - target_group_arn = "${aws_alb_target_group.test.arn}" - target_id = "${aws_instance.test.id}" -} - -resource "aws_instance" "test" { - ami = "ami-f701cb97" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.subnet.id}" -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -resource "aws_subnet" "subnet" { - cidr_block = "10.0.1.0/24" - vpc_id = "${aws_vpc.test.id}" - -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSALBTargetGroupAttachmentConfigWithoutPort" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupAttachmentConfig_basic(targetGroupName string) string { - return fmt.Sprintf(` -resource "aws_alb_target_group_attachment" "test" { - target_group_arn = "${aws_alb_target_group.test.arn}" - target_id = "${aws_instance.test.id}" - port = 80 -} - -resource "aws_instance" "test" { - ami = "ami-f701cb97" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.subnet.id}" -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = 
"lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } -} - -resource "aws_subnet" "subnet" { - cidr_block = "10.0.1.0/24" - vpc_id = "${aws_vpc.test.id}" - -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSALBTargetGroupAttachmentConfig_basic" - } -}`, targetGroupName) -} diff --git a/builtin/providers/aws/resource_aws_alb_target_group_test.go b/builtin/providers/aws/resource_aws_alb_target_group_test.go deleted file mode 100644 index dc800cfd8..000000000 --- a/builtin/providers/aws/resource_aws_alb_target_group_test.go +++ /dev/null @@ -1,788 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestALBTargetGroupCloudwatchSuffixFromARN(t *testing.T) { - cases := []struct { - name string - arn *string - suffix string - }{ - { - name: "valid suffix", - arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup/my-targets/73e2d6bc24d8a067`), - suffix: `targetgroup/my-targets/73e2d6bc24d8a067`, - }, - { - name: "no suffix", - arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup`), - suffix: ``, - }, - { - name: "nil ARN", - arn: nil, - suffix: ``, - }, - } - - for _, tc := range cases { - actual := albTargetGroupSuffixFromARN(tc.arn) - if actual != tc.suffix { - t.Fatalf("bad suffix: %q\nExpected: %s\n Got: %s", tc.name, tc.suffix, actual) - } - } -} - -func TestAccAWSALBTargetGroup_basic(t *testing.T) { - var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", "true"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "60"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8081"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTP"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", 
"3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200-299"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.TestName", "TestAccAWSALBTargetGroup_basic"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_namePrefix(t *testing.T) { - var conf elbv2.TargetGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestMatchResourceAttr("aws_alb_target_group.test", "name", regexp.MustCompile("^tf-")), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_generatedName(t *testing.T) { - var conf elbv2.TargetGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_changeNameForceNew(t *testing.T) { - var before, after elbv2.TargetGroup - targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupNameBefore), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupNameBefore), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupNameAfter), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupNameAfter), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_changeProtocolForceNew(t *testing.T) { - var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_updatedProtocol(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTP"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_changePortForceNew(t *testing.T) { - var before, after elbv2.TargetGroup - targetGroupName := 
fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_updatedPort(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "442"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_changeVpcForceNew(t *testing.T) { - var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_updatedVpc(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_tags(t *testing.T) { - var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.TestName", "TestAccAWSALBTargetGroup_basic"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_updateTags(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.Environment", "Production"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.Type", "ALB Target Group"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_updateHealthCheck(t *testing.T) { - var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "60"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8081"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTP"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "3"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200-299"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_updateHealthCheck(targetGroupName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), - ), - }, - }, - }) -} - -func TestAccAWSALBTargetGroup_updateSticknessEnabled(t *testing.T) { - var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb_target_group.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, false, false), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, true, true), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", "true"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), - ), - }, - { - Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, true, false), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), - 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), - resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", "false"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), - resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), - ), - }, - }, - }) -} - -func testAccCheckAWSALBTargetGroupExists(n string, res *elbv2.TargetGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No Target Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - describe, err := conn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ - 
TargetGroupArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describe.TargetGroups) != 1 || - *describe.TargetGroups[0].TargetGroupArn != rs.Primary.ID { - return errors.New("Target Group not found") - } - - *res = *describe.TargetGroups[0] - return nil - } -} - -func testAccCheckAWSALBTargetGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_alb_target_group" { - continue - } - - describe, err := conn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ - TargetGroupArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.TargetGroups) != 0 && - *describe.TargetGroups[0].TargetGroupArn == rs.Primary.ID { - return fmt.Errorf("Target Group %q still exists", rs.Primary.ID) - } - } - - // Verify the error - if isTargetGroupNotFound(err) { - return nil - } else { - return errwrap.Wrapf("Unexpected error checking ALB destroyed: {{err}}", err) - } - } - - return nil -} - -func testAccAWSALBTargetGroupConfig_basic(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_updatedPort(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 442 - protocol = "HTTPS" - vpc_id = 
"${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_updatedProtocol(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTP" - vpc_id = "${aws_vpc.test2.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -} - -resource "aws_vpc" "test2" { - cidr_block = "10.10.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_updatedVpc(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - 
TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_updateTags(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health" - interval = 60 - port = 8081 - protocol = "HTTP" - timeout = 3 - healthy_threshold = 3 - unhealthy_threshold = 3 - matcher = "200-299" - } - - tags { - Environment = "Production" - Type = "ALB Target Group" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_updateHealthCheck(targetGroupName string) string { - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health2" - interval = 30 - port = 8082 - protocol = "HTTPS" - timeout = 4 - healthy_threshold = 4 - unhealthy_threshold = 4 - matcher = "200" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_basic" - } -}`, targetGroupName) -} - -func testAccAWSALBTargetGroupConfig_stickiness(targetGroupName string, addStickinessBlock bool, enabled bool) string { - var stickinessBlock string - - if addStickinessBlock { - stickinessBlock = fmt.Sprintf(`stickiness { - enabled = "%t" - type = "lb_cookie" - cookie_duration = 10000 - }`, enabled) - } - - return fmt.Sprintf(`resource "aws_alb_target_group" "test" { - name = "%s" - port = 443 - protocol = "HTTPS" - vpc_id = "${aws_vpc.test.id}" - - deregistration_delay = 200 - - %s - - health_check { - path = "/health2" - interval = 30 
- port = 8082 - protocol = "HTTPS" - timeout = 4 - healthy_threshold = 4 - unhealthy_threshold = 4 - matcher = "200" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALBTargetGroup_stickiness" - } -}`, targetGroupName, stickinessBlock) -} - -const testAccAWSALBTargetGroupConfig_namePrefix = ` -resource "aws_alb_target_group" "test" { - name_prefix = "tf-" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.test.id}" -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSALBTargetGroupConfig_namePrefix" - } -} -` - -const testAccAWSALBTargetGroupConfig_generatedName = ` -resource "aws_alb_target_group" "test" { - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.test.id}" -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSALBTargetGroupConfig_generatedName" - } -} -` diff --git a/builtin/providers/aws/resource_aws_alb_test.go b/builtin/providers/aws/resource_aws_alb_test.go deleted file mode 100644 index 5e766d74b..000000000 --- a/builtin/providers/aws/resource_aws_alb_test.go +++ /dev/null @@ -1,1317 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestALBCloudwatchSuffixFromARN(t *testing.T) { - cases := []struct { - name string - arn *string - suffix string - }{ - { - name: "valid suffix", - arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:loadbalancer/app/my-alb/abc123`), - suffix: `app/my-alb/abc123`, - }, - { - name: "no suffix", - arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:loadbalancer`), - suffix: ``, - }, - { - name: "nil ARN", - arn: nil, - suffix: ``, - }, - } - - for _, tc := range cases { - actual 
:= albSuffixFromARN(tc.arn) - if actual != tc.suffix { - t.Fatalf("bad suffix: %q\nExpected: %s\n Got: %s", tc.name, tc.suffix, actual) - } - } -} - -func TestAccAWSALB_basic(t *testing.T) { - var conf elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_basic(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "true"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "30"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "ip_address_type", "ipv4"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"), - ), - }, - }, - }) -} - -func TestAccAWSALB_generatedName(t *testing.T) { - var conf elbv2.LoadBalancer - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAWSALBConfig_generatedName(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "name"), - ), - }, - }, - }) -} - -func TestAccAWSALB_generatesNameForZeroValue(t *testing.T) { - var conf elbv2.LoadBalancer - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_zeroValueName(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "name"), - ), - }, - }, - }) -} - -func TestAccAWSALB_namePrefix(t *testing.T) { - var conf elbv2.LoadBalancer - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_namePrefix(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "name"), - resource.TestMatchResourceAttr("aws_alb.alb_test", "name", - regexp.MustCompile("^tf-lb-")), - ), - }, - }, - }) -} - -func TestAccAWSALB_tags(t *testing.T) { - var conf elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_basic(albName), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic"), - ), - }, - { - Config: testAccAWSALBConfig_updatedTags(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.Type", "Sample Type Tag"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.Environment", "Production"), - ), - }, - }, - }) -} - -func TestAccAWSALB_updatedSecurityGroups(t *testing.T) { - var pre, post elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_basic(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &pre), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - ), - }, - { - Config: testAccAWSALBConfig_updateSecurityGroups(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &post), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "2"), - testAccCheckAWSAlbARNs(&pre, &post), - ), - }, - }, - }) -} - -func TestAccAWSALB_updatedSubnets(t *testing.T) { - var pre, post elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - 
CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_basic(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &pre), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - ), - }, - { - Config: testAccAWSALBConfig_updateSubnets(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &post), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "3"), - testAccCheckAWSAlbARNs(&pre, &post), - ), - }, - }, - }) -} - -func TestAccAWSALB_updatedIpAddressType(t *testing.T) { - var pre, post elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfigWithIpAddressType(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &pre), - resource.TestCheckResourceAttr("aws_alb.alb_test", "ip_address_type", "ipv4"), - ), - }, - { - Config: testAccAWSALBConfigWithIpAddressTypeUpdated(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &post), - resource.TestCheckResourceAttr("aws_alb.alb_test", "ip_address_type", "dualstack"), - ), - }, - }, - }) -} - -// TestAccAWSALB_noSecurityGroup regression tests the issue in #8264, -// where if an ALB is created without a security group, a default one -// is assigned. 
-func TestAccAWSALB_noSecurityGroup(t *testing.T) { - var conf elbv2.LoadBalancer - albName := fmt.Sprintf("testaccawsalb-nosg-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSALBConfig_nosg(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "true"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "30"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"), - ), - }, - }, - }) -} - -func TestAccAWSALB_accesslogs(t *testing.T) { - var conf elbv2.LoadBalancer - bucketName := fmt.Sprintf("testaccawsalbaccesslogs-%s", acctest.RandStringFromCharSet(6, acctest.CharSetAlphaNum)) - albName := fmt.Sprintf("testaccawsalbaccesslog-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlpha)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_alb.alb_test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSALBDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSALBConfig_basic(albName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "true"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "30"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"), - ), - }, - { - Config: testAccAWSALBConfig_accessLogs(true, albName, bucketName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "true"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "50"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"), - 
resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.bucket", bucketName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.prefix", "testAccAWSALBConfig_accessLogs"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.enabled", "true"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"), - ), - }, - { - Config: testAccAWSALBConfig_accessLogs(false, albName, bucketName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSALBExists("aws_alb.alb_test", &conf), - resource.TestCheckResourceAttr("aws_alb.alb_test", "name", albName), - resource.TestCheckResourceAttr("aws_alb.alb_test", "internal", "true"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "security_groups.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "tags.TestName", "TestAccAWSALB_basic1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "enable_deletion_protection", "false"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "idle_timeout", "50"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "vpc_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "zone_id"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "dns_name"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.#", "1"), - resource.TestCheckResourceAttr("aws_alb.alb_test", "access_logs.0.enabled", "false"), - resource.TestCheckResourceAttrSet("aws_alb.alb_test", "arn"), - ), - }, - }, - }) -} - -func testAccCheckAWSAlbARNs(pre, post *elbv2.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *pre.LoadBalancerArn != *post.LoadBalancerArn { - return errors.New("ALB has been recreated. 
ARNs are different") - } - - return nil - } -} - -func testAccCheckAWSALBExists(n string, res *elbv2.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No ALB ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - describe, err := conn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describe.LoadBalancers) != 1 || - *describe.LoadBalancers[0].LoadBalancerArn != rs.Primary.ID { - return errors.New("ALB not found") - } - - *res = *describe.LoadBalancers[0] - return nil - } -} - -func testAccCheckAWSALBDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_alb" { - continue - } - - describe, err := conn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.LoadBalancers) != 0 && - *describe.LoadBalancers[0].LoadBalancerArn == rs.Primary.ID { - return fmt.Errorf("ALB %q still exists", rs.Primary.ID) - } - } - - // Verify the error - if isLoadBalancerNotFound(err) { - return nil - } else { - return errwrap.Wrapf("Unexpected error checking ALB destroyed: {{err}}", err) - } - } - - return nil -} - -func testAccAWSALBConfigWithIpAddressTypeUpdated(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test_1.id}", "${aws_subnet.alb_test_2.id}"] - - ip_address_type = "dualstack" - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_listener" "test" { - 
load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health2" - interval = 30 - port = 8082 - protocol = "HTTPS" - timeout = 4 - healthy_threshold = 4 - unhealthy_threshold = 4 - matcher = "200" - } -} - -resource "aws_egress_only_internet_gateway" "igw" { - vpc_id = "${aws_vpc.alb_test.id}" -} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.alb_test.id}" -} - -resource "aws_subnet" "alb_test_1" { - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "10.0.1.0/24" - map_public_ip_on_launch = true - availability_zone = "us-west-2a" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.alb_test.ipv6_cidr_block, 8, 1)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test_2" { - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "10.0.2.0/24" - map_public_ip_on_launch = true - availability_zone = "us-west-2b" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.alb_test.ipv6_cidr_block, 8, 2)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, albName) -} - -func testAccAWSALBConfigWithIpAddressType(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - 
security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test_1.id}", "${aws_subnet.alb_test_2.id}"] - - ip_address_type = "ipv4" - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_alb_listener" "test" { - load_balancer_arn = "${aws_alb.alb_test.id}" - protocol = "HTTP" - port = "80" - - default_action { - target_group_arn = "${aws_alb_target_group.test.id}" - type = "forward" - } -} - -resource "aws_alb_target_group" "test" { - name = "%s" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.alb_test.id}" - - deregistration_delay = 200 - - stickiness { - type = "lb_cookie" - cookie_duration = 10000 - } - - health_check { - path = "/health2" - interval = 30 - port = 8082 - protocol = "HTTPS" - timeout = 4 - healthy_threshold = 4 - unhealthy_threshold = 4 - matcher = "200" - } -} - -resource "aws_egress_only_internet_gateway" "igw" { - vpc_id = "${aws_vpc.alb_test.id}" -} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.alb_test.id}" -} - -resource "aws_subnet" "alb_test_1" { - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "10.0.1.0/24" - map_public_ip_on_launch = true - availability_zone = "us-west-2a" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.alb_test.ipv6_cidr_block, 8, 1)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test_2" { - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "10.0.2.0/24" - map_public_ip_on_launch = true - availability_zone = "us-west-2b" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.alb_test.ipv6_cidr_block, 8, 2)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - 
ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, albName) -} - -func testAccAWSALBConfig_basic(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName) -} - -func testAccAWSALBConfig_updateSubnets(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - type = "list" -} - -data 
"aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 3 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName) -} - -func testAccAWSALBConfig_generatedName() string { - return fmt.Sprintf(` -resource "aws_alb" "alb_test" { - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.alb_test.id}" - - tags { - Name = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" 
- description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`) -} - -func testAccAWSALBConfig_zeroValueName() string { - return fmt.Sprintf(` -resource "aws_alb" "alb_test" { - name = "" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.alb_test.id}" - - tags { - Name = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`) -} - -func testAccAWSALBConfig_namePrefix() string { - return fmt.Sprintf(` -resource "aws_alb" "alb_test" { - name_prefix = "tf-lb-" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = 
["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`) -} -func testAccAWSALBConfig_updatedTags(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - Environment = "Production" - Type = "Sample Type Tag" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, 
count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName) -} - -func testAccAWSALBConfig_accessLogs(enabled bool, albName, bucketName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 50 - enable_deletion_protection = false - - access_logs { - bucket = "${aws_s3_bucket.logs.bucket}" - prefix = "${var.bucket_prefix}" - enabled = "%t" - } - - tags { - TestName = "TestAccAWSALB_basic1" - } -} - -variable "bucket_name" { - type = "string" - default = "%s" -} - -variable "bucket_prefix" { - type = "string" - default = "testAccAWSALBConfig_accessLogs" -} - -resource "aws_s3_bucket" "logs" { - bucket = "${var.bucket_name}" - policy = "${data.aws_iam_policy_document.logs_bucket.json}" - # dangerous, only here for the test... 
- force_destroy = true - - tags { - Name = "ALB Logs Bucket Test" - } -} - -data "aws_caller_identity" "current" {} - -data "aws_elb_service_account" "current" {} - -data "aws_iam_policy_document" "logs_bucket" { - statement { - actions = ["s3:PutObject"] - effect = "Allow" - resources = ["arn:aws:s3:::${var.bucket_name}/${var.bucket_prefix}/AWSLogs/${data.aws_caller_identity.current.account_id}/*"] - - principals = { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_elb_service_account.current.id}:root"] - } - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName, enabled, bucketName) -} - -func testAccAWSALBConfig_nosg(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - 
cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName) -} - -func testAccAWSALBConfig_updateSecurityGroups(albName string) string { - return fmt.Sprintf(`resource "aws_alb" "alb_test" { - name = "%s" - internal = true - security_groups = ["${aws_security_group.alb_test.id}", "${aws_security_group.alb_test_2.id}"] - subnets = ["${aws_subnet.alb_test.*.id}"] - - idle_timeout = 30 - enable_deletion_protection = false - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -variable "subnets" { - default = ["10.0.1.0/24", "10.0.2.0/24"] - type = "list" -} - -data "aws_availability_zones" "available" {} - -resource "aws_vpc" "alb_test" { - cidr_block = "10.0.0.0/16" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_subnet" "alb_test" { - count = 2 - vpc_id = "${aws_vpc.alb_test.id}" - cidr_block = "${element(var.subnets, count.index)}" - map_public_ip_on_launch = true - availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" - - tags { - TestName = "TestAccAWSALB_basic" - } -} - -resource "aws_security_group" "alb_test_2" { - name = "allow_all_alb_test_2" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 80 - to_port = 80 - protocol = "TCP" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic_2" - } -} - -resource "aws_security_group" "alb_test" { - name = "allow_all_alb_test" - description = "Used for ALB Testing" - vpc_id = "${aws_vpc.alb_test.id}" - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - 
protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - TestName = "TestAccAWSALB_basic" - } -}`, albName) -} diff --git a/builtin/providers/aws/resource_aws_ami.go b/builtin/providers/aws/resource_aws_ami.go deleted file mode 100644 index d01c402ed..000000000 --- a/builtin/providers/aws/resource_aws_ami.go +++ /dev/null @@ -1,562 +0,0 @@ -package aws - -import ( - "bytes" - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - AWSAMIRetryTimeout = 40 * time.Minute - AWSAMIDeleteRetryTimeout = 90 * time.Minute - AWSAMIRetryDelay = 5 * time.Second - AWSAMIRetryMinTimeout = 3 * time.Second -) - -func resourceAwsAmi() *schema.Resource { - // Our schema is shared also with aws_ami_copy and aws_ami_from_instance - resourceSchema := resourceAwsAmiCommonSchema(false) - - return &schema.Resource{ - Create: resourceAwsAmiCreate, - - Schema: resourceSchema, - - // The Read, Update and Delete operations are shared with aws_ami_copy - // and aws_ami_from_instance, since they differ only in how the image - // is created. 
- Read: resourceAwsAmiRead, - Update: resourceAwsAmiUpdate, - Delete: resourceAwsAmiDelete, - } -} - -func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - - req := &ec2.RegisterImageInput{ - Name: aws.String(d.Get("name").(string)), - Description: aws.String(d.Get("description").(string)), - Architecture: aws.String(d.Get("architecture").(string)), - ImageLocation: aws.String(d.Get("image_location").(string)), - RootDeviceName: aws.String(d.Get("root_device_name").(string)), - SriovNetSupport: aws.String(d.Get("sriov_net_support").(string)), - VirtualizationType: aws.String(d.Get("virtualization_type").(string)), - } - - if kernelId := d.Get("kernel_id").(string); kernelId != "" { - req.KernelId = aws.String(kernelId) - } - if ramdiskId := d.Get("ramdisk_id").(string); ramdiskId != "" { - req.RamdiskId = aws.String(ramdiskId) - } - - ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set) - ephemeralBlockDevsSet := d.Get("ephemeral_block_device").(*schema.Set) - for _, ebsBlockDevI := range ebsBlockDevsSet.List() { - ebsBlockDev := ebsBlockDevI.(map[string]interface{}) - blockDev := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(ebsBlockDev["device_name"].(string)), - Ebs: &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(ebsBlockDev["delete_on_termination"].(bool)), - VolumeType: aws.String(ebsBlockDev["volume_type"].(string)), - }, - } - if iops, ok := ebsBlockDev["iops"]; ok { - if iop := iops.(int); iop != 0 { - blockDev.Ebs.Iops = aws.Int64(int64(iop)) - } - } - if size, ok := ebsBlockDev["volume_size"]; ok { - if s := size.(int); s != 0 { - blockDev.Ebs.VolumeSize = aws.Int64(int64(s)) - } - } - encrypted := ebsBlockDev["encrypted"].(bool) - if snapshotId := ebsBlockDev["snapshot_id"].(string); snapshotId != "" { - blockDev.Ebs.SnapshotId = aws.String(snapshotId) - if encrypted { - return errors.New("can't set both 'snapshot_id' and 'encrypted'") - } - } else if encrypted { - 
blockDev.Ebs.Encrypted = aws.Bool(true) - } - req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev) - } - for _, ephemeralBlockDevI := range ephemeralBlockDevsSet.List() { - ephemeralBlockDev := ephemeralBlockDevI.(map[string]interface{}) - blockDev := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(ephemeralBlockDev["device_name"].(string)), - VirtualName: aws.String(ephemeralBlockDev["virtual_name"].(string)), - } - req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev) - } - - res, err := client.RegisterImage(req) - if err != nil { - return err - } - - id := *res.ImageId - d.SetId(id) - d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) - d.Set("manage_ebs_block_devices", false) - d.SetPartial("id") - d.SetPartial("manage_ebs_block_devices") - d.Partial(false) - - _, err = resourceAwsAmiWaitForAvailable(id, client) - if err != nil { - return err - } - - return resourceAwsAmiUpdate(d, meta) -} - -func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - id := d.Id() - - req := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(id)}, - } - - res, err := client.DescribeImages(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { - log.Printf("[DEBUG] %s no longer exists, so we'll drop it from the state", id) - d.SetId("") - return nil - } - - return err - } - - if len(res.Images) != 1 { - d.SetId("") - return nil - } - - image := res.Images[0] - state := *image.State - - if state == "pending" { - // This could happen if a user manually adds an image we didn't create - // to the state. We'll wait for the image to become available - // before we continue. We should never take this branch in normal - // circumstances since we would've waited for availability during - // the "Create" step. 
- image, err = resourceAwsAmiWaitForAvailable(id, client) - if err != nil { - return err - } - state = *image.State - } - - if state == "deregistered" { - d.SetId("") - return nil - } - - if state != "available" { - return fmt.Errorf("AMI has become %s", state) - } - - d.Set("name", image.Name) - d.Set("description", image.Description) - d.Set("image_location", image.ImageLocation) - d.Set("architecture", image.Architecture) - d.Set("kernel_id", image.KernelId) - d.Set("ramdisk_id", image.RamdiskId) - d.Set("root_device_name", image.RootDeviceName) - d.Set("sriov_net_support", image.SriovNetSupport) - d.Set("virtualization_type", image.VirtualizationType) - - var ebsBlockDevs []map[string]interface{} - var ephemeralBlockDevs []map[string]interface{} - - for _, blockDev := range image.BlockDeviceMappings { - if blockDev.Ebs != nil { - ebsBlockDev := map[string]interface{}{ - "device_name": *blockDev.DeviceName, - "delete_on_termination": *blockDev.Ebs.DeleteOnTermination, - "encrypted": *blockDev.Ebs.Encrypted, - "iops": 0, - "volume_size": int(*blockDev.Ebs.VolumeSize), - "volume_type": *blockDev.Ebs.VolumeType, - } - if blockDev.Ebs.Iops != nil { - ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops) - } - // The snapshot ID might not be set. 
- if blockDev.Ebs.SnapshotId != nil { - ebsBlockDev["snapshot_id"] = *blockDev.Ebs.SnapshotId - } - ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev) - } else { - ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{ - "device_name": *blockDev.DeviceName, - "virtual_name": *blockDev.VirtualName, - }) - } - } - - d.Set("ebs_block_device", ebsBlockDevs) - d.Set("ephemeral_block_device", ephemeralBlockDevs) - - d.Set("tags", tagsToMap(image.Tags)) - - return nil -} - -func resourceAwsAmiUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - - d.Partial(true) - - if err := setTags(client, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - if d.Get("description").(string) != "" { - _, err := client.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ - ImageId: aws.String(d.Id()), - Description: &ec2.AttributeValue{ - Value: aws.String(d.Get("description").(string)), - }, - }) - if err != nil { - return err - } - d.SetPartial("description") - } - - d.Partial(false) - - return resourceAwsAmiRead(d, meta) -} - -func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - - req := &ec2.DeregisterImageInput{ - ImageId: aws.String(d.Id()), - } - - _, err := client.DeregisterImage(req) - if err != nil { - return err - } - - // If we're managing the EBS snapshots then we need to delete those too. 
- if d.Get("manage_ebs_snapshots").(bool) { - errs := map[string]error{} - ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set) - req := &ec2.DeleteSnapshotInput{} - for _, ebsBlockDevI := range ebsBlockDevsSet.List() { - ebsBlockDev := ebsBlockDevI.(map[string]interface{}) - snapshotId := ebsBlockDev["snapshot_id"].(string) - if snapshotId != "" { - req.SnapshotId = aws.String(snapshotId) - _, err := client.DeleteSnapshot(req) - if err != nil { - errs[snapshotId] = err - } - } - } - - if len(errs) > 0 { - errParts := []string{"Errors while deleting associated EBS snapshots:"} - for snapshotId, err := range errs { - errParts = append(errParts, fmt.Sprintf("%s: %s", snapshotId, err)) - } - errParts = append(errParts, "These are no longer managed by Terraform and must be deleted manually.") - return errors.New(strings.Join(errParts, "\n")) - } - } - - // Verify that the image is actually removed, if not we need to wait for it to be removed - if err := resourceAwsAmiWaitForDestroy(d.Id(), client); err != nil { - return err - } - - // No error, ami was deleted successfully - d.SetId("") - return nil -} - -func AMIStateRefreshFunc(client *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - emptyResp := &ec2.DescribeImagesOutput{} - - resp, err := client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(id)}}) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { - return emptyResp, "destroyed", nil - } else if resp != nil && len(resp.Images) == 0 { - return emptyResp, "destroyed", nil - } else { - return emptyResp, "", fmt.Errorf("Error on refresh: %+v", err) - } - } - - if resp == nil || resp.Images == nil || len(resp.Images) == 0 { - return emptyResp, "destroyed", nil - } - - // AMI is valid, so return it's state - return resp.Images[0], *resp.Images[0].State, nil - } -} - -func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error { - 
log.Printf("Waiting for AMI %s to be deleted...", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "pending", "failed"}, - Target: []string{"destroyed"}, - Refresh: AMIStateRefreshFunc(client, id), - Timeout: AWSAMIDeleteRetryTimeout, - Delay: AWSAMIRetryDelay, - MinTimeout: AWSAMIRetryTimeout, - } - - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for AMI (%s) to be deleted: %v", id, err) - } - - return nil -} - -func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, error) { - log.Printf("Waiting for AMI %s to become available...", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: AMIStateRefreshFunc(client, id), - Timeout: AWSAMIRetryTimeout, - Delay: AWSAMIRetryDelay, - MinTimeout: AWSAMIRetryMinTimeout, - } - - info, err := stateConf.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for AMI (%s) to be ready: %v", id, err) - } - return info.(*ec2.Image), nil -} - -func resourceAwsAmiCommonSchema(computed bool) map[string]*schema.Schema { - // The "computed" parameter controls whether we're making - // a schema for an AMI that's been implicitly registered (aws_ami_copy, aws_ami_from_instance) - // or whether we're making a schema for an explicit registration (aws_ami). - // When set, almost every attribute is marked as "computed". - // When not set, only the "id" attribute is computed. - // "name" and "description" are never computed, since they must always - // be provided by the user. 
- - var virtualizationTypeDefault interface{} - var deleteEbsOnTerminationDefault interface{} - var sriovNetSupportDefault interface{} - var architectureDefault interface{} - var volumeTypeDefault interface{} - if !computed { - virtualizationTypeDefault = "paravirtual" - deleteEbsOnTerminationDefault = true - sriovNetSupportDefault = "simple" - architectureDefault = "x86_64" - volumeTypeDefault = "standard" - } - - return map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "image_location": { - Type: schema.TypeString, - Optional: !computed, - Computed: true, - ForceNew: !computed, - }, - "architecture": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - Default: architectureDefault, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "kernel_id": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ramdisk_id": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - "root_device_name": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - "sriov_net_support": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - Default: sriovNetSupportDefault, - }, - "virtualization_type": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - Default: virtualizationTypeDefault, - }, - - // The following block device attributes intentionally mimick the - // corresponding attributes on aws_instance, since they have the - // same meaning. - // However, we don't use root_block_device here because the constraint - // on which root device attributes can be overridden for an instance to - // not apply when registering an AMI. 
- - "ebs_block_device": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Optional: !computed, - Default: deleteEbsOnTerminationDefault, - ForceNew: !computed, - Computed: computed, - }, - - "device_name": { - Type: schema.TypeString, - Required: !computed, - ForceNew: !computed, - Computed: computed, - }, - - "encrypted": { - Type: schema.TypeBool, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - - "iops": { - Type: schema.TypeInt, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - - "snapshot_id": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - }, - - "volume_size": { - Type: schema.TypeInt, - Optional: !computed, - Computed: true, - ForceNew: !computed, - }, - - "volume_type": { - Type: schema.TypeString, - Optional: !computed, - Computed: computed, - ForceNew: !computed, - Default: volumeTypeDefault, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) - return hashcode.String(buf.String()) - }, - }, - - "ephemeral_block_device": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: !computed, - Computed: computed, - }, - - "virtual_name": { - Type: schema.TypeString, - Required: !computed, - Computed: computed, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) - return hashcode.String(buf.String()) - }, - }, - - "tags": 
tagsSchema(), - - // Not a public attribute; used to let the aws_ami_copy and aws_ami_from_instance - // resources record that they implicitly created new EBS snapshots that we should - // now manage. Not set by aws_ami, since the snapshots used there are presumed to - // be independently managed. - "manage_ebs_snapshots": { - Type: schema.TypeBool, - Computed: true, - ForceNew: true, - }, - } -} diff --git a/builtin/providers/aws/resource_aws_ami_copy.go b/builtin/providers/aws/resource_aws_ami_copy.go deleted file mode 100644 index 3452d5b52..000000000 --- a/builtin/providers/aws/resource_aws_ami_copy.go +++ /dev/null @@ -1,90 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAmiCopy() *schema.Resource { - // Inherit all of the common AMI attributes from aws_ami, since we're - // implicitly creating an aws_ami resource. - resourceSchema := resourceAwsAmiCommonSchema(true) - - // Additional attributes unique to the copy operation. - resourceSchema["source_ami_id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - } - resourceSchema["source_ami_region"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - } - - resourceSchema["encrypted"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - } - - resourceSchema["kms_key_id"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateArn, - } - - return &schema.Resource{ - Create: resourceAwsAmiCopyCreate, - - Schema: resourceSchema, - - // The remaining operations are shared with the generic aws_ami resource, - // since the aws_ami_copy resource only differs in how it's created. 
- Read: resourceAwsAmiRead, - Update: resourceAwsAmiUpdate, - Delete: resourceAwsAmiDelete, - } -} - -func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - - req := &ec2.CopyImageInput{ - Name: aws.String(d.Get("name").(string)), - Description: aws.String(d.Get("description").(string)), - SourceImageId: aws.String(d.Get("source_ami_id").(string)), - SourceRegion: aws.String(d.Get("source_ami_region").(string)), - Encrypted: aws.Bool(d.Get("encrypted").(bool)), - } - - if v, ok := d.GetOk("kms_key_id"); ok { - req.KmsKeyId = aws.String(v.(string)) - } - - res, err := client.CopyImage(req) - if err != nil { - return err - } - - id := *res.ImageId - d.SetId(id) - d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) - d.Set("manage_ebs_snapshots", true) - d.SetPartial("id") - d.SetPartial("manage_ebs_snapshots") - d.Partial(false) - - _, err = resourceAwsAmiWaitForAvailable(id, client) - if err != nil { - return err - } - - return resourceAwsAmiUpdate(d, meta) -} diff --git a/builtin/providers/aws/resource_aws_ami_copy_test.go b/builtin/providers/aws/resource_aws_ami_copy_test.go deleted file mode 100644 index 4fd5f5264..000000000 --- a/builtin/providers/aws/resource_aws_ami_copy_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAMICopy(t *testing.T) { - var amiId string - snapshots := []string{} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAMICopyConfig, - Check: func(state *terraform.State) error { - rs, ok := 
state.RootModule().Resources["aws_ami_copy.test"] - if !ok { - return fmt.Errorf("AMI resource not found") - } - - amiId = rs.Primary.ID - - if amiId == "" { - return fmt.Errorf("AMI id is not set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(amiId)}, - } - describe, err := conn.DescribeImages(req) - if err != nil { - return err - } - - if len(describe.Images) != 1 || - *describe.Images[0].ImageId != rs.Primary.ID { - return fmt.Errorf("AMI not found") - } - - image := describe.Images[0] - if expected := "available"; *image.State != expected { - return fmt.Errorf("invalid image state; expected %v, got %v", expected, image.State) - } - if expected := "machine"; *image.ImageType != expected { - return fmt.Errorf("wrong image type; expected %v, got %v", expected, image.ImageType) - } - if expected := "terraform-acc-ami-copy"; *image.Name != expected { - return fmt.Errorf("wrong name; expected %v, got %v", expected, image.Name) - } - - for _, bdm := range image.BlockDeviceMappings { - // The snapshot ID might not be set, - // even for a block device that is an - // EBS volume. 
- if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { - snapshots = append(snapshots, *bdm.Ebs.SnapshotId) - } - } - - if expected := 1; len(snapshots) != expected { - return fmt.Errorf("wrong number of snapshots; expected %v, got %v", expected, len(snapshots)) - } - - return nil - }, - }, - }, - CheckDestroy: func(state *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - diReq := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(amiId)}, - } - diRes, err := conn.DescribeImages(diReq) - if err != nil { - return err - } - - if len(diRes.Images) > 0 { - state := diRes.Images[0].State - return fmt.Errorf("AMI %v remains in state %v", amiId, state) - } - - stillExist := make([]string, 0, len(snapshots)) - checkErrors := make(map[string]error) - for _, snapshotId := range snapshots { - dsReq := &ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(snapshotId)}, - } - _, err := conn.DescribeSnapshots(dsReq) - if err == nil { - stillExist = append(stillExist, snapshotId) - continue - } - - awsErr, ok := err.(awserr.Error) - if !ok { - checkErrors[snapshotId] = err - continue - } - - if awsErr.Code() != "InvalidSnapshot.NotFound" { - checkErrors[snapshotId] = err - continue - } - } - - if len(stillExist) > 0 || len(checkErrors) > 0 { - errParts := []string{ - "Expected all snapshots to be gone, but:", - } - for _, snapshotId := range stillExist { - errParts = append( - errParts, - fmt.Sprintf("- %v still exists", snapshotId), - ) - } - for snapshotId, err := range checkErrors { - errParts = append( - errParts, - fmt.Sprintf("- checking %v gave error: %v", snapshotId, err), - ) - } - return errors.New(strings.Join(errParts, "\n")) - } - - return nil - }, - }) -} - -var testAccAWSAMICopyConfig = ` -provider "aws" { - region = "us-east-1" -} - -// An AMI can't be directly copied from one account to another, and -// we can't rely on any particular AMI being available since anyone -// can run this test in whatever account they 
like. -// Therefore we jump through some hoops here: -// - Spin up an EC2 instance based on a public AMI -// - Create an AMI by snapshotting that EC2 instance, using -// aws_ami_from_instance . -// - Copy the new AMI using aws_ami_copy . -// -// Thus this test can only succeed if the aws_ami_from_instance resource -// is working. If it's misbehaving it will likely cause this test to fail too. - -// Since we're booting a t2.micro HVM instance we need a VPC for it to boot -// up into. - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccAWSAMICopyConfig" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "test" { - // This AMI has one block device mapping, so we expect to have - // one snapshot in our created AMI. - // This is an Ubuntu Linux HVM AMI. A public HVM AMI is required - // because paravirtual images cannot be copied between accounts. - ami = "ami-0f8bce65" - instance_type = "t2.micro" - tags { - Name = "terraform-acc-ami-copy-victim" - } - - subnet_id = "${aws_subnet.foo.id}" -} - -resource "aws_ami_from_instance" "test" { - name = "terraform-acc-ami-copy-victim" - description = "Testing Terraform aws_ami_from_instance resource" - source_instance_id = "${aws_instance.test.id}" -} - -resource "aws_ami_copy" "test" { - name = "terraform-acc-ami-copy" - description = "Testing Terraform aws_ami_copy resource" - source_ami_id = "${aws_ami_from_instance.test.id}" - source_ami_region = "us-east-1" -} -` diff --git a/builtin/providers/aws/resource_aws_ami_from_instance.go b/builtin/providers/aws/resource_aws_ami_from_instance.go deleted file mode 100644 index cc272d3c1..000000000 --- a/builtin/providers/aws/resource_aws_ami_from_instance.go +++ /dev/null @@ -1,70 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/schema" -) - -func 
resourceAwsAmiFromInstance() *schema.Resource { - // Inherit all of the common AMI attributes from aws_ami, since we're - // implicitly creating an aws_ami resource. - resourceSchema := resourceAwsAmiCommonSchema(true) - - // Additional attributes unique to the copy operation. - resourceSchema["source_instance_id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - } - resourceSchema["snapshot_without_reboot"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - } - - return &schema.Resource{ - Create: resourceAwsAmiFromInstanceCreate, - - Schema: resourceSchema, - - // The remaining operations are shared with the generic aws_ami resource, - // since the aws_ami_copy resource only differs in how it's created. - Read: resourceAwsAmiRead, - Update: resourceAwsAmiUpdate, - Delete: resourceAwsAmiDelete, - } -} - -func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).ec2conn - - req := &ec2.CreateImageInput{ - Name: aws.String(d.Get("name").(string)), - Description: aws.String(d.Get("description").(string)), - InstanceId: aws.String(d.Get("source_instance_id").(string)), - NoReboot: aws.Bool(d.Get("snapshot_without_reboot").(bool)), - } - - res, err := client.CreateImage(req) - if err != nil { - return err - } - - id := *res.ImageId - d.SetId(id) - d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) - d.Set("manage_ebs_snapshots", true) - d.SetPartial("id") - d.SetPartial("manage_ebs_snapshots") - d.Partial(false) - - _, err = resourceAwsAmiWaitForAvailable(id, client) - if err != nil { - return err - } - - return resourceAwsAmiUpdate(d, meta) -} diff --git a/builtin/providers/aws/resource_aws_ami_from_instance_test.go b/builtin/providers/aws/resource_aws_ami_from_instance_test.go deleted file mode 100644 index e130a6cbc..000000000 --- 
a/builtin/providers/aws/resource_aws_ami_from_instance_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAMIFromInstance(t *testing.T) { - var amiId string - snapshots := []string{} - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSAMIFromInstanceConfig(rInt), - Check: func(state *terraform.State) error { - rs, ok := state.RootModule().Resources["aws_ami_from_instance.test"] - if !ok { - return fmt.Errorf("AMI resource not found") - } - - amiId = rs.Primary.ID - - if amiId == "" { - return fmt.Errorf("AMI id is not set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(amiId)}, - } - describe, err := conn.DescribeImages(req) - if err != nil { - return err - } - - if len(describe.Images) != 1 || - *describe.Images[0].ImageId != rs.Primary.ID { - return fmt.Errorf("AMI not found") - } - - image := describe.Images[0] - if expected := "available"; *image.State != expected { - return fmt.Errorf("invalid image state; expected %v, got %v", expected, *image.State) - } - if expected := "machine"; *image.ImageType != expected { - return fmt.Errorf("wrong image type; expected %v, got %v", expected, *image.ImageType) - } - if expected := fmt.Sprintf("terraform-acc-ami-from-instance-%d", rInt); *image.Name != expected { - return fmt.Errorf("wrong name; expected %v, got %v", expected, *image.Name) - } - - for _, bdm := range image.BlockDeviceMappings { - if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { - snapshots = 
append(snapshots, *bdm.Ebs.SnapshotId) - } - } - - if expected := 1; len(snapshots) != expected { - return fmt.Errorf("wrong number of snapshots; expected %v, got %v", expected, len(snapshots)) - } - - return nil - }, - }, - }, - CheckDestroy: func(state *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - diReq := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(amiId)}, - } - diRes, err := conn.DescribeImages(diReq) - if err != nil { - return err - } - - if len(diRes.Images) > 0 { - state := diRes.Images[0].State - return fmt.Errorf("AMI %v remains in state %v", amiId, state) - } - - stillExist := make([]string, 0, len(snapshots)) - checkErrors := make(map[string]error) - for _, snapshotId := range snapshots { - dsReq := &ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(snapshotId)}, - } - _, err := conn.DescribeSnapshots(dsReq) - if err == nil { - stillExist = append(stillExist, snapshotId) - continue - } - - awsErr, ok := err.(awserr.Error) - if !ok { - checkErrors[snapshotId] = err - continue - } - - if awsErr.Code() != "InvalidSnapshot.NotFound" { - checkErrors[snapshotId] = err - continue - } - } - - if len(stillExist) > 0 || len(checkErrors) > 0 { - errParts := []string{ - "Expected all snapshots to be gone, but:", - } - for _, snapshotId := range stillExist { - errParts = append( - errParts, - fmt.Sprintf("- %v still exists", snapshotId), - ) - } - for snapshotId, err := range checkErrors { - errParts = append( - errParts, - fmt.Sprintf("- checking %v gave error: %v", snapshotId, err), - ) - } - return errors.New(strings.Join(errParts, "\n")) - } - - return nil - }, - }) -} - -func testAccAWSAMIFromInstanceConfig(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_instance" "test" { - // This AMI has one block device mapping, so we expect to have - // one snapshot in our created AMI. 
- ami = "ami-408c7f28" - instance_type = "t1.micro" - tags { - Name = "testAccAWSAMIFromInstanceConfig_TestAMI" - } - } - - resource "aws_ami_from_instance" "test" { - name = "terraform-acc-ami-from-instance-%d" - description = "Testing Terraform aws_ami_from_instance resource" - source_instance_id = "${aws_instance.test.id}" - }`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_ami_launch_permission.go b/builtin/providers/aws/resource_aws_ami_launch_permission.go deleted file mode 100644 index 278e9d9ab..000000000 --- a/builtin/providers/aws/resource_aws_ami_launch_permission.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAmiLaunchPermission() *schema.Resource { - return &schema.Resource{ - Exists: resourceAwsAmiLaunchPermissionExists, - Create: resourceAwsAmiLaunchPermissionCreate, - Read: resourceAwsAmiLaunchPermissionRead, - Delete: resourceAwsAmiLaunchPermissionDelete, - - Schema: map[string]*schema.Schema{ - "image_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "account_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsAmiLaunchPermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*AWSClient).ec2conn - - image_id := d.Get("image_id").(string) - account_id := d.Get("account_id").(string) - return hasLaunchPermission(conn, image_id, account_id) -} - -func resourceAwsAmiLaunchPermissionCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - image_id := d.Get("image_id").(string) - account_id := d.Get("account_id").(string) - - _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ - ImageId: aws.String(image_id), - Attribute: 
aws.String("launchPermission"), - LaunchPermission: &ec2.LaunchPermissionModifications{ - Add: []*ec2.LaunchPermission{ - &ec2.LaunchPermission{UserId: aws.String(account_id)}, - }, - }, - }) - if err != nil { - return fmt.Errorf("error creating ami launch permission: %s", err) - } - - d.SetId(fmt.Sprintf("%s-%s", image_id, account_id)) - return nil -} - -func resourceAwsAmiLaunchPermissionRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func resourceAwsAmiLaunchPermissionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - image_id := d.Get("image_id").(string) - account_id := d.Get("account_id").(string) - - _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ - ImageId: aws.String(image_id), - Attribute: aws.String("launchPermission"), - LaunchPermission: &ec2.LaunchPermissionModifications{ - Remove: []*ec2.LaunchPermission{ - &ec2.LaunchPermission{UserId: aws.String(account_id)}, - }, - }, - }) - if err != nil { - return fmt.Errorf("error removing ami launch permission: %s", err) - } - - return nil -} - -func hasLaunchPermission(conn *ec2.EC2, image_id string, account_id string) (bool, error) { - attrs, err := conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{ - ImageId: aws.String(image_id), - Attribute: aws.String("launchPermission"), - }) - if err != nil { - // When an AMI disappears out from under a launch permission resource, we will - // see either InvalidAMIID.NotFound or InvalidAMIID.Unavailable. 
- if ec2err, ok := err.(awserr.Error); ok && strings.HasPrefix(ec2err.Code(), "InvalidAMIID") { - log.Printf("[DEBUG] %s no longer exists, so we'll drop launch permission for %s from the state", image_id, account_id) - return false, nil - } - return false, err - } - - for _, lp := range attrs.LaunchPermissions { - if *lp.UserId == account_id { - return true, nil - } - } - return false, nil -} diff --git a/builtin/providers/aws/resource_aws_ami_launch_permission_test.go b/builtin/providers/aws/resource_aws_ami_launch_permission_test.go deleted file mode 100644 index 4ccb35c7c..000000000 --- a/builtin/providers/aws/resource_aws_ami_launch_permission_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package aws - -import ( - "fmt" - "os" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAMILaunchPermission_Basic(t *testing.T) { - imageID := "" - accountID := os.Getenv("AWS_ACCOUNT_ID") - - r.Test(t, r.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if os.Getenv("AWS_ACCOUNT_ID") == "" { - t.Fatal("AWS_ACCOUNT_ID must be set") - } - }, - Providers: testAccProviders, - Steps: []r.TestStep{ - // Scaffold everything - r.TestStep{ - Config: testAccAWSAMILaunchPermissionConfig(accountID, true), - Check: r.ComposeTestCheckFunc( - testCheckResourceGetAttr("aws_ami_copy.test", "id", &imageID), - testAccAWSAMILaunchPermissionExists(accountID, &imageID), - ), - }, - // Drop just launch permission to test destruction - r.TestStep{ - Config: testAccAWSAMILaunchPermissionConfig(accountID, false), - Check: r.ComposeTestCheckFunc( - testAccAWSAMILaunchPermissionDestroyed(accountID, &imageID), - ), - }, - // Re-add everything so we can test when AMI disappears - r.TestStep{ - Config: testAccAWSAMILaunchPermissionConfig(accountID, true), - Check: r.ComposeTestCheckFunc( - testCheckResourceGetAttr("aws_ami_copy.test", "id", 
&imageID), - testAccAWSAMILaunchPermissionExists(accountID, &imageID), - ), - }, - // Here we delete the AMI to verify the follow-on refresh after this step - // should not error. - r.TestStep{ - Config: testAccAWSAMILaunchPermissionConfig(accountID, true), - Check: r.ComposeTestCheckFunc( - testAccAWSAMIDisappears(&imageID), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckResourceGetAttr(name, key string, value *string) r.TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - is := rs.Primary - if is == nil { - return fmt.Errorf("No primary instance: %s", name) - } - - *value = is.Attributes[key] - return nil - } -} - -func testAccAWSAMILaunchPermissionExists(accountID string, imageID *string) r.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - if has, err := hasLaunchPermission(conn, *imageID, accountID); err != nil { - return err - } else if !has { - return fmt.Errorf("launch permission does not exist for '%s' on '%s'", accountID, *imageID) - } - return nil - } -} - -func testAccAWSAMILaunchPermissionDestroyed(accountID string, imageID *string) r.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - if has, err := hasLaunchPermission(conn, *imageID, accountID); err != nil { - return err - } else if has { - return fmt.Errorf("launch permission still exists for '%s' on '%s'", accountID, *imageID) - } - return nil - } -} - -// testAccAWSAMIDisappears is technically a "test check function" but really it -// exists to perform a side effect of deleting an AMI out from under a resource -// so we can test that Terraform will react properly -func testAccAWSAMIDisappears(imageID *string) r.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := 
&ec2.DeregisterImageInput{ - ImageId: aws.String(*imageID), - } - - _, err := conn.DeregisterImage(req) - if err != nil { - return err - } - - if err := resourceAwsAmiWaitForDestroy(*imageID, conn); err != nil { - return err - } - return nil - } -} - -func testAccAWSAMILaunchPermissionConfig(accountID string, includeLaunchPermission bool) string { - base := ` -resource "aws_ami_copy" "test" { - name = "launch-permission-test" - description = "Launch Permission Test Copy" - source_ami_id = "ami-7172b611" - source_ami_region = "us-west-2" -} -` - - if !includeLaunchPermission { - return base - } - - return base + fmt.Sprintf(` -resource "aws_ami_launch_permission" "self-test" { - image_id = "${aws_ami_copy.test.id}" - account_id = "%s" -} -`, accountID) -} diff --git a/builtin/providers/aws/resource_aws_ami_test.go b/builtin/providers/aws/resource_aws_ami_test.go deleted file mode 100644 index 2f2e481a4..000000000 --- a/builtin/providers/aws/resource_aws_ami_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAMI_basic(t *testing.T) { - var ami ec2.Image - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAmiDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAmiConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAmiExists("aws_ami.foo", &ami), - resource.TestCheckResourceAttr( - "aws_ami.foo", "name", fmt.Sprintf("tf-testing-%d", rInt)), - ), - }, - }, - }) -} - -func TestAccAWSAMI_snapshotSize(t *testing.T) { - var ami ec2.Image - var bd ec2.BlockDeviceMapping - rInt := acctest.RandInt() - - expectedDevice := &ec2.EbsBlockDevice{ - 
DeleteOnTermination: aws.Bool(true), - Encrypted: aws.Bool(false), - Iops: aws.Int64(0), - VolumeSize: aws.Int64(20), - VolumeType: aws.String("standard"), - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAmiDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAmiConfig_snapshotSize(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAmiExists("aws_ami.foo", &ami), - testAccCheckAmiBlockDevice(&ami, &bd, "/dev/sda1"), - testAccCheckAmiEbsBlockDevice(&bd, expectedDevice), - resource.TestCheckResourceAttr( - "aws_ami.foo", "name", fmt.Sprintf("tf-testing-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_ami.foo", "architecture", "x86_64"), - ), - }, - }, - }) -} - -func testAccCheckAmiDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ami" { - continue - } - - // Try to find the AMI - log.Printf("AMI-ID: %s", rs.Primary.ID) - DescribeAmiOpts := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeImages(DescribeAmiOpts) - if err != nil { - if isAWSErr(err, "InvalidAMIID", "NotFound") { - log.Printf("[DEBUG] AMI not found, passing") - return nil - } - return err - } - - if len(resp.Images) > 0 { - state := resp.Images[0].State - return fmt.Errorf("AMI %s still exists in the state: %s.", *resp.Images[0].ImageId, *state) - } - } - return nil -} - -func testAccCheckAmiExists(n string, ami *ec2.Image) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("AMI Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No AMI ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - opts := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := 
conn.DescribeImages(opts) - if err != nil { - return err - } - if len(resp.Images) == 0 { - return fmt.Errorf("AMI not found") - } - *ami = *resp.Images[0] - return nil - } -} - -func testAccCheckAmiBlockDevice(ami *ec2.Image, blockDevice *ec2.BlockDeviceMapping, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - devices := make(map[string]*ec2.BlockDeviceMapping) - for _, device := range ami.BlockDeviceMappings { - devices[*device.DeviceName] = device - } - - // Check if the block device exists - if _, ok := devices[n]; !ok { - return fmt.Errorf("block device doesn't exist: %s", n) - } - - *blockDevice = *devices[n] - return nil - } -} - -func testAccCheckAmiEbsBlockDevice(bd *ec2.BlockDeviceMapping, ed *ec2.EbsBlockDevice) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Test for things that ed has, don't care about unset values - cd := bd.Ebs - if ed.VolumeType != nil { - if *ed.VolumeType != *cd.VolumeType { - return fmt.Errorf("Volume type mismatch. Expected: %s Got: %s", - *ed.VolumeType, *cd.VolumeType) - } - } - if ed.DeleteOnTermination != nil { - if *ed.DeleteOnTermination != *cd.DeleteOnTermination { - return fmt.Errorf("DeleteOnTermination mismatch. Expected: %t Got: %t", - *ed.DeleteOnTermination, *cd.DeleteOnTermination) - } - } - if ed.Encrypted != nil { - if *ed.Encrypted != *cd.Encrypted { - return fmt.Errorf("Encrypted mismatch. Expected: %t Got: %t", - *ed.Encrypted, *cd.Encrypted) - } - } - // Integer defaults need to not be `0` so we don't get a panic - if ed.Iops != nil && *ed.Iops != 0 { - if *ed.Iops != *cd.Iops { - return fmt.Errorf("IOPS mismatch. Expected: %d Got: %d", - *ed.Iops, *cd.Iops) - } - } - if ed.VolumeSize != nil && *ed.VolumeSize != 0 { - if *ed.VolumeSize != *cd.VolumeSize { - return fmt.Errorf("Volume Size mismatch. 
Expected: %d Got: %d", - *ed.VolumeSize, *cd.VolumeSize) - } - } - - return nil - } -} - -func testAccAmiConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "aws_ebs_volume" "foo" { - availability_zone = "us-west-2a" - size = 8 - tags { - Name = "testAccAmiConfig_basic" - } -} - -resource "aws_ebs_snapshot" "foo" { - volume_id = "${aws_ebs_volume.foo.id}" -} - -resource "aws_ami" "foo" { - name = "tf-testing-%d" - virtualization_type = "hvm" - root_device_name = "/dev/sda1" - ebs_block_device { - device_name = "/dev/sda1" - snapshot_id = "${aws_ebs_snapshot.foo.id}" - } -} - `, rInt) -} - -func testAccAmiConfig_snapshotSize(rInt int) string { - return fmt.Sprintf(` -resource "aws_ebs_volume" "foo" { - availability_zone = "us-west-2a" - size = 20 - tags { - Name = "testAccAmiConfig_snapshotSize" - } -} - -resource "aws_ebs_snapshot" "foo" { - volume_id = "${aws_ebs_volume.foo.id}" -} - -resource "aws_ami" "foo" { - name = "tf-testing-%d" - virtualization_type = "hvm" - root_device_name = "/dev/sda1" - ebs_block_device { - device_name = "/dev/sda1" - snapshot_id = "${aws_ebs_snapshot.foo.id}" - } -} - `, rInt) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_account.go b/builtin/providers/aws/resource_aws_api_gateway_account.go deleted file mode 100644 index 7b786270a..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_account.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsApiGatewayAccount() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsApiGatewayAccountUpdate, - Read: resourceAwsApiGatewayAccountRead, - Update: resourceAwsApiGatewayAccountUpdate, - Delete: resourceAwsApiGatewayAccountDelete, - Importer: &schema.ResourceImporter{ - State: 
schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "cloudwatch_role_arn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "throttle_settings": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "burst_limit": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - "rate_limit": &schema.Schema{ - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsApiGatewayAccountRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[INFO] Reading API Gateway Account %s", d.Id()) - account, err := conn.GetAccount(&apigateway.GetAccountInput{}) - if err != nil { - return err - } - - log.Printf("[DEBUG] Received API Gateway Account: %s", account) - - if _, ok := d.GetOk("cloudwatch_role_arn"); ok { - // CloudwatchRoleArn cannot be empty nor made empty via API - // This resource can however be useful w/out defining cloudwatch_role_arn - // (e.g. 
for referencing throttle_settings) - d.Set("cloudwatch_role_arn", account.CloudwatchRoleArn) - } - d.Set("throttle_settings", flattenApiGatewayThrottleSettings(account.ThrottleSettings)) - - return nil -} - -func resourceAwsApiGatewayAccountUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - input := apigateway.UpdateAccountInput{} - operations := make([]*apigateway.PatchOperation, 0) - - if d.HasChange("cloudwatch_role_arn") { - arn := d.Get("cloudwatch_role_arn").(string) - if len(arn) > 0 { - // Unfortunately AWS API doesn't allow empty ARNs, - // even though that's default settings for new AWS accounts - // BadRequestException: The role ARN is not well formed - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/cloudwatchRoleArn"), - Value: aws.String(arn), - }) - } - } - input.PatchOperations = operations - - log.Printf("[INFO] Updating API Gateway Account: %s", input) - - // Retry due to eventual consistency of IAM - expectedErrMsg := "The role ARN does not have required permissions set to API Gateway" - otherErrMsg := "API Gateway could not successfully write to CloudWatch Logs using the ARN specified" - var out *apigateway.Account - var err error - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - out, err = conn.UpdateAccount(&input) - - if err != nil { - if isAWSErr(err, "BadRequestException", expectedErrMsg) || - isAWSErr(err, "BadRequestException", otherErrMsg) { - log.Printf("[DEBUG] Retrying API Gateway Account update: %s", err) - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - - return nil - }) - if err != nil { - return fmt.Errorf("Updating API Gateway Account failed: %s", err) - } - log.Printf("[DEBUG] API Gateway Account updated: %s", out) - - d.SetId("api-gateway-account") - return resourceAwsApiGatewayAccountRead(d, meta) -} - -func resourceAwsApiGatewayAccountDelete(d 
*schema.ResourceData, meta interface{}) error { - // There is no API for "deleting" account or resetting it to "default" settings - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_account_test.go b/builtin/providers/aws/resource_aws_api_gateway_account_test.go deleted file mode 100644 index c50339f7e..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_account_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAPIGatewayAccount_basic(t *testing.T) { - var conf apigateway.Account - - expectedRoleArn_first := regexp.MustCompile("[0-9]+") - expectedRoleArn_second := regexp.MustCompile("[0-9]+") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAPIGatewayAccountConfig_updated, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), - testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_first), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_first), - ), - }, - resource.TestStep{ - Config: testAccAWSAPIGatewayAccountConfig_updated2, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), - testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_second), - ), - }, - resource.TestStep{ - Config: testAccAWSAPIGatewayAccountConfig_empty, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), - testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(conf *apigateway.Account, expectedArn *regexp.Regexp) resource.TestCheckFunc { - return func(s *terraform.State) error { - if expectedArn == nil && conf.CloudwatchRoleArn == nil { - return nil - } - if expectedArn == nil && conf.CloudwatchRoleArn != nil { - return fmt.Errorf("Expected empty CloudwatchRoleArn, given: %q", *conf.CloudwatchRoleArn) - } - if expectedArn != nil && conf.CloudwatchRoleArn == nil { - return fmt.Errorf("Empty CloudwatchRoleArn, expected: %q", expectedArn) - } - if !expectedArn.MatchString(*conf.CloudwatchRoleArn) { - return fmt.Errorf("CloudwatchRoleArn didn't match. Expected: %q, Given: %q", expectedArn, *conf.CloudwatchRoleArn) - } - return nil - } -} - -func testAccCheckAWSAPIGatewayAccountExists(n string, res *apigateway.Account) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No API Gateway Account ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetAccountInput{} - describe, err := conn.GetAccount(req) - if err != nil { - return err - } - if describe == nil { - return fmt.Errorf("Got nil account ?!") - } - - *res = *describe - - return nil - } -} - -func testAccCheckAWSAPIGatewayAccountDestroy(s *terraform.State) error { - // Intentionally noop - // as there is no API method for deleting or resetting account settings - return nil -} - -const testAccAWSAPIGatewayAccountConfig_empty = ` -resource "aws_api_gateway_account" "test" { -} -` - -const testAccAWSAPIGatewayAccountConfig_updated = ` -resource "aws_api_gateway_account" "test" { - cloudwatch_role_arn = 
"${aws_iam_role.cloudwatch.arn}" -} - -resource "aws_iam_role" "cloudwatch" { - name = "api_gateway_cloudwatch_global" - assume_role_policy = <Foo"), - ), - }, - - { - Config: testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"), - resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "0"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "0"), - ), - }, - - { - Config: testAccAWSAPIGatewayIntegrationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"), - resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"), - 
resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "2"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Authorization", "'static'"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "2"), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/json", ""), - resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayIntegrationExists(n string, res *apigateway.Integration) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No API Gateway Method ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetIntegrationInput{ - HttpMethod: aws.String("GET"), - ResourceId: aws.String(s.RootModule().Resources["aws_api_gateway_resource.test"].Primary.ID), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - } - describe, err := conn.GetIntegration(req) - if err != nil { - return err - } - - *res = *describe - - return nil - } -} - -func testAccCheckAWSAPIGatewayIntegrationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigateway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_api_gateway_integration" { - continue - } - - req := &apigateway.GetIntegrationInput{ - HttpMethod: aws.String("GET"), - ResourceId: aws.String(s.RootModule().Resources["aws_api_gateway_resource.test"].Primary.ID), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - } - _, err := conn.GetIntegration(req) - - if err == nil { 
- return fmt.Errorf("API Gateway Method still exists") - } - - aws2err, ok := err.(awserr.Error) - if !ok { - return err - } - if aws2err.Code() != "NotFoundException" { - return err - } - - return nil - } - - return nil -} - -const testAccAWSAPIGatewayIntegrationConfig = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - "application/json" = "Error" - } -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - - request_templates = { - "application/json" = "" - "application/xml" = "#set($inputRoot = $input.path('$'))\n{ }" - } - - request_parameters = { - "integration.request.header.X-Authorization" = "'static'" - "integration.request.header.X-Foo" = "'Bar'" - } - - type = "HTTP" - uri = "https://www.google.de" - integration_http_method = "GET" - passthrough_behavior = "WHEN_NO_MATCH" - content_handling = "CONVERT_TO_TEXT" -} -` - -const testAccAWSAPIGatewayIntegrationConfigUpdate = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - 
"application/json" = "Error" - } -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - - request_templates = { - "application/json" = "{'foobar': 'bar}" - "text/html" = "Foo" - } - - request_parameters = { - "integration.request.header.X-Authorization" = "'updated'" - "integration.request.header.X-FooBar" = "'Baz'" - } - - type = "HTTP" - uri = "https://www.google.de" - integration_http_method = "GET" - passthrough_behavior = "WHEN_NO_MATCH" - content_handling = "CONVERT_TO_TEXT" -} -` - -const testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - "application/json" = "Error" - } -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - - type = "HTTP" - uri = "https://www.google.de" - integration_http_method = "GET" - passthrough_behavior = "WHEN_NO_MATCH" - content_handling = "CONVERT_TO_TEXT" -} -` diff --git a/builtin/providers/aws/resource_aws_api_gateway_method.go b/builtin/providers/aws/resource_aws_api_gateway_method.go deleted file mode 100644 index 577c44e15..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_method.go +++ /dev/null @@ -1,270 +0,0 @@ -package aws - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - "time" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsApiGatewayMethod() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsApiGatewayMethodCreate, - Read: resourceAwsApiGatewayMethodRead, - Update: resourceAwsApiGatewayMethodUpdate, - Delete: resourceAwsApiGatewayMethodDelete, - - Schema: map[string]*schema.Schema{ - "rest_api_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "http_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateHTTPMethod, - }, - - "authorization": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "authorizer_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "api_key_required": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "request_models": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - - "request_parameters": &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"request_parameters_in_json"}, - }, - - "request_parameters_in_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"request_parameters"}, - Deprecated: "Use field request_parameters instead", - }, - }, - } -} - -func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - input := apigateway.PutMethodInput{ - AuthorizationType: aws.String(d.Get("authorization").(string)), - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - 
RestApiId: aws.String(d.Get("rest_api_id").(string)), - ApiKeyRequired: aws.Bool(d.Get("api_key_required").(bool)), - } - - models := make(map[string]string) - for k, v := range d.Get("request_models").(map[string]interface{}) { - models[k] = v.(string) - } - if len(models) > 0 { - input.RequestModels = aws.StringMap(models) - } - - parameters := make(map[string]bool) - if kv, ok := d.GetOk("request_parameters"); ok { - for k, v := range kv.(map[string]interface{}) { - parameters[k], ok = v.(bool) - if !ok { - value, _ := strconv.ParseBool(v.(string)) - parameters[k] = value - } - } - input.RequestParameters = aws.BoolMap(parameters) - } - if v, ok := d.GetOk("request_parameters_in_json"); ok { - if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { - return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err) - } - input.RequestParameters = aws.BoolMap(parameters) - } - - if v, ok := d.GetOk("authorizer_id"); ok { - input.AuthorizerId = aws.String(v.(string)) - } - - _, err := conn.PutMethod(&input) - if err != nil { - return fmt.Errorf("Error creating API Gateway Method: %s", err) - } - - d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string))) - log.Printf("[DEBUG] API Gateway Method ID: %s", d.Id()) - - return nil -} - -func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id()) - out, err := conn.GetMethod(&apigateway.GetMethodInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - d.SetId("") - return nil - } - return err - } - log.Printf("[DEBUG] Received API Gateway Method: %s", out) - 
d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string))) - d.Set("request_parameters", aws.BoolValueMap(out.RequestParameters)) - d.Set("request_parameters_in_json", aws.BoolValueMap(out.RequestParameters)) - d.Set("api_key_required", out.ApiKeyRequired) - d.Set("authorization_type", out.AuthorizationType) - d.Set("authorizer_id", out.AuthorizerId) - d.Set("request_models", aws.StringValueMap(out.RequestModels)) - - return nil -} - -func resourceAwsApiGatewayMethodUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id()) - operations := make([]*apigateway.PatchOperation, 0) - if d.HasChange("resource_id") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/resourceId"), - Value: aws.String(d.Get("resource_id").(string)), - }) - } - - if d.HasChange("request_models") { - operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "request_models", "requestModels")...) - } - - if d.HasChange("request_parameters_in_json") { - ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "request_parameters_in_json", "requestParameters") - if err != nil { - return err - } - operations = append(operations, ops...) - } - - if d.HasChange("request_parameters") { - parameters := make(map[string]bool) - var ok bool - for k, v := range d.Get("request_parameters").(map[string]interface{}) { - parameters[k], ok = v.(bool) - if !ok { - value, _ := strconv.ParseBool(v.(string)) - parameters[k] = value - } - } - ops, err := expandApiGatewayMethodParametersOperations(d, "request_parameters", "requestParameters") - if err != nil { - return err - } - operations = append(operations, ops...) 
- } - - if d.HasChange("authorization") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/authorizationType"), - Value: aws.String(d.Get("authorization").(string)), - }) - } - - if d.HasChange("authorizer_id") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/authorizerId"), - Value: aws.String(d.Get("authorizer_id").(string)), - }) - } - - if d.HasChange("api_key_required") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/apiKeyRequired"), - Value: aws.String(fmt.Sprintf("%t", d.Get("api_key_required").(bool))), - }) - } - - method, err := conn.UpdateMethod(&apigateway.UpdateMethodInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - PatchOperations: operations, - }) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Received API Gateway Method: %s", method) - - return resourceAwsApiGatewayMethodRead(d, meta) -} - -func resourceAwsApiGatewayMethodDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Printf("[DEBUG] Deleting API Gateway Method: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteMethod(&apigateway.DeleteMethodInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - }) - if err == nil { - return nil - } - - apigatewayErr, ok := err.(awserr.Error) - if apigatewayErr.Code() == "NotFoundException" { - return nil - } - - if !ok { - return resource.NonRetryableError(err) - } - - return resource.NonRetryableError(err) - }) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_response.go 
b/builtin/providers/aws/resource_aws_api_gateway_method_response.go deleted file mode 100644 index b0b929ad7..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_method_response.go +++ /dev/null @@ -1,210 +0,0 @@ -package aws - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsApiGatewayMethodResponse() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsApiGatewayMethodResponseCreate, - Read: resourceAwsApiGatewayMethodResponseRead, - Update: resourceAwsApiGatewayMethodResponseUpdate, - Delete: resourceAwsApiGatewayMethodResponseDelete, - - Schema: map[string]*schema.Schema{ - "rest_api_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "http_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateHTTPMethod, - }, - - "status_code": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "response_models": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - - "response_parameters": &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"response_parameters_in_json"}, - }, - - "response_parameters_in_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"response_parameters"}, - Deprecated: "Use field response_parameters instead", - }, - }, - } -} - -func resourceAwsApiGatewayMethodResponseCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - models := make(map[string]string) - for k, v := 
range d.Get("response_models").(map[string]interface{}) { - models[k] = v.(string) - } - - parameters := make(map[string]bool) - if kv, ok := d.GetOk("response_parameters"); ok { - for k, v := range kv.(map[string]interface{}) { - parameters[k], ok = v.(bool) - if !ok { - value, _ := strconv.ParseBool(v.(string)) - parameters[k] = value - } - } - } - if v, ok := d.GetOk("response_parameters_in_json"); ok { - if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { - return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err) - } - } - - _, err := conn.PutMethodResponse(&apigateway.PutMethodResponseInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StatusCode: aws.String(d.Get("status_code").(string)), - ResponseModels: aws.StringMap(models), - ResponseParameters: aws.BoolMap(parameters), - }) - if err != nil { - return fmt.Errorf("Error creating API Gateway Method Response: %s", err) - } - - d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string))) - log.Printf("[DEBUG] API Gateway Method ID: %s", d.Id()) - - return nil -} - -func resourceAwsApiGatewayMethodResponseRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id()) - methodResponse, err := conn.GetMethodResponse(&apigateway.GetMethodResponseInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StatusCode: aws.String(d.Get("status_code").(string)), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - d.SetId("") - return nil - } - return err - } - - log.Printf("[DEBUG] 
Received API Gateway Method: %s", methodResponse) - d.Set("response_models", aws.StringValueMap(methodResponse.ResponseModels)) - d.Set("response_parameters", aws.BoolValueMap(methodResponse.ResponseParameters)) - d.Set("response_parameters_in_json", aws.BoolValueMap(methodResponse.ResponseParameters)) - d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string))) - - return nil -} - -func resourceAwsApiGatewayMethodResponseUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Updating API Gateway Method Response %s", d.Id()) - operations := make([]*apigateway.PatchOperation, 0) - - if d.HasChange("response_models") { - operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "response_models", "responseModels")...) - } - - if d.HasChange("response_parameters_in_json") { - ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "response_parameters_in_json", "responseParameters") - if err != nil { - return err - } - operations = append(operations, ops...) - } - - if d.HasChange("response_parameters") { - ops, err := expandApiGatewayMethodParametersOperations(d, "response_parameters", "responseParameters") - if err != nil { - return err - } - operations = append(operations, ops...) 
- } - - out, err := conn.UpdateMethodResponse(&apigateway.UpdateMethodResponseInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StatusCode: aws.String(d.Get("status_code").(string)), - PatchOperations: operations, - }) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Received API Gateway Method Response: %s", out) - - return resourceAwsApiGatewayMethodResponseRead(d, meta) -} - -func resourceAwsApiGatewayMethodResponseDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Printf("[DEBUG] Deleting API Gateway Method Response: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteMethodResponse(&apigateway.DeleteMethodResponseInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StatusCode: aws.String(d.Get("status_code").(string)), - }) - if err == nil { - return nil - } - - apigatewayErr, ok := err.(awserr.Error) - if apigatewayErr.Code() == "NotFoundException" { - return nil - } - - if !ok { - return resource.NonRetryableError(err) - } - - return resource.NonRetryableError(err) - }) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_response_test.go b/builtin/providers/aws/resource_aws_api_gateway_method_response_test.go deleted file mode 100644 index 514cb1db1..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_method_response_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccAWSAPIGatewayMethodResponse_basic(t *testing.T) { - var conf apigateway.MethodResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayMethodResponseDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAPIGatewayMethodResponseConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodResponseExists("aws_api_gateway_method_response.error", &conf), - testAccCheckAWSAPIGatewayMethodResponseAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_api_gateway_method_response.error", "status_code", "400"), - resource.TestCheckResourceAttr( - "aws_api_gateway_method_response.error", "response_models.application/json", "Error"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAPIGatewayMethodResponseConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodResponseExists("aws_api_gateway_method_response.error", &conf), - testAccCheckAWSAPIGatewayMethodResponseAttributesUpdate(&conf), - resource.TestCheckResourceAttr( - "aws_api_gateway_method_response.error", "status_code", "400"), - resource.TestCheckResourceAttr( - "aws_api_gateway_method_response.error", "response_models.application/json", "Empty"), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayMethodResponseAttributes(conf *apigateway.MethodResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.StatusCode == "" { - return fmt.Errorf("empty StatusCode") - } - if val, ok := conf.ResponseModels["application/json"]; !ok { - return fmt.Errorf("missing application/json ResponseModel") - } else { - if *val != "Error" { - return fmt.Errorf("wrong application/json ResponseModel") - } - } - if val, ok := conf.ResponseParameters["method.response.header.Content-Type"]; !ok { - return fmt.Errorf("missing Content-Type ResponseParameters") - } else { - if *val != true { - return 
fmt.Errorf("wrong ResponseParameters value") - } - } - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodResponseAttributesUpdate(conf *apigateway.MethodResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.StatusCode == "" { - return fmt.Errorf("empty StatusCode") - } - if val, ok := conf.ResponseModels["application/json"]; !ok { - return fmt.Errorf("missing application/json ResponseModel") - } else { - if *val != "Empty" { - return fmt.Errorf("wrong application/json ResponseModel") - } - } - if conf.ResponseParameters["method.response.header.Content-Type"] != nil { - return fmt.Errorf("Content-Type ResponseParameters shouldn't exist") - } - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodResponseExists(n string, res *apigateway.MethodResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No API Gateway Method ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetMethodResponseInput{ - HttpMethod: aws.String("GET"), - ResourceId: aws.String(s.RootModule().Resources["aws_api_gateway_resource.test"].Primary.ID), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - StatusCode: aws.String(rs.Primary.Attributes["status_code"]), - } - describe, err := conn.GetMethodResponse(req) - if err != nil { - return err - } - - *res = *describe - - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodResponseDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigateway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_api_gateway_method_response" { - continue - } - - req := &apigateway.GetMethodResponseInput{ - HttpMethod: aws.String("GET"), - ResourceId: 
aws.String(s.RootModule().Resources["aws_api_gateway_resource.test"].Primary.ID), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - StatusCode: aws.String(rs.Primary.Attributes["status_code"]), - } - _, err := conn.GetMethodResponse(req) - - if err == nil { - return fmt.Errorf("API Gateway Method still exists") - } - - aws2err, ok := err.(awserr.Error) - if !ok { - return err - } - if aws2err.Code() != "NotFoundException" { - return err - } - - return nil - } - - return nil -} - -const testAccAWSAPIGatewayMethodResponseConfig = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - "application/json" = "Error" - } -} - -resource "aws_api_gateway_method_response" "error" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - status_code = "400" - - response_models = { - "application/json" = "Error" - } - - response_parameters = { - "method.response.header.Content-Type" = true - } -} -` - -const testAccAWSAPIGatewayMethodResponseConfigUpdate = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - 
authorization = "NONE" - - request_models = { - "application/json" = "Error" - } -} - -resource "aws_api_gateway_method_response" "error" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - status_code = "400" - - response_models = { - "application/json" = "Empty" - } - - response_parameters = { - "method.response.header.Host" = true - } -} -` diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_settings.go b/builtin/providers/aws/resource_aws_api_gateway_method_settings.go deleted file mode 100644 index 06d5efd01..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_method_settings.go +++ /dev/null @@ -1,248 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsApiGatewayMethodSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsApiGatewayMethodSettingsUpdate, - Read: resourceAwsApiGatewayMethodSettingsRead, - Update: resourceAwsApiGatewayMethodSettingsUpdate, - Delete: resourceAwsApiGatewayMethodSettingsDelete, - - Schema: map[string]*schema.Schema{ - "rest_api_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "stage_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "method_path": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "settings": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metrics_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "logging_level": { - Type: schema.TypeString, - Optional: true, - }, - "data_trace_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "throttling_burst_limit": { - Type: schema.TypeInt, - 
Optional: true, - }, - "throttling_rate_limit": { - Type: schema.TypeFloat, - Optional: true, - }, - "caching_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "cache_ttl_in_seconds": { - Type: schema.TypeInt, - Optional: true, - }, - "cache_data_encrypted": { - Type: schema.TypeBool, - Optional: true, - }, - "require_authorization_for_cache_control": { - Type: schema.TypeBool, - Optional: true, - }, - "unauthorized_cache_control_header_strategy": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsApiGatewayMethodSettingsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Reading API Gateway Method Settings %s", d.Id()) - input := apigateway.GetStageInput{ - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StageName: aws.String(d.Get("stage_name").(string)), - } - stage, err := conn.GetStage(&input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - log.Printf("[WARN] API Gateway Stage %s not found, removing method settings", d.Id()) - d.SetId("") - return nil - } - return err - } - log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) - - methodPath := d.Get("method_path").(string) - settings, ok := stage.MethodSettings[methodPath] - if !ok { - log.Printf("[WARN] API Gateway Method Settings for %q not found, removing", methodPath) - d.SetId("") - return nil - } - - d.Set("settings.0.metrics_enabled", settings.MetricsEnabled) - d.Set("settings.0.logging_level", settings.LoggingLevel) - d.Set("settings.0.data_trace_enabled", settings.DataTraceEnabled) - d.Set("settings.0.throttling_burst_limit", settings.ThrottlingBurstLimit) - d.Set("settings.0.throttling_rate_limit", settings.ThrottlingRateLimit) - d.Set("settings.0.caching_enabled", settings.CachingEnabled) - d.Set("settings.0.cache_ttl_in_seconds", settings.CacheTtlInSeconds) - d.Set("settings.0.cache_data_encrypted", 
settings.CacheDataEncrypted) - d.Set("settings.0.require_authorization_for_cache_control", settings.RequireAuthorizationForCacheControl) - d.Set("settings.0.unauthorized_cache_control_header_strategy", settings.UnauthorizedCacheControlHeaderStrategy) - - return nil -} - -func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - methodPath := d.Get("method_path").(string) - prefix := fmt.Sprintf("/%s/", methodPath) - - ops := make([]*apigateway.PatchOperation, 0) - if d.HasChange("settings.0.metrics_enabled") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "metrics/enabled"), - Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.metrics_enabled").(bool))), - }) - } - if d.HasChange("settings.0.logging_level") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "logging/loglevel"), - Value: aws.String(d.Get("settings.0.logging_level").(string)), - }) - } - if d.HasChange("settings.0.data_trace_enabled") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "logging/dataTrace"), - Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.data_trace_enabled").(bool))), - }) - } - - if d.HasChange("settings.0.throttling_burst_limit") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "throttling/burstLimit"), - Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.throttling_burst_limit").(int))), - }) - } - if d.HasChange("settings.0.throttling_rate_limit") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "throttling/rateLimit"), - Value: aws.String(fmt.Sprintf("%f", d.Get("settings.0.throttling_rate_limit").(float64))), - }) - } - if d.HasChange("settings.0.caching_enabled") { - ops = append(ops, 
&apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "caching/enabled"), - Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.caching_enabled").(bool))), - }) - } - if d.HasChange("settings.0.cache_ttl_in_seconds") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "caching/ttlInSeconds"), - Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_ttl_in_seconds").(int))), - }) - } - if d.HasChange("settings.0.cache_data_encrypted") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "caching/dataEncrypted"), - Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_data_encrypted").(int))), - }) - } - if d.HasChange("settings.0.require_authorization_for_cache_control") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "caching/requireAuthorizationForCacheControl"), - Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.require_authorization_for_cache_control").(bool))), - }) - } - if d.HasChange("settings.0.unauthorized_cache_control_header_strategy") { - ops = append(ops, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String(prefix + "caching/unauthorizedCacheControlHeaderStrategy"), - Value: aws.String(d.Get("settings.0.unauthorized_cache_control_header_strategy").(string)), - }) - } - - restApiId := d.Get("rest_api_id").(string) - stageName := d.Get("stage_name").(string) - input := apigateway.UpdateStageInput{ - RestApiId: aws.String(restApiId), - StageName: aws.String(stageName), - PatchOperations: ops, - } - log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) - _, err := conn.UpdateStage(&input) - if err != nil { - return fmt.Errorf("Updating API Gateway Stage failed: %s", err) - } - - d.SetId(restApiId + "-" + stageName + "-" + methodPath) - - return resourceAwsApiGatewayMethodSettingsRead(d, meta) -} - -func 
resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Printf("[DEBUG] Deleting API Gateway Method Settings: %s", d.Id()) - - input := apigateway.UpdateStageInput{ - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StageName: aws.String(d.Get("stage_name").(string)), - PatchOperations: []*apigateway.PatchOperation{ - { - Op: aws.String("remove"), - Path: aws.String(fmt.Sprintf("/%s", d.Get("method_path").(string))), - }, - }, - } - log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) - _, err := conn.UpdateStage(&input) - if err != nil { - return fmt.Errorf("Updating API Gateway Stage failed: %s", err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go b/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go deleted file mode 100644 index 9372a6748..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_method_settings_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAPIGatewayMethodSettings_basic(t *testing.T) { - var stage apigateway.Stage - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayMethodSettingsDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayMethodSettingsConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodSettingsExists("aws_api_gateway_method_settings.test", &stage), - testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(&stage, "test/GET", true), - 
testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(&stage, "test/GET", "INFO"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.#", "1"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.metrics_enabled", "true"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.logging_level", "INFO"), - ), - }, - - { - Config: testAccAWSAPIGatewayMethodSettingsConfigUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodSettingsExists("aws_api_gateway_method_settings.test", &stage), - testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(&stage, "test/GET", false), - testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(&stage, "test/GET", "OFF"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.#", "1"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.metrics_enabled", "false"), - resource.TestCheckResourceAttr("aws_api_gateway_method_settings.test", "settings.0.logging_level", "OFF"), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayMethodSettings_metricsEnabled(conf *apigateway.Stage, path string, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - settings, ok := conf.MethodSettings[path] - if !ok { - return fmt.Errorf("Expected to find method settings for %q", path) - } - - if expected && *settings.MetricsEnabled != expected { - return fmt.Errorf("Expected metrics to be enabled, got %t", *settings.MetricsEnabled) - } - if !expected && *settings.MetricsEnabled != expected { - return fmt.Errorf("Expected metrics to be disabled, got %t", *settings.MetricsEnabled) - } - - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodSettings_loggingLevel(conf *apigateway.Stage, path string, expectedLevel string) resource.TestCheckFunc { - return func(s *terraform.State) error { - settings, ok := conf.MethodSettings[path] - if !ok { - 
return fmt.Errorf("Expected to find method settings for %q", path) - } - - if *settings.LoggingLevel != expectedLevel { - return fmt.Errorf("Expected logging level to match %q, got %q", expectedLevel, *settings.LoggingLevel) - } - - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodSettingsExists(n string, res *apigateway.Stage) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No API Gateway Stage ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetStageInput{ - StageName: aws.String(s.RootModule().Resources["aws_api_gateway_deployment.test"].Primary.Attributes["stage_name"]), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - } - out, err := conn.GetStage(req) - if err != nil { - return err - } - - *res = *out - - return nil - } -} - -func testAccCheckAWSAPIGatewayMethodSettingsDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigateway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_api_gateway_method_settings" { - continue - } - - req := &apigateway.GetStageInput{ - StageName: aws.String(s.RootModule().Resources["aws_api_gateway_deployment.test"].Primary.Attributes["stage_name"]), - RestApiId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - } - out, err := conn.GetStage(req) - if err == nil { - return fmt.Errorf("API Gateway Stage still exists: %s", out) - } - - awsErr, ok := err.(awserr.Error) - if !ok { - return err - } - if awsErr.Code() != "NotFoundException" { - return err - } - - return nil - } - - return nil -} - -func testAccAWSAPIGatewayMethodSettingsConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_api_gateway_rest_api" "test" { - name = "tf-acc-test-apig-method-%d" -} - -resource 
"aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - "application/json" = "Error" - } - - request_parameters = { - "method.request.header.Content-Type" = false, - "method.request.querystring.page" = true - } -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - type = "MOCK" - - request_templates { - "application/xml" = < 0 { - params.ApiStages = as - } - } - - if v, ok := d.GetOk("quota_settings"); ok { - settings := v.(*schema.Set).List() - q, ok := settings[0].(map[string]interface{}) - - if errors := validateApiGatewayUsagePlanQuotaSettings(q); len(errors) > 0 { - return fmt.Errorf("Error validating the quota settings: %v", errors) - } - - if !ok { - return errors.New("At least one field is expected inside quota_settings") - } - - qs := &apigateway.QuotaSettings{} - - if sv, ok := q["limit"].(int); ok { - qs.Limit = aws.Int64(int64(sv)) - } - - if sv, ok := q["offset"].(int); ok { - qs.Offset = aws.Int64(int64(sv)) - } - - if sv, ok := q["period"].(string); ok && sv != "" { - qs.Period = aws.String(sv) - } - - params.Quota = qs - } - - if v, ok := d.GetOk("throttle_settings"); ok { - settings := v.(*schema.Set).List() - q, ok := settings[0].(map[string]interface{}) - - if !ok { - return errors.New("At least one field is expected inside throttle_settings") - } - - ts := &apigateway.ThrottleSettings{} - - if sv, ok := q["burst_limit"].(int); ok { - ts.BurstLimit = aws.Int64(int64(sv)) - } - - if sv, ok := q["rate_limit"].(float64); ok { - 
ts.RateLimit = aws.Float64(float64(sv)) - } - - params.Throttle = ts - } - - up, err := conn.CreateUsagePlan(params) - if err != nil { - return fmt.Errorf("Error creating API Gateway Usage Plan: %s", err) - } - - d.SetId(*up.Id) - - // Handle case of adding the product code since not addable when - // creating the Usage Plan initially. - if v, ok := d.GetOk("product_code"); ok { - updateParameters := &apigateway.UpdateUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - PatchOperations: []*apigateway.PatchOperation{ - { - Op: aws.String("add"), - Path: aws.String("/productCode"), - Value: aws.String(v.(string)), - }, - }, - } - - up, err = conn.UpdateUsagePlan(updateParameters) - if err != nil { - return fmt.Errorf("Error creating the API Gateway Usage Plan product code: %s", err) - } - } - - return resourceAwsApiGatewayUsagePlanRead(d, meta) -} - -func resourceAwsApiGatewayUsagePlanRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Printf("[DEBUG] Reading API Gateway Usage Plan: %s", d.Id()) - - up, err := conn.GetUsagePlan(&apigateway.GetUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - d.SetId("") - return nil - } - return err - } - - d.Set("name", up.Name) - d.Set("description", up.Description) - d.Set("product_code", up.ProductCode) - - if up.ApiStages != nil { - if err := d.Set("api_stages", flattenApiGatewayUsageApiStages(up.ApiStages)); err != nil { - return fmt.Errorf("[DEBUG] Error setting api_stages error: %#v", err) - } - } - - if up.Throttle != nil { - if err := d.Set("throttle_settings", flattenApiGatewayUsagePlanThrottling(up.Throttle)); err != nil { - return fmt.Errorf("[DEBUG] Error setting throttle_settings error: %#v", err) - } - } - - if up.Quota != nil { - if err := d.Set("quota_settings", flattenApiGatewayUsagePlanQuota(up.Quota)); err != nil { - return fmt.Errorf("[DEBUG] Error setting 
quota_settings error: %#v", err) - } - } - - return nil -} - -func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Print("[DEBUG] Updating API Gateway Usage Plan") - - operations := make([]*apigateway.PatchOperation, 0) - - if d.HasChange("name") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/name"), - Value: aws.String(d.Get("name").(string)), - }) - } - - if d.HasChange("description") { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/description"), - Value: aws.String(d.Get("description").(string)), - }) - } - - if d.HasChange("product_code") { - v, ok := d.GetOk("product_code") - - if ok { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/productCode"), - Value: aws.String(v.(string)), - }) - } else { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/productCode"), - }) - } - } - - if d.HasChange("api_stages") { - o, n := d.GetChange("api_stages") - old := o.([]interface{}) - new := n.([]interface{}) - - // Remove every stages associated. Simpler to remove and add new ones, - // since there are no replacings. 
- for _, v := range old { - m := v.(map[string]interface{}) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/apiStages"), - Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))), - }) - } - - // Handle additions - if len(new) > 0 { - for _, v := range new { - m := v.(map[string]interface{}) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/apiStages"), - Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))), - }) - } - } - } - - if d.HasChange("throttle_settings") { - o, n := d.GetChange("throttle_settings") - - os := o.(*schema.Set) - ns := n.(*schema.Set) - diff := ns.Difference(os).List() - - // Handle Removal - if len(diff) == 0 { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/throttle"), - }) - } - - if len(diff) > 0 { - d := diff[0].(map[string]interface{}) - - // Handle Replaces - if o != nil && n != nil { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/throttle/rateLimit"), - Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/throttle/burstLimit"), - Value: aws.String(strconv.Itoa(d["burst_limit"].(int))), - }) - } - - // Handle Additions - if o == nil && n != nil { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/throttle/rateLimit"), - Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/throttle/burstLimit"), - Value: aws.String(strconv.Itoa(d["burst_limit"].(int))), - }) - } - } - } - - if d.HasChange("quota_settings") { - o, n := d.GetChange("quota_settings") - 
- os := o.(*schema.Set) - ns := n.(*schema.Set) - diff := ns.Difference(os).List() - - // Handle Removal - if len(diff) == 0 { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/quota"), - }) - } - - if len(diff) > 0 { - d := diff[0].(map[string]interface{}) - - if errors := validateApiGatewayUsagePlanQuotaSettings(d); len(errors) > 0 { - return fmt.Errorf("Error validating the quota settings: %v", errors) - } - - // Handle Replaces - if o != nil && n != nil { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/quota/limit"), - Value: aws.String(strconv.Itoa(d["limit"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/quota/offset"), - Value: aws.String(strconv.Itoa(d["offset"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("replace"), - Path: aws.String("/quota/period"), - Value: aws.String(d["period"].(string)), - }) - } - - // Handle Additions - if o == nil && n != nil { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/quota/limit"), - Value: aws.String(strconv.Itoa(d["limit"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/quota/offset"), - Value: aws.String(strconv.Itoa(d["offset"].(int))), - }) - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/quota/period"), - Value: aws.String(d["period"].(string)), - }) - } - } - } - - params := &apigateway.UpdateUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - PatchOperations: operations, - } - - _, err := conn.UpdateUsagePlan(params) - if err != nil { - return fmt.Errorf("Error updating API Gateway Usage Plan: %s", err) - } - - return resourceAwsApiGatewayUsagePlanRead(d, meta) -} - -func 
resourceAwsApiGatewayUsagePlanDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - // Removing existing api stages associated - if apistages, ok := d.GetOk("api_stages"); ok { - log.Printf("[DEBUG] Deleting API Stages associated with Usage Plan: %s", d.Id()) - stages := apistages.([]interface{}) - operations := []*apigateway.PatchOperation{} - - for _, v := range stages { - sv := v.(map[string]interface{}) - - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/apiStages"), - Value: aws.String(fmt.Sprintf("%s:%s", sv["api_id"].(string), sv["stage"].(string))), - }) - } - - _, err := conn.UpdateUsagePlan(&apigateway.UpdateUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - PatchOperations: operations, - }) - if err != nil { - return fmt.Errorf("Error removing API Stages associated with Usage Plan: %s", err) - } - } - - log.Printf("[DEBUG] Deleting API Gateway Usage Plan: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{ - UsagePlanId: aws.String(d.Id()), - }) - - if err == nil { - return nil - } - - return resource.NonRetryableError(err) - }) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key.go b/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key.go deleted file mode 100644 index 2433da48b..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsApiGatewayUsagePlanKey() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsApiGatewayUsagePlanKeyCreate, - Read: 
resourceAwsApiGatewayUsagePlanKeyRead, - Delete: resourceAwsApiGatewayUsagePlanKeyDelete, - - Schema: map[string]*schema.Schema{ - "key_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "key_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "usage_plan_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": { - Type: schema.TypeString, - Computed: true, - }, - - "value": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Print("[DEBUG] Creating API Gateway Usage Plan Key") - - params := &apigateway.CreateUsagePlanKeyInput{ - KeyId: aws.String(d.Get("key_id").(string)), - KeyType: aws.String(d.Get("key_type").(string)), - UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), - } - - up, err := conn.CreateUsagePlanKey(params) - if err != nil { - return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err) - } - - d.SetId(*up.Id) - - return resourceAwsApiGatewayUsagePlanKeyRead(d, meta) -} - -func resourceAwsApiGatewayUsagePlanKeyRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %s", d.Id()) - - up, err := conn.GetUsagePlanKey(&apigateway.GetUsagePlanKeyInput{ - UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), - KeyId: aws.String(d.Get("key_id").(string)), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - d.SetId("") - return nil - } - return err - } - - d.Set("name", up.Name) - d.Set("value", up.Value) - - return nil -} - -func resourceAwsApiGatewayUsagePlanKeyDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).apigateway - - log.Printf("[DEBUG] Deleting API Gateway Usage Plan Key: %s", d.Id()) - - return 
resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{ - UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), - KeyId: aws.String(d.Get("key_id").(string)), - }) - if err == nil { - return nil - } - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { - return nil - } - - return resource.NonRetryableError(err) - }) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key_test.go b/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key_test.go deleted file mode 100644 index 608a88fd2..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_key_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAPIGatewayUsagePlanKey_basic(t *testing.T) { - var conf apigateway.UsagePlanKey - name := acctest.RandString(10) - updatedName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanKeyBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", 
"usage_plan_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanKeyBasicUpdatedConfig(updatedName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "usage_plan_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanKeyBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "usage_plan_id"), - resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayUsagePlanKeyExists(n string, res *apigateway.UsagePlanKey) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return 
fmt.Errorf("No API Gateway Usage Plan Key ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetUsagePlanKeyInput{ - UsagePlanId: aws.String(rs.Primary.Attributes["usage_plan_id"]), - KeyId: aws.String(rs.Primary.Attributes["key_id"]), - } - up, err := conn.GetUsagePlanKey(req) - if err != nil { - return err - } - - log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %#v", up) - - if *up.Id != rs.Primary.ID { - return fmt.Errorf("API Gateway Usage Plan Key not found") - } - - *res = *up - - return nil - } -} - -func testAccCheckAWSAPIGatewayUsagePlanKeyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigateway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_api_gateway_usage_plan_key" { - continue - } - - req := &apigateway.GetUsagePlanKeyInput{ - UsagePlanId: aws.String(rs.Primary.ID), - KeyId: aws.String(rs.Primary.Attributes["key_id"]), - } - describe, err := conn.GetUsagePlanKey(req) - - if err == nil { - if describe.Id != nil && *describe.Id == rs.Primary.ID { - return fmt.Errorf("API Gateway Usage Plan Key still exists") - } - } - - aws2err, ok := err.(awserr.Error) - if !ok { - return err - } - if aws2err.Code() != "NotFoundException" { - return err - } - - return nil - } - - return nil -} - -const testAccAWSAPIGatewayUsagePlanKeyConfig = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" -} - -resource "aws_api_gateway_method_response" "error" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = 
"${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - status_code = "400" -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - - type = "HTTP" - uri = "https://www.google.de" - integration_http_method = "GET" -} - -resource "aws_api_gateway_integration_response" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_integration.test.http_method}" - status_code = "${aws_api_gateway_method_response.error.status_code}" -} - -resource "aws_api_gateway_deployment" "test" { - depends_on = ["aws_api_gateway_integration.test"] - - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - stage_name = "test" - description = "This is a test" - - variables = { - "a" = "2" - } -} - -resource "aws_api_gateway_deployment" "foo" { - depends_on = ["aws_api_gateway_integration.test"] - - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - stage_name = "foo" - description = "This is a prod stage" -} - -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" -} - -resource "aws_api_gateway_usage_plan" "secondary" { - name = "secondary-%s" -} - -resource "aws_api_gateway_api_key" "mykey" { - name = "demo-%s" - - stage_key { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - stage_name = "${aws_api_gateway_deployment.foo.stage_name}" - } -} -` - -func testAccAWSApiGatewayUsagePlanKeyBasicConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanKeyConfig+` -resource "aws_api_gateway_usage_plan_key" "main" { - key_id = "${aws_api_gateway_api_key.mykey.id}" - key_type = "API_KEY" - usage_plan_id = "${aws_api_gateway_usage_plan.main.id}" -} -`, rName, rName, rName) -} - -func testAccAWSApiGatewayUsagePlanKeyBasicUpdatedConfig(rName string) string { 
- return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanKeyConfig+` -resource "aws_api_gateway_usage_plan_key" "main" { - key_id = "${aws_api_gateway_api_key.mykey.id}" - key_type = "API_KEY" - usage_plan_id = "${aws_api_gateway_usage_plan.secondary.id}" -} -`, rName, rName, rName) -} diff --git a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_test.go b/builtin/providers/aws/resource_aws_api_gateway_usage_plan_test.go deleted file mode 100644 index 13d7afc2d..000000000 --- a/builtin/providers/aws/resource_aws_api_gateway_usage_plan_test.go +++ /dev/null @@ -1,557 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { - var conf apigateway.UsagePlan - name := acctest.RandString(10) - updatedName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(updatedName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", updatedName), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), - ), - }, - { - Config: 
testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { - var conf apigateway.UsagePlan - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a new description"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { - var conf apigateway.UsagePlan - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE2"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { 
- var conf apigateway.UsagePlan - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.burst_limit", "2"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.burst_limit", "3"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.rate_limit", "6"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - 
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { - var conf apigateway.UsagePlan - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanQuotaConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.limit", "100"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.offset", "6"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.period", "WEEK"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.limit", "200"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.offset", "20"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", 
"quota_settings.3909168194.period", "MONTH"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) { - var conf apigateway.UsagePlan - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - // Create UsagePlan WITH Stages as the API calls are different - // when creating or updating. - { - Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), - ), - }, - // Handle api stages removal - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), - ), - }, - // Handle api stages additions - { - Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - 
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), - ), - }, - // Handle api stages updates - { - Config: testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "foo"), - ), - }, - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), - ), - }, - }, - }) -} - -func testAccCheckAWSAPIGatewayUsagePlanExists(n string, res *apigateway.UsagePlan) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No API Gateway Usage Plan ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).apigateway - - req := &apigateway.GetUsagePlanInput{ - UsagePlanId: aws.String(rs.Primary.ID), - } - up, err := conn.GetUsagePlan(req) - if err != nil { - return err - } - - if *up.Id != rs.Primary.ID { - return fmt.Errorf("APIGateway Usage Plan not found") - } - - *res = *up - - return nil - } -} - -func testAccCheckAWSAPIGatewayUsagePlanDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigateway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_api_gateway_usage_plan" { - continue - } - - req := &apigateway.GetUsagePlanInput{ - UsagePlanId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID), - } - describe, 
err := conn.GetUsagePlan(req) - - if err == nil { - if describe.Id != nil && *describe.Id == rs.Primary.ID { - return fmt.Errorf("API Gateway Usage Plan still exists") - } - } - - aws2err, ok := err.(awserr.Error) - if !ok { - return err - } - if aws2err.Code() != "NotFoundException" { - return err - } - - return nil - } - - return nil -} - -const testAccAWSAPIGatewayUsagePlanConfig = ` -resource "aws_api_gateway_rest_api" "test" { - name = "test" -} - -resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" -} - -resource "aws_api_gateway_method_response" "error" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - status_code = "400" -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_method.test.http_method}" - - type = "HTTP" - uri = "https://www.google.de" - integration_http_method = "GET" -} - -resource "aws_api_gateway_integration_response" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "${aws_api_gateway_integration.test.http_method}" - status_code = "${aws_api_gateway_method_response.error.status_code}" -} - -resource "aws_api_gateway_deployment" "test" { - depends_on = ["aws_api_gateway_integration.test"] - - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - stage_name = "test" - description = "This is a test" - - variables = { - "a" = "2" - } -} - -resource 
"aws_api_gateway_deployment" "foo" { - depends_on = ["aws_api_gateway_integration.test"] - - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - stage_name = "foo" - description = "This is a prod stage" -} -` - -func testAccAWSApiGatewayUsagePlanBasicConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanDescriptionConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - description = "This is a description" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - description = "This is a new description" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanProductCodeConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - product_code = "MYCODE" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - product_code = "MYCODE2" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanThrottlingConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - throttle_settings { - burst_limit = 2 - rate_limit = 5 - } -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(rName 
string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - throttle_settings { - burst_limit = 3 - rate_limit = 6 - } -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanQuotaConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - quota_settings { - limit = 100 - offset = 6 - period = "WEEK" - } -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - quota_settings { - limit = 200 - offset = 20 - period = "MONTH" - } -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanApiStagesConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - api_stages { - api_id = "${aws_api_gateway_rest_api.test.id}" - stage = "${aws_api_gateway_deployment.test.stage_name}" - } -} -`, rName) -} - -func testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(rName string) string { - return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { - name = "%s" - - api_stages { - api_id = "${aws_api_gateway_rest_api.test.id}" - stage = "${aws_api_gateway_deployment.foo.stage_name}" - } -} -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy.go b/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy.go deleted file mode 100644 index ecdc8eff4..000000000 --- a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy.go +++ /dev/null @@ -1,215 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - 
"github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAppCookieStickinessPolicy() *schema.Resource { - return &schema.Resource{ - // There is no concept of "updating" an App Stickiness policy in - // the AWS API. - Create: resourceAwsAppCookieStickinessPolicyCreate, - Read: resourceAwsAppCookieStickinessPolicyRead, - Delete: resourceAwsAppCookieStickinessPolicyDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - return - }, - }, - - "load_balancer": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "lb_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "cookie_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsAppCookieStickinessPolicyCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - // Provision the AppStickinessPolicy - acspOpts := &elb.CreateAppCookieStickinessPolicyInput{ - CookieName: aws.String(d.Get("cookie_name").(string)), - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - PolicyName: aws.String(d.Get("name").(string)), - } - - if _, err := elbconn.CreateAppCookieStickinessPolicy(acspOpts); err != nil { - return fmt.Errorf("Error creating AppCookieStickinessPolicy: %s", err) - } - - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{aws.String(d.Get("name").(string))}, - } - - if _, err := 
elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error setting AppCookieStickinessPolicy: %s", err) - } - - d.SetId(fmt.Sprintf("%s:%d:%s", - *acspOpts.LoadBalancerName, - *setLoadBalancerOpts.LoadBalancerPort, - *acspOpts.PolicyName)) - return nil -} - -func resourceAwsAppCookieStickinessPolicyRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, lbPort, policyName := resourceAwsAppCookieStickinessPolicyParseId(d.Id()) - - request := &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(lbName), - PolicyNames: []*string{aws.String(policyName)}, - } - - getResp, err := elbconn.DescribeLoadBalancerPolicies(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound" { - d.SetId("") - } - return nil - } - return fmt.Errorf("Error retrieving policy: %s", err) - } - if len(getResp.PolicyDescriptions) != 1 { - return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) - } - - // we know the policy exists now, but we have to check if it's assigned to a listener - assigned, err := resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort, elbconn) - if err != nil { - return err - } - if !assigned { - // policy exists, but isn't assigned to a listener - log.Printf("[DEBUG] policy '%s' exists, but isn't assigned to a listener", policyName) - d.SetId("") - return nil - } - - // We can get away with this because there's only one attribute, the - // cookie expiration, in these descriptions. 
- policyDesc := getResp.PolicyDescriptions[0] - cookieAttr := policyDesc.PolicyAttributeDescriptions[0] - if *cookieAttr.AttributeName != "CookieName" { - return fmt.Errorf("Unable to find cookie Name.") - } - d.Set("cookie_name", cookieAttr.AttributeValue) - - d.Set("name", policyName) - d.Set("load_balancer", lbName) - d.Set("lb_port", lbPort) - - return nil -} - -// Determine if a particular policy is assigned to an ELB listener -func resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort string, elbconn *elb.ELB) (bool, error) { - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(lbName)}, - } - describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "LoadBalancerNotFound" { - return false, nil - } - } - return false, fmt.Errorf("Error retrieving ELB description: %s", err) - } - - if len(describeResp.LoadBalancerDescriptions) != 1 { - return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - lb := describeResp.LoadBalancerDescriptions[0] - assigned := false - for _, listener := range lb.ListenerDescriptions { - if lbPort != strconv.Itoa(int(*listener.Listener.LoadBalancerPort)) { - continue - } - - for _, name := range listener.PolicyNames { - if policyName == *name { - assigned = true - break - } - } - } - - return assigned, nil -} - -func resourceAwsAppCookieStickinessPolicyDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId(d.Id()) - - // Perversely, if we Set an empty list of PolicyNames, we detach the - // policies attached to a listener, which is required to delete the - // policy itself. 
- setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{}, - } - - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error removing AppCookieStickinessPolicy: %s", err) - } - - request := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(lbName), - PolicyName: aws.String(policyName), - } - - if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { - return fmt.Errorf("Error deleting App stickiness policy %s: %s", d.Id(), err) - } - return nil -} - -// resourceAwsAppCookieStickinessPolicyParseId takes an ID and parses it into -// it's constituent parts. You need three axes (LB name, policy name, and LB -// port) to create or identify a stickiness policy in AWS's API. -func resourceAwsAppCookieStickinessPolicyParseId(id string) (string, string, string) { - parts := strings.SplitN(id, ":", 3) - return parts[0], parts[1], parts[2] -} diff --git a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go b/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go deleted file mode 100644 index ed0d25a46..000000000 --- a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAppCookieStickinessPolicy_basic(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAppCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAppCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppCookieStickinessPolicy( - "aws_elb.lb", - "aws_app_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - Config: testAccAppCookieStickinessPolicyConfigUpdate(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppCookieStickinessPolicy( - "aws_elb.lb", - "aws_app_cookie_stickiness_policy.foo", - ), - ), - }, - }, - }) -} - -func TestAccAWSAppCookieStickinessPolicy_missingLB(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - // check that we can destroy the policy if the LB is missing - removeLB := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - deleteElbOpts := elb.DeleteLoadBalancerInput{ - LoadBalancerName: aws.String(lbName), - } - if _, err := conn.DeleteLoadBalancer(&deleteElbOpts); err != nil { - t.Fatalf("Error deleting ELB: %s", err) - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAppCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAppCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppCookieStickinessPolicy( - "aws_elb.lb", - "aws_app_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - PreConfig: removeLB, - Config: testAccAppCookieStickinessPolicyConfigDestroy(lbName), - }, - }, - }) -} - -func testAccCheckAppCookieStickinessPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_app_cookie_stickiness_policy" { - continue - } - - lbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId( - rs.Primary.ID) - out, err := 
conn.DescribeLoadBalancerPolicies( - &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(lbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - - if len(out.PolicyDescriptions) > 0 { - return fmt.Errorf("Policy still exists") - } - } - return nil -} - -func testAccCheckAppCookieStickinessPolicy(elbResource string, policyResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[elbResource] - if !ok { - return fmt.Errorf("Not found: %s", elbResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - policy, ok := s.RootModule().Resources[policyResource] - if !ok { - return fmt.Errorf("Not found: %s", policyResource) - } - - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - elbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId(policy.Primary.ID) - _, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(elbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - - if err != nil { - return err - } - - return nil - } -} - -// ensure the policy is re-added is it goes missing -func TestAccAWSAppCookieStickinessPolicy_drift(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - // We only want to remove the reference to the policy from the listner, - // beacause that's all that can be done via the console. 
- removePolicy := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(80), - PolicyNames: []*string{}, - } - - if _, err := conn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - t.Fatalf("Error removing AppCookieStickinessPolicy: %s", err) - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAppCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAppCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppCookieStickinessPolicy( - "aws_elb.lb", - "aws_app_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - PreConfig: removePolicy, - Config: testAccAppCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppCookieStickinessPolicy( - "aws_elb.lb", - "aws_app_cookie_stickiness_policy.foo", - ), - ), - }, - }, - }) -} - -func testAccAppCookieStickinessPolicyConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_app_cookie_stickiness_policy" "foo" { - name = "foo-policy" - load_balancer = "${aws_elb.lb.id}" - lb_port = 80 - cookie_name = "MyAppCookie" -}`, rName) -} - -// Change the cookie_name to "MyOtherAppCookie". 
-func testAccAppCookieStickinessPolicyConfigUpdate(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_app_cookie_stickiness_policy" "foo" { - name = "foo-policy" - load_balancer = "${aws_elb.lb.id}" - lb_port = 80 - cookie_name = "MyOtherAppCookie" -}`, rName) -} - -// attempt to destroy the policy, but we'll delete the LB in the PreConfig -func testAccAppCookieStickinessPolicyConfigDestroy(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy.go b/builtin/providers/aws/resource_aws_appautoscaling_policy.go deleted file mode 100644 index e75e76152..000000000 --- a/builtin/providers/aws/resource_aws_appautoscaling_policy.go +++ /dev/null @@ -1,327 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAppautoscalingPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAppautoscalingPolicyCreate, - Read: resourceAwsAppautoscalingPolicyRead, - Update: resourceAwsAppautoscalingPolicyUpdate, - Delete: resourceAwsAppautoscalingPolicyDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873 - value 
:= v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf("%s cannot be longer than 255 characters", k)) - } - return - }, - }, - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "policy_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "StepScaling", - }, - "resource_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "scalable_dimension": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAppautoscalingScalableDimension, - }, - "service_namespace": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAppautoscalingServiceNamespace, - }, - "adjustment_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cooldown": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "metric_aggregation_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "min_adjustment_magnitude": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "alarms": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "step_adjustment": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_interval_lower_bound": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "metric_interval_upper_bound": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "scaling_adjustment": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - Set: resourceAwsAppautoscalingAdjustmentHash, - }, - }, - } -} - -func resourceAwsAppautoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).appautoscalingconn - - params, err := getAwsAppautoscalingPutScalingPolicyInput(d) - if err != nil { - return err - } - - 
log.Printf("[DEBUG] ApplicationAutoScaling PutScalingPolicy: %#v", params) - resp, err := conn.PutScalingPolicy(¶ms) - if err != nil { - return fmt.Errorf("Error putting scaling policy: %s", err) - } - - d.Set("arn", resp.PolicyARN) - d.SetId(d.Get("name").(string)) - log.Printf("[INFO] ApplicationAutoScaling scaling PolicyARN: %s", d.Get("arn").(string)) - - return resourceAwsAppautoscalingPolicyRead(d, meta) -} - -func resourceAwsAppautoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { - p, err := getAwsAppautoscalingPolicy(d, meta) - if err != nil { - return err - } - if p == nil { - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Read ApplicationAutoScaling policy: %s, SP: %s, Obj: %s", d.Get("name"), d.Get("name"), p) - - d.Set("arn", p.PolicyARN) - d.Set("name", p.PolicyName) - d.Set("policy_type", p.PolicyType) - d.Set("resource_id", p.ResourceId) - d.Set("scalable_dimension", p.ScalableDimension) - d.Set("service_namespace", p.ServiceNamespace) - d.Set("alarms", p.Alarms) - d.Set("step_scaling_policy_configuration", p.StepScalingPolicyConfiguration) - - return nil -} - -func resourceAwsAppautoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).appautoscalingconn - - params, inputErr := getAwsAppautoscalingPutScalingPolicyInput(d) - if inputErr != nil { - return inputErr - } - - log.Printf("[DEBUG] Application Autoscaling Update Scaling Policy: %#v", params) - _, err := conn.PutScalingPolicy(¶ms) - if err != nil { - return err - } - - return resourceAwsAppautoscalingPolicyRead(d, meta) -} - -func resourceAwsAppautoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).appautoscalingconn - p, err := getAwsAppautoscalingPolicy(d, meta) - if err != nil { - return fmt.Errorf("Error getting policy: %s", err) - } - if p == nil { - return nil - } - - params := applicationautoscaling.DeleteScalingPolicyInput{ - PolicyName: 
aws.String(d.Get("name").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - ScalableDimension: aws.String(d.Get("scalable_dimension").(string)), - ServiceNamespace: aws.String(d.Get("service_namespace").(string)), - } - log.Printf("[DEBUG] Deleting Application AutoScaling Policy opts: %#v", params) - if _, err := conn.DeleteScalingPolicy(¶ms); err != nil { - return fmt.Errorf("Application AutoScaling Policy: %s", err) - } - - d.SetId("") - return nil -} - -// Takes the result of flatmap.Expand for an array of step adjustments and -// returns a []*applicationautoscaling.StepAdjustment. -func expandAppautoscalingStepAdjustments(configured []interface{}) ([]*applicationautoscaling.StepAdjustment, error) { - var adjustments []*applicationautoscaling.StepAdjustment - - // Loop over our configured step adjustments and create an array - // of aws-sdk-go compatible objects. We're forced to convert strings - // to floats here because there's no way to detect whether or not - // an uninitialized, optional schema element is "0.0" deliberately. - // With strings, we can test for "", which is definitely an empty - // struct value. - for _, raw := range configured { - data := raw.(map[string]interface{}) - a := &applicationautoscaling.StepAdjustment{ - ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), - } - if data["metric_interval_lower_bound"] != "" { - bound := data["metric_interval_lower_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_lower_bound must be a float value represented as a string") - } - a.MetricIntervalLowerBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_lower_bound isn't a string. This is a bug. 
Please file an issue.") - } - } - if data["metric_interval_upper_bound"] != "" { - bound := data["metric_interval_upper_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_upper_bound must be a float value represented as a string") - } - a.MetricIntervalUpperBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.") - } - } - adjustments = append(adjustments, a) - } - - return adjustments, nil -} - -func getAwsAppautoscalingPutScalingPolicyInput(d *schema.ResourceData) (applicationautoscaling.PutScalingPolicyInput, error) { - var params = applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String(d.Get("name").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - } - - if v, ok := d.GetOk("policy_type"); ok { - params.PolicyType = aws.String(v.(string)) - } - - if v, ok := d.GetOk("service_namespace"); ok { - params.ServiceNamespace = aws.String(v.(string)) - } - - if v, ok := d.GetOk("scalable_dimension"); ok { - params.ScalableDimension = aws.String(v.(string)) - } - - var adjustmentSteps []*applicationautoscaling.StepAdjustment - if v, ok := d.GetOk("step_adjustment"); ok { - steps, err := expandAppautoscalingStepAdjustments(v.(*schema.Set).List()) - if err != nil { - return params, fmt.Errorf("metric_interval_lower_bound and metric_interval_upper_bound must be strings!") - } - adjustmentSteps = steps - } - - // build StepScalingPolicyConfiguration - params.StepScalingPolicyConfiguration = &applicationautoscaling.StepScalingPolicyConfiguration{ - AdjustmentType: aws.String(d.Get("adjustment_type").(string)), - Cooldown: aws.Int64(int64(d.Get("cooldown").(int))), - MetricAggregationType: aws.String(d.Get("metric_aggregation_type").(string)), - StepAdjustments: adjustmentSteps, - } - - if v, ok := d.GetOk("min_adjustment_magnitude"); ok { - 
params.StepScalingPolicyConfiguration.MinAdjustmentMagnitude = aws.Int64(int64(v.(int))) - } - - return params, nil -} - -func getAwsAppautoscalingPolicy(d *schema.ResourceData, meta interface{}) (*applicationautoscaling.ScalingPolicy, error) { - conn := meta.(*AWSClient).appautoscalingconn - - params := applicationautoscaling.DescribeScalingPoliciesInput{ - PolicyNames: []*string{aws.String(d.Get("name").(string))}, - ServiceNamespace: aws.String(d.Get("service_namespace").(string)), - } - - log.Printf("[DEBUG] Application AutoScaling Policy Describe Params: %#v", params) - resp, err := conn.DescribeScalingPolicies(¶ms) - if err != nil { - return nil, fmt.Errorf("Error retrieving scaling policies: %s", err) - } - - // find scaling policy - name := d.Get("name") - for idx, sp := range resp.ScalingPolicies { - if *sp.PolicyName == name { - return resp.ScalingPolicies[idx], nil - } - } - - // policy not found - return nil, nil -} - -func resourceAwsAppautoscalingAdjustmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["metric_interval_lower_bound"]; ok { - buf.WriteString(fmt.Sprintf("%f-", v)) - } - if v, ok := m["metric_interval_upper_bound"]; ok { - buf.WriteString(fmt.Sprintf("%f-", v)) - } - buf.WriteString(fmt.Sprintf("%d-", m["scaling_adjustment"].(int))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go deleted file mode 100644 index 36abf932c..000000000 --- a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccAWSAppautoScalingPolicy_basic(t *testing.T) { - var policy applicationautoscaling.ScalingPolicy - - randClusterName := fmt.Sprintf("cluster%s", acctest.RandString(10)) - randPolicyName := fmt.Sprintf("terraform-test-foobar-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAppautoscalingPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAppautoscalingPolicyConfig(randClusterName, randPolicyName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAppautoscalingPolicyExists("aws_appautoscaling_policy.foobar_simple", &policy), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "adjustment_type", "ChangeInCapacity"), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "policy_type", "StepScaling"), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "cooldown", "60"), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "name", randPolicyName), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "resource_id", fmt.Sprintf("service/%s/foobar", randClusterName)), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "service_namespace", "ecs"), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "scalable_dimension", "ecs:service:DesiredCount"), - ), - }, - }, - }) -} - -func TestAccAWSAppautoScalingPolicy_spotFleetRequest(t *testing.T) { - var policy applicationautoscaling.ScalingPolicy - - randPolicyName := fmt.Sprintf("test-appautoscaling-policy-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAppautoscalingPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccAWSAppautoscalingPolicySpotFleetRequestConfig(randPolicyName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAppautoscalingPolicyExists("aws_appautoscaling_policy.test", &policy), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.test", "name", randPolicyName), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.test", "service_namespace", "ec2"), - resource.TestCheckResourceAttr("aws_appautoscaling_policy.test", "scalable_dimension", "ec2:spot-fleet-request:TargetCapacity"), - ), - }, - }, - }) -} - -func testAccCheckAWSAppautoscalingPolicyExists(n string, policy *applicationautoscaling.ScalingPolicy) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn - params := &applicationautoscaling.DescribeScalingPoliciesInput{ - ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), - PolicyNames: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeScalingPolicies(params) - if err != nil { - return err - } - if len(resp.ScalingPolicies) == 0 { - return fmt.Errorf("ScalingPolicy %s not found", rs.Primary.ID) - } - - return nil - } -} - -func testAccCheckAWSAppautoscalingPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn - - for _, rs := range s.RootModule().Resources { - params := applicationautoscaling.DescribeScalingPoliciesInput{ - ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), - PolicyNames: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeScalingPolicies(¶ms) - - if err == nil { - if len(resp.ScalingPolicies) != 0 && - *resp.ScalingPolicies[0].PolicyName == rs.Primary.ID { - return fmt.Errorf("Application autoscaling policy still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func 
testAccAWSAppautoscalingPolicyConfig( - randClusterName string, - randPolicyName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "autoscale_role" { - name = "%s" - path = "/" - - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_role_policy" "autoscale_role_policy" { - name = "%s" - role = "${aws_iam_role.autoscale_role.id}" - - policy = < 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 229 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 229 characters, name is limited to 255", k)) - } - return - }, - }, - - "launch_configuration": { - Type: schema.TypeString, - Required: true, - }, - - "desired_capacity": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "min_elb_capacity": { - Type: schema.TypeInt, - Optional: true, - }, - - "min_size": { - Type: schema.TypeInt, - Required: true, - }, - - "max_size": { - Type: schema.TypeInt, - Required: true, - }, - - "default_cooldown": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "force_delete": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "health_check_grace_period": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - }, - - "health_check_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "availability_zones": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "placement_group": { - Type: schema.TypeString, - Optional: true, - }, - - "load_balancers": { - Type: schema.TypeSet, - 
Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "vpc_zone_identifier": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "termination_policies": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "wait_for_capacity_timeout": { - Type: schema.TypeString, - Optional: true, - Default: "10m", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - duration, err := time.ParseDuration(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as a duration: %s", k, err)) - } - if duration < 0 { - errors = append(errors, fmt.Errorf( - "%q must be greater than zero", k)) - } - return - }, - }, - - "wait_for_elb_capacity": { - Type: schema.TypeInt, - Optional: true, - }, - - "enabled_metrics": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "suspended_processes": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "metrics_granularity": { - Type: schema.TypeString, - Optional: true, - Default: "1Minute", - }, - - "protect_from_scale_in": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "target_group_arns": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "initial_lifecycle_hook": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "default_result": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "heartbeat_timeout": 
{ - Type: schema.TypeInt, - Optional: true, - }, - "lifecycle_transition": { - Type: schema.TypeString, - Required: true, - }, - "notification_metadata": { - Type: schema.TypeString, - Optional: true, - }, - "notification_target_arn": { - Type: schema.TypeString, - Optional: true, - }, - "role_arn": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "tag": autoscalingTagSchema(), - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - ConflictsWith: []string{"tag"}, - }, - }, - } -} - -func generatePutLifecycleHookInputs(asgName string, cfgs []interface{}) []autoscaling.PutLifecycleHookInput { - res := make([]autoscaling.PutLifecycleHookInput, 0, len(cfgs)) - - for _, raw := range cfgs { - cfg := raw.(map[string]interface{}) - - input := autoscaling.PutLifecycleHookInput{ - AutoScalingGroupName: &asgName, - LifecycleHookName: aws.String(cfg["name"].(string)), - } - - if v, ok := cfg["default_result"]; ok && v.(string) != "" { - input.DefaultResult = aws.String(v.(string)) - } - - if v, ok := cfg["heartbeat_timeout"]; ok && v.(int) > 0 { - input.HeartbeatTimeout = aws.Int64(int64(v.(int))) - } - - if v, ok := cfg["lifecycle_transition"]; ok && v.(string) != "" { - input.LifecycleTransition = aws.String(v.(string)) - } - - if v, ok := cfg["notification_metadata"]; ok && v.(string) != "" { - input.NotificationMetadata = aws.String(v.(string)) - } - - if v, ok := cfg["notification_target_arn"]; ok && v.(string) != "" { - input.NotificationTargetARN = aws.String(v.(string)) - } - - if v, ok := cfg["role_arn"]; ok && v.(string) != "" { - input.RoleARN = aws.String(v.(string)) - } - - res = append(res, input) - } - - return res -} - -func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - - var asgName string - if v, ok := d.GetOk("name"); ok { - asgName = v.(string) - } else { - if v, ok := 
d.GetOk("name_prefix"); ok { - asgName = resource.PrefixedUniqueId(v.(string)) - } else { - asgName = resource.PrefixedUniqueId("tf-asg-") - } - d.Set("name", asgName) - } - - createOpts := autoscaling.CreateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String(asgName), - LaunchConfigurationName: aws.String(d.Get("launch_configuration").(string)), - NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)), - } - updateOpts := autoscaling.UpdateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String(asgName), - } - - initialLifecycleHooks := d.Get("initial_lifecycle_hook").(*schema.Set).List() - twoPhases := len(initialLifecycleHooks) > 0 - - minSize := aws.Int64(int64(d.Get("min_size").(int))) - maxSize := aws.Int64(int64(d.Get("max_size").(int))) - - if twoPhases { - createOpts.MinSize = aws.Int64(int64(0)) - createOpts.MaxSize = aws.Int64(int64(0)) - - updateOpts.MinSize = minSize - updateOpts.MaxSize = maxSize - - if v, ok := d.GetOk("desired_capacity"); ok { - updateOpts.DesiredCapacity = aws.Int64(int64(v.(int))) - } - } else { - createOpts.MinSize = minSize - createOpts.MaxSize = maxSize - - if v, ok := d.GetOk("desired_capacity"); ok { - createOpts.DesiredCapacity = aws.Int64(int64(v.(int))) - } - } - - // Availability Zones are optional if VPC Zone Identifer(s) are specified - if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) - } - - resourceID := d.Get("name").(string) - if v, ok := d.GetOk("tag"); ok { - var err error - createOpts.Tags, err = autoscalingTagsFromMap( - setToMapByKey(v.(*schema.Set), "key"), resourceID) - if err != nil { - return err - } - } - - if v, ok := d.GetOk("tags"); ok { - tags, err := autoscalingTagsFromList(v.([]interface{}), resourceID) - if err != nil { - return err - } - - createOpts.Tags = append(createOpts.Tags, tags...) 
- } - - if v, ok := d.GetOk("default_cooldown"); ok { - createOpts.DefaultCooldown = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" { - createOpts.HealthCheckType = aws.String(v.(string)) - } - - if v, ok := d.GetOk("health_check_grace_period"); ok { - createOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("placement_group"); ok { - createOpts.PlacementGroup = aws.String(v.(string)) - } - - if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 { - createOpts.LoadBalancerNames = expandStringList( - v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 { - createOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { - createOpts.TerminationPolicies = expandStringList(v.([]interface{})) - } - - if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 { - createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List()) - } - - log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts) - _, err := conn.CreateAutoScalingGroup(&createOpts) - if err != nil { - return fmt.Errorf("Error creating AutoScaling Group: %s", err) - } - - d.SetId(d.Get("name").(string)) - log.Printf("[INFO] AutoScaling Group ID: %s", d.Id()) - - if twoPhases { - for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) { - if err = resourceAwsAutoscalingLifecycleHookPutOp(conn, &hook); err != nil { - return fmt.Errorf("Error creating initial lifecycle hooks: %s", err) - } - } - - _, err = conn.UpdateAutoScalingGroup(&updateOpts) - if err != nil { - return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err) - } - } - - if err := waitForASGCapacity(d, meta, capacitySatisfiedCreate); err != nil { - return err - } - - if _, ok := 
d.GetOk("suspended_processes"); ok { - suspendedProcessesErr := enableASGSuspendedProcesses(d, conn) - if suspendedProcessesErr != nil { - return suspendedProcessesErr - } - } - - if _, ok := d.GetOk("enabled_metrics"); ok { - metricsErr := enableASGMetricsCollection(d, conn) - if metricsErr != nil { - return metricsErr - } - } - - return resourceAwsAutoscalingGroupRead(d, meta) -} - -func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - - g, err := getAwsAutoscalingGroup(d.Id(), conn) - if err != nil { - return err - } - if g == nil { - log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) - d.SetId("") - return nil - } - - d.Set("availability_zones", flattenStringList(g.AvailabilityZones)) - d.Set("default_cooldown", g.DefaultCooldown) - d.Set("arn", g.AutoScalingGroupARN) - d.Set("desired_capacity", g.DesiredCapacity) - d.Set("health_check_grace_period", g.HealthCheckGracePeriod) - d.Set("health_check_type", g.HealthCheckType) - d.Set("launch_configuration", g.LaunchConfigurationName) - d.Set("load_balancers", flattenStringList(g.LoadBalancerNames)) - - if err := d.Set("suspended_processes", flattenAsgSuspendedProcesses(g.SuspendedProcesses)); err != nil { - log.Printf("[WARN] Error setting suspended_processes for %q: %s", d.Id(), err) - } - if err := d.Set("target_group_arns", flattenStringList(g.TargetGroupARNs)); err != nil { - log.Printf("[ERR] Error setting target groups: %s", err) - } - d.Set("min_size", g.MinSize) - d.Set("max_size", g.MaxSize) - d.Set("placement_group", g.PlacementGroup) - d.Set("name", g.AutoScalingGroupName) - - var tagList, tagsList []*autoscaling.TagDescription - var tagOk, tagsOk bool - var v interface{} - - if v, tagOk = d.GetOk("tag"); tagOk { - tags := setToMapByKey(v.(*schema.Set), "key") - for _, t := range g.Tags { - if _, ok := tags[*t.Key]; ok { - tagList = append(tagList, t) - } - } - d.Set("tag", autoscalingTagDescriptionsToSlice(tagList)) 
- } - - if v, tagsOk = d.GetOk("tags"); tagsOk { - tags := map[string]struct{}{} - for _, tag := range v.([]interface{}) { - attr, ok := tag.(map[string]interface{}) - if !ok { - continue - } - - key, ok := attr["key"].(string) - if !ok { - continue - } - - tags[key] = struct{}{} - } - - for _, t := range g.Tags { - if _, ok := tags[*t.Key]; ok { - tagsList = append(tagsList, t) - } - } - d.Set("tags", autoscalingTagDescriptionsToSlice(tagsList)) - } - - if !tagOk && !tagsOk { - d.Set("tag", autoscalingTagDescriptionsToSlice(g.Tags)) - } - - d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ",")) - d.Set("protect_from_scale_in", g.NewInstancesProtectedFromScaleIn) - - // If no termination polices are explicitly configured and the upstream state - // is only using the "Default" policy, clear the state to make it consistent - // with the default AWS create API behavior. - _, ok := d.GetOk("termination_policies") - if !ok && len(g.TerminationPolicies) == 1 && *g.TerminationPolicies[0] == "Default" { - d.Set("termination_policies", []interface{}{}) - } else { - d.Set("termination_policies", flattenStringList(g.TerminationPolicies)) - } - - if g.EnabledMetrics != nil { - if err := d.Set("enabled_metrics", flattenAsgEnabledMetrics(g.EnabledMetrics)); err != nil { - log.Printf("[WARN] Error setting metrics for (%s): %s", d.Id(), err) - } - d.Set("metrics_granularity", g.EnabledMetrics[0].Granularity) - } - - return nil -} - -func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - shouldWaitForCapacity := false - - opts := autoscaling.UpdateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String(d.Id()), - } - - opts.NewInstancesProtectedFromScaleIn = aws.Bool(d.Get("protect_from_scale_in").(bool)) - - if d.HasChange("default_cooldown") { - opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int))) - } - - if d.HasChange("desired_capacity") { - opts.DesiredCapacity 
= aws.Int64(int64(d.Get("desired_capacity").(int))) - shouldWaitForCapacity = true - } - - if d.HasChange("launch_configuration") { - opts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string)) - } - - if d.HasChange("min_size") { - opts.MinSize = aws.Int64(int64(d.Get("min_size").(int))) - shouldWaitForCapacity = true - } - - if d.HasChange("max_size") { - opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int))) - } - - if d.HasChange("health_check_grace_period") { - opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) - } - - if d.HasChange("health_check_type") { - opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) - opts.HealthCheckType = aws.String(d.Get("health_check_type").(string)) - } - - if d.HasChange("vpc_zone_identifier") { - opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) - } - - if d.HasChange("availability_zones") { - if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - opts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) - } - } - - if d.HasChange("placement_group") { - opts.PlacementGroup = aws.String(d.Get("placement_group").(string)) - } - - if d.HasChange("termination_policies") { - // If the termination policy is set to null, we need to explicitly set - // it back to "Default", or the API won't reset it for us. 
- if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { - opts.TerminationPolicies = expandStringList(v.([]interface{})) - } else { - log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'") - opts.TerminationPolicies = aws.StringSlice([]string{"Default"}) - } - } - - if err := setAutoscalingTags(conn, d); err != nil { - return err - } - - if d.HasChange("tag") { - d.SetPartial("tag") - } - - if d.HasChange("tags") { - d.SetPartial("tags") - } - - log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts) - _, err := conn.UpdateAutoScalingGroup(&opts) - if err != nil { - d.Partial(true) - return fmt.Errorf("Error updating Autoscaling group: %s", err) - } - - if d.HasChange("load_balancers") { - - o, n := d.GetChange("load_balancers") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if len(remove) > 0 { - _, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{ - AutoScalingGroupName: aws.String(d.Id()), - LoadBalancerNames: remove, - }) - if err != nil { - return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err) - } - } - - if len(add) > 0 { - _, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{ - AutoScalingGroupName: aws.String(d.Id()), - LoadBalancerNames: add, - }) - if err != nil { - return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err) - } - } - } - - if d.HasChange("target_group_arns") { - - o, n := d.GetChange("target_group_arns") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := 
expandStringList(ns.Difference(os).List()) - - if len(remove) > 0 { - _, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{ - AutoScalingGroupName: aws.String(d.Id()), - TargetGroupARNs: remove, - }) - if err != nil { - return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) - } - } - - if len(add) > 0 { - _, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{ - AutoScalingGroupName: aws.String(d.Id()), - TargetGroupARNs: add, - }) - if err != nil { - return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) - } - } - } - - if shouldWaitForCapacity { - if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil { - return errwrap.Wrapf("Error waiting for AutoScaling Group Capacity: {{err}}", err) - } - } - - if d.HasChange("enabled_metrics") { - if err := updateASGMetricsCollection(d, conn); err != nil { - return errwrap.Wrapf("Error updating AutoScaling Group Metrics collection: {{err}}", err) - } - } - - if d.HasChange("suspended_processes") { - if err := updateASGSuspendedProcesses(d, conn); err != nil { - return errwrap.Wrapf("Error updating AutoScaling Group Suspended Processes: {{err}}", err) - } - } - - return resourceAwsAutoscalingGroupRead(d, meta) -} - -func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - - // Read the autoscaling group first. If it doesn't exist, we're done. - // We need the group in order to check if there are instances attached. - // If so, we need to remove those first. 
- g, err := getAwsAutoscalingGroup(d.Id(), conn) - if err != nil { - return err - } - if g == nil { - log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) - d.SetId("") - return nil - } - if len(g.Instances) > 0 || *g.DesiredCapacity > 0 { - if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil { - return err - } - } - - log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id()) - deleteopts := autoscaling.DeleteAutoScalingGroupInput{ - AutoScalingGroupName: aws.String(d.Id()), - ForceDelete: aws.Bool(d.Get("force_delete").(bool)), - } - - // We retry the delete operation to handle InUse/InProgress errors coming - // from scaling operations. We should be able to sneak in a delete in between - // scaling operations within 5m. - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - if _, err := conn.DeleteAutoScalingGroup(&deleteopts); err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Code() { - case "InvalidGroup.NotFound": - // Already gone? Sure! - return nil - case "ResourceInUse", "ScalingActivityInProgress": - // These are retryable - return resource.RetryableError(awserr) - } - } - // Didn't recognize the error, so shouldn't retry. 
- return resource.NonRetryableError(err) - } - // Successful delete - return nil - }) - if err != nil { - return err - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - if g, _ = getAwsAutoscalingGroup(d.Id(), conn); g != nil { - return resource.RetryableError( - fmt.Errorf("Auto Scaling Group still exists")) - } - return nil - }) -} - -func getAwsAutoscalingGroup( - asgName string, - conn *autoscaling.AutoScaling) (*autoscaling.Group, error) { - - describeOpts := autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{aws.String(asgName)}, - } - - log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts) - describeGroups, err := conn.DescribeAutoScalingGroups(&describeOpts) - if err != nil { - autoscalingerr, ok := err.(awserr.Error) - if ok && autoscalingerr.Code() == "InvalidGroup.NotFound" { - return nil, nil - } - - return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err) - } - - // Search for the autoscaling group - for idx, asc := range describeGroups.AutoScalingGroups { - if *asc.AutoScalingGroupName == asgName { - return describeGroups.AutoScalingGroups[idx], nil - } - } - - return nil, nil -} - -func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).autoscalingconn - - if d.Get("force_delete").(bool) { - log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.") - return nil - } - - // First, set the capacity to zero so the group will drain - log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") - opts := autoscaling.UpdateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String(d.Id()), - DesiredCapacity: aws.Int64(0), - MinSize: aws.Int64(0), - MaxSize: aws.Int64(0), - } - if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil { - return fmt.Errorf("Error setting capacity to zero to drain: %s", err) - } - - // Next, wait for the autoscale group to drain - log.Printf("[DEBUG] Waiting for 
group to have zero instances") - return resource.Retry(10*time.Minute, func() *resource.RetryError { - g, err := getAwsAutoscalingGroup(d.Id(), conn) - if err != nil { - return resource.NonRetryableError(err) - } - if g == nil { - log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) - d.SetId("") - return nil - } - - if len(g.Instances) == 0 { - return nil - } - - return resource.RetryableError( - fmt.Errorf("group still has %d instances", len(g.Instances))) - }) -} - -func enableASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - props := &autoscaling.ScalingProcessQuery{ - AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(d.Get("suspended_processes").(*schema.Set).List()), - } - - _, err := conn.SuspendProcesses(props) - if err != nil { - return err - } - - return nil -} - -func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - props := &autoscaling.EnableMetricsCollectionInput{ - AutoScalingGroupName: aws.String(d.Id()), - Granularity: aws.String(d.Get("metrics_granularity").(string)), - Metrics: expandStringList(d.Get("enabled_metrics").(*schema.Set).List()), - } - - log.Printf("[INFO] Enabling metrics collection for the ASG: %s", d.Id()) - _, metricsErr := conn.EnableMetricsCollection(props) - if metricsErr != nil { - return metricsErr - } - - return nil -} - -func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - o, n := d.GetChange("suspended_processes") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - resumeProcesses := os.Difference(ns) - if resumeProcesses.Len() != 0 { - props := &autoscaling.ScalingProcessQuery{ - AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(resumeProcesses.List()), - } - - _, err := conn.ResumeProcesses(props) - if err != nil { - return fmt.Errorf("Error Resuming 
Processes for ASG %q: %s", d.Id(), err) - } - } - - suspendedProcesses := ns.Difference(os) - if suspendedProcesses.Len() != 0 { - props := &autoscaling.ScalingProcessQuery{ - AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(suspendedProcesses.List()), - } - - _, err := conn.SuspendProcesses(props) - if err != nil { - return fmt.Errorf("Error Suspending Processes for ASG %q: %s", d.Id(), err) - } - } - - return nil - -} - -func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - - o, n := d.GetChange("enabled_metrics") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - disableMetrics := os.Difference(ns) - if disableMetrics.Len() != 0 { - props := &autoscaling.DisableMetricsCollectionInput{ - AutoScalingGroupName: aws.String(d.Id()), - Metrics: expandStringList(disableMetrics.List()), - } - - _, err := conn.DisableMetricsCollection(props) - if err != nil { - return fmt.Errorf("Failure to Disable metrics collection types for ASG %s: %s", d.Id(), err) - } - } - - enabledMetrics := ns.Difference(os) - if enabledMetrics.Len() != 0 { - props := &autoscaling.EnableMetricsCollectionInput{ - AutoScalingGroupName: aws.String(d.Id()), - Metrics: expandStringList(enabledMetrics.List()), - Granularity: aws.String(d.Get("metrics_granularity").(string)), - } - - _, err := conn.EnableMetricsCollection(props) - if err != nil { - return fmt.Errorf("Failure to Enable metrics collection types for ASG %s: %s", d.Id(), err) - } - } - - return nil -} - -// getELBInstanceStates returns a mapping of the instance states of all the ELBs attached to the -// provided ASG. -// -// Note that this is the instance state function for ELB Classic. 
-// -// Nested like: lbName -> instanceId -> instanceState -func getELBInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) { - lbInstanceStates := make(map[string]map[string]string) - elbconn := meta.(*AWSClient).elbconn - - for _, lbName := range g.LoadBalancerNames { - lbInstanceStates[*lbName] = make(map[string]string) - opts := &elb.DescribeInstanceHealthInput{LoadBalancerName: lbName} - r, err := elbconn.DescribeInstanceHealth(opts) - if err != nil { - return nil, err - } - for _, is := range r.InstanceStates { - if is.InstanceId == nil || is.State == nil { - continue - } - lbInstanceStates[*lbName][*is.InstanceId] = *is.State - } - } - - return lbInstanceStates, nil -} - -// getTargetGroupInstanceStates returns a mapping of the instance states of -// all the ALB target groups attached to the provided ASG. -// -// Note that this is the instance state function for Application Load -// Balancing (aka ELBv2). -// -// Nested like: targetGroupARN -> instanceId -> instanceState -func getTargetGroupInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) { - targetInstanceStates := make(map[string]map[string]string) - elbv2conn := meta.(*AWSClient).elbv2conn - - for _, targetGroupARN := range g.TargetGroupARNs { - targetInstanceStates[*targetGroupARN] = make(map[string]string) - opts := &elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroupARN} - r, err := elbv2conn.DescribeTargetHealth(opts) - if err != nil { - return nil, err - } - for _, desc := range r.TargetHealthDescriptions { - if desc.Target == nil || desc.Target.Id == nil || desc.TargetHealth == nil || desc.TargetHealth.State == nil { - continue - } - targetInstanceStates[*targetGroupARN][*desc.Target.Id] = *desc.TargetHealth.State - } - } - - return targetInstanceStates, nil -} - -func expandVpcZoneIdentifiers(list []interface{}) *string { - strs := make([]string, len(list)) - for _, s := range list { - strs = append(strs, 
s.(string)) - } - return aws.String(strings.Join(strs, ",")) -} diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go deleted file mode 100644 index 8a898f700..000000000 --- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go +++ /dev/null @@ -1,1792 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func init() { - resource.AddTestSweepers("aws_autoscaling_group", &resource.Sweeper{ - Name: "aws_autoscaling_group", - F: testSweepAutoscalingGroups, - }) -} - -func testSweepAutoscalingGroups(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - conn := client.(*AWSClient).autoscalingconn - - resp, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{}) - if err != nil { - return fmt.Errorf("Error retrieving launch configuration: %s", err) - } - - if len(resp.AutoScalingGroups) == 0 { - log.Print("[DEBUG] No aws autoscaling groups to sweep") - return nil - } - - for _, asg := range resp.AutoScalingGroups { - var testOptGroup bool - for _, testName := range []string{"foobar", "terraform-"} { - if strings.HasPrefix(*asg.AutoScalingGroupName, testName) { - testOptGroup = true - } - } - - if !testOptGroup { - continue - } - - deleteopts := autoscaling.DeleteAutoScalingGroupInput{ - AutoScalingGroupName: asg.AutoScalingGroupName, - ForceDelete: aws.Bool(true), - } - - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - if _, err := 
conn.DeleteAutoScalingGroup(&deleteopts); err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Code() { - case "InvalidGroup.NotFound": - return nil - case "ResourceInUse", "ScalingActivityInProgress": - return resource.RetryableError(awserr) - } - } - - // Didn't recognize the error, so shouldn't retry. - return resource.NonRetryableError(err) - } - // Successful delete - return nil - }) - if err != nil { - return err - } - } - - return nil -} - -func TestAccAWSAutoScalingGroup_basic(t *testing.T) { - var group autoscaling.Group - var lc autoscaling.LaunchConfiguration - - randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_autoscaling_group.bar", - IDRefreshIgnore: []string{"force_delete", "metrics_granularity", "wait_for_capacity_timeout"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSAutoScalingGroupHealthyCapacity(&group, 2), - testAccCheckAWSAutoScalingGroupAttributes(&group, randName), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "name", randName), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "max_size", "5"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "min_size", "2"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "health_check_grace_period", "300"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "health_check_type", "ELB"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "desired_capacity", "4"), - 
resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "force_delete", "true"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "protect_from_scale_in", "false"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfigUpdate(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "desired_capacity", "5"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "protect_from_scale_in", "true"), - testLaunchConfigurationName("aws_autoscaling_group.bar", &lc), - testAccCheckAutoscalingTags(&group.Tags, "FromTags1Changed", map[string]interface{}{ - "value": "value1changed", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags2", map[string]interface{}{ - "value": "value2changed", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags3", map[string]interface{}{ - "value": "value3", - "propagate_at_launch": true, - }), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_namePrefix(t *testing.T) { - nameRegexp := regexp.MustCompile("^test-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - 
resource.TestMatchResourceAttr( - "aws_autoscaling_group.test", "name", nameRegexp), - resource.TestCheckResourceAttrSet( - "aws_autoscaling_group.test", "arn"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_autoGeneratedName(t *testing.T) { - asgNameRegexp := regexp.MustCompile("^tf-asg-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_autoGeneratedName, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr( - "aws_autoscaling_group.bar", "name", asgNameRegexp), - resource.TestCheckResourceAttrSet( - "aws_autoscaling_group.bar", "arn"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_terminationPolicies(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.#", "0"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_terminationPoliciesUpdate, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.#", "1"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_terminationPoliciesExplicitDefault, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.#", "1"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", 
"termination_policies.0", "Default"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_tags(t *testing.T) { - var group autoscaling.Group - - randName := fmt.Sprintf("tfautotags-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAutoscalingTags(&group.Tags, "FromTags1", map[string]interface{}{ - "value": "value1", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags2", map[string]interface{}{ - "value": "value2", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags3", map[string]interface{}{ - "value": "value3", - "propagate_at_launch": true, - }), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfigUpdate(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAutoscalingTagNotExists(&group.Tags, "Foo"), - testAccCheckAutoscalingTags(&group.Tags, "FromTags1Changed", map[string]interface{}{ - "value": "value1changed", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags2", map[string]interface{}{ - "value": "value2changed", - "propagate_at_launch": true, - }), - testAccCheckAutoscalingTags(&group.Tags, "FromTags3", map[string]interface{}{ - "value": "value3", - "propagate_at_launch": true, - }), - ), - }, - }, - }) -} - -func 
TestAccAWSAutoScalingGroup_VpcUpdates(t *testing.T) { - var group autoscaling.Group - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfigWithAZ, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.#", "1"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "vpc_zone_identifier.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfigWithVPCIdent, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(&group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.#", "1"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "vpc_zone_identifier.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) { - var group autoscaling.Group - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group), - ), - }, - }, - }) -} - -func 
TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) { - var group autoscaling.Group - - randName := fmt.Sprintf("tf_placement_test-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "placement_group", randName), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_enablingMetrics(t *testing.T) { - var group autoscaling.Group - randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckNoResourceAttr( - "aws_autoscaling_group.bar", "enabled_metrics"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "enabled_metrics.#", "5"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_suspendingProcesses(t *testing.T) { - var group autoscaling.Group - randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAutoScalingGroupConfig(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "suspended_processes.#", "0"), - ), - }, - { - Config: testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "suspended_processes.#", "2"), - ), - }, - { - Config: testAccAWSAutoScalingGroupConfigWithSuspendedProcessesUpdated(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "suspended_processes.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_withMetrics(t *testing.T) { - var group autoscaling.Group - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoscalingMetricsCollectionConfig_allMetricsCollected, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "enabled_metrics.#", "7"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "enabled_metrics.#", "5"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_ALB_TargetGroups(t *testing.T) { - 
var group autoscaling.Group - var tg elbv2.TargetGroup - var tg2 elbv2.TargetGroup - - testCheck := func(targets []*elbv2.TargetGroup) resource.TestCheckFunc { - return func(*terraform.State) error { - var ts []string - var gs []string - for _, t := range targets { - ts = append(ts, *t.TargetGroupArn) - } - - for _, s := range group.TargetGroupARNs { - gs = append(gs, *s) - } - - sort.Strings(ts) - sort.Strings(gs) - - if !reflect.DeepEqual(ts, gs) { - return fmt.Errorf("Error: target group match not found!\nASG Target groups: %#v\nTarget Group: %#v", ts, gs) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_pre, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "target_group_arns.#", "0"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post_duo, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg), - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test_more", &tg2), - testCheck([]*elbv2.TargetGroup{&tg, &tg2}), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "target_group_arns.#", "2"), - ), - }, - - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg), - 
testCheck([]*elbv2.TargetGroup{&tg}), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "target_group_arns.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_initialLifecycleHook(t *testing.T) { - var group autoscaling.Group - - randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_autoscaling_group.bar", - IDRefreshIgnore: []string{"force_delete", "metrics_granularity", "wait_for_capacity_timeout"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupWithHookConfig(randName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSAutoScalingGroupHealthyCapacity(&group, 2), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "initial_lifecycle_hook.#", "1"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "initial_lifecycle_hook.391359060.default_result", "CONTINUE"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "initial_lifecycle_hook.391359060.name", "launching"), - testAccCheckAWSAutoScalingGroupInitialLifecycleHookExists( - "aws_autoscaling_group.bar", "initial_lifecycle_hook.391359060.name"), - ), - }, - }, - }) -} - -func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) { - var group autoscaling.Group - var tg elbv2.TargetGroup - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg), - testAccCheckAWSALBTargetGroupHealthy(&tg), - ), - }, - }, - }) -} - -func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_autoscaling_group" { - continue - } - - // Try to find the Group - describeGroups, err := conn.DescribeAutoScalingGroups( - &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describeGroups.AutoScalingGroups) != 0 && - *describeGroups.AutoScalingGroups[0].AutoScalingGroupName == rs.Primary.ID { - return fmt.Errorf("AutoScaling Group still exists") - } - } - - // Verify the error - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidGroup.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *group.AvailabilityZones[0] != "us-west-2a" { - return fmt.Errorf("Bad availability_zones: %#v", group.AvailabilityZones[0]) - } - - if *group.AutoScalingGroupName != name { - return fmt.Errorf("Bad Autoscaling Group name, expected (%s), got (%s)", name, *group.AutoScalingGroupName) - } - - if *group.MaxSize != 5 { - return fmt.Errorf("Bad max_size: %d", *group.MaxSize) - } - - if *group.MinSize != 2 { - return fmt.Errorf("Bad max_size: %d", *group.MinSize) - } - - if *group.HealthCheckType != "ELB" { - return fmt.Errorf("Bad health_check_type,\nexpected: %s\ngot: %s", "ELB", *group.HealthCheckType) - } - - if *group.HealthCheckGracePeriod != 300 { - return fmt.Errorf("Bad health_check_grace_period: %d", *group.HealthCheckGracePeriod) - } - - if *group.DesiredCapacity != 4 { - 
return fmt.Errorf("Bad desired_capacity: %d", *group.DesiredCapacity) - } - - if *group.LaunchConfigurationName == "" { - return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName) - } - - t := &autoscaling.TagDescription{ - Key: aws.String("FromTags1"), - Value: aws.String("value1"), - PropagateAtLaunch: aws.Bool(true), - ResourceType: aws.String("auto-scaling-group"), - ResourceId: group.AutoScalingGroupName, - } - - if !reflect.DeepEqual(group.Tags[0], t) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.Tags[0], - t) - } - - return nil - } -} - -func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(group.LoadBalancerNames) != 1 { - return fmt.Errorf("Bad load_balancers: %v", group.LoadBalancerNames) - } - - return nil - } -} - -func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No AutoScaling Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - describeGroups, err := conn.DescribeAutoScalingGroups( - &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describeGroups.AutoScalingGroups) != 1 || - *describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID { - return fmt.Errorf("AutoScaling Group not found") - } - - *group = *describeGroups.AutoScalingGroups[0] - - return nil - } -} - -func testAccCheckAWSAutoScalingGroupInitialLifecycleHookExists(asg, hookAttr string) resource.TestCheckFunc { - return func(s *terraform.State) error { - asgResource, ok := s.RootModule().Resources[asg] - if !ok { - return 
fmt.Errorf("Not found: %s", asg) - } - - if asgResource.Primary.ID == "" { - return fmt.Errorf("No AutoScaling Group ID is set") - } - - hookName := asgResource.Primary.Attributes[hookAttr] - if hookName == "" { - return fmt.Errorf("ASG %s has no hook name %s", asg, hookAttr) - } - - return checkLifecycleHookExistsByName(asgResource.Primary.ID, hookName) - } -} - -func testLaunchConfigurationName(n string, lc *autoscaling.LaunchConfiguration) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if *lc.LaunchConfigurationName != rs.Primary.Attributes["launch_configuration"] { - return fmt.Errorf("Launch configuration names do not match") - } - - return nil - } -} - -func testAccCheckAWSAutoScalingGroupHealthyCapacity( - g *autoscaling.Group, exp int) resource.TestCheckFunc { - return func(s *terraform.State) error { - healthy := 0 - for _, i := range g.Instances { - if i.HealthStatus == nil { - continue - } - if strings.EqualFold(*i.HealthStatus, "Healthy") { - healthy++ - } - } - if healthy < exp { - return fmt.Errorf("Expected at least %d healthy, got %d.", exp, healthy) - } - return nil - } -} - -func testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(group *autoscaling.Group) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Grab Subnet Ids - var subnets []string - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_subnet" { - continue - } - subnets = append(subnets, rs.Primary.Attributes["id"]) - } - - if group.VPCZoneIdentifier == nil { - return fmt.Errorf("Bad VPC Zone Identifier\nexpected: %s\ngot nil", subnets) - } - - zones := strings.Split(*group.VPCZoneIdentifier, ",") - - remaining := len(zones) - for _, z := range zones { - for _, s := range subnets { - if z == s { - remaining-- - } - } - } - - if remaining != 0 { - return fmt.Errorf("Bad VPC Zone Identifier match\nexpected: %s\ngot:%s", zones, 
subnets) - } - - return nil - } -} - -// testAccCheckAWSALBTargetGroupHealthy checks an *elbv2.TargetGroup to make -// sure that all instances in it are healthy. -func testAccCheckAWSALBTargetGroupHealthy(res *elbv2.TargetGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbv2conn - - resp, err := conn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ - TargetGroupArn: res.TargetGroupArn, - }) - - if err != nil { - return err - } - - for _, target := range resp.TargetHealthDescriptions { - if target.TargetHealth == nil || target.TargetHealth.State == nil || *target.TargetHealth.State != "healthy" { - return errors.New("Not all instances in target group are healthy yet, but should be") - } - } - - return nil - } -} - -const testAccAWSAutoScalingGroupConfig_autoGeneratedName = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - desired_capacity = 0 - max_size = 0 - min_size = 0 - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -const testAccAWSAutoScalingGroupConfig_namePrefix = ` -resource "aws_launch_configuration" "test" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "test" { - availability_zones = ["us-west-2a"] - desired_capacity = 0 - max_size = 0 - min_size = 0 - name_prefix = "test-" - launch_configuration = "${aws_launch_configuration.test.name}" -} -` - -const testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - max_size = 0 - min_size = 0 - desired_capacity = 0 - - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -const 
testAccAWSAutoScalingGroupConfig_terminationPoliciesExplicitDefault = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - max_size = 0 - min_size = 0 - desired_capacity = 0 - termination_policies = ["Default"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -const testAccAWSAutoScalingGroupConfig_terminationPoliciesUpdate = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - max_size = 0 - min_size = 0 - desired_capacity = 0 - termination_policies = ["OldestInstance"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -func testAccAWSAutoScalingGroupConfig(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_placement_group" "test" { - name = "asg_pg_%s" - strategy = "cluster" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_type = "ELB" - desired_capacity = 4 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tags = [ - { - key = "FromTags1" - value = "value1" - propagate_at_launch = true - }, - { - key = "FromTags2" - value = "value2" - propagate_at_launch = true - }, - { - key = "FromTags3" - value = "value3" - propagate_at_launch = true - }, - ] -} -`, name, name) -} - -func testAccAWSAutoScalingGroupConfigUpdate(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_launch_configuration" "new" 
{ - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 5 - force_delete = true - termination_policies = ["ClosestToNextInstanceHour"] - protect_from_scale_in = true - - launch_configuration = "${aws_launch_configuration.new.name}" - - tags = [ - { - key = "FromTags1Changed" - value = "value1changed" - propagate_at_launch = true - }, - { - key = "FromTags2" - value = "value2changed" - propagate_at_launch = true - }, - { - key = "FromTags3" - value = "value3" - propagate_at_launch = true - }, - ] -} -`, name) -} - -func testAccAWSAutoScalingGroupImport(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_placement_group" "test" { - name = "asg_pg_%s" - strategy = "cluster" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_type = "ELB" - desired_capacity = 4 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "FromTags1" - value = "value1" - propagate_at_launch = true - } - - tag { - key = "FromTags2" - value = "value2" - propagate_at_launch = true - } - - tag { - key = "FromTags3" - value = "value3" - propagate_at_launch = true - } -} -`, name, name) -} - -const testAccAWSAutoScalingGroupConfigWithLoadBalancer = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { Name = "tf-asg-test" } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group" "foo" { - 
vpc_id="${aws_vpc.foo.id}" - - ingress { - protocol = "-1" - from_port = 0 - to_port = 0 - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - protocol = "-1" - from_port = 0 - to_port = 0 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elb" "bar" { - subnets = ["${aws_subnet.foo.id}"] - security_groups = ["${aws_security_group.foo.id}"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - health_check { - healthy_threshold = 2 - unhealthy_threshold = 2 - target = "HTTP:80/" - interval = 5 - timeout = 2 - } - - depends_on = ["aws_internet_gateway.gw"] -} - -resource "aws_launch_configuration" "foobar" { - // need an AMI that listens on :80 at boot, this is: - // bitnami-nginxstack-1.6.1-0-linux-ubuntu-14.04.1-x86_64-hvm-ebs-ami-99f5b1a9-3 - image_id = "ami-b5b3fc85" - instance_type = "t2.micro" - security_groups = ["${aws_security_group.foo.id}"] -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["${aws_subnet.foo.availability_zone}"] - vpc_zone_identifier = ["${aws_subnet.foo.id}"] - max_size = 2 - min_size = 2 - health_check_grace_period = 300 - health_check_type = "ELB" - wait_for_elb_capacity = 2 - force_delete = true - - launch_configuration = "${aws_launch_configuration.foobar.name}" - load_balancers = ["${aws_elb.bar.name}"] -} -` - -const testAccAWSAutoScalingGroupConfigWithAZ = ` -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - tags { - Name = "terraform-test" - } -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - tags { - Name = "terraform-test" - } -} - -resource "aws_launch_configuration" "foobar" { - image_id = "ami-b5b3fc85" - instance_type = "t2.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = [ - "us-west-2a" - ] - desired_capacity = 0 - max_size = 0 - min_size = 0 - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -const 
testAccAWSAutoScalingGroupConfigWithVPCIdent = ` -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - tags { - Name = "terraform-test" - } -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - tags { - Name = "terraform-test" - } -} - -resource "aws_launch_configuration" "foobar" { - image_id = "ami-b5b3fc85" - instance_type = "t2.micro" -} - -resource "aws_autoscaling_group" "bar" { - vpc_zone_identifier = [ - "${aws_subnet.main.id}", - ] - desired_capacity = 0 - max_size = 0 - min_size = 0 - launch_configuration = "${aws_launch_configuration.foobar.name}" -} -` - -func testAccAWSAutoScalingGroupConfig_withPlacementGroup(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "c3.large" -} - -resource "aws_placement_group" "test" { - name = "%s" - strategy = "cluster" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 1 - min_size = 1 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 1 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - placement_group = "${aws_placement_group.test.name}" - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} -`, name, name) -} - -const testAccAWSAutoscalingMetricsCollectionConfig_allMetricsCollected = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - max_size = 1 - min_size = 0 - health_check_grace_period = 300 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - launch_configuration = 
"${aws_launch_configuration.foobar.name}" - enabled_metrics = ["GroupTotalInstances", - "GroupPendingInstances", - "GroupTerminatingInstances", - "GroupDesiredCapacity", - "GroupInServiceInstances", - "GroupMinSize", - "GroupMaxSize" - ] - metrics_granularity = "1Minute" -} -` - -const testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected = ` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - max_size = 1 - min_size = 0 - health_check_grace_period = 300 - health_check_type = "EC2" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - launch_configuration = "${aws_launch_configuration.foobar.name}" - enabled_metrics = ["GroupTotalInstances", - "GroupPendingInstances", - "GroupTerminatingInstances", - "GroupDesiredCapacity", - "GroupMaxSize" - ] - metrics_granularity = "1Minute" -} -` - -const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_pre = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_alb_target_group" "test" { - name = "tf-example-alb-tg" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_subnet" "alt" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2b" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_launch_configuration" "foobar" { - # Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" - instance_type 
= "t2.micro" - enable_monitoring = false -} - -resource "aws_autoscaling_group" "bar" { - vpc_zone_identifier = [ - "${aws_subnet.main.id}", - "${aws_subnet.alt.id}", - ] - - max_size = 2 - min_size = 0 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = "${aws_launch_configuration.foobar.name}" - -} - -resource "aws_security_group" "tf_test_self" { - name = "tf_test_alb_asg" - description = "tf_test_alb_asg" - vpc_id = "${aws_vpc.default.id}" - - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} -` - -const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_alb_target_group" "test" { - name = "tf-example-alb-tg" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_subnet" "alt" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2b" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_launch_configuration" "foobar" { - # Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" - instance_type = "t2.micro" - enable_monitoring = false -} - -resource "aws_autoscaling_group" "bar" { - vpc_zone_identifier = [ - "${aws_subnet.main.id}", - "${aws_subnet.alt.id}", - ] - - target_group_arns = ["${aws_alb_target_group.test.arn}"] - - max_size = 2 - min_size = 
0 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = "${aws_launch_configuration.foobar.name}" - -} - -resource "aws_security_group" "tf_test_self" { - name = "tf_test_alb_asg" - description = "tf_test_alb_asg" - vpc_id = "${aws_vpc.default.id}" - - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} -` - -const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post_duo = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_alb_target_group" "test" { - name = "tf-example-alb-tg" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_alb_target_group" "test_more" { - name = "tf-example-alb-tg-more" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_subnet" "alt" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2b" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} - -resource "aws_launch_configuration" "foobar" { - # Golang-base from cts-hashi aws account, shared with tf testing account - image_id = "ami-1817d178" - instance_type = "t2.micro" - enable_monitoring = false -} - -resource "aws_autoscaling_group" "bar" { - vpc_zone_identifier = [ - "${aws_subnet.main.id}", - "${aws_subnet.alt.id}", - ] - - target_group_arns = [ - "${aws_alb_target_group.test.arn}", - "${aws_alb_target_group.test_more.arn}", - ] - - max_size = 2 - 
min_size = 0 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = "${aws_launch_configuration.foobar.name}" - -} - -resource "aws_security_group" "tf_test_self" { - name = "tf_test_alb_asg" - description = "tf_test_alb_asg" - vpc_id = "${aws_vpc.default.id}" - - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup" - } -} -` - -func testAccAWSAutoScalingGroupWithHookConfig(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_type = "ELB" - desired_capacity = 4 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - initial_lifecycle_hook { - name = "launching" - default_result = "CONTINUE" - heartbeat_timeout = 30 # minimum value - lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING" - } -} -`, name) -} - -func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - enable_dns_hostnames = "true" - enable_dns_support = "true" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity" - } -} - -resource "aws_alb" "test_lb" { - subnets = ["${aws_subnet.main.id}", "${aws_subnet.alt.id}"] - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity" - } -} - -resource "aws_alb_listener" "test_listener" { - load_balancer_arn = "${aws_alb.test_lb.arn}" - port = "80" - - default_action { - 
target_group_arn = "${aws_alb_target_group.test.arn}" - type = "forward" - } -} - -resource "aws_alb_target_group" "test" { - name = "tf-alb-test-%d" - port = 80 - protocol = "HTTP" - vpc_id = "${aws_vpc.default.id}" - - health_check { - path = "/" - healthy_threshold = "2" - timeout = "2" - interval = "5" - } - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity" - } -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity" - } -} - -resource "aws_subnet" "alt" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2b" - - tags { - Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity" - } -} - -resource "aws_internet_gateway" "internet_gateway" { - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_route_table" "route_table" { - vpc_id = "${aws_vpc.default.id}" -} - -resource "aws_route_table_association" "route_table_association_main" { - subnet_id = "${aws_subnet.main.id}" - route_table_id = "${aws_route_table.route_table.id}" -} - -resource "aws_route_table_association" "route_table_association_alt" { - subnet_id = "${aws_subnet.alt.id}" - route_table_id = "${aws_route_table.route_table.id}" -} - -resource "aws_route" "public_default_route" { - route_table_id = "${aws_route_table.route_table.id}" - destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.internet_gateway.id}" -} - -data "aws_ami" "test_ami" { - most_recent = true - - filter { - name = "owner-alias" - values = ["amazon"] - } - - filter { - name = "name" - values = ["amzn-ami-hvm-*-x86_64-gp2"] - } -} - -resource "aws_launch_configuration" "foobar" { - image_id = "${data.aws_ami.test_ami.id}" - instance_type = "t2.micro" - associate_public_ip_address = "true" - - user_data = < /var/www/html/index.html -chkconfig httpd on -service 
httpd start -EOS -} - -resource "aws_autoscaling_group" "bar" { - vpc_zone_identifier = [ - "${aws_subnet.main.id}", - "${aws_subnet.alt.id}", - ] - - target_group_arns = ["${aws_alb_target_group.test.arn}"] - - max_size = 2 - min_size = 2 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 2 - wait_for_elb_capacity = 2 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = "${aws_launch_configuration.foobar.name}" -}`, rInt) -} - -func testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_placement_group" "test" { - name = "asg_pg_%s" - strategy = "cluster" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_type = "ELB" - desired_capacity = 4 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - suspended_processes = ["AlarmNotification","ScheduledActions"] - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} -`, name, name) -} - -func testAccAWSAutoScalingGroupConfigWithSuspendedProcessesUpdated(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_placement_group" "test" { - name = "asg_pg_%s" - strategy = "cluster" -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = ["us-west-2a"] - name = "%s" - max_size = 5 - min_size = 2 - health_check_type = "ELB" - desired_capacity = 4 - force_delete = true - termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] - - launch_configuration = "${aws_launch_configuration.foobar.name}" - - suspended_processes = 
["AZRebalance","ScheduledActions"] - - tag { - key = "Foo" - value = "foo-bar" - propagate_at_launch = true - } -} -`, name, name) -} diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_waiting.go b/builtin/providers/aws/resource_aws_autoscaling_group_waiting.go deleted file mode 100644 index 1c27bb813..000000000 --- a/builtin/providers/aws/resource_aws_autoscaling_group_waiting.go +++ /dev/null @@ -1,166 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -// waitForASGCapacityTimeout gathers the current numbers of healthy instances -// in the ASG and its attached ELBs and yields these numbers to a -// capacitySatifiedFunction. Loops for up to wait_for_capacity_timeout until -// the capacitySatisfiedFunc returns true. -// -// See "Waiting for Capacity" in docs for more discussion of the feature. 
-func waitForASGCapacity( - d *schema.ResourceData, - meta interface{}, - satisfiedFunc capacitySatisfiedFunc) error { - wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string)) - if err != nil { - return err - } - - if wait == 0 { - log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.") - return nil - } - - log.Printf("[DEBUG] Waiting on %s for capacity...", d.Id()) - - err = resource.Retry(wait, func() *resource.RetryError { - g, err := getAwsAutoscalingGroup(d.Id(), meta.(*AWSClient).autoscalingconn) - if err != nil { - return resource.NonRetryableError(err) - } - if g == nil { - log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) - d.SetId("") - return nil - } - elbis, err := getELBInstanceStates(g, meta) - albis, err := getTargetGroupInstanceStates(g, meta) - if err != nil { - return resource.NonRetryableError(err) - } - - haveASG := 0 - haveELB := 0 - - for _, i := range g.Instances { - if i.HealthStatus == nil || i.InstanceId == nil || i.LifecycleState == nil { - continue - } - - if !strings.EqualFold(*i.HealthStatus, "Healthy") { - continue - } - - if !strings.EqualFold(*i.LifecycleState, "InService") { - continue - } - - haveASG++ - - inAllLbs := true - for _, states := range elbis { - state, ok := states[*i.InstanceId] - if !ok || !strings.EqualFold(state, "InService") { - inAllLbs = false - } - } - for _, states := range albis { - state, ok := states[*i.InstanceId] - if !ok || !strings.EqualFold(state, "healthy") { - inAllLbs = false - } - } - if inAllLbs { - haveELB++ - } - } - - satisfied, reason := satisfiedFunc(d, haveASG, haveELB) - - log.Printf("[DEBUG] %q Capacity: %d ASG, %d ELB/ALB, satisfied: %t, reason: %q", - d.Id(), haveASG, haveELB, satisfied, reason) - - if satisfied { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Waiting up to %s: %s", d.Id(), wait, reason)) - }) - - if err == nil { - return nil - } - - recentStatus := "" - - conn := 
meta.(*AWSClient).autoscalingconn - resp, aErr := conn.DescribeScalingActivities(&autoscaling.DescribeScalingActivitiesInput{ - AutoScalingGroupName: aws.String(d.Id()), - MaxRecords: aws.Int64(1), - }) - if aErr == nil { - if len(resp.Activities) > 0 { - recentStatus = fmt.Sprintf("%s", resp.Activities[0]) - } else { - recentStatus = "(0 activities found)" - } - } else { - recentStatus = fmt.Sprintf("(Failed to describe scaling activities: %s)", aErr) - } - - msg := fmt.Sprintf("{{err}}. Most recent activity: %s", recentStatus) - return errwrap.Wrapf(msg, err) -} - -type capacitySatisfiedFunc func(*schema.ResourceData, int, int) (bool, string) - -// capacitySatisfiedCreate treats all targets as minimums -func capacitySatisfiedCreate(d *schema.ResourceData, haveASG, haveELB int) (bool, string) { - minASG := d.Get("min_size").(int) - if wantASG := d.Get("desired_capacity").(int); wantASG > 0 { - minASG = wantASG - } - if haveASG < minASG { - return false, fmt.Sprintf( - "Need at least %d healthy instances in ASG, have %d", minASG, haveASG) - } - minELB := d.Get("min_elb_capacity").(int) - if wantELB := d.Get("wait_for_elb_capacity").(int); wantELB > 0 { - minELB = wantELB - } - if haveELB < minELB { - return false, fmt.Sprintf( - "Need at least %d healthy instances in ELB, have %d", minELB, haveELB) - } - return true, "" -} - -// capacitySatisfiedUpdate only cares about specific targets -func capacitySatisfiedUpdate(d *schema.ResourceData, haveASG, haveELB int) (bool, string) { - if wantASG := d.Get("desired_capacity").(int); wantASG > 0 { - if haveASG != wantASG { - return false, fmt.Sprintf( - "Need exactly %d healthy instances in ASG, have %d", wantASG, haveASG) - } - } - if wantELB := d.Get("wait_for_elb_capacity").(int); wantELB > 0 { - if haveELB != wantELB { - return false, fmt.Sprintf( - "Need exactly %d healthy instances in ELB, have %d", wantELB, haveELB) - } - } - return true, "" -} diff --git 
a/builtin/providers/aws/resource_aws_autoscaling_group_waiting_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_waiting_test.go deleted file mode 100644 index fa27102e8..000000000 --- a/builtin/providers/aws/resource_aws_autoscaling_group_waiting_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package aws - -import "testing" - -func TestCapacitySatisfiedCreate(t *testing.T) { - cases := map[string]struct { - Data map[string]interface{} - HaveASG int - HaveELB int - ExpectSatisfied bool - ExpectReason string - }{ - "min_size, have less": { - Data: map[string]interface{}{ - "min_size": 5, - }, - HaveASG: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ASG, have 2", - }, - "min_size, got it": { - Data: map[string]interface{}{ - "min_size": 5, - }, - HaveASG: 5, - ExpectSatisfied: true, - }, - "min_size, have more": { - Data: map[string]interface{}{ - "min_size": 5, - }, - HaveASG: 10, - ExpectSatisfied: true, - }, - "desired_capacity, have less": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ASG, have 2", - }, - "desired_capacity overrides min_size": { - Data: map[string]interface{}{ - "min_size": 2, - "desired_capacity": 5, - }, - HaveASG: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ASG, have 2", - }, - "desired_capacity, got it": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 5, - ExpectSatisfied: true, - }, - "desired_capacity, have more": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 10, - ExpectSatisfied: true, - }, - - "min_elb_capacity, have less": { - Data: map[string]interface{}{ - "min_elb_capacity": 5, - }, - HaveELB: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ELB, have 2", - }, - "min_elb_capacity, got it": { - Data: map[string]interface{}{ - "min_elb_capacity": 5, - }, - 
HaveELB: 5, - ExpectSatisfied: true, - }, - "min_elb_capacity, have more": { - Data: map[string]interface{}{ - "min_elb_capacity": 5, - }, - HaveELB: 10, - ExpectSatisfied: true, - }, - "wait_for_elb_capacity, have less": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ELB, have 2", - }, - "wait_for_elb_capacity, got it": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 5, - ExpectSatisfied: true, - }, - "wait_for_elb_capacity, have more": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 10, - ExpectSatisfied: true, - }, - "wait_for_elb_capacity overrides min_elb_capacity": { - Data: map[string]interface{}{ - "min_elb_capacity": 2, - "wait_for_elb_capacity": 5, - }, - HaveELB: 2, - ExpectSatisfied: false, - ExpectReason: "Need at least 5 healthy instances in ELB, have 2", - }, - } - - r := resourceAwsAutoscalingGroup() - for tn, tc := range cases { - d := r.TestResourceData() - for k, v := range tc.Data { - if err := d.Set(k, v); err != nil { - t.Fatalf("err: %s", err) - } - } - gotSatisfied, gotReason := capacitySatisfiedCreate(d, tc.HaveASG, tc.HaveELB) - - if gotSatisfied != tc.ExpectSatisfied { - t.Fatalf("%s: expected satisfied: %t, got: %t (reason: %s)", - tn, tc.ExpectSatisfied, gotSatisfied, gotReason) - } - - if gotReason != tc.ExpectReason { - t.Fatalf("%s: expected reason: %s, got: %s", - tn, tc.ExpectReason, gotReason) - } - } -} - -func TestCapacitySatisfiedUpdate(t *testing.T) { - cases := map[string]struct { - Data map[string]interface{} - HaveASG int - HaveELB int - ExpectSatisfied bool - ExpectReason string - }{ - "default is satisfied": { - Data: map[string]interface{}{}, - ExpectSatisfied: true, - }, - "desired_capacity, have less": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 2, - ExpectSatisfied: false, - ExpectReason: "Need exactly 5 
healthy instances in ASG, have 2", - }, - "desired_capacity, got it": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 5, - ExpectSatisfied: true, - }, - "desired_capacity, have more": { - Data: map[string]interface{}{ - "desired_capacity": 5, - }, - HaveASG: 10, - ExpectSatisfied: false, - ExpectReason: "Need exactly 5 healthy instances in ASG, have 10", - }, - "wait_for_elb_capacity, have less": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 2, - ExpectSatisfied: false, - ExpectReason: "Need exactly 5 healthy instances in ELB, have 2", - }, - "wait_for_elb_capacity, got it": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 5, - ExpectSatisfied: true, - }, - "wait_for_elb_capacity, have more": { - Data: map[string]interface{}{ - "wait_for_elb_capacity": 5, - }, - HaveELB: 10, - ExpectSatisfied: false, - ExpectReason: "Need exactly 5 healthy instances in ELB, have 10", - }, - } - - r := resourceAwsAutoscalingGroup() - for tn, tc := range cases { - d := r.TestResourceData() - for k, v := range tc.Data { - if err := d.Set(k, v); err != nil { - t.Fatalf("err: %s", err) - } - } - gotSatisfied, gotReason := capacitySatisfiedUpdate(d, tc.HaveASG, tc.HaveELB) - - if gotSatisfied != tc.ExpectSatisfied { - t.Fatalf("%s: expected satisfied: %t, got: %t (reason: %s)", - tn, tc.ExpectSatisfied, gotSatisfied, gotReason) - } - - if gotReason != tc.ExpectReason { - t.Fatalf("%s: expected reason: %s, got: %s", - tn, tc.ExpectReason, gotReason) - } - } -} diff --git a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook.go b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook.go deleted file mode 100644 index 60622345e..000000000 --- a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook.go +++ /dev/null @@ -1,194 +0,0 @@ -package aws - -import ( - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsAutoscalingLifecycleHook() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsAutoscalingLifecycleHookPut, - Read: resourceAwsAutoscalingLifecycleHookRead, - Update: resourceAwsAutoscalingLifecycleHookPut, - Delete: resourceAwsAutoscalingLifecycleHookDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "autoscaling_group_name": { - Type: schema.TypeString, - Required: true, - }, - "default_result": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "heartbeat_timeout": { - Type: schema.TypeInt, - Optional: true, - }, - "lifecycle_transition": { - Type: schema.TypeString, - Required: true, - }, - "notification_metadata": { - Type: schema.TypeString, - Optional: true, - }, - "notification_target_arn": { - Type: schema.TypeString, - Optional: true, - }, - "role_arn": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAwsAutoscalingLifecycleHookPutOp(conn *autoscaling.AutoScaling, params *autoscaling.PutLifecycleHookInput) error { - log.Printf("[DEBUG] AutoScaling PutLifecyleHook: %s", params) - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.PutLifecycleHook(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if strings.Contains(awsErr.Message(), "Unable to publish test message to notification target") { - return resource.RetryableError(errwrap.Wrapf("[DEBUG] Retrying AWS AutoScaling Lifecycle Hook: {{err}}", awsErr)) - } - } - return resource.NonRetryableError(errwrap.Wrapf("Error putting lifecycle hook: {{err}}", err)) - } - return nil - }) -} - -func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).autoscalingconn - params := getAwsAutoscalingPutLifecycleHookInput(d) - - if err := resourceAwsAutoscalingLifecycleHookPutOp(conn, ¶ms); err != nil { - return err - } - - d.SetId(d.Get("name").(string)) - - return resourceAwsAutoscalingLifecycleHookRead(d, meta) -} - -func resourceAwsAutoscalingLifecycleHookRead(d *schema.ResourceData, meta interface{}) error { - p, err := getAwsAutoscalingLifecycleHook(d, meta) - if err != nil { - return err - } - if p == nil { - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Read Lifecycle Hook: ASG: %s, SH: %s, Obj: %#v", d.Get("autoscaling_group_name"), d.Get("name"), p) - - d.Set("default_result", p.DefaultResult) - d.Set("heartbeat_timeout", p.HeartbeatTimeout) - d.Set("lifecycle_transition", p.LifecycleTransition) - d.Set("notification_metadata", p.NotificationMetadata) - d.Set("notification_target_arn", p.NotificationTargetARN) - d.Set("name", p.LifecycleHookName) - d.Set("role_arn", p.RoleARN) - - return nil -} - -func resourceAwsAutoscalingLifecycleHookDelete(d *schema.ResourceData, meta interface{}) error { - autoscalingconn := meta.(*AWSClient).autoscalingconn - p, err := getAwsAutoscalingLifecycleHook(d, meta) - if err != nil { - return err - } - if p == nil { - return nil - } - - params := autoscaling.DeleteLifecycleHookInput{ - AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), - LifecycleHookName: aws.String(d.Get("name").(string)), - } - if _, err := autoscalingconn.DeleteLifecycleHook(¶ms); err != nil { - return errwrap.Wrapf("Autoscaling Lifecycle Hook: {{err}}", err) - } - - d.SetId("") - return nil -} - -func getAwsAutoscalingPutLifecycleHookInput(d *schema.ResourceData) autoscaling.PutLifecycleHookInput { - var params = autoscaling.PutLifecycleHookInput{ - AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), - LifecycleHookName: aws.String(d.Get("name").(string)), - } - - if v, ok := d.GetOk("default_result"); ok { - params.DefaultResult = 
aws.String(v.(string)) - } - - if v, ok := d.GetOk("heartbeat_timeout"); ok { - params.HeartbeatTimeout = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("lifecycle_transition"); ok { - params.LifecycleTransition = aws.String(v.(string)) - } - - if v, ok := d.GetOk("notification_metadata"); ok { - params.NotificationMetadata = aws.String(v.(string)) - } - - if v, ok := d.GetOk("notification_target_arn"); ok { - params.NotificationTargetARN = aws.String(v.(string)) - } - - if v, ok := d.GetOk("role_arn"); ok { - params.RoleARN = aws.String(v.(string)) - } - - return params -} - -func getAwsAutoscalingLifecycleHook(d *schema.ResourceData, meta interface{}) (*autoscaling.LifecycleHook, error) { - autoscalingconn := meta.(*AWSClient).autoscalingconn - - params := autoscaling.DescribeLifecycleHooksInput{ - AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), - LifecycleHookNames: []*string{aws.String(d.Get("name").(string))}, - } - - log.Printf("[DEBUG] AutoScaling Lifecycle Hook Describe Params: %#v", params) - resp, err := autoscalingconn.DescribeLifecycleHooks(¶ms) - if err != nil { - return nil, errwrap.Wrapf("Error retrieving lifecycle hooks: {{err}}", err) - } - - // find lifecycle hooks - name := d.Get("name") - for idx, sp := range resp.LifecycleHooks { - if *sp.LifecycleHookName == name { - return resp.LifecycleHooks[idx], nil - } - } - - // lifecycle hook not found - return nil, nil -} diff --git a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go deleted file mode 100644 index 580c2ed55..000000000 --- a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSAutoscalingLifecycleHook_basic(t *testing.T) { - resourceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoscalingLifecycleHookDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAutoscalingLifecycleHookConfig(resourceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLifecycleHookExists("aws_autoscaling_lifecycle_hook.foobar"), - resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "autoscaling_group_name", resourceName), - resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "default_result", "CONTINUE"), - resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "heartbeat_timeout", "2000"), - resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "lifecycle_transition", "autoscaling:EC2_INSTANCE_LAUNCHING"), - ), - }, - }, - }) -} - -func TestAccAWSAutoscalingLifecycleHook_omitDefaultResult(t *testing.T) { - rName := acctest.RandString(10) - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoscalingLifecycleHookDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAutoscalingLifecycleHookConfig_omitDefaultResult(rName, rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckLifecycleHookExists("aws_autoscaling_lifecycle_hook.foobar"), - resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "default_result", "ABANDON"), - ), - }, - }, - }) -} - -func testAccCheckLifecycleHookExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - return checkLifecycleHookExistsByName( - 
rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) - } -} - -func checkLifecycleHookExistsByName(asgName, hookName string) error { - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - params := &autoscaling.DescribeLifecycleHooksInput{ - AutoScalingGroupName: aws.String(asgName), - LifecycleHookNames: []*string{aws.String(hookName)}, - } - resp, err := conn.DescribeLifecycleHooks(params) - if err != nil { - return err - } - if len(resp.LifecycleHooks) == 0 { - return fmt.Errorf("LifecycleHook not found") - } - - return nil -} - -func testAccCheckAWSAutoscalingLifecycleHookDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_autoscaling_group" { - continue - } - - params := autoscaling.DescribeLifecycleHooksInput{ - AutoScalingGroupName: aws.String(rs.Primary.Attributes["autoscaling_group_name"]), - LifecycleHookNames: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeLifecycleHooks(¶ms) - - if err == nil { - if len(resp.LifecycleHooks) != 0 && - *resp.LifecycleHooks[0].LifecycleHookName == rs.Primary.ID { - return fmt.Errorf("Lifecycle Hook Still Exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccAWSAutoscalingLifecycleHookConfig(name string) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "foobar" { - name = "%s" - image_id = "ami-21f78e11" - instance_type = "t1.micro" -} - -resource "aws_sqs_queue" "foobar" { - name = "foobar" - delay_seconds = 90 - max_message_size = 2048 - message_retention_seconds = 86400 - receive_wait_time_seconds = 10 -} - -resource "aws_iam_role" "foobar" { - name = "foobar" - assume_role_policy = < retryTimeout { - retryTimeout = m + 5 - log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout) - } - } - if v, ok := d.GetOk("iam_role_arn"); ok { - input.RoleARN = aws.String(v.(string)) - } - - log.Printf("[DEBUG] Creating 
CloudFormation Stack: %s", input) - resp, err := conn.CreateStack(&input) - if err != nil { - return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error()) - } - - d.SetId(*resp.StackId) - var lastStatus string - - wait := resource.StateChangeConf{ - Pending: []string{ - "CREATE_IN_PROGRESS", - "DELETE_IN_PROGRESS", - "ROLLBACK_IN_PROGRESS", - }, - Target: []string{ - "CREATE_COMPLETE", - "CREATE_FAILED", - "DELETE_COMPLETE", - "DELETE_FAILED", - "ROLLBACK_COMPLETE", - "ROLLBACK_FAILED", - }, - Timeout: time.Duration(retryTimeout) * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ - StackName: aws.String(d.Id()), - }) - if err != nil { - log.Printf("[ERROR] Failed to describe stacks: %s", err) - return nil, "", err - } - if len(resp.Stacks) == 0 { - // This shouldn't happen unless CloudFormation is inconsistent - // See https://github.com/hashicorp/terraform/issues/5487 - log.Printf("[WARN] CloudFormation stack %q not found.\nresponse: %q", - d.Id(), resp) - return resp, "", fmt.Errorf( - "CloudFormation stack %q vanished unexpectedly during creation.\n"+ - "Unless you knowingly manually deleted the stack "+ - "please report this as bug at https://github.com/hashicorp/terraform/issues\n"+ - "along with the config & Terraform version & the details below:\n"+ - "Full API response: %s\n", - d.Id(), resp) - } - - status := *resp.Stacks[0].StackStatus - lastStatus = status - log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) - - return resp, status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - if lastStatus == "ROLLBACK_COMPLETE" || lastStatus == "ROLLBACK_FAILED" { - reasons, err := getCloudFormationRollbackReasons(d.Id(), nil, conn) - if err != nil { - return fmt.Errorf("Failed getting rollback reasons: %q", err.Error()) - } - - return fmt.Errorf("%s: %q", lastStatus, reasons) - } - 
if lastStatus == "DELETE_COMPLETE" || lastStatus == "DELETE_FAILED" { - reasons, err := getCloudFormationDeletionReasons(d.Id(), conn) - if err != nil { - return fmt.Errorf("Failed getting deletion reasons: %q", err.Error()) - } - - d.SetId("") - return fmt.Errorf("%s: %q", lastStatus, reasons) - } - if lastStatus == "CREATE_FAILED" { - reasons, err := getCloudFormationFailures(d.Id(), conn) - if err != nil { - return fmt.Errorf("Failed getting failure reasons: %q", err.Error()) - } - return fmt.Errorf("%s: %q", lastStatus, reasons) - } - - log.Printf("[INFO] CloudFormation Stack %q created", d.Id()) - - return resourceAwsCloudFormationStackRead(d, meta) -} - -func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cfconn - - input := &cloudformation.DescribeStacksInput{ - StackName: aws.String(d.Id()), - } - resp, err := conn.DescribeStacks(input) - if err != nil { - awsErr, ok := err.(awserr.Error) - // ValidationError: Stack with id % does not exist - if ok && awsErr.Code() == "ValidationError" { - log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id()) - d.SetId("") - return nil - } - - return err - } - - stacks := resp.Stacks - if len(stacks) < 1 { - log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id()) - d.SetId("") - return nil - } - for _, s := range stacks { - if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" { - log.Printf("[DEBUG] Removing CloudFormation stack %s"+ - " as it has been already deleted", d.Id()) - d.SetId("") - return nil - } - } - - tInput := cloudformation.GetTemplateInput{ - StackName: aws.String(d.Id()), - } - out, err := conn.GetTemplate(&tInput) - if err != nil { - return err - } - - template, err := normalizeCloudFormationTemplate(*out.TemplateBody) - if err != nil { - return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) - } - d.Set("template_body", template) - - stack := 
stacks[0] - log.Printf("[DEBUG] Received CloudFormation stack: %s", stack) - - d.Set("name", stack.StackName) - d.Set("arn", stack.StackId) - d.Set("iam_role_arn", stack.RoleARN) - - if stack.TimeoutInMinutes != nil { - d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes)) - } - if stack.Description != nil { - d.Set("description", stack.Description) - } - if stack.DisableRollback != nil { - d.Set("disable_rollback", stack.DisableRollback) - } - if len(stack.NotificationARNs) > 0 { - err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) - if err != nil { - return err - } - } - - originalParams := d.Get("parameters").(map[string]interface{}) - err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams)) - if err != nil { - return err - } - - err = d.Set("tags", flattenCloudFormationTags(stack.Tags)) - if err != nil { - return err - } - - err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) - if err != nil { - return err - } - - if len(stack.Capabilities) > 0 { - err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) - if err != nil { - return err - } - } - - return nil -} - -func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error { - retryTimeout := int64(30) - conn := meta.(*AWSClient).cfconn - - input := &cloudformation.UpdateStackInput{ - StackName: aws.String(d.Id()), - } - - // Either TemplateBody, TemplateURL or UsePreviousTemplate are required - if v, ok := d.GetOk("template_url"); ok { - input.TemplateURL = aws.String(v.(string)) - } - if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil { - template, err := normalizeCloudFormationTemplate(v) - if err != nil { - return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) - } - input.TemplateBody = aws.String(template) - } - - // Capabilities must be present whether they are changed or 
not - if v, ok := d.GetOk("capabilities"); ok { - input.Capabilities = expandStringList(v.(*schema.Set).List()) - } - - if d.HasChange("notification_arns") { - input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List()) - } - - // Parameters must be present whether they are changed or not - if v, ok := d.GetOk("parameters"); ok { - input.Parameters = expandCloudFormationParameters(v.(map[string]interface{})) - } - - if d.HasChange("policy_body") { - policy, err := normalizeJsonString(d.Get("policy_body")) - if err != nil { - return errwrap.Wrapf("policy body contains an invalid JSON: {{err}}", err) - } - input.StackPolicyBody = aws.String(policy) - } - if d.HasChange("policy_url") { - input.StackPolicyURL = aws.String(d.Get("policy_url").(string)) - } - - if d.HasChange("iam_role_arn") { - input.RoleARN = aws.String(d.Get("iam_role_arn").(string)) - } - - log.Printf("[DEBUG] Updating CloudFormation stack: %s", input) - _, err := conn.UpdateStack(input) - if err != nil { - awsErr, ok := err.(awserr.Error) - // ValidationError: No updates are to be performed. - if !ok || - awsErr.Code() != "ValidationError" || - awsErr.Message() != "No updates are to be performed." 
{ - return err - } - - log.Printf("[DEBUG] Current CloudFormation stack has no updates") - } - - lastUpdatedTime, err := getLastCfEventTimestamp(d.Id(), conn) - if err != nil { - return err - } - - if v, ok := d.GetOk("timeout_in_minutes"); ok { - m := int64(v.(int)) - if m > retryTimeout { - retryTimeout = m + 5 - log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout) - } - } - var lastStatus string - var stackId string - wait := resource.StateChangeConf{ - Pending: []string{ - "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", - "UPDATE_IN_PROGRESS", - "UPDATE_ROLLBACK_IN_PROGRESS", - "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", - }, - Target: []string{ - "CREATE_COMPLETE", // If no stack update was performed - "UPDATE_COMPLETE", - "UPDATE_ROLLBACK_COMPLETE", - "UPDATE_ROLLBACK_FAILED", - }, - Timeout: time.Duration(retryTimeout) * time.Minute, - MinTimeout: 5 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ - StackName: aws.String(d.Id()), - }) - if err != nil { - log.Printf("[ERROR] Failed to describe stacks: %s", err) - return nil, "", err - } - - stackId = aws.StringValue(resp.Stacks[0].StackId) - - status := *resp.Stacks[0].StackStatus - lastStatus = status - log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) - - return resp, status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - if lastStatus == "UPDATE_ROLLBACK_COMPLETE" || lastStatus == "UPDATE_ROLLBACK_FAILED" { - reasons, err := getCloudFormationRollbackReasons(stackId, lastUpdatedTime, conn) - if err != nil { - return fmt.Errorf("Failed getting details about rollback: %q", err.Error()) - } - - return fmt.Errorf("%s: %q", lastStatus, reasons) - } - - log.Printf("[DEBUG] CloudFormation stack %q has been updated", stackId) - - return resourceAwsCloudFormationStackRead(d, meta) -} - -func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) 
error { - conn := meta.(*AWSClient).cfconn - - input := &cloudformation.DeleteStackInput{ - StackName: aws.String(d.Id()), - } - log.Printf("[DEBUG] Deleting CloudFormation stack %s", input) - _, err := conn.DeleteStack(input) - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return err - } - - if awsErr.Code() == "ValidationError" { - // Ignore stack which has been already deleted - return nil - } - return err - } - var lastStatus string - wait := resource.StateChangeConf{ - Pending: []string{ - "DELETE_IN_PROGRESS", - "ROLLBACK_IN_PROGRESS", - }, - Target: []string{ - "DELETE_COMPLETE", - "DELETE_FAILED", - }, - Timeout: 30 * time.Minute, - MinTimeout: 5 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ - StackName: aws.String(d.Id()), - }) - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return nil, "", err - } - - log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s", - awsErr.Code(), awsErr.Message()) - - // ValidationError: Stack with id % does not exist - if awsErr.Code() == "ValidationError" { - return resp, "DELETE_COMPLETE", nil - } - return nil, "", err - } - - if len(resp.Stacks) == 0 { - log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Id()) - return resp, "DELETE_COMPLETE", nil - } - - status := *resp.Stacks[0].StackStatus - lastStatus = status - log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) - - return resp, status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - if lastStatus == "DELETE_FAILED" { - reasons, err := getCloudFormationFailures(d.Id(), conn) - if err != nil { - return fmt.Errorf("Failed getting reasons of failure: %q", err.Error()) - } - - return fmt.Errorf("%s: %q", lastStatus, reasons) - } - - log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id()) - - d.SetId("") - - return nil -} - -// getLastCfEventTimestamp 
takes the first event in a list -// of events ordered from the newest to the oldest -// and extracts timestamp from it -// LastUpdatedTime only provides last >successful< updated time -func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) ( - *time.Time, error) { - output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{ - StackName: aws.String(stackName), - }) - if err != nil { - return nil, err - } - - return output.StackEvents[0].Timestamp, nil -} - -func getCloudFormationRollbackReasons(stackId string, afterTime *time.Time, conn *cloudformation.CloudFormation) ([]string, error) { - var failures []string - - err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{ - StackName: aws.String(stackId), - }, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool { - for _, e := range page.StackEvents { - if afterTime != nil && !e.Timestamp.After(*afterTime) { - continue - } - - if cfStackEventIsFailure(e) || cfStackEventIsRollback(e) { - failures = append(failures, *e.ResourceStatusReason) - } - } - return !lastPage - }) - - return failures, err -} - -func getCloudFormationDeletionReasons(stackId string, conn *cloudformation.CloudFormation) ([]string, error) { - var failures []string - - err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{ - StackName: aws.String(stackId), - }, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool { - for _, e := range page.StackEvents { - if cfStackEventIsFailure(e) || cfStackEventIsStackDeletion(e) { - failures = append(failures, *e.ResourceStatusReason) - } - } - return !lastPage - }) - - return failures, err -} - -func getCloudFormationFailures(stackId string, conn *cloudformation.CloudFormation) ([]string, error) { - var failures []string - - err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{ - StackName: aws.String(stackId), - }, func(page 
*cloudformation.DescribeStackEventsOutput, lastPage bool) bool { - for _, e := range page.StackEvents { - if cfStackEventIsFailure(e) { - failures = append(failures, *e.ResourceStatusReason) - } - } - return !lastPage - }) - - return failures, err -} - -func cfStackEventIsFailure(event *cloudformation.StackEvent) bool { - failRe := regexp.MustCompile("_FAILED$") - return failRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil -} - -func cfStackEventIsRollback(event *cloudformation.StackEvent) bool { - rollbackRe := regexp.MustCompile("^ROLLBACK_") - return rollbackRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil -} - -func cfStackEventIsStackDeletion(event *cloudformation.StackEvent) bool { - return *event.ResourceStatus == "DELETE_IN_PROGRESS" && - *event.ResourceType == "AWS::CloudFormation::Stack" && - event.ResourceStatusReason != nil -} diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go deleted file mode 100644 index 67b0344be..000000000 --- a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go +++ /dev/null @@ -1,635 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudFormation_basic(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig(stackName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), - ), - }, - }, - }) -} - -func TestAccAWSCloudFormation_yaml(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-yaml-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_yaml(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.yaml", &stack), - ), - }, - }, - }) -} - -func TestAccAWSCloudFormation_defaultParams(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-default-params-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_defaultParams(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack), - ), - }, - }, - }) -} - -func TestAccAWSCloudFormation_allAttributes(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-all-attributes-%s", acctest.RandString(10)) - - expectedPolicyBody := "{\"Statement\":[{\"Action\":\"Update:*\",\"Effect\":\"Deny\",\"Principal\":\"*\",\"Resource\":\"LogicalResourceId/StaticVPC\"},{\"Action\":\"Update:*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\"}]}" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_allAttributesWithBodies(stackName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), - ), - }, - { - Config: testAccAWSCloudFormationConfig_allAttributesWithBodies_modified(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - 
resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/4332 -func TestAccAWSCloudFormation_withParams(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-with-params-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_withParams(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), - ), - }, - { - Config: testAccAWSCloudFormationConfig_withParams_modified(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/4534 -func TestAccAWSCloudFormation_withUrl_withParams(t *testing.T) { - var stack cloudformation.Stack - rName := fmt.Sprintf("tf-acc-test-with-url-and-params-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "11.0.0.0/16"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), - ), - }, - { - Config: testAccAWSCloudFormationConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "13.0.0.0/16"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), - ), - }, - }, - }) -} - -func TestAccAWSCloudFormation_withUrl_withParams_withYaml(t *testing.T) { - var stack cloudformation.Stack - rName := fmt.Sprintf("tf-acc-test-with-params-and-yaml-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_templateUrl_withParams_withYaml(rName, "tf-cf-stack.yaml", "13.0.0.0/16"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params-and-yaml", &stack), - ), - }, - }, - }) -} - -// Test for https://github.com/hashicorp/terraform/issues/5653 -func TestAccAWSCloudFormation_withUrl_withParams_noUpdate(t *testing.T) { - var stack cloudformation.Stack - rName := fmt.Sprintf("tf-acc-test-with-params-no-update-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationConfig_templateUrl_withParams(rName, "tf-cf-stack-1.json", "11.0.0.0/16"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), - ), - }, - { - Config: 
testAccAWSCloudFormationConfig_templateUrl_withParams(rName, "tf-cf-stack-2.json", "11.0.0.0/16"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), - ), - }, - }, - }) -} - -func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cfconn - params := &cloudformation.DescribeStacksInput{ - StackName: aws.String(rs.Primary.ID), - } - resp, err := conn.DescribeStacks(params) - if err != nil { - return err - } - if len(resp.Stacks) == 0 { - return fmt.Errorf("CloudFormation stack not found") - } - - return nil - } -} - -func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cfconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudformation_stack" { - continue - } - - params := cloudformation.DescribeStacksInput{ - StackName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeStacks(¶ms) - - if err != nil { - return err - } - - for _, s := range resp.Stacks { - if *s.StackId == rs.Primary.ID && *s.StackStatus != "DELETE_COMPLETE" { - return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccAWSCloudFormationConfig(stackName string) string { - return fmt.Sprintf(` -resource "aws_cloudformation_stack" "network" { - name = "%s" - template_body = < 0 { - tags = tagsOut.ResourceTagList[0].TagsList - } - - if err := d.Set("tags", tagsToMapCloudtrail(tags)); err != nil { - return err - } - - logstatus, err := cloudTrailGetLoggingStatus(conn, trail.Name) - if err != nil { - return err - } - d.Set("enable_logging", logstatus) - - return nil -} - -func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta 
interface{}) error { - conn := meta.(*AWSClient).cloudtrailconn - - input := cloudtrail.UpdateTrailInput{ - Name: aws.String(d.Id()), - } - - if d.HasChange("s3_bucket_name") { - input.S3BucketName = aws.String(d.Get("s3_bucket_name").(string)) - } - if d.HasChange("s3_key_prefix") { - input.S3KeyPrefix = aws.String(d.Get("s3_key_prefix").(string)) - } - if d.HasChange("cloud_watch_logs_role_arn") { - input.CloudWatchLogsRoleArn = aws.String(d.Get("cloud_watch_logs_role_arn").(string)) - } - if d.HasChange("cloud_watch_logs_group_arn") { - input.CloudWatchLogsLogGroupArn = aws.String(d.Get("cloud_watch_logs_group_arn").(string)) - } - if d.HasChange("include_global_service_events") { - input.IncludeGlobalServiceEvents = aws.Bool(d.Get("include_global_service_events").(bool)) - } - if d.HasChange("is_multi_region_trail") { - input.IsMultiRegionTrail = aws.Bool(d.Get("is_multi_region_trail").(bool)) - } - if d.HasChange("enable_log_file_validation") { - input.EnableLogFileValidation = aws.Bool(d.Get("enable_log_file_validation").(bool)) - } - if d.HasChange("kms_key_id") { - input.KmsKeyId = aws.String(d.Get("kms_key_id").(string)) - } - if d.HasChange("sns_topic_name") { - input.SnsTopicName = aws.String(d.Get("sns_topic_name").(string)) - } - - log.Printf("[DEBUG] Updating CloudTrail: %s", input) - t, err := conn.UpdateTrail(&input) - if err != nil { - return err - } - - if d.HasChange("tags") { - err := setTagsCloudtrail(conn, d) - if err != nil { - return err - } - } - - if d.HasChange("enable_logging") { - log.Printf("[DEBUG] Updating logging on CloudTrail: %s", input) - err := cloudTrailSetLogging(conn, d.Get("enable_logging").(bool), *input.Name) - if err != nil { - return err - } - } - - log.Printf("[DEBUG] CloudTrail updated: %s", t) - - return resourceAwsCloudTrailRead(d, meta) -} - -func resourceAwsCloudTrailDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudtrailconn - - log.Printf("[DEBUG] Deleting CloudTrail: %q", 
d.Id()) - _, err := conn.DeleteTrail(&cloudtrail.DeleteTrailInput{ - Name: aws.String(d.Id()), - }) - - return err -} - -func cloudTrailGetLoggingStatus(conn *cloudtrail.CloudTrail, id *string) (bool, error) { - GetTrailStatusOpts := &cloudtrail.GetTrailStatusInput{ - Name: id, - } - resp, err := conn.GetTrailStatus(GetTrailStatusOpts) - if err != nil { - return false, fmt.Errorf("Error retrieving logging status of CloudTrail (%s): %s", *id, err) - } - - return *resp.IsLogging, err -} - -func cloudTrailSetLogging(conn *cloudtrail.CloudTrail, enabled bool, id string) error { - if enabled { - log.Printf( - "[DEBUG] Starting logging on CloudTrail (%s)", - id) - StartLoggingOpts := &cloudtrail.StartLoggingInput{ - Name: aws.String(id), - } - if _, err := conn.StartLogging(StartLoggingOpts); err != nil { - return fmt.Errorf( - "Error starting logging on CloudTrail (%s): %s", - id, err) - } - } else { - log.Printf( - "[DEBUG] Stopping logging on CloudTrail (%s)", - id) - StopLoggingOpts := &cloudtrail.StopLoggingInput{ - Name: aws.String(id), - } - if _, err := conn.StopLogging(StopLoggingOpts); err != nil { - return fmt.Errorf( - "Error stopping logging on CloudTrail (%s): %s", - id, err) - } - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_cloudtrail_test.go b/builtin/providers/aws/resource_aws_cloudtrail_test.go deleted file mode 100644 index 08655ea67..000000000 --- a/builtin/providers/aws/resource_aws_cloudtrail_test.go +++ /dev/null @@ -1,761 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudTrail(t *testing.T) { - testCases := map[string]map[string]func(t *testing.T){ - "Trail": { - "basic": testAccAWSCloudTrail_basic, - "enableLogging": 
testAccAWSCloudTrail_enable_logging, - "isMultiRegion": testAccAWSCloudTrail_is_multi_region, - "logValidation": testAccAWSCloudTrail_logValidation, - "kmsKey": testAccAWSCloudTrail_kmsKey, - "tags": testAccAWSCloudTrail_tags, - }, - } - - for group, m := range testCases { - m := m - t.Run(group, func(t *testing.T) { - for name, tc := range m { - tc := tc - t.Run(name, func(t *testing.T) { - tc(t) - }) - } - }) - } -} - -func testAccAWSCloudTrail_basic(t *testing.T) { - var trail cloudtrail.Trail - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfigModified(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", "/prefix"), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "false"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - }, - }) -} - -func testAccAWSCloudTrail_enable_logging(t *testing.T) { - var trail cloudtrail.Trail - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - // AWS will create the trail with logging turned off. - // Test that "enable_logging" default works. - testAccCheckCloudTrailLoggingEnabled("aws_cloudtrail.foobar", true, &trail), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfigModified(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - testAccCheckCloudTrailLoggingEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - testAccCheckCloudTrailLoggingEnabled("aws_cloudtrail.foobar", true, &trail), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - }, - }) -} - -func testAccAWSCloudTrail_is_multi_region(t *testing.T) { - var trail cloudtrail.Trail - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", 
"is_multi_region_trail", "false"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfigMultiRegion(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "is_multi_region_trail", "true"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfig(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "is_multi_region_trail", "false"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - }, - }) -} - -func testAccAWSCloudTrail_logValidation(t *testing.T) { - var trail cloudtrail.Trail - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig_logValidation(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", ""), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", true, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: 
testAccAWSCloudTrailConfig_logValidationModified(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", ""), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - }, - }) -} - -func testAccAWSCloudTrail_kmsKey(t *testing.T) { - var trail cloudtrail.Trail - cloudTrailRandInt := acctest.RandInt() - keyRegex := regexp.MustCompile("^arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig_kmsKey(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", ""), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - resource.TestMatchResourceAttr("aws_cloudtrail.foobar", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -func testAccAWSCloudTrail_tags(t *testing.T) { - var trail cloudtrail.Trail - var trailTags []*cloudtrail.Tag - var trailTagsModified []*cloudtrail.Tag - cloudTrailRandInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudTrailDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudTrailConfig_tags(cloudTrailRandInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "tags.%", "2"), - testAccCheckCloudTrailLoadTags(&trail, &trailTags), - testAccCheckCloudTrailCheckTags(&trailTags, map[string]string{"Foo": "moo", "Pooh": "hi"}), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfig_tagsModified(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "tags.%", "3"), - testAccCheckCloudTrailLoadTags(&trail, &trailTagsModified), - testAccCheckCloudTrailCheckTags(&trailTagsModified, map[string]string{"Foo": "moo", "Moo": "boom", "Pooh": "hi"}), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - { - Config: testAccAWSCloudTrailConfig_tagsModifiedAgain(cloudTrailRandInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), - resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "tags.%", "0"), - testAccCheckCloudTrailLoadTags(&trail, &trailTagsModified), - testAccCheckCloudTrailCheckTags(&trailTagsModified, map[string]string{}), - testAccCheckCloudTrailLogValidationEnabled("aws_cloudtrail.foobar", false, &trail), - testAccCheckCloudTrailKmsKeyIdEquals("aws_cloudtrail.foobar", "", &trail), - ), - }, - }, - }) -} - -func testAccCheckCloudTrailExists(n string, trail *cloudtrail.Trail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn - params := 
cloudtrail.DescribeTrailsInput{ - TrailNameList: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeTrails(¶ms) - if err != nil { - return err - } - if len(resp.TrailList) == 0 { - return fmt.Errorf("Trail not found") - } - *trail = *resp.TrailList[0] - - return nil - } -} - -func testAccCheckCloudTrailLoggingEnabled(n string, desired bool, trail *cloudtrail.Trail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn - params := cloudtrail.GetTrailStatusInput{ - Name: aws.String(rs.Primary.ID), - } - resp, err := conn.GetTrailStatus(¶ms) - - if err != nil { - return err - } - if *resp.IsLogging != desired { - return fmt.Errorf("Expected logging status %t, given %t", desired, *resp.IsLogging) - } - - return nil - } -} - -func testAccCheckCloudTrailLogValidationEnabled(n string, desired bool, trail *cloudtrail.Trail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if trail.LogFileValidationEnabled == nil { - return fmt.Errorf("No LogFileValidationEnabled attribute present in trail: %s", trail) - } - - if *trail.LogFileValidationEnabled != desired { - return fmt.Errorf("Expected log validation status %t, given %t", desired, - *trail.LogFileValidationEnabled) - } - - // local state comparison - enabled, ok := rs.Primary.Attributes["enable_log_file_validation"] - if !ok { - return fmt.Errorf("No enable_log_file_validation attribute defined for %s, expected %t", - n, desired) - } - desiredInString := fmt.Sprintf("%t", desired) - if enabled != desiredInString { - return fmt.Errorf("Expected log validation status %s, saved %s", desiredInString, enabled) - } - - return nil - } -} - -func testAccCheckCloudTrailKmsKeyIdEquals(n string, desired string, trail 
*cloudtrail.Trail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if desired != "" && trail.KmsKeyId == nil { - return fmt.Errorf("No KmsKeyId attribute present in trail: %s, expected %s", - trail, desired) - } - - // work around string pointer - var kmsKeyIdInString string - if trail.KmsKeyId == nil { - kmsKeyIdInString = "" - } else { - kmsKeyIdInString = *trail.KmsKeyId - } - - if kmsKeyIdInString != desired { - return fmt.Errorf("Expected KMS Key ID %q to equal %q", - *trail.KmsKeyId, desired) - } - - kmsKeyId, ok := rs.Primary.Attributes["kms_key_id"] - if desired != "" && !ok { - return fmt.Errorf("No kms_key_id attribute defined for %s", n) - } - if kmsKeyId != desired { - return fmt.Errorf("Expected KMS Key ID %q, saved %q", desired, kmsKeyId) - } - - return nil - } -} - -func testAccCheckAWSCloudTrailDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudtrail" { - continue - } - - params := cloudtrail.DescribeTrailsInput{ - TrailNameList: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeTrails(¶ms) - - if err == nil { - if len(resp.TrailList) != 0 && - *resp.TrailList[0].Name == rs.Primary.ID { - return fmt.Errorf("CloudTrail still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckCloudTrailLoadTags(trail *cloudtrail.Trail, tags *[]*cloudtrail.Tag) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn - input := cloudtrail.ListTagsInput{ - ResourceIdList: []*string{trail.TrailARN}, - } - out, err := conn.ListTags(&input) - if err != nil { - return err - } - log.Printf("[DEBUG] Received CloudTrail tags during test: %s", out) - if len(out.ResourceTagList) > 0 { - *tags = out.ResourceTagList[0].TagsList 
- } - log.Printf("[DEBUG] Loading CloudTrail tags into a var: %s", *tags) - return nil - } -} - -func testAccAWSCloudTrailConfig(cloudTrailRandInt int) string { - return fmt.Sprintf(` -resource "aws_cloudtrail" "foobar" { - name = "tf-trail-foobar-%d" - s3_bucket_name = "${aws_s3_bucket.foo.id}" -} - -resource "aws_s3_bucket" "foo" { - bucket = "tf-test-trail-%d" - force_destroy = true - policy = < length { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than %d characters: %q", k, length, json)) - } - return - } -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_event_rule_test.go b/builtin/providers/aws/resource_aws_cloudwatch_event_rule_test.go deleted file mode 100644 index e69489777..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_event_rule_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudWatchEventRule_basic(t *testing.T) { - var rule events.DescribeRuleOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.foo", &rule), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.foo", "name", "tf-acc-cw-event-rule"), - ), - }, - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfigModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.foo", &rule), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.foo", "name", 
"tf-acc-cw-event-rule-mod"), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchEventRule_full(t *testing.T) { - var rule events.DescribeRuleOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfig_full, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.moobar", &rule), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.moobar", "name", "tf-acc-cw-event-rule-full"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.moobar", "schedule_expression", "rate(5 minutes)"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.moobar", "event_pattern", "{\"source\":[\"aws.ec2\"]}"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.moobar", "description", "He's not dead, he's just resting!"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_rule.moobar", "role_arn", ""), - testAccCheckCloudWatchEventRuleEnabled("aws_cloudwatch_event_rule.moobar", "DISABLED", &rule), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchEventRule_enable(t *testing.T) { - var rule events.DescribeRuleOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfigEnabled, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.moo", &rule), - testAccCheckCloudWatchEventRuleEnabled("aws_cloudwatch_event_rule.moo", "ENABLED", &rule), - ), - }, - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfigDisabled, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.moo", &rule), - testAccCheckCloudWatchEventRuleEnabled("aws_cloudwatch_event_rule.moo", "DISABLED", &rule), - ), - }, - resource.TestStep{ - Config: testAccAWSCloudWatchEventRuleConfigEnabled, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventRuleExists("aws_cloudwatch_event_rule.moo", &rule), - testAccCheckCloudWatchEventRuleEnabled("aws_cloudwatch_event_rule.moo", "ENABLED", &rule), - ), - }, - }, - }) -} - -func testAccCheckCloudWatchEventRuleExists(n string, rule *events.DescribeRuleOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn - params := events.DescribeRuleInput{ - Name: aws.String(rs.Primary.ID), - } - resp, err := conn.DescribeRule(¶ms) - if err != nil { - return err - } - if resp == nil { - return fmt.Errorf("Rule not found") - } - - *rule = *resp - - return nil - } -} - -func testAccCheckCloudWatchEventRuleEnabled(n string, desired string, rule *events.DescribeRuleOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn - params := events.DescribeRuleInput{ - Name: aws.String(rs.Primary.ID), - } - resp, err := conn.DescribeRule(¶ms) - - if err != nil { - return err - } - if *resp.State != desired { - return fmt.Errorf("Expected state %q, given %q", desired, *resp.State) - } - - return nil - } -} - -func testAccCheckAWSCloudWatchEventRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_event_rule" { - continue - } - - params := events.DescribeRuleInput{ - Name: 
aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeRule(¶ms) - - if err == nil { - return fmt.Errorf("CloudWatch Event Rule %q still exists: %s", - rs.Primary.ID, resp) - } - } - - return nil -} - -func TestResourceAWSCloudWatchEventRule_validateEventPatternValue(t *testing.T) { - type testCases struct { - Length int - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Length: 8, - Value: acctest.RandString(16), - ErrCount: 1, - }, - { - Length: 123, - Value: `{"abc":}`, - ErrCount: 1, - }, - { - Length: 1, - Value: `{"abc":["1","2"]}`, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateEventPatternValue(tc.Length)(tc.Value, "event_pattern") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Length: 0, - Value: ``, - ErrCount: 0, - }, - { - Length: 2, - Value: `{}`, - ErrCount: 0, - }, - { - Length: 18, - Value: `{"abc":["1","2"]}`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateEventPatternValue(tc.Length)(tc.Value, "event_pattern") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -var testAccAWSCloudWatchEventRuleConfig = ` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule" - schedule_expression = "rate(1 hour)" -} -` - -var testAccAWSCloudWatchEventRuleConfigEnabled = ` -resource "aws_cloudwatch_event_rule" "moo" { - name = "tf-acc-cw-event-rule-state" - schedule_expression = "rate(1 hour)" -} -` -var testAccAWSCloudWatchEventRuleConfigDisabled = ` -resource "aws_cloudwatch_event_rule" "moo" { - name = "tf-acc-cw-event-rule-state" - schedule_expression = "rate(1 hour)" - is_enabled = false -} -` - -var testAccAWSCloudWatchEventRuleConfigModified = ` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule-mod" - schedule_expression = "rate(1 hour)" -} -` - 
-var testAccAWSCloudWatchEventRuleConfig_full = ` -resource "aws_cloudwatch_event_rule" "moobar" { - name = "tf-acc-cw-event-rule-full" - schedule_expression = "rate(5 minutes)" - event_pattern = < 0 { - return fmt.Errorf("Creating CloudWatch Event Target failed: %s", - out.FailedEntries) - } - - id := rule + "-" + targetId - d.SetId(id) - - log.Printf("[INFO] CloudWatch Event Target %q created", d.Id()) - - return resourceAwsCloudWatchEventTargetRead(d, meta) -} - -func resourceAwsCloudWatchEventTargetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatcheventsconn - - t, err := findEventTargetById( - d.Get("target_id").(string), - d.Get("rule").(string), - nil, conn) - if err != nil { - if regexp.MustCompile(" not found$").MatchString(err.Error()) { - log.Printf("[WARN] Removing CloudWatch Event Target %q because it's gone.", d.Id()) - d.SetId("") - return nil - } - if awsErr, ok := err.(awserr.Error); ok { - // This should never happen, but it's useful - // for recovering from https://github.com/hashicorp/terraform/issues/5389 - if awsErr.Code() == "ValidationException" { - log.Printf("[WARN] Removing CloudWatch Event Target %q because it never existed.", d.Id()) - d.SetId("") - return nil - } - - if awsErr.Code() == "ResourceNotFoundException" { - log.Printf("[WARN] CloudWatch Event Target (%q) not found. 
Removing it from state.", d.Id()) - d.SetId("") - return nil - } - - } - return err - } - log.Printf("[DEBUG] Found Event Target: %s", t) - - d.Set("arn", t.Arn) - d.Set("target_id", t.Id) - d.Set("input", t.Input) - d.Set("input_path", t.InputPath) - d.Set("role_arn", t.RoleArn) - - if t.RunCommandParameters != nil { - if err := d.Set("run_command_targets", flattenAwsCloudWatchEventTargetRunParameters(t.RunCommandParameters)); err != nil { - return fmt.Errorf("[DEBUG] Error setting run_command_targets error: %#v", err) - } - } - - return nil -} - -func findEventTargetById(id, rule string, nextToken *string, conn *events.CloudWatchEvents) ( - *events.Target, error) { - input := events.ListTargetsByRuleInput{ - Rule: aws.String(rule), - NextToken: nextToken, - Limit: aws.Int64(100), // Set limit to allowed maximum to prevent API throttling - } - log.Printf("[DEBUG] Reading CloudWatch Event Target: %s", input) - out, err := conn.ListTargetsByRule(&input) - if err != nil { - return nil, err - } - - for _, t := range out.Targets { - if *t.Id == id { - return t, nil - } - } - - if out.NextToken != nil { - return findEventTargetById(id, rule, nextToken, conn) - } - - return nil, fmt.Errorf("CloudWatch Event Target %q (%q) not found", id, rule) -} - -func resourceAwsCloudWatchEventTargetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatcheventsconn - - input := buildPutTargetInputStruct(d) - - log.Printf("[DEBUG] Updating CloudWatch Event Target: %s", input) - _, err := conn.PutTargets(input) - if err != nil { - return fmt.Errorf("Updating CloudWatch Event Target failed: %s", err) - } - - return resourceAwsCloudWatchEventTargetRead(d, meta) -} - -func resourceAwsCloudWatchEventTargetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatcheventsconn - - input := events.RemoveTargetsInput{ - Ids: []*string{aws.String(d.Get("target_id").(string))}, - Rule: aws.String(d.Get("rule").(string)), - 
} - log.Printf("[INFO] Deleting CloudWatch Event Target: %s", input) - _, err := conn.RemoveTargets(&input) - if err != nil { - return fmt.Errorf("Error deleting CloudWatch Event Target: %s", err) - } - log.Println("[INFO] CloudWatch Event Target deleted") - - d.SetId("") - - return nil -} - -func buildPutTargetInputStruct(d *schema.ResourceData) *events.PutTargetsInput { - e := &events.Target{ - Arn: aws.String(d.Get("arn").(string)), - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("input"); ok { - e.Input = aws.String(v.(string)) - } - if v, ok := d.GetOk("input_path"); ok { - e.InputPath = aws.String(v.(string)) - } - - if v, ok := d.GetOk("role_arn"); ok { - e.RoleArn = aws.String(v.(string)) - } - - if v, ok := d.GetOk("run_command_targets"); ok { - e.RunCommandParameters = expandAwsCloudWatchEventTargetRunParameters(v.([]interface{})) - } - - input := events.PutTargetsInput{ - Rule: aws.String(d.Get("rule").(string)), - Targets: []*events.Target{e}, - } - - return &input -} - -func expandAwsCloudWatchEventTargetRunParameters(config []interface{}) *events.RunCommandParameters { - - commands := make([]*events.RunCommandTarget, 0) - - for _, c := range config { - param := c.(map[string]interface{}) - command := &events.RunCommandTarget{ - Key: aws.String(param["key"].(string)), - Values: expandStringList(param["values"].([]interface{})), - } - - commands = append(commands, command) - } - - command := &events.RunCommandParameters{ - RunCommandTargets: commands, - } - - return command -} - -func flattenAwsCloudWatchEventTargetRunParameters(runCommand *events.RunCommandParameters) []map[string]interface{} { - result := make([]map[string]interface{}, 0) - - for _, x := range runCommand.RunCommandTargets { - config := make(map[string]interface{}) - - config["key"] = *x.Key - config["values"] = flattenStringList(x.Values) - - result = append(result, config) - } - - return result -} diff --git 
a/builtin/providers/aws/resource_aws_cloudwatch_event_target_test.go b/builtin/providers/aws/resource_aws_cloudwatch_event_target_test.go deleted file mode 100644 index 044a32e67..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_event_target_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - events "github.com/aws/aws-sdk-go/service/cloudwatchevents" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudWatchEventTarget_basic(t *testing.T) { - var target events.Target - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventTargetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchEventTargetConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.moobar", &target), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.moobar", "rule", "tf-acc-cw-event-rule-basic"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.moobar", "target_id", "tf-acc-cw-target-basic"), - resource.TestMatchResourceAttr("aws_cloudwatch_event_target.moobar", "arn", - regexp.MustCompile(":tf-acc-moon$")), - ), - }, - { - Config: testAccAWSCloudWatchEventTargetConfigModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.moobar", &target), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.moobar", "rule", "tf-acc-cw-event-rule-basic"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.moobar", "target_id", "tf-acc-cw-target-modified"), - resource.TestMatchResourceAttr("aws_cloudwatch_event_target.moobar", "arn", - regexp.MustCompile(":tf-acc-sun$")), - ), - }, - }, - }) -} - -func 
TestAccAWSCloudWatchEventTarget_missingTargetId(t *testing.T) { - var target events.Target - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventTargetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchEventTargetConfigMissingTargetId, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.moobar", &target), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.moobar", "rule", "tf-acc-cw-event-rule-missing-target-id"), - resource.TestMatchResourceAttr("aws_cloudwatch_event_target.moobar", "arn", - regexp.MustCompile(":tf-acc-moon$")), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchEventTarget_full(t *testing.T) { - var target events.Target - rName := acctest.RandomWithPrefix("tf_ssm_Document") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventTargetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchEventTargetConfig_full(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.foobar", &target), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.foobar", "rule", "tf-acc-cw-event-rule-full"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.foobar", "target_id", "tf-acc-cw-target-full"), - resource.TestMatchResourceAttr("aws_cloudwatch_event_target.foobar", "arn", - regexp.MustCompile("^arn:aws:kinesis:.*:stream/tf_ssm_Document")), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.foobar", "input", "{ \"source\": [\"aws.cloudtrail\"] }\n"), - resource.TestCheckResourceAttr("aws_cloudwatch_event_target.foobar", "input_path", ""), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchEventTarget_ssmDocument(t *testing.T) { - var target events.Target - 
rName := acctest.RandomWithPrefix("tf_ssm_Document") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchEventTargetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchEventTargetConfigSsmDocument(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.test", &target), - ), - }, - }, - }) -} - -func testAccCheckCloudWatchEventTargetExists(n string, rule *events.Target) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn - t, err := findEventTargetById(rs.Primary.Attributes["target_id"], - rs.Primary.Attributes["rule"], nil, conn) - if err != nil { - return fmt.Errorf("Event Target not found: %s", err) - } - - *rule = *t - - return nil - } -} - -func testAccCheckAWSCloudWatchEventTargetDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatcheventsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_event_target" { - continue - } - - t, err := findEventTargetById(rs.Primary.Attributes["target_id"], - rs.Primary.Attributes["rule"], nil, conn) - if err == nil { - return fmt.Errorf("CloudWatch Event Target %q still exists: %s", - rs.Primary.ID, t) - } - } - - return nil -} - -var testAccAWSCloudWatchEventTargetConfig = ` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule-basic" - schedule_expression = "rate(1 hour)" -} - -resource "aws_cloudwatch_event_target" "moobar" { - rule = "${aws_cloudwatch_event_rule.foo.name}" - target_id = "tf-acc-cw-target-basic" - arn = "${aws_sns_topic.moon.arn}" -} - -resource "aws_sns_topic" "moon" { - name = "tf-acc-moon" -} -` - -var 
testAccAWSCloudWatchEventTargetConfigMissingTargetId = ` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule-missing-target-id" - schedule_expression = "rate(1 hour)" -} - -resource "aws_cloudwatch_event_target" "moobar" { - rule = "${aws_cloudwatch_event_rule.foo.name}" - arn = "${aws_sns_topic.moon.arn}" -} - -resource "aws_sns_topic" "moon" { - name = "tf-acc-moon" -} -` - -var testAccAWSCloudWatchEventTargetConfigModified = ` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule-basic" - schedule_expression = "rate(1 hour)" -} - -resource "aws_cloudwatch_event_target" "moobar" { - rule = "${aws_cloudwatch_event_rule.foo.name}" - target_id = "tf-acc-cw-target-modified" - arn = "${aws_sns_topic.sun.arn}" -} - -resource "aws_sns_topic" "sun" { - name = "tf-acc-sun" -} -` - -func testAccAWSCloudWatchEventTargetConfig_full(rName string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_event_rule" "foo" { - name = "tf-acc-cw-event-rule-full" - schedule_expression = "rate(1 hour)" - role_arn = "${aws_iam_role.role.arn}" -} - -resource "aws_iam_role" "role" { - name = "%s" - assume_role_policy = < 0 { - log.Printf("[DEBUG] Removing tags from %s", name) - _, err := conn.UntagLogGroup(&cloudwatchlogs.UntagLogGroupInput{ - LogGroupName: aws.String(name), - Tags: remove, - }) - if err != nil { - return err - } - } - - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags on %s", name) - _, err := conn.TagLogGroup(&cloudwatchlogs.TagLogGroupInput{ - LogGroupName: aws.String(name), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return resourceAwsCloudWatchLogGroupRead(d, meta) -} - -func diffCloudWatchTags(oldTags map[string]interface{}, newTags map[string]interface{}) (map[string]*string, []*string) { - create := make(map[string]*string) - for k, v := range newTags { - create[k] = aws.String(v.(string)) - } - - var remove []*string - for t, _ := range oldTags { - _, ok := create[t] - if !ok { - 
remove = append(remove, aws.String(t)) - } - } - - return create, remove -} - -func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id()) - _, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String(d.Get("name").(string)), - }) - if err != nil { - return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err) - } - log.Println("[INFO] CloudWatch Log Group deleted") - - d.SetId("") - - return nil -} - -func flattenCloudWatchTags(d *schema.ResourceData, conn *cloudwatchlogs.CloudWatchLogs) (map[string]interface{}, error) { - tagsOutput, err := conn.ListTagsLogGroup(&cloudwatchlogs.ListTagsLogGroupInput{ - LogGroupName: aws.String(d.Get("name").(string)), - }) - if err != nil { - return nil, errwrap.Wrapf("Error Getting CloudWatch Logs Tag List: {{err}}", err) - } - if tagsOutput != nil { - output := make(map[string]interface{}, len(tagsOutput.Tags)) - - for i, v := range tagsOutput.Tags { - output[i] = *v - } - - return output, nil - } - - return make(map[string]interface{}), nil -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go deleted file mode 100644 index e1f6e3131..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go +++ /dev/null @@ -1,361 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudWatchLogGroup_basic(t *testing.T) { - var lg cloudwatchlogs.LogGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "0"), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogGroup_namePrefix(t *testing.T) { - var lg cloudwatchlogs.LogGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroup_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.test", &lg), - resource.TestMatchResourceAttr("aws_cloudwatch_log_group.test", "name", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogGroup_generatedName(t *testing.T) { - var lg cloudwatchlogs.LogGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroup_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.test", &lg), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogGroup_retentionPolicy(t *testing.T) { - var lg cloudwatchlogs.LogGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroupConfig_withRetention(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - 
resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "365"), - ), - }, - { - Config: testAccAWSCloudWatchLogGroupConfigModified_withRetention(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "retention_in_days", "0"), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogGroup_multiple(t *testing.T) { - var lg cloudwatchlogs.LogGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroupConfig_multiple(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.alpha", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.alpha", "retention_in_days", "14"), - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.beta", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.beta", "retention_in_days", "0"), - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.charlie", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.charlie", "retention_in_days", "3653"), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogGroup_disappears(t *testing.T) { - var lg cloudwatchlogs.LogGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - testAccCheckCloudWatchLogGroupDisappears(&lg), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) 
-} - -func TestAccAWSCloudWatchLogGroup_tagging(t *testing.T) { - var lg cloudwatchlogs.LogGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogGroupConfigWithTags(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Production"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Foo", "Bar"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Empty", ""), - ), - }, - { - Config: testAccAWSCloudWatchLogGroupConfigWithTagsAdded(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "4"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Development"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Foo", "Bar"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Empty", ""), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Bar", "baz"), - ), - }, - { - Config: testAccAWSCloudWatchLogGroupConfigWithTagsUpdated(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "4"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Development"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Empty", 
"NotEmpty"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Foo", "UpdatedBar"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Bar", "baz"), - ), - }, - { - Config: testAccAWSCloudWatchLogGroupConfigWithTags(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Environment", "Production"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Foo", "Bar"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_group.foobar", "tags.Empty", ""), - ), - }, - }, - }) -} - -func testAccCheckCloudWatchLogGroupDisappears(lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - opts := &cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: lg.LogGroupName, - } - if _, err := conn.DeleteLogGroup(opts); err != nil { - return err - } - return nil - } -} - -func testAccCheckCloudWatchLogGroupExists(n string, lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - logGroup, exists, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) - if err != nil { - return err - } - if !exists { - return fmt.Errorf("Bad: LogGroup %q does not exist", rs.Primary.ID) - } - - *lg = *logGroup - - return nil - } -} - -func testAccCheckAWSCloudWatchLogGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_log_group" { - continue - } - _, exists, err := 
lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) - if err != nil { - return nil - } - - if exists { - return fmt.Errorf("Bad: LogGroup still exists: %q", rs.Primary.ID) - } - - } - - return nil -} - -func testAccAWSCloudWatchLogGroupConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfigWithTags(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" - - tags { - Environment = "Production" - Foo = "Bar" - Empty = "" - } -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfigWithTagsAdded(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" - - tags { - Environment = "Development" - Foo = "Bar" - Empty = "" - Bar = "baz" - } -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfigWithTagsUpdated(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" - - tags { - Environment = "Development" - Foo = "UpdatedBar" - Empty = "NotEmpty" - Bar = "baz" - } -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfigWithTagsRemoval(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" - - tags { - Environment = "Production" - Foo = "Bar" - Empty = "" - } -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfig_withRetention(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" - retention_in_days = 365 -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfigModified_withRetention(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "foo-bar-%d" -} -`, rInt) -} - -func testAccAWSCloudWatchLogGroupConfig_multiple(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "alpha" { - name = 
"foo-bar-%d" - retention_in_days = 14 -} -resource "aws_cloudwatch_log_group" "beta" { - name = "foo-bar-%d" -} -resource "aws_cloudwatch_log_group" "charlie" { - name = "foo-bar-%d" - retention_in_days = 3653 -} -`, rInt, rInt+1, rInt+2) -} - -const testAccAWSCloudWatchLogGroup_namePrefix = ` -resource "aws_cloudwatch_log_group" "test" { - name_prefix = "tf-test-" -} -` - -const testAccAWSCloudWatchLogGroup_generatedName = ` -resource "aws_cloudwatch_log_group" "test" {} -` diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter.go b/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter.go deleted file mode 100644 index 943472f85..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter.go +++ /dev/null @@ -1,187 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" -) - -func resourceAwsCloudWatchLogMetricFilter() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCloudWatchLogMetricFilterUpdate, - Read: resourceAwsCloudWatchLogMetricFilterRead, - Update: resourceAwsCloudWatchLogMetricFilterUpdate, - Delete: resourceAwsCloudWatchLogMetricFilterDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateLogMetricFilterName, - }, - - "pattern": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateMaxLength(512), - StateFunc: func(v interface{}) string { - s, ok := v.(string) - if !ok { - return "" - } - return strings.TrimSpace(s) - }, - }, - - "log_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateLogGroupName, - }, - - "metric_transformation": &schema.Schema{ - 
Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateLogMetricFilterTransformationName, - }, - "namespace": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateLogMetricFilterTransformationName, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateMaxLength(100), - }, - }, - }, - }, - }, - } -} - -func resourceAwsCloudWatchLogMetricFilterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - - input := cloudwatchlogs.PutMetricFilterInput{ - FilterName: aws.String(d.Get("name").(string)), - FilterPattern: aws.String(strings.TrimSpace(d.Get("pattern").(string))), - LogGroupName: aws.String(d.Get("log_group_name").(string)), - } - - transformations := d.Get("metric_transformation").([]interface{}) - o := transformations[0].(map[string]interface{}) - input.MetricTransformations = expandCloudWachLogMetricTransformations(o) - - log.Printf("[DEBUG] Creating/Updating CloudWatch Log Metric Filter: %s", input) - _, err := conn.PutMetricFilter(&input) - if err != nil { - return fmt.Errorf("Creating/Updating CloudWatch Log Metric Filter failed: %s", err) - } - - d.SetId(d.Get("name").(string)) - - log.Println("[INFO] CloudWatch Log Metric Filter created/updated") - - return resourceAwsCloudWatchLogMetricFilterRead(d, meta) -} - -func resourceAwsCloudWatchLogMetricFilterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - - mf, err := lookupCloudWatchLogMetricFilter(conn, d.Get("name").(string), - d.Get("log_group_name").(string), nil) - if err != nil { - if _, ok := err.(*resource.NotFoundError); ok { - log.Printf("[WARN] Removing CloudWatch Log Metric Filter as it is gone") - d.SetId("") - return nil - } - - return fmt.Errorf("Failed 
reading CloudWatch Log Metric Filter: %s", err) - } - - log.Printf("[DEBUG] Found CloudWatch Log Metric Filter: %s", mf) - - d.Set("name", mf.FilterName) - d.Set("pattern", mf.FilterPattern) - d.Set("metric_transformation", flattenCloudWachLogMetricTransformations(mf.MetricTransformations)) - - return nil -} - -func lookupCloudWatchLogMetricFilter(conn *cloudwatchlogs.CloudWatchLogs, - name, logGroupName string, nextToken *string) (*cloudwatchlogs.MetricFilter, error) { - - input := cloudwatchlogs.DescribeMetricFiltersInput{ - FilterNamePrefix: aws.String(name), - LogGroupName: aws.String(logGroupName), - NextToken: nextToken, - } - log.Printf("[DEBUG] Reading CloudWatch Log Metric Filter: %s", input) - resp, err := conn.DescribeMetricFilters(&input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { - return nil, &resource.NotFoundError{ - Message: fmt.Sprintf("CloudWatch Log Metric Filter %q / %q not found via"+ - " initial DescribeMetricFilters call", name, logGroupName), - LastError: err, - LastRequest: input, - } - } - - return nil, fmt.Errorf("Failed describing CloudWatch Log Metric Filter: %s", err) - } - - for _, mf := range resp.MetricFilters { - if *mf.FilterName == name { - return mf, nil - } - } - - if resp.NextToken != nil { - return lookupCloudWatchLogMetricFilter(conn, name, logGroupName, resp.NextToken) - } - - return nil, &resource.NotFoundError{ - Message: fmt.Sprintf("CloudWatch Log Metric Filter %q / %q not found "+ - "in given results from DescribeMetricFilters", name, logGroupName), - LastResponse: resp, - LastRequest: input, - } -} - -func resourceAwsCloudWatchLogMetricFilterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - - input := cloudwatchlogs.DeleteMetricFilterInput{ - FilterName: aws.String(d.Get("name").(string)), - LogGroupName: aws.String(d.Get("log_group_name").(string)), - } - log.Printf("[INFO] Deleting CloudWatch 
Log Metric Filter: %s", d.Id()) - _, err := conn.DeleteMetricFilter(&input) - if err != nil { - return fmt.Errorf("Error deleting CloudWatch Log Metric Filter: %s", err) - } - log.Println("[INFO] CloudWatch Log Metric Filter deleted") - - d.SetId("") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter_test.go deleted file mode 100644 index d34ea8dd0..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_metric_filter_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudWatchLogMetricFilter_basic(t *testing.T) { - var mf cloudwatchlogs.MetricFilter - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogMetricFilterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogMetricFilterConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogMetricFilterExists("aws_cloudwatch_log_metric_filter.foobar", &mf), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "name", fmt.Sprintf("MyAppAccessCount-%d", rInt)), - testAccCheckCloudWatchLogMetricFilterName(&mf, fmt.Sprintf("MyAppAccessCount-%d", rInt)), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "pattern", ""), - testAccCheckCloudWatchLogMetricFilterPattern(&mf, ""), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "log_group_name", fmt.Sprintf("MyApp/access-%d.log", rInt)), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", 
"metric_transformation.0.name", "EventCount"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.namespace", "YourNamespace"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.value", "1"), - testAccCheckCloudWatchLogMetricFilterTransformation(&mf, &cloudwatchlogs.MetricTransformation{ - MetricName: aws.String("EventCount"), - MetricNamespace: aws.String("YourNamespace"), - MetricValue: aws.String("1"), - }), - ), - }, - { - Config: testAccAWSCloudWatchLogMetricFilterConfigModified(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogMetricFilterExists("aws_cloudwatch_log_metric_filter.foobar", &mf), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "name", fmt.Sprintf("MyAppAccessCount-%d", rInt)), - testAccCheckCloudWatchLogMetricFilterName(&mf, fmt.Sprintf("MyAppAccessCount-%d", rInt)), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "pattern", "{ $.errorCode = \"AccessDenied\" }"), - testAccCheckCloudWatchLogMetricFilterPattern(&mf, "{ $.errorCode = \"AccessDenied\" }"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "log_group_name", fmt.Sprintf("MyApp/access-%d.log", rInt)), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.name", "AccessDeniedCount"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.namespace", "MyNamespace"), - resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.value", "2"), - testAccCheckCloudWatchLogMetricFilterTransformation(&mf, &cloudwatchlogs.MetricTransformation{ - MetricName: aws.String("AccessDeniedCount"), - MetricNamespace: aws.String("MyNamespace"), - MetricValue: aws.String("2"), - }), - ), - }, - }, - }) -} - -func testAccCheckCloudWatchLogMetricFilterName(mf 
*cloudwatchlogs.MetricFilter, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if name != *mf.FilterName { - return fmt.Errorf("Expected filter name: %q, given: %q", name, *mf.FilterName) - } - return nil - } -} - -func testAccCheckCloudWatchLogMetricFilterPattern(mf *cloudwatchlogs.MetricFilter, pattern string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mf.FilterPattern == nil { - if pattern != "" { - return fmt.Errorf("Received empty filter pattern, expected: %q", pattern) - } - return nil - } - - if pattern != *mf.FilterPattern { - return fmt.Errorf("Expected filter pattern: %q, given: %q", pattern, *mf.FilterPattern) - } - return nil - } -} - -func testAccCheckCloudWatchLogMetricFilterTransformation(mf *cloudwatchlogs.MetricFilter, - t *cloudwatchlogs.MetricTransformation) resource.TestCheckFunc { - return func(s *terraform.State) error { - given := mf.MetricTransformations[0] - expected := t - - if *given.MetricName != *expected.MetricName { - return fmt.Errorf("Expected metric name: %q, received: %q", - *expected.MetricName, *given.MetricName) - } - - if *given.MetricNamespace != *expected.MetricNamespace { - return fmt.Errorf("Expected metric namespace: %q, received: %q", - *expected.MetricNamespace, *given.MetricNamespace) - } - - if *given.MetricValue != *expected.MetricValue { - return fmt.Errorf("Expected metric value: %q, received: %q", - *expected.MetricValue, *given.MetricValue) - } - - return nil - } -} - -func testAccCheckCloudWatchLogMetricFilterExists(n string, mf *cloudwatchlogs.MetricFilter) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - metricFilter, err := lookupCloudWatchLogMetricFilter(conn, rs.Primary.ID, rs.Primary.Attributes["log_group_name"], nil) - if err != nil { - return err - } - - *mf = 
*metricFilter - - return nil - } -} - -func testAccCheckAWSCloudWatchLogMetricFilterDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_log_metric_filter" { - continue - } - - _, err := lookupCloudWatchLogMetricFilter(conn, rs.Primary.ID, rs.Primary.Attributes["log_group_name"], nil) - if err == nil { - return fmt.Errorf("MetricFilter Still Exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccAWSCloudWatchLogMetricFilterConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_metric_filter" "foobar" { - name = "MyAppAccessCount-%d" - pattern = "" - log_group_name = "${aws_cloudwatch_log_group.dada.name}" - - metric_transformation { - name = "EventCount" - namespace = "YourNamespace" - value = "1" - } -} - -resource "aws_cloudwatch_log_group" "dada" { - name = "MyApp/access-%d.log" -} -`, rInt, rInt) -} - -func testAccAWSCloudWatchLogMetricFilterConfigModified(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_metric_filter" "foobar" { - name = "MyAppAccessCount-%d" - pattern = < 512 { - errors = append(errors, fmt.Errorf( - "%q must be between 1 and 512 characters: %q", k, value)) - } - - return - -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_stream_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_stream_test.go deleted file mode 100644 index 9e39b1cd2..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_stream_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudWatchLogStream_basic(t *testing.T) { - var ls cloudwatchlogs.LogStream - rName := 
acctest.RandString(15) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogStreamConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogStreamExists("aws_cloudwatch_log_stream.foobar", &ls), - ), - }, - }, - }) -} - -func TestAccAWSCloudWatchLogStream_disappears(t *testing.T) { - var ls cloudwatchlogs.LogStream - rName := acctest.RandString(15) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchLogStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudWatchLogStreamConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchLogStreamExists("aws_cloudwatch_log_stream.foobar", &ls), - testAccCheckCloudWatchLogStreamDisappears(&ls, rName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckCloudWatchLogStreamDisappears(ls *cloudwatchlogs.LogStream, lgn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - opts := &cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String(lgn), - LogStreamName: ls.LogStreamName, - } - if _, err := conn.DeleteLogStream(opts); err != nil { - return err - } - return nil - } -} - -func testAccCheckCloudWatchLogStreamExists(n string, ls *cloudwatchlogs.LogStream) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - logGroupName := rs.Primary.Attributes["log_group_name"] - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - logGroup, exists, err := lookupCloudWatchLogStream(conn, rs.Primary.ID, logGroupName, nil) - if err != nil { - return err - } - if 
!exists { - return fmt.Errorf("Bad: LogStream %q does not exist", rs.Primary.ID) - } - - *ls = *logGroup - - return nil - } -} - -func testAccCheckAWSCloudWatchLogStreamDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_log_stream" { - continue - } - - logGroupName := rs.Primary.Attributes["log_group_name"] - _, exists, err := lookupCloudWatchLogStream(conn, rs.Primary.ID, logGroupName, nil) - if err != nil { - return nil - } - - if exists { - return fmt.Errorf("Bad: LogStream still exists: %q", rs.Primary.ID) - } - - } - - return nil -} - -func TestValidateCloudWatchLogStreamName(t *testing.T) { - validNames := []string{ - "test-log-stream", - "my_sample_log_stream", - "012345678", - "logstream/1234", - } - for _, v := range validNames { - _, errors := validateCloudWatchLogStreamName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid CloudWatch LogStream name: %q", v, errors) - } - } - - invalidNames := []string{ - acctest.RandString(513), - "", - "stringwith:colon", - } - for _, v := range invalidNames { - _, errors := validateCloudWatchLogStreamName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid CloudWatch LogStream name", v) - } - } -} - -func testAccAWSCloudWatchLogStreamConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_group" "foobar" { - name = "%s" -} - -resource "aws_cloudwatch_log_stream" "foobar" { - name = "%s" - log_group_name = "${aws_cloudwatch_log_group.foobar.id}" -} -`, rName, rName) -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter.go b/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter.go deleted file mode 100644 index 250403143..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter.go +++ /dev/null @@ -1,180 +0,0 @@ -package aws - -import ( - "bytes" - 
"fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsCloudwatchLogSubscriptionFilter() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCloudwatchLogSubscriptionFilterCreate, - Read: resourceAwsCloudwatchLogSubscriptionFilterRead, - Update: resourceAwsCloudwatchLogSubscriptionFilterUpdate, - Delete: resourceAwsCloudwatchLogSubscriptionFilterDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "destination_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "filter_pattern": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "log_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "role_arn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsCloudwatchLogSubscriptionFilterCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - params := getAwsCloudWatchLogsSubscriptionFilterInput(d) - log.Printf("[DEBUG] Creating SubscriptionFilter %#v", params) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.PutSubscriptionFilter(¶ms) - - if err == nil { - d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) - log.Printf("[DEBUG] Cloudwatch logs subscription %q created", d.Id()) - } - - awsErr, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - if awsErr.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Caught message: %q, code: %q: Retrying", 
awsErr.Message(), awsErr.Code()) - if strings.Contains(awsErr.Message(), "Could not deliver test message to specified") { - return resource.RetryableError(err) - } - if strings.Contains(awsErr.Message(), "Could not execute the lambda function") { - return resource.RetryableError(err) - } - resource.NonRetryableError(err) - } - - return resource.NonRetryableError(err) - }) -} - -func resourceAwsCloudwatchLogSubscriptionFilterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - - params := getAwsCloudWatchLogsSubscriptionFilterInput(d) - - log.Printf("[DEBUG] Update SubscriptionFilter %#v", params) - _, err := conn.PutSubscriptionFilter(¶ms) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error updating SubscriptionFilter (%s) for LogGroup (%s), message: \"%s\", code: \"%s\"", - d.Get("name").(string), d.Get("log_group_name").(string), awsErr.Message(), awsErr.Code()) - } - return err - } - - d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) - return resourceAwsCloudwatchLogSubscriptionFilterRead(d, meta) -} - -func getAwsCloudWatchLogsSubscriptionFilterInput(d *schema.ResourceData) cloudwatchlogs.PutSubscriptionFilterInput { - name := d.Get("name").(string) - destination_arn := d.Get("destination_arn").(string) - filter_pattern := d.Get("filter_pattern").(string) - log_group_name := d.Get("log_group_name").(string) - - params := cloudwatchlogs.PutSubscriptionFilterInput{ - FilterName: aws.String(name), - DestinationArn: aws.String(destination_arn), - FilterPattern: aws.String(filter_pattern), - LogGroupName: aws.String(log_group_name), - } - - if _, ok := d.GetOk("role_arn"); ok { - params.RoleArn = aws.String(d.Get("role_arn").(string)) - } - - return params -} - -func resourceAwsCloudwatchLogSubscriptionFilterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - - log_group_name := 
d.Get("log_group_name").(string) - name := d.Get("name").(string) // "name" is a required field in the schema - - req := &cloudwatchlogs.DescribeSubscriptionFiltersInput{ - LogGroupName: aws.String(log_group_name), - FilterNamePrefix: aws.String(name), - } - - resp, err := conn.DescribeSubscriptionFilters(req) - if err != nil { - return fmt.Errorf("Error reading SubscriptionFilters for log group %s with name prefix %s: %#v", log_group_name, d.Get("name").(string), err) - } - - for _, subscriptionFilter := range resp.SubscriptionFilters { - if *subscriptionFilter.LogGroupName == log_group_name { - d.SetId(cloudwatchLogsSubscriptionFilterId(log_group_name)) - return nil // OK, matching subscription filter found - } - } - - log.Printf("[DEBUG] Subscription Filter%q Not Found", name) - d.SetId("") - return nil -} - -func resourceAwsCloudwatchLogSubscriptionFilterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).cloudwatchlogsconn - log.Printf("[INFO] Deleting CloudWatch Log Group Subscription: %s", d.Id()) - log_group_name := d.Get("log_group_name").(string) - name := d.Get("name").(string) - - params := &cloudwatchlogs.DeleteSubscriptionFilterInput{ - FilterName: aws.String(name), // Required - LogGroupName: aws.String(log_group_name), // Required - } - _, err := conn.DeleteSubscriptionFilter(params) - if err != nil { - return fmt.Errorf( - "Error deleting Subscription Filter from log group: %s with name filter name %s", log_group_name, name) - } - d.SetId("") - return nil -} - -func cloudwatchLogsSubscriptionFilterId(log_group_name string) string { - var buf bytes.Buffer - - buf.WriteString(fmt.Sprintf("%s-", log_group_name)) // only one filter allowed per log_group_name at the moment - - return fmt.Sprintf("cwlsf-%d", hashcode.String(buf.String())) -} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter_test.go deleted file 
mode 100644 index bf121b203..000000000 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_subscription_filter_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCloudwatchLogSubscriptionFilter_basic(t *testing.T) { - var conf lambda.GetFunctionOutput - - rstring := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudwatchLogSubscriptionFilterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudwatchLogSubscriptionFilterConfig(rstring), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsCloudwatchLogSubscriptionFilterExists("aws_cloudwatch_log_subscription_filter.test_lambdafunction_logfilter", &conf, rstring), - testAccCheckAWSCloudwatchLogSubscriptionFilterAttributes(&conf, rstring), - ), - }, - }, - }) -} - -func testAccCheckCloudwatchLogSubscriptionFilterDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_log_subscription_filter" { - continue - } - - _, err := conn.GetFunction(&lambda.GetFunctionInput{ - FunctionName: aws.String(rs.Primary.ID), - }) - - if err == nil { - return fmt.Errorf("Lambda Function still exists") - } - - } - - return nil - -} - -func testAccCheckAwsCloudwatchLogSubscriptionFilterExists(n string, function *lambda.GetFunctionOutput, rstring string) resource.TestCheckFunc { - // Wait for IAM role - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Lambda function not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Lambda 
function ID not set") - } - - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - params := &lambda.GetFunctionInput{ - FunctionName: aws.String("example_lambda_name_" + rstring), - } - - getFunction, err := conn.GetFunction(params) - if err != nil { - return err - } - - *function = *getFunction - - return nil - } -} - -func testAccCheckAWSCloudwatchLogSubscriptionFilterAttributes(function *lambda.GetFunctionOutput, rstring string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := function.Configuration - expectedName := fmt.Sprintf("example_lambda_name_%s", rstring) - if *c.FunctionName != expectedName { - return fmt.Errorf("Expected function name %s, got %s", expectedName, *c.FunctionName) - } - - if *c.FunctionArn == "" { - return fmt.Errorf("Could not read Lambda Function's ARN") - } - - return nil - } -} - -func testAccAWSCloudwatchLogSubscriptionFilterConfig(rstring string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_log_subscription_filter" "test_lambdafunction_logfilter" { - name = "test_lambdafunction_logfilter_%s" - log_group_name = "${aws_cloudwatch_log_group.logs.name}" - filter_pattern = "logtype test" - destination_arn = "${aws_lambda_function.test_lambdafunction.arn}" -} - -resource "aws_lambda_function" "test_lambdafunction" { - filename = "test-fixtures/lambdatest.zip" - function_name = "example_lambda_name_%s" - role = "${aws_iam_role.iam_for_lambda.arn}" - runtime = "nodejs4.3" - handler = "exports.handler" -} - -resource "aws_cloudwatch_log_group" "logs" { - name = "example_lambda_name_%s" - retention_in_days = 1 -} - -resource "aws_lambda_permission" "allow_cloudwatch_logs" { - statement_id = "AllowExecutionFromCloudWatchLogs" - action = "lambda:*" - function_name = "${aws_lambda_function.test_lambdafunction.arn}" - principal = "logs.us-west-2.amazonaws.com" -} - -resource "aws_iam_role" "iam_for_lambda" { - name = "test_lambdafuntion_iam_role_%s" - - assume_role_policy = < %s; got: %s", k, v, val) - 
} - return nil - } -} - -func testAccCheckCloudWatchMetricAlarmExists(n string, alarm *cloudwatch.MetricAlarm) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn - params := cloudwatch.DescribeAlarmsInput{ - AlarmNames: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeAlarms(¶ms) - if err != nil { - return err - } - if len(resp.MetricAlarms) == 0 { - return fmt.Errorf("Alarm not found") - } - *alarm = *resp.MetricAlarms[0] - - return nil - } -} - -func testAccCheckAWSCloudWatchMetricAlarmDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_metric_alarm" { - continue - } - - params := cloudwatch.DescribeAlarmsInput{ - AlarmNames: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeAlarms(¶ms) - - if err == nil { - if len(resp.MetricAlarms) != 0 && - *resp.MetricAlarms[0].AlarmName == rs.Primary.ID { - return fmt.Errorf("Alarm Still Exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccAWSCloudWatchMetricAlarmConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - statistic = "Average" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigTreatMissingData(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = 
"GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - statistic = "Average" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - treat_missing_data = "missing" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigTreatMissingDataUpdate(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - statistic = "Average" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - treat_missing_data = "breaching" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigTreatEvaluateLowSampleCountPercentiles(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - extended_statistic = "p88.0" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - evaluate_low_sample_count_percentiles = "evaluate" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigTreatEvaluateLowSampleCountPercentilesUpdated(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - extended_statistic = 
"p88.0" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - evaluate_low_sample_count_percentiles = "ignore" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigExtendedStatistic(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - extended_statistic = "p88.0" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} - -func testAccAWSCloudWatchMetricAlarmConfigMissingStatistic(rInt int) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "terraform-test-foobar%d" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" - insufficient_data_actions = [] - dimensions { - InstanceId = "i-abc123" - } -}`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_codebuild_project.go b/builtin/providers/aws/resource_aws_codebuild_project.go deleted file mode 100644 index bbd3523a3..000000000 --- a/builtin/providers/aws/resource_aws_codebuild_project.go +++ /dev/null @@ -1,746 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsCodeBuildProject() *schema.Resource { - return &schema.Resource{ - 
Create: resourceAwsCodeBuildProjectCreate, - Read: resourceAwsCodeBuildProjectRead, - Update: resourceAwsCodeBuildProjectUpdate, - Delete: resourceAwsCodeBuildProjectDelete, - - Schema: map[string]*schema.Schema{ - "artifacts": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "location": { - Type: schema.TypeString, - Optional: true, - }, - "namespace_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateAwsCodeBuildArifactsNamespaceType, - }, - "packaging": { - Type: schema.TypeString, - Optional: true, - }, - "path": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsCodeBuildArifactsType, - }, - }, - }, - Set: resourceAwsCodeBuildProjectArtifactsHash, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateAwsCodeBuildProjectDescription, - }, - "encryption_key": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "environment": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsCodeBuildEnvironmentComputeType, - }, - "environment_variable": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "image": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsCodeBuildEnvironmentType, - }, - }, - }, - Set: resourceAwsCodeBuildProjectEnvironmentHash, - }, - 
"name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAwsCodeBuildProjectName, - }, - "service_role": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "source": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auth": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resource": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsCodeBuildSourceAuthType, - }, - }, - }, - Optional: true, - }, - "buildspec": { - Type: schema.TypeString, - Optional: true, - }, - "location": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsCodeBuildSourceType, - }, - }, - }, - Required: true, - MaxItems: 1, - }, - "timeout": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validateAwsCodeBuildTimeout, - Removed: "This field has been removed. 
Please use build_timeout instead", - }, - "build_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: "60", - ValidateFunc: validateAwsCodeBuildTimeout, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codebuildconn - - projectEnv := expandProjectEnvironment(d) - projectSource := expandProjectSource(d) - projectArtifacts := expandProjectArtifacts(d) - - params := &codebuild.CreateProjectInput{ - Environment: projectEnv, - Name: aws.String(d.Get("name").(string)), - Source: &projectSource, - Artifacts: &projectArtifacts, - } - - if v, ok := d.GetOk("description"); ok { - params.Description = aws.String(v.(string)) - } - - if v, ok := d.GetOk("encryption_key"); ok { - params.EncryptionKey = aws.String(v.(string)) - } - - if v, ok := d.GetOk("service_role"); ok { - params.ServiceRole = aws.String(v.(string)) - } - - if v, ok := d.GetOk("build_timeout"); ok { - params.TimeoutInMinutes = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("tags"); ok { - params.Tags = tagsFromMapCodeBuild(v.(map[string]interface{})) - } - - var resp *codebuild.CreateProjectOutput - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var err error - - resp, err = conn.CreateProject(params) - - if err != nil { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - }) - - if err != nil { - return fmt.Errorf("[ERROR] Error creating CodeBuild project: %s", err) - } - - d.SetId(*resp.Project.Arn) - - return resourceAwsCodeBuildProjectUpdate(d, meta) -} - -func expandProjectArtifacts(d *schema.ResourceData) codebuild.ProjectArtifacts { - configs := d.Get("artifacts").(*schema.Set).List() - data := configs[0].(map[string]interface{}) - - projectArtifacts := codebuild.ProjectArtifacts{ - Type: aws.String(data["type"].(string)), - } - - if data["location"].(string) != "" { - projectArtifacts.Location = 
aws.String(data["location"].(string)) - } - - if data["name"].(string) != "" { - projectArtifacts.Name = aws.String(data["name"].(string)) - } - - if data["namespace_type"].(string) != "" { - projectArtifacts.NamespaceType = aws.String(data["namespace_type"].(string)) - } - - if data["packaging"].(string) != "" { - projectArtifacts.Packaging = aws.String(data["packaging"].(string)) - } - - if data["path"].(string) != "" { - projectArtifacts.Path = aws.String(data["path"].(string)) - } - - return projectArtifacts -} - -func expandProjectEnvironment(d *schema.ResourceData) *codebuild.ProjectEnvironment { - configs := d.Get("environment").(*schema.Set).List() - projectEnv := &codebuild.ProjectEnvironment{} - - envConfig := configs[0].(map[string]interface{}) - - if v := envConfig["compute_type"]; v != nil { - projectEnv.ComputeType = aws.String(v.(string)) - } - - if v := envConfig["image"]; v != nil { - projectEnv.Image = aws.String(v.(string)) - } - - if v := envConfig["type"]; v != nil { - projectEnv.Type = aws.String(v.(string)) - } - - if v := envConfig["environment_variable"]; v != nil { - envVariables := v.([]interface{}) - if len(envVariables) > 0 { - projectEnvironmentVariables := make([]*codebuild.EnvironmentVariable, 0, len(envVariables)) - - for _, envVariablesConfig := range envVariables { - config := envVariablesConfig.(map[string]interface{}) - - projectEnvironmentVar := &codebuild.EnvironmentVariable{} - - if v := config["name"].(string); v != "" { - projectEnvironmentVar.Name = &v - } - - if v := config["value"].(string); v != "" { - projectEnvironmentVar.Value = &v - } - - projectEnvironmentVariables = append(projectEnvironmentVariables, projectEnvironmentVar) - } - - projectEnv.EnvironmentVariables = projectEnvironmentVariables - } - } - - return projectEnv -} - -func expandProjectSource(d *schema.ResourceData) codebuild.ProjectSource { - configs := d.Get("source").(*schema.Set).List() - projectSource := codebuild.ProjectSource{} - - for _, 
configRaw := range configs { - data := configRaw.(map[string]interface{}) - - sourceType := data["type"].(string) - location := data["location"].(string) - buildspec := data["buildspec"].(string) - - projectSource = codebuild.ProjectSource{ - Type: &sourceType, - Location: &location, - Buildspec: &buildspec, - } - - if v, ok := data["auth"]; ok { - if len(v.(*schema.Set).List()) > 0 { - auth := v.(*schema.Set).List()[0].(map[string]interface{}) - - projectSource.Auth = &codebuild.SourceAuth{ - Type: aws.String(auth["type"].(string)), - Resource: aws.String(auth["resource"].(string)), - } - } - } - } - - return projectSource -} - -func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codebuildconn - - resp, err := conn.BatchGetProjects(&codebuild.BatchGetProjectsInput{ - Names: []*string{ - aws.String(d.Id()), - }, - }) - - if err != nil { - return fmt.Errorf("[ERROR] Error retreiving Projects: %q", err) - } - - // if nothing was found, then return no state - if len(resp.Projects) == 0 { - log.Printf("[INFO]: No projects were found, removing from state") - d.SetId("") - return nil - } - - project := resp.Projects[0] - - if err := d.Set("artifacts", flattenAwsCodebuildProjectArtifacts(project.Artifacts)); err != nil { - return err - } - - if err := d.Set("environment", schema.NewSet(resourceAwsCodeBuildProjectEnvironmentHash, flattenAwsCodebuildProjectEnvironment(project.Environment))); err != nil { - return err - } - - if err := d.Set("source", flattenAwsCodebuildProjectSource(project.Source)); err != nil { - return err - } - - d.Set("description", project.Description) - d.Set("encryption_key", project.EncryptionKey) - d.Set("name", project.Name) - d.Set("service_role", project.ServiceRole) - d.Set("build_timeout", project.TimeoutInMinutes) - - if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil { - return err - } - - return nil -} - -func resourceAwsCodeBuildProjectUpdate(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codebuildconn - - params := &codebuild.UpdateProjectInput{ - Name: aws.String(d.Get("name").(string)), - } - - if d.HasChange("environment") { - projectEnv := expandProjectEnvironment(d) - params.Environment = projectEnv - } - - if d.HasChange("source") { - projectSource := expandProjectSource(d) - params.Source = &projectSource - } - - if d.HasChange("artifacts") { - projectArtifacts := expandProjectArtifacts(d) - params.Artifacts = &projectArtifacts - } - - if d.HasChange("description") { - params.Description = aws.String(d.Get("description").(string)) - } - - if d.HasChange("encryption_key") { - params.EncryptionKey = aws.String(d.Get("encryption_key").(string)) - } - - if d.HasChange("service_role") { - params.ServiceRole = aws.String(d.Get("service_role").(string)) - } - - if d.HasChange("build_timeout") { - params.TimeoutInMinutes = aws.Int64(int64(d.Get("build_timeout").(int))) - } - - // The documentation clearly says "The replacement set of tags for this build project." - // But its a slice of pointers so if not set for every update, they get removed. 
- params.Tags = tagsFromMapCodeBuild(d.Get("tags").(map[string]interface{})) - - _, err := conn.UpdateProject(params) - - if err != nil { - return fmt.Errorf( - "[ERROR] Error updating CodeBuild project (%s): %s", - d.Id(), err) - } - - return resourceAwsCodeBuildProjectRead(d, meta) -} - -func resourceAwsCodeBuildProjectDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codebuildconn - - _, err := conn.DeleteProject(&codebuild.DeleteProjectInput{ - Name: aws.String(d.Id()), - }) - - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func flattenAwsCodebuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) *schema.Set { - - artifactSet := schema.Set{ - F: resourceAwsCodeBuildProjectArtifactsHash, - } - - values := map[string]interface{}{} - - values["type"] = *artifacts.Type - - if artifacts.Location != nil { - values["location"] = *artifacts.Location - } - - if artifacts.Name != nil { - values["name"] = *artifacts.Name - } - - if artifacts.NamespaceType != nil { - values["namespace_type"] = *artifacts.NamespaceType - } - - if artifacts.Packaging != nil { - values["packaging"] = *artifacts.Packaging - } - - if artifacts.Path != nil { - values["path"] = *artifacts.Path - } - - artifactSet.Add(values) - - return &artifactSet -} - -func flattenAwsCodebuildProjectEnvironment(environment *codebuild.ProjectEnvironment) []interface{} { - envConfig := map[string]interface{}{} - - envConfig["type"] = *environment.Type - envConfig["compute_type"] = *environment.ComputeType - envConfig["image"] = *environment.Image - - if environment.EnvironmentVariables != nil { - envConfig["environment_variable"] = environmentVariablesToMap(environment.EnvironmentVariables) - } - - return []interface{}{envConfig} - -} - -func flattenAwsCodebuildProjectSource(source *codebuild.ProjectSource) *schema.Set { - - sourceSet := schema.Set{ - F: resourceAwsCodeBuildProjectSourceHash, - } - - authSet := schema.Set{ - F: 
resourceAwsCodeBuildProjectSourceAuthHash, - } - - sourceConfig := map[string]interface{}{} - - sourceConfig["type"] = *source.Type - - if source.Auth != nil { - authSet.Add(sourceAuthToMap(source.Auth)) - sourceConfig["auth"] = &authSet - } - - if source.Buildspec != nil { - sourceConfig["buildspec"] = *source.Buildspec - } - - if source.Location != nil { - sourceConfig["location"] = *source.Location - } - - sourceSet.Add(sourceConfig) - - return &sourceSet - -} - -func resourceAwsCodeBuildProjectArtifactsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - artifactType := m["type"].(string) - - buf.WriteString(fmt.Sprintf("%s-", artifactType)) - - return hashcode.String(buf.String()) -} - -func resourceAwsCodeBuildProjectEnvironmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - environmentType := m["type"].(string) - computeType := m["compute_type"].(string) - image := m["image"].(string) - environmentVariables := m["environment_variable"].([]interface{}) - buf.WriteString(fmt.Sprintf("%s-", environmentType)) - buf.WriteString(fmt.Sprintf("%s-", computeType)) - buf.WriteString(fmt.Sprintf("%s-", image)) - for _, e := range environmentVariables { - if e != nil { // Old statefiles might have nil values in them - ev := e.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s:%s-", ev["name"].(string), ev["value"].(string))) - } - } - - return hashcode.String(buf.String()) -} - -func resourceAwsCodeBuildProjectSourceHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - sourceType := m["type"].(string) - buildspec := m["buildspec"].(string) - location := m["location"].(string) - - buf.WriteString(fmt.Sprintf("%s-", sourceType)) - buf.WriteString(fmt.Sprintf("%s-", buildspec)) - buf.WriteString(fmt.Sprintf("%s-", location)) - - return hashcode.String(buf.String()) -} - -func resourceAwsCodeBuildProjectSourceAuthHash(v interface{}) int { - var buf bytes.Buffer 
- m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) - - if m["resource"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["resource"].(string))) - } - - return hashcode.String(buf.String()) -} - -func environmentVariablesToMap(environmentVariables []*codebuild.EnvironmentVariable) []interface{} { - - envVariables := []interface{}{} - if len(environmentVariables) > 0 { - for _, env := range environmentVariables { - item := map[string]interface{}{} - item["name"] = *env.Name - item["value"] = *env.Value - envVariables = append(envVariables, item) - } - } - - return envVariables -} - -func sourceAuthToMap(sourceAuth *codebuild.SourceAuth) map[string]interface{} { - - auth := map[string]interface{}{} - auth["type"] = *sourceAuth.Type - - if sourceAuth.Resource != nil { - auth["resource"] = *sourceAuth.Resource - } - - return auth -} - -func validateAwsCodeBuildArifactsType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "CODEPIPELINE": true, - "NO_ARTIFACTS": true, - "S3": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Arifacts Type can only be CODEPIPELINE / NO_ARTIFACTS / S3")) - } - return -} - -func validateAwsCodeBuildArifactsNamespaceType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "NONE": true, - "BUILD_ID": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Arifacts Namespace Type can only be NONE / BUILD_ID")) - } - return -} - -func validateAwsCodeBuildProjectName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[A-Za-z0-9]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter or number", value)) - } - - if !regexp.MustCompile(`^[A-Za-z0-9\-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric 
characters, hyphens and underscores allowed in %q", value)) - } - - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 255 characters", value)) - } - - return -} - -func validateAwsCodeBuildProjectDescription(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf("%q cannot be greater than 255 characters", value)) - } - return -} - -func validateAwsCodeBuildEnvironmentComputeType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "BUILD_GENERAL1_SMALL": true, - "BUILD_GENERAL1_MEDIUM": true, - "BUILD_GENERAL1_LARGE": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Environment Compute Type can only be BUILD_GENERAL1_SMALL / BUILD_GENERAL1_MEDIUM / BUILD_GENERAL1_LARGE")) - } - return -} - -func validateAwsCodeBuildEnvironmentType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "LINUX_CONTAINER": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Environment Type can only be LINUX_CONTAINER")) - } - return -} - -func validateAwsCodeBuildSourceType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "CODECOMMIT": true, - "CODEPIPELINE": true, - "GITHUB": true, - "S3": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Source Type can only be CODECOMMIT / CODEPIPELINE / GITHUB / S3")) - } - return -} - -func validateAwsCodeBuildSourceAuthType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "OAUTH": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("CodeBuild: Source Auth Type can only be OAUTH")) - } - return -} - -func validateAwsCodeBuildTimeout(v interface{}, k string) (ws []string, errors []error) 
{ - value := v.(int) - - if value < 5 || value > 480 { - errors = append(errors, fmt.Errorf("%q must be greater than 5 minutes and less than 480 minutes (8 hours)", value)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_codebuild_project_migrate.go b/builtin/providers/aws/resource_aws_codebuild_project_migrate.go deleted file mode 100644 index 97d7a9ff2..000000000 --- a/builtin/providers/aws/resource_aws_codebuild_project_migrate.go +++ /dev/null @@ -1,36 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsCodebuildMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Codebuild State v0; migrating to v1") - return migrateCodebuildStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateCodebuildStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - if is.Attributes["timeout"] != "" { - is.Attributes["build_timeout"] = strings.TrimSpace(is.Attributes["timeout"]) - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_codebuild_project_migrate_test.go b/builtin/providers/aws/resource_aws_codebuild_project_migrate_test.go deleted file mode 100644 index 2ae6b4e53..000000000 --- a/builtin/providers/aws/resource_aws_codebuild_project_migrate_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSCodebuildMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta 
interface{} - }{ - "v0_1": { - StateVersion: 0, - ID: "tf-testing-file", - Attributes: map[string]string{ - "description": "some description", - "timeout": "5", - }, - Expected: "5", - }, - "v0_2": { - StateVersion: 0, - ID: "tf-testing-file", - Attributes: map[string]string{ - "description": "some description", - "build_timeout": "5", - }, - Expected: "5", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsCodebuildMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.Attributes["build_timeout"] != tc.Expected { - t.Fatalf("Bad build_timeout migration: %s\n\n expected: %s", is.Attributes["build_timeout"], tc.Expected) - } - } -} diff --git a/builtin/providers/aws/resource_aws_codebuild_project_test.go b/builtin/providers/aws/resource_aws_codebuild_project_test.go deleted file mode 100644 index 105515310..000000000 --- a/builtin/providers/aws/resource_aws_codebuild_project_test.go +++ /dev/null @@ -1,571 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - "unicode" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeBuildProject_basic(t *testing.T) { - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodeBuildProjectConfig_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"), - resource.TestCheckResourceAttr( - "aws_codebuild_project.foo", "build_timeout", "5"), - ), - }, - { - Config: 
testAccAWSCodeBuildProjectConfig_basicUpdated(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"), - resource.TestCheckResourceAttr( - "aws_codebuild_project.foo", "build_timeout", "50"), - ), - }, - }, - }) -} - -func TestAccAWSCodeBuildProject_default_build_timeout(t *testing.T) { - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodeBuildProjectConfig_default_timeout(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"), - resource.TestCheckResourceAttr( - "aws_codebuild_project.foo", "build_timeout", "60"), - ), - }, - { - Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"), - resource.TestCheckResourceAttr( - "aws_codebuild_project.foo", "build_timeout", "50"), - ), - }, - }, - }) -} - -func TestAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "CODEPIPELINE", ErrCount: 0}, - {Value: "NO_ARTIFACTS", ErrCount: 0}, - {Value: "S3", ErrCount: 0}, - {Value: "XYZ", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildArifactsType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project artifacts type to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "NONE", ErrCount: 0}, - {Value: "BUILD_ID", ErrCount: 0}, - {Value: "XYZ", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := 
validateAwsCodeBuildArifactsNamespaceType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project artifacts namepsace_type to trigger a validation error") - } - } -} - -func longTestData() string { - data := ` - test-test-test-test-test-test-test-test-test-test- - test-test-test-test-test-test-test-test-test-test- - test-test-test-test-test-test-test-test-test-test- - test-test-test-test-test-test-test-test-test-test- - test-test-test-test-test-test-test-test-test-test- - test-test-test-test-test-test-test-test-test-test- - ` - - return strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return -1 - } - return r - }, data) -} - -func TestAWSCodeBuildProject_nameValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "_test", ErrCount: 1}, - {Value: "test", ErrCount: 0}, - {Value: "1_test", ErrCount: 0}, - {Value: "test**1", ErrCount: 1}, - {Value: longTestData(), ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildProjectName(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project name to trigger a validation error - %s", errors) - } - } -} - -func TestAWSCodeBuildProject_descriptionValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "test", ErrCount: 0}, - {Value: longTestData(), ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildProjectDescription(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project description to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "BUILD_GENERAL1_SMALL", ErrCount: 0}, - {Value: "BUILD_GENERAL1_MEDIUM", ErrCount: 0}, - {Value: "BUILD_GENERAL1_LARGE", ErrCount: 
0}, - {Value: "BUILD_GENERAL1_VERYLARGE", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildEnvironmentComputeType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project environment compute_type to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_environmentTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "LINUX_CONTAINER", ErrCount: 0}, - {Value: "WINDOWS_CONTAINER", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildEnvironmentType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project environment type to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_sourceTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "CODECOMMIT", ErrCount: 0}, - {Value: "CODEPIPELINE", ErrCount: 0}, - {Value: "GITHUB", ErrCount: 0}, - {Value: "S3", ErrCount: 0}, - {Value: "GITLAB", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildSourceType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project source type to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "OAUTH", ErrCount: 0}, - {Value: "PASSWORD", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildSourceAuthType(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project source auth to trigger a validation error") - } - } -} - -func TestAWSCodeBuildProject_timeoutValidation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - {Value: 10, ErrCount: 0}, - {Value: 200, 
ErrCount: 0}, - {Value: 1, ErrCount: 1}, - {Value: 500, ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateAwsCodeBuildTimeout(tc.Value, "aws_codebuild_project") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the AWS CodeBuild project timeout to trigger a validation error") - } - } -} - -func testAccCheckAWSCodeBuildProjectExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No CodeBuild Project ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).codebuildconn - - out, err := conn.BatchGetProjects(&codebuild.BatchGetProjectsInput{ - Names: []*string{ - aws.String(rs.Primary.ID), - }, - }) - - if err != nil { - return err - } - - if len(out.Projects) < 1 { - return fmt.Errorf("No project found") - } - - return nil - } -} - -func testAccCheckAWSCodeBuildProjectDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codebuildconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codebuild_project" { - continue - } - - out, err := conn.BatchGetProjects(&codebuild.BatchGetProjectsInput{ - Names: []*string{ - aws.String(rs.Primary.ID), - }, - }) - - if err != nil { - return err - } - - if out != nil && len(out.Projects) > 0 { - return fmt.Errorf("Expected AWS CodeBuild Project to be gone, but was still found") - } - - return nil - } - - return fmt.Errorf("Default error in CodeBuild Test") -} - -func testAccAWSCodeBuildProjectConfig_basic(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "codebuild_role" { - name = "codebuild-role-%s" - assume_role_policy = < 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters", k)) - } - return - }, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws 
[]string, errors []error) { - value := v.(string) - if len(value) > 1000 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 1000 characters", k)) - } - return - }, - }, - - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "repository_id": { - Type: schema.TypeString, - Computed: true, - }, - - "clone_url_http": { - Type: schema.TypeString, - Computed: true, - }, - - "clone_url_ssh": { - Type: schema.TypeString, - Computed: true, - }, - - "default_branch": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAwsCodeCommitRepositoryCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codecommitconn - - input := &codecommit.CreateRepositoryInput{ - RepositoryName: aws.String(d.Get("repository_name").(string)), - RepositoryDescription: aws.String(d.Get("description").(string)), - } - - out, err := conn.CreateRepository(input) - if err != nil { - return fmt.Errorf("Error creating CodeCommit Repository: %s", err) - } - - d.SetId(d.Get("repository_name").(string)) - d.Set("repository_id", out.RepositoryMetadata.RepositoryId) - d.Set("arn", out.RepositoryMetadata.Arn) - d.Set("clone_url_http", out.RepositoryMetadata.CloneUrlHttp) - d.Set("clone_url_ssh", out.RepositoryMetadata.CloneUrlSsh) - - return resourceAwsCodeCommitRepositoryUpdate(d, meta) -} - -func resourceAwsCodeCommitRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codecommitconn - - if _, ok := d.GetOk("default_branch"); ok { - if d.HasChange("default_branch") { - if err := resourceAwsCodeCommitUpdateDefaultBranch(conn, d); err != nil { - return err - } - } - } - - if d.HasChange("description") { - if err := resourceAwsCodeCommitUpdateDescription(conn, d); err != nil { - return err - } - } - - return resourceAwsCodeCommitRepositoryRead(d, meta) -} - -func resourceAwsCodeCommitRepositoryRead(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).codecommitconn - - input := &codecommit.GetRepositoryInput{ - RepositoryName: aws.String(d.Id()), - } - - out, err := conn.GetRepository(input) - if err != nil { - return fmt.Errorf("Error reading CodeCommit Repository: %s", err.Error()) - } - - d.Set("repository_id", out.RepositoryMetadata.RepositoryId) - d.Set("arn", out.RepositoryMetadata.Arn) - d.Set("clone_url_http", out.RepositoryMetadata.CloneUrlHttp) - d.Set("clone_url_ssh", out.RepositoryMetadata.CloneUrlSsh) - d.Set("description", out.RepositoryMetadata.RepositoryDescription) - d.Set("repository_name", out.RepositoryMetadata.RepositoryName) - - if _, ok := d.GetOk("default_branch"); ok { - if out.RepositoryMetadata.DefaultBranch != nil { - d.Set("default_branch", out.RepositoryMetadata.DefaultBranch) - } - } - - return nil -} - -func resourceAwsCodeCommitRepositoryDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codecommitconn - - log.Printf("[DEBUG] CodeCommit Delete Repository: %s", d.Id()) - _, err := conn.DeleteRepository(&codecommit.DeleteRepositoryInput{ - RepositoryName: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("Error deleting CodeCommit Repository: %s", err.Error()) - } - - return nil -} - -func resourceAwsCodeCommitUpdateDescription(conn *codecommit.CodeCommit, d *schema.ResourceData) error { - branchInput := &codecommit.UpdateRepositoryDescriptionInput{ - RepositoryName: aws.String(d.Id()), - RepositoryDescription: aws.String(d.Get("description").(string)), - } - - _, err := conn.UpdateRepositoryDescription(branchInput) - if err != nil { - return fmt.Errorf("Error Updating Repository Description for CodeCommit Repository: %s", err.Error()) - } - - return nil -} - -func resourceAwsCodeCommitUpdateDefaultBranch(conn *codecommit.CodeCommit, d *schema.ResourceData) error { - input := &codecommit.ListBranchesInput{ - RepositoryName: aws.String(d.Id()), - } - - out, err := conn.ListBranches(input) - if err != nil { - return 
fmt.Errorf("Error reading CodeCommit Repository branches: %s", err.Error()) - } - - if len(out.Branches) == 0 { - log.Printf("[WARN] Not setting Default Branch CodeCommit Repository that has no branches: %s", d.Id()) - return nil - } - - branchInput := &codecommit.UpdateDefaultBranchInput{ - RepositoryName: aws.String(d.Id()), - DefaultBranchName: aws.String(d.Get("default_branch").(string)), - } - - _, err = conn.UpdateDefaultBranch(branchInput) - if err != nil { - return fmt.Errorf("Error Updating Default Branch for CodeCommit Repository: %s", err.Error()) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_codecommit_repository_test.go b/builtin/providers/aws/resource_aws_codecommit_repository_test.go deleted file mode 100644 index 6bc3dfab8..000000000 --- a/builtin/providers/aws/resource_aws_codecommit_repository_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codecommit" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeCommitRepository_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCodeCommitRepository_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - ), - }, - }, - }) -} - -func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccCodeCommitRepository_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - resource.TestCheckResourceAttr( - "aws_codecommit_repository.test", "description", "This is a test description"), - ), - }, - { - Config: testAccCodeCommitRepository_withChanges(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - resource.TestCheckResourceAttr( - "aws_codecommit_repository.test", "description", "This is a test description - with changes"), - ), - }, - }, - }) -} - -func TestAccAWSCodeCommitRepository_create_default_branch(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCodeCommitRepository_with_default_branch(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - resource.TestCheckResourceAttr( - "aws_codecommit_repository.test", "default_branch", "master"), - ), - }, - }, - }) -} - -func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCodeCommitRepository_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - resource.TestCheckNoResourceAttr( - "aws_codecommit_repository.test", "default_branch"), - ), - }, - { - Config: testAccCodeCommitRepository_with_default_branch(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - 
resource.TestCheckResourceAttr( - "aws_codecommit_repository.test", "default_branch", "master"), - ), - }, - }, - }) -} - -func testAccCheckCodeCommitRepositoryExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - codecommitconn := testAccProvider.Meta().(*AWSClient).codecommitconn - out, err := codecommitconn.GetRepository(&codecommit.GetRepositoryInput{ - RepositoryName: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if out.RepositoryMetadata.Arn == nil { - return fmt.Errorf("No CodeCommit Repository Vault Found") - } - - if *out.RepositoryMetadata.RepositoryName != rs.Primary.ID { - return fmt.Errorf("CodeCommit Repository Mismatch - existing: %q, state: %q", - *out.RepositoryMetadata.RepositoryName, rs.Primary.ID) - } - - return nil - } -} - -func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codecommitconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codecommit_repository" { - continue - } - - _, err := conn.GetRepository(&codecommit.GetRepositoryInput{ - RepositoryName: aws.String(rs.Primary.ID), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "RepositoryDoesNotExistException" { - continue - } - if err == nil { - return fmt.Errorf("Repository still exists: %s", rs.Primary.ID) - } - return err - } - - return nil -} - -func testAccCodeCommitRepository_basic(rInt int) string { - return fmt.Sprintf(` -resource "aws_codecommit_repository" "test" { - repository_name = "test_repository_%d" - description = "This is a test description" -} -`, rInt) -} - -func testAccCodeCommitRepository_withChanges(rInt int) string { - return fmt.Sprintf(` -resource "aws_codecommit_repository" "test" { - repository_name = "test_repository_%d" - 
description = "This is a test description - with changes" -} -`, rInt) -} - -func testAccCodeCommitRepository_with_default_branch(rInt int) string { - return fmt.Sprintf(` -resource "aws_codecommit_repository" "test" { - repository_name = "test_repository_%d" - description = "This is a test description" - default_branch = "master" -} -`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_codecommit_trigger.go b/builtin/providers/aws/resource_aws_codecommit_trigger.go deleted file mode 100644 index c21d55a9c..000000000 --- a/builtin/providers/aws/resource_aws_codecommit_trigger.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codecommit" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsCodeCommitTrigger() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCodeCommitTriggerCreate, - Read: resourceAwsCodeCommitTriggerRead, - Delete: resourceAwsCodeCommitTriggerDelete, - - Schema: map[string]*schema.Schema{ - "repository_name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "configuration_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "trigger": &schema.Schema{ - Type: schema.TypeSet, - ForceNew: true, - Required: true, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "custom_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "branches": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "events": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: 
schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func resourceAwsCodeCommitTriggerCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codecommitconn - - // Expand the "trigger" set to aws-sdk-go compat []*codecommit.RepositoryTrigger - triggers := expandAwsCodeCommitTriggers(d.Get("trigger").(*schema.Set).List()) - - input := &codecommit.PutRepositoryTriggersInput{ - RepositoryName: aws.String(d.Get("repository_name").(string)), - Triggers: triggers, - } - - resp, err := conn.PutRepositoryTriggers(input) - if err != nil { - return fmt.Errorf("Error creating CodeCommit Trigger: %s", err) - } - - log.Printf("[INFO] Code Commit Trigger Created %s input %s", resp, input) - - d.SetId(d.Get("repository_name").(string)) - d.Set("configuration_id", resp.ConfigurationId) - - return resourceAwsCodeCommitTriggerRead(d, meta) -} - -func resourceAwsCodeCommitTriggerRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codecommitconn - - input := &codecommit.GetRepositoryTriggersInput{ - RepositoryName: aws.String(d.Id()), - } - - resp, err := conn.GetRepositoryTriggers(input) - if err != nil { - return fmt.Errorf("Error reading CodeCommit Trigger: %s", err.Error()) - } - - log.Printf("[DEBUG] CodeCommit Trigger: %s", resp) - - return nil -} - -func resourceAwsCodeCommitTriggerDelete(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AWSClient).codecommitconn - - log.Printf("[DEBUG] Deleting Trigger: %q", d.Id()) - - input := &codecommit.PutRepositoryTriggersInput{ - RepositoryName: aws.String(d.Get("repository_name").(string)), - Triggers: []*codecommit.RepositoryTrigger{}, - } - - _, err := conn.PutRepositoryTriggers(input) - - if err != nil { - return err - } - - return nil -} - -func expandAwsCodeCommitTriggers(configured []interface{}) []*codecommit.RepositoryTrigger { - triggers := make([]*codecommit.RepositoryTrigger, 0, len(configured)) - // Loop over our configured triggers and create 
- // an array of aws-sdk-go compatabile objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - t := &codecommit.RepositoryTrigger{ - CustomData: aws.String(data["custom_data"].(string)), - DestinationArn: aws.String(data["destination_arn"].(string)), - Name: aws.String(data["name"].(string)), - } - - branches := make([]*string, len(data["branches"].([]interface{}))) - for i, vv := range data["branches"].([]interface{}) { - str := vv.(string) - branches[i] = aws.String(str) - } - t.Branches = branches - - events := make([]*string, len(data["events"].([]interface{}))) - for i, vv := range data["events"].([]interface{}) { - str := vv.(string) - events[i] = aws.String(str) - } - t.Events = events - - triggers = append(triggers, t) - } - return triggers -} diff --git a/builtin/providers/aws/resource_aws_codecommit_trigger_test.go b/builtin/providers/aws/resource_aws_codecommit_trigger_test.go deleted file mode 100644 index 01472a7cc..000000000 --- a/builtin/providers/aws/resource_aws_codecommit_trigger_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codecommit" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeCommitTrigger_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitTriggerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCodeCommitTrigger_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitTriggerExists("aws_codecommit_trigger.test"), - resource.TestCheckResourceAttr( - "aws_codecommit_trigger.test", "trigger.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckCodeCommitTriggerDestroy(s *terraform.State) error { - conn := 
testAccProvider.Meta().(*AWSClient).codecommitconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codecommit_trigger" { - continue - } - - _, err := conn.GetRepositoryTriggers(&codecommit.GetRepositoryTriggersInput{ - RepositoryName: aws.String(rs.Primary.ID), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "RepositoryDoesNotExistException" { - continue - } - if err == nil { - return fmt.Errorf("Trigger still exists: %s", rs.Primary.ID) - } - return err - } - - return nil -} - -func testAccCheckCodeCommitTriggerExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - codecommitconn := testAccProvider.Meta().(*AWSClient).codecommitconn - out, err := codecommitconn.GetRepositoryTriggers(&codecommit.GetRepositoryTriggersInput{ - RepositoryName: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if len(out.Triggers) == 0 { - return fmt.Errorf("CodeCommit Trigger Failed: %q", out) - } - - return nil - } -} - -const testAccCodeCommitTrigger_basic = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_sns_topic" "test" { - name = "tf-test-topic" -} -resource "aws_codecommit_repository" "test" { - repository_name = "tf_test_repository" - description = "This is a test description" -} -resource "aws_codecommit_trigger" "test" { - depends_on = ["aws_codecommit_repository.test"] - repository_name = "tf_test_repository" - trigger { - name = "tf-test-trigger" - events = ["all"] - destination_arn = "${aws_sns_topic.test.arn}" - } - } -` diff --git a/builtin/providers/aws/resource_aws_codedeploy_app.go b/builtin/providers/aws/resource_aws_codedeploy_app.go deleted file mode 100644 index 706bd7afa..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_app.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - 
-import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" -) - -func resourceAwsCodeDeployApp() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCodeDeployAppCreate, - Read: resourceAwsCodeDeployAppRead, - Update: resourceAwsCodeDeployUpdate, - Delete: resourceAwsCodeDeployAppDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - // The unique ID is set by AWS on create. - "unique_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsCodeDeployAppCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - application := d.Get("name").(string) - log.Printf("[DEBUG] Creating CodeDeploy application %s", application) - - resp, err := conn.CreateApplication(&codedeploy.CreateApplicationInput{ - ApplicationName: aws.String(application), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] CodeDeploy application %s created", *resp.ApplicationId) - - // Despite giving the application a unique ID, AWS doesn't actually use - // it in API calls. Use it and the app name to identify the resource in - // the state file. This allows us to reliably detect both when the TF - // config file changes and when the user deletes the app without removing - // it first from the TF config. 
- d.SetId(fmt.Sprintf("%s:%s", *resp.ApplicationId, application)) - - return resourceAwsCodeDeployAppRead(d, meta) -} - -func resourceAwsCodeDeployAppRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - _, application := resourceAwsCodeDeployAppParseId(d.Id()) - log.Printf("[DEBUG] Reading CodeDeploy application %s", application) - resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ - ApplicationName: aws.String(application), - }) - if err != nil { - if codedeployerr, ok := err.(awserr.Error); ok && codedeployerr.Code() == "ApplicationDoesNotExistException" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error finding CodeDeploy application: %s", err) - return err - } - } - - d.Set("name", resp.Application.ApplicationName) - - return nil -} - -func resourceAwsCodeDeployUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - o, n := d.GetChange("name") - - _, err := conn.UpdateApplication(&codedeploy.UpdateApplicationInput{ - ApplicationName: aws.String(o.(string)), - NewApplicationName: aws.String(n.(string)), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] CodeDeploy application %s updated", n) - - d.Set("name", n) - - return nil -} - -func resourceAwsCodeDeployAppDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - _, err := conn.DeleteApplication(&codedeploy.DeleteApplicationInput{ - ApplicationName: aws.String(d.Get("name").(string)), - }) - if err != nil { - if cderr, ok := err.(awserr.Error); ok && cderr.Code() == "InvalidApplicationNameException" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error deleting CodeDeploy application: %s", err) - return err - } - } - - return nil -} - -func resourceAwsCodeDeployAppParseId(id string) (string, string) { - parts := strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} diff --git 
a/builtin/providers/aws/resource_aws_codedeploy_app_test.go b/builtin/providers/aws/resource_aws_codedeploy_app_test.go deleted file mode 100644 index dd3a4ce7a..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_app_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeDeployApp_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployAppDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployApp, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"), - ), - }, - resource.TestStep{ - Config: testAccAWSCodeDeployAppModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"), - ), - }, - }, - }) -} - -func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codedeploy_app" { - continue - } - - _, err := conn.GetApplication(&codedeploy.GetApplicationInput{ - ApplicationName: aws.String(rs.Primary.Attributes["name"]), - }) - - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" { - continue - } - return err - } - - return fmt.Errorf("still exists") - } - - return nil -} - -func testAccCheckAWSCodeDeployAppExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - 
} -} - -var testAccAWSCodeDeployApp = ` -resource "aws_codedeploy_app" "foo" { - name = "foo" -}` - -var testAccAWSCodeDeployAppModified = ` -resource "aws_codedeploy_app" "foo" { - name = "bar" -}` diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_config.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_config.go deleted file mode 100644 index 10130dc76..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_config.go +++ /dev/null @@ -1,152 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsCodeDeployDeploymentConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCodeDeployDeploymentConfigCreate, - Read: resourceAwsCodeDeployDeploymentConfigRead, - Delete: resourceAwsCodeDeployDeploymentConfigDelete, - - Schema: map[string]*schema.Schema{ - "deployment_config_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "minimum_healthy_hosts": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateMinimumHealtyHostsType, - }, - - "value": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - - "deployment_config_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsCodeDeployDeploymentConfigCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - input := &codedeploy.CreateDeploymentConfigInput{ - DeploymentConfigName: aws.String(d.Get("deployment_config_name").(string)), - MinimumHealthyHosts: expandAwsCodeDeployConfigMinimumHealthHosts(d), - } - - _, err := conn.CreateDeploymentConfig(input) - if err != nil { - return err - 
} - - d.SetId(d.Get("deployment_config_name").(string)) - - return resourceAwsCodeDeployDeploymentConfigRead(d, meta) -} - -func resourceAwsCodeDeployDeploymentConfigRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - input := &codedeploy.GetDeploymentConfigInput{ - DeploymentConfigName: aws.String(d.Id()), - } - - resp, err := conn.GetDeploymentConfig(input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "DeploymentConfigDoesNotExistException" == awsErr.Code() { - log.Printf("[DEBUG] CodeDeploy Deployment Config (%s) not found", d.Id()) - d.SetId("") - return nil - } - } - return err - } - - if resp.DeploymentConfigInfo == nil { - return fmt.Errorf("[ERROR] Cannot find DeploymentConfig %q", d.Id()) - } - - if err := d.Set("minimum_healthy_hosts", flattenAwsCodeDeployConfigMinimumHealthHosts(resp.DeploymentConfigInfo.MinimumHealthyHosts)); err != nil { - return err - } - d.Set("deployment_config_id", resp.DeploymentConfigInfo.DeploymentConfigId) - d.Set("deployment_config_name", resp.DeploymentConfigInfo.DeploymentConfigName) - - return nil -} - -func resourceAwsCodeDeployDeploymentConfigDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - input := &codedeploy.DeleteDeploymentConfigInput{ - DeploymentConfigName: aws.String(d.Id()), - } - - _, err := conn.DeleteDeploymentConfig(input) - if err != nil { - return err - } - - return nil -} - -func expandAwsCodeDeployConfigMinimumHealthHosts(d *schema.ResourceData) *codedeploy.MinimumHealthyHosts { - hosts := d.Get("minimum_healthy_hosts").([]interface{}) - host := hosts[0].(map[string]interface{}) - - minimumHealthyHost := codedeploy.MinimumHealthyHosts{ - Type: aws.String(host["type"].(string)), - Value: aws.Int64(int64(host["value"].(int))), - } - - return &minimumHealthyHost -} - -func flattenAwsCodeDeployConfigMinimumHealthHosts(hosts *codedeploy.MinimumHealthyHosts) []map[string]interface{} { - 
result := make([]map[string]interface{}, 0) - - item := make(map[string]interface{}) - - item["type"] = *hosts.Type - item["value"] = *hosts.Value - - result = append(result, item) - - return result -} - -func validateMinimumHealtyHostsType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "FLEET_PERCENT" && value != "HOST_COUNT" { - errors = append(errors, fmt.Errorf( - "%q must be one of \"FLEET_PERCENT\" or \"HOST_COUNT\"", k)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_config_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_config_test.go deleted file mode 100644 index f8f10bd0b..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_config_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeDeployDeploymentConfig_fleetPercent(t *testing.T) { - var config codedeploy.DeploymentConfigInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentConfigDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodeDeployDeploymentConfigFleet(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentConfigExists("aws_codedeploy_deployment_config.foo", &config), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.type", "FLEET_PERCENT"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.value", "75"), - ), - }, - }, - }) -} - -func 
TestAccAWSCodeDeployDeploymentConfig_hostCount(t *testing.T) { - var config codedeploy.DeploymentConfigInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentConfigDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodeDeployDeploymentConfigHostCount(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentConfigExists("aws_codedeploy_deployment_config.foo", &config), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.type", "HOST_COUNT"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_config.foo", "minimum_healthy_hosts.0.value", "1"), - ), - }, - }, - }) -} - -func TestValidateAWSCodeDeployMinimumHealthyHostsType(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "FLEET_PERCENT", - ErrCount: 0, - }, - { - Value: "HOST_COUNT", - ErrCount: 0, - }, - { - Value: "host_count", - ErrCount: 1, - }, - { - Value: "hostcount", - ErrCount: 1, - }, - { - Value: "FleetPercent", - ErrCount: 1, - }, - { - Value: "Foo", - ErrCount: 1, - }, - { - Value: "", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateMinimumHealtyHostsType(tc.Value, "minimum_healthy_hosts_type") - if len(errors) != tc.ErrCount { - t.Fatalf("Minimum Healthy Hosts validation failed for type %q: %q", tc.Value, errors) - } - } -} - -func testAccCheckAWSCodeDeployDeploymentConfigDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codedeploy_deployment_config" { - continue - } - - resp, err := conn.GetDeploymentConfig(&codedeploy.GetDeploymentConfigInput{ - DeploymentConfigName: aws.String(rs.Primary.ID), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == 
"DeploymentConfigDoesNotExistException" { - continue - } - - if err == nil { - if resp.DeploymentConfigInfo != nil { - return fmt.Errorf("CodeDeploy deployment config still exists:\n%#v", *resp.DeploymentConfigInfo.DeploymentConfigName) - } - } - - return err - } - - return nil -} - -func testAccCheckAWSCodeDeployDeploymentConfigExists(name string, config *codedeploy.DeploymentConfigInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - - resp, err := conn.GetDeploymentConfig(&codedeploy.GetDeploymentConfigInput{ - DeploymentConfigName: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - *config = *resp.DeploymentConfigInfo - - return nil - } -} - -func testAccAWSCodeDeployDeploymentConfigFleet(rName string) string { - return fmt.Sprintf(` -resource "aws_codedeploy_deployment_config" "foo" { - deployment_config_name = "test-deployment-config-%s" - minimum_healthy_hosts { - type = "FLEET_PERCENT" - value = 75 - } -}`, rName) -} - -func testAccAWSCodeDeployDeploymentConfigHostCount(rName string) string { - return fmt.Sprintf(` -resource "aws_codedeploy_deployment_config" "foo" { - deployment_config_name = "test-deployment-config-%s" - minimum_healthy_hosts { - type = "HOST_COUNT" - value = 1 - } -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go deleted file mode 100644 index 4a6d17211..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go +++ /dev/null @@ -1,690 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "sort" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" -) - -func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsCodeDeployDeploymentGroupCreate, - Read: resourceAwsCodeDeployDeploymentGroupRead, - Update: resourceAwsCodeDeployDeploymentGroupUpdate, - Delete: resourceAwsCodeDeployDeploymentGroupDelete, - - Schema: map[string]*schema.Schema{ - "app_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot exceed 100 characters", k)) - } - return - }, - }, - - "deployment_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot exceed 100 characters", k)) - } - return - }, - }, - - "service_role_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "alarm_configuration": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alarms": &schema.Schema{ - Type: schema.TypeSet, - MaxItems: 10, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "ignore_poll_alarm_failure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - - "auto_rollback_configuration": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "events": 
&schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "autoscaling_groups": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "deployment_config_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "CodeDeployDefault.OneAtATime", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot exceed 100 characters", k)) - } - return - }, - }, - - "ec2_tag_filter": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateTagFilters, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAwsCodeDeployTagFilterHash, - }, - - "on_premises_instance_tag_filter": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateTagFilters, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAwsCodeDeployTagFilterHash, - }, - - "trigger_configuration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "trigger_events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateTriggerEvent, - }, - }, - - 
"trigger_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "trigger_target_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceAwsCodeDeployTriggerConfigHash, - }, - }, - } -} - -func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - application := d.Get("app_name").(string) - deploymentGroup := d.Get("deployment_group_name").(string) - - input := codedeploy.CreateDeploymentGroupInput{ - ApplicationName: aws.String(application), - DeploymentGroupName: aws.String(deploymentGroup), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), - } - if attr, ok := d.GetOk("deployment_config_name"); ok { - input.DeploymentConfigName = aws.String(attr.(string)) - } - if attr, ok := d.GetOk("autoscaling_groups"); ok { - input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List()) - } - if attr, ok := d.GetOk("on_premises_instance_tag_filter"); ok { - onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List()) - input.OnPremisesInstanceTagFilters = onPremFilters - } - if attr, ok := d.GetOk("ec2_tag_filter"); ok { - ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List()) - input.Ec2TagFilters = ec2TagFilters - } - if attr, ok := d.GetOk("trigger_configuration"); ok { - triggerConfigs := buildTriggerConfigs(attr.(*schema.Set).List()) - input.TriggerConfigurations = triggerConfigs - } - - if attr, ok := d.GetOk("auto_rollback_configuration"); ok { - input.AutoRollbackConfiguration = buildAutoRollbackConfig(attr.([]interface{})) - } - - if attr, ok := d.GetOk("alarm_configuration"); ok { - input.AlarmConfiguration = buildAlarmConfig(attr.([]interface{})) - } - - // Retry to handle IAM role eventual consistency. 
- var resp *codedeploy.CreateDeploymentGroupOutput - var err error - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err = conn.CreateDeploymentGroup(&input) - if err != nil { - retry := false - codedeployErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if codedeployErr.Code() == "InvalidRoleException" { - retry = true - } - if codedeployErr.Code() == "InvalidTriggerConfigException" { - r := regexp.MustCompile("^Topic ARN .+ is not valid$") - if r.MatchString(codedeployErr.Message()) { - retry = true - } - } - if retry { - log.Printf("[DEBUG] Trying to create deployment group again: %q", - codedeployErr.Message()) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - - d.SetId(*resp.DeploymentGroupId) - - return resourceAwsCodeDeployDeploymentGroupRead(d, meta) -} - -func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) - resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("app_name").(string)), - DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "DeploymentGroupDoesNotExistException" { - log.Printf("[INFO] CodeDeployment DeploymentGroup %s not found", d.Get("deployment_group_name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("app_name", resp.DeploymentGroupInfo.ApplicationName) - d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups) - d.Set("deployment_config_name", resp.DeploymentGroupInfo.DeploymentConfigName) - d.Set("deployment_group_name", resp.DeploymentGroupInfo.DeploymentGroupName) - d.Set("service_role_arn", 
resp.DeploymentGroupInfo.ServiceRoleArn) - if err := d.Set("ec2_tag_filter", ec2TagFiltersToMap(resp.DeploymentGroupInfo.Ec2TagFilters)); err != nil { - return err - } - if err := d.Set("on_premises_instance_tag_filter", onPremisesTagFiltersToMap(resp.DeploymentGroupInfo.OnPremisesInstanceTagFilters)); err != nil { - return err - } - if err := d.Set("trigger_configuration", triggerConfigsToMap(resp.DeploymentGroupInfo.TriggerConfigurations)); err != nil { - return err - } - - if err := d.Set("auto_rollback_configuration", autoRollbackConfigToMap(resp.DeploymentGroupInfo.AutoRollbackConfiguration)); err != nil { - return err - } - - if err := d.Set("alarm_configuration", alarmConfigToMap(resp.DeploymentGroupInfo.AlarmConfiguration)); err != nil { - return err - } - - return nil -} - -func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - input := codedeploy.UpdateDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("app_name").(string)), - CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), - } - - if d.HasChange("autoscaling_groups") { - _, n := d.GetChange("autoscaling_groups") - input.AutoScalingGroups = expandStringList(n.(*schema.Set).List()) - } - if d.HasChange("deployment_config_name") { - _, n := d.GetChange("deployment_config_name") - input.DeploymentConfigName = aws.String(n.(string)) - } - if d.HasChange("deployment_group_name") { - _, n := d.GetChange("deployment_group_name") - input.NewDeploymentGroupName = aws.String(n.(string)) - } - - // TagFilters aren't like tags. They don't append. They simply replace. 
- if d.HasChange("on_premises_instance_tag_filter") { - _, n := d.GetChange("on_premises_instance_tag_filter") - onPremFilters := buildOnPremTagFilters(n.(*schema.Set).List()) - input.OnPremisesInstanceTagFilters = onPremFilters - } - if d.HasChange("ec2_tag_filter") { - _, n := d.GetChange("ec2_tag_filter") - ec2Filters := buildEC2TagFilters(n.(*schema.Set).List()) - input.Ec2TagFilters = ec2Filters - } - if d.HasChange("trigger_configuration") { - _, n := d.GetChange("trigger_configuration") - triggerConfigs := buildTriggerConfigs(n.(*schema.Set).List()) - input.TriggerConfigurations = triggerConfigs - } - - if d.HasChange("auto_rollback_configuration") { - _, n := d.GetChange("auto_rollback_configuration") - input.AutoRollbackConfiguration = buildAutoRollbackConfig(n.([]interface{})) - } - - if d.HasChange("alarm_configuration") { - _, n := d.GetChange("alarm_configuration") - input.AlarmConfiguration = buildAlarmConfig(n.([]interface{})) - } - - log.Printf("[DEBUG] Updating CodeDeploy DeploymentGroup %s", d.Id()) - // Retry to handle IAM role eventual consistency. 
- err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.UpdateDeploymentGroup(&input) - if err != nil { - retry := false - codedeployErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if codedeployErr.Code() == "InvalidRoleException" { - retry = true - } - if codedeployErr.Code() == "InvalidTriggerConfigException" { - r := regexp.MustCompile("^Topic ARN .+ is not valid$") - if r.MatchString(codedeployErr.Message()) { - retry = true - } - } - if retry { - log.Printf("[DEBUG] Retrying Code Deployment Group Update: %q", - codedeployErr.Message()) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return err - } - - return resourceAwsCodeDeployDeploymentGroupRead(d, meta) -} - -func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codedeployconn - - log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) - _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("app_name").(string)), - DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), - }) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -// buildOnPremTagFilters converts raw schema lists into a list of -// codedeploy.TagFilters. 
-func buildOnPremTagFilters(configured []interface{}) []*codedeploy.TagFilter { - filters := make([]*codedeploy.TagFilter, 0) - for _, raw := range configured { - var filter codedeploy.TagFilter - m := raw.(map[string]interface{}) - - if v, ok := m["key"]; ok { - filter.Key = aws.String(v.(string)) - } - if v, ok := m["type"]; ok { - filter.Type = aws.String(v.(string)) - } - if v, ok := m["value"]; ok { - filter.Value = aws.String(v.(string)) - } - - filters = append(filters, &filter) - } - - return filters -} - -// buildEC2TagFilters converts raw schema lists into a list of -// codedeploy.EC2TagFilters. -func buildEC2TagFilters(configured []interface{}) []*codedeploy.EC2TagFilter { - filters := make([]*codedeploy.EC2TagFilter, 0) - for _, raw := range configured { - var filter codedeploy.EC2TagFilter - m := raw.(map[string]interface{}) - - filter.Key = aws.String(m["key"].(string)) - filter.Type = aws.String(m["type"].(string)) - filter.Value = aws.String(m["value"].(string)) - - filters = append(filters, &filter) - } - - return filters -} - -// buildTriggerConfigs converts a raw schema list into a list of -// codedeploy.TriggerConfig. 
-func buildTriggerConfigs(configured []interface{}) []*codedeploy.TriggerConfig { - configs := make([]*codedeploy.TriggerConfig, 0, len(configured)) - for _, raw := range configured { - var config codedeploy.TriggerConfig - m := raw.(map[string]interface{}) - - config.TriggerEvents = expandStringSet(m["trigger_events"].(*schema.Set)) - config.TriggerName = aws.String(m["trigger_name"].(string)) - config.TriggerTargetArn = aws.String(m["trigger_target_arn"].(string)) - - configs = append(configs, &config) - } - return configs -} - -// buildAutoRollbackConfig converts a raw schema list containing a map[string]interface{} -// into a single codedeploy.AutoRollbackConfiguration -func buildAutoRollbackConfig(configured []interface{}) *codedeploy.AutoRollbackConfiguration { - result := &codedeploy.AutoRollbackConfiguration{} - - if len(configured) == 1 { - config := configured[0].(map[string]interface{}) - result.Enabled = aws.Bool(config["enabled"].(bool)) - result.Events = expandStringSet(config["events"].(*schema.Set)) - } else { // delete the configuration - result.Enabled = aws.Bool(false) - result.Events = make([]*string, 0) - } - - return result -} - -// buildAlarmConfig converts a raw schema list containing a map[string]interface{} -// into a single codedeploy.AlarmConfiguration -func buildAlarmConfig(configured []interface{}) *codedeploy.AlarmConfiguration { - result := &codedeploy.AlarmConfiguration{} - - if len(configured) == 1 { - config := configured[0].(map[string]interface{}) - names := expandStringSet(config["alarms"].(*schema.Set)) - alarms := make([]*codedeploy.Alarm, 0, len(names)) - - for _, name := range names { - alarm := &codedeploy.Alarm{ - Name: name, - } - alarms = append(alarms, alarm) - } - - result.Alarms = alarms - result.Enabled = aws.Bool(config["enabled"].(bool)) - result.IgnorePollAlarmFailure = aws.Bool(config["ignore_poll_alarm_failure"].(bool)) - } else { // delete the configuration - result.Alarms = make([]*codedeploy.Alarm, 0) - 
result.Enabled = aws.Bool(false) - result.IgnorePollAlarmFailure = aws.Bool(false) - } - - return result -} - -// ec2TagFiltersToMap converts lists of tag filters into a []map[string]string. -func ec2TagFiltersToMap(list []*codedeploy.EC2TagFilter) []map[string]string { - result := make([]map[string]string, 0, len(list)) - for _, tf := range list { - l := make(map[string]string) - if tf.Key != nil && *tf.Key != "" { - l["key"] = *tf.Key - } - if tf.Value != nil && *tf.Value != "" { - l["value"] = *tf.Value - } - if tf.Type != nil && *tf.Type != "" { - l["type"] = *tf.Type - } - result = append(result, l) - } - return result -} - -// onPremisesTagFiltersToMap converts lists of on-prem tag filters into a []map[string]string. -func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string { - result := make([]map[string]string, 0, len(list)) - for _, tf := range list { - l := make(map[string]string) - if tf.Key != nil && *tf.Key != "" { - l["key"] = *tf.Key - } - if tf.Value != nil && *tf.Value != "" { - l["value"] = *tf.Value - } - if tf.Type != nil && *tf.Type != "" { - l["type"] = *tf.Type - } - result = append(result, l) - } - return result -} - -// triggerConfigsToMap converts a list of []*codedeploy.TriggerConfig into a []map[string]interface{} -func triggerConfigsToMap(list []*codedeploy.TriggerConfig) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, tc := range list { - item := make(map[string]interface{}) - item["trigger_events"] = schema.NewSet(schema.HashString, flattenStringList(tc.TriggerEvents)) - item["trigger_name"] = *tc.TriggerName - item["trigger_target_arn"] = *tc.TriggerTargetArn - result = append(result, item) - } - return result -} - -// autoRollbackConfigToMap converts a codedeploy.AutoRollbackConfiguration -// into a []map[string]interface{} list containing a single item -func autoRollbackConfigToMap(config *codedeploy.AutoRollbackConfiguration) []map[string]interface{} { - result 
:= make([]map[string]interface{}, 0, 1) - - // only create configurations that are enabled or temporarily disabled (retaining events) - // otherwise empty configurations will be created - if config != nil && (*config.Enabled == true || len(config.Events) > 0) { - item := make(map[string]interface{}) - item["enabled"] = *config.Enabled - item["events"] = schema.NewSet(schema.HashString, flattenStringList(config.Events)) - result = append(result, item) - } - - return result -} - -// alarmConfigToMap converts a codedeploy.AlarmConfiguration -// into a []map[string]interface{} list containing a single item -func alarmConfigToMap(config *codedeploy.AlarmConfiguration) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - // only create configurations that are enabled or temporarily disabled (retaining alarms) - // otherwise empty configurations will be created - if config != nil && (*config.Enabled == true || len(config.Alarms) > 0) { - names := make([]*string, 0, len(config.Alarms)) - for _, alarm := range config.Alarms { - names = append(names, alarm.Name) - } - - item := make(map[string]interface{}) - item["alarms"] = schema.NewSet(schema.HashString, flattenStringList(names)) - item["enabled"] = *config.Enabled - item["ignore_poll_alarm_failure"] = *config.IgnorePollAlarmFailure - - result = append(result, item) - } - - return result -} - -func resourceAwsCodeDeployTagFilterHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - // Nothing's actually required in tag filters, so we must check the - // presence of all values before attempting a hash. 
- if v, ok := m["key"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["type"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["value"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAwsCodeDeployTriggerConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["trigger_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["trigger_target_arn"].(string))) - - if triggerEvents, ok := m["trigger_events"]; ok { - names := triggerEvents.(*schema.Set).List() - strings := make([]string, len(names)) - for i, raw := range names { - strings[i] = raw.(string) - } - sort.Strings(strings) - - for _, s := range strings { - buf.WriteString(fmt.Sprintf("%s-", s)) - } - } - return hashcode.String(buf.String()) -} - -func validateTriggerEvent(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - triggerEvents := map[string]bool{ - "DeploymentStart": true, - "DeploymentStop": true, - "DeploymentSuccess": true, - "DeploymentFailure": true, - "DeploymentRollback": true, - "InstanceStart": true, - "InstanceSuccess": true, - "InstanceFailure": true, - } - - if !triggerEvents[value] { - errors = append(errors, fmt.Errorf("%q must be a valid event type value: %q", k, value)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go deleted file mode 100644 index ff2852500..000000000 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go +++ /dev/null @@ -1,1458 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "regexp" - "sort" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/codedeploy" - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodeDeployDeploymentGroup_basic(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "app_name", "foo_app_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_group_name", "foo_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_config_name", "CodeDeployDefault.OneAtATime"), - resource.TestMatchResourceAttr( - "aws_codedeploy_deployment_group.foo", "service_role_arn", - regexp.MustCompile("arn:aws:iam::[0-9]{12}:role/foo_role_.*")), - - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2916377465.key", "filterkey"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2916377465.type", "KEY_AND_VALUE"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2916377465.value", "filtervalue"), - - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "alarm_configuration.#", "0"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "auto_rollback_configuration.#", "0"), - resource.TestCheckResourceAttr( - 
"aws_codedeploy_deployment_group.foo", "trigger_configuration.#", "0"), - ), - }, - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroupModified(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "app_name", "foo_app_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_group_name", "bar_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_config_name", "CodeDeployDefault.OneAtATime"), - resource.TestMatchResourceAttr( - "aws_codedeploy_deployment_group.foo", "service_role_arn", - regexp.MustCompile("arn:aws:iam::[0-9]{12}:role/bar_role_.*")), - - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2369538975.key", "filterkey"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2369538975.type", "KEY_AND_VALUE"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "ec2_tag_filter.2369538975.value", "anotherfiltervalue"), - - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "alarm_configuration.#", "0"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "auto_rollback_configuration.#", "0"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "trigger_configuration.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_onPremiseTag(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroupOnPremiseTags(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "app_name", "foo_app_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_group_name", "foo_"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "deployment_config_name", "CodeDeployDefault.OneAtATime"), - - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "on_premises_instance_tag_filter.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "on_premises_instance_tag_filter.2916377465.key", "filterkey"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "on_premises_instance_tag_filter.2916377465.type", "KEY_AND_VALUE"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo", "on_premises_instance_tag_filter.2916377465.value", "filtervalue"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_disappears(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo", &group), - testAccAWSCodeDeployDeploymentGroupDisappears(&group), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_triggerConfiguration_basic(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := 
acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup_triggerConfiguration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "app_name", "foo-app-"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "deployment_group_name", "foo-group-"+rName), - testAccCheckTriggerEvents(&group, "foo-trigger", []string{ - "DeploymentFailure", - }), - ), - }, - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup_triggerConfiguration_update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "app_name", "foo-app-"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "deployment_group_name", "foo-group-"+rName), - testAccCheckTriggerEvents(&group, "foo-trigger", []string{ - "DeploymentFailure", - "DeploymentSuccess", - }), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_triggerConfiguration_multiple(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup_triggerConfiguration_createMultiple(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "app_name", "foo-app-"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "deployment_group_name", "foo-group-"+rName), - testAccCheckTriggerEvents(&group, "foo-trigger", []string{ - "DeploymentFailure", - }), - testAccCheckTriggerEvents(&group, "bar-trigger", []string{ - "InstanceFailure", - }), - testAccCheckTriggerTargetArn(&group, "bar-trigger", - regexp.MustCompile("^arn:aws:sns:[^:]+:[0-9]{12}:bar-topic-"+rName+"$")), - ), - }, - resource.TestStep{ - Config: testAccAWSCodeDeployDeploymentGroup_triggerConfiguration_updateMultiple(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "app_name", "foo-app-"+rName), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "deployment_group_name", "foo-group-"+rName), - testAccCheckTriggerEvents(&group, "foo-trigger", []string{ - "DeploymentFailure", - "DeploymentStart", - "DeploymentStop", - "DeploymentSuccess", - }), - testAccCheckTriggerEvents(&group, "bar-trigger", []string{ - "InstanceFailure", - }), - testAccCheckTriggerTargetArn(&group, "bar-trigger", - regexp.MustCompile("^arn:aws:sns:[^:]+:[0-9]{12}:baz-topic-"+rName+"$")), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_autoRollbackConfiguration_create(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
test_config_auto_rollback_configuration_delete(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "0"), - ), - }, - resource.TestStep{ - Config: test_config_auto_rollback_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_autoRollbackConfiguration_update(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_auto_rollback_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "true"), - 
resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - resource.TestStep{ - Config: test_config_auto_rollback_configuration_update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "2"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.104943466", "DEPLOYMENT_STOP_ON_ALARM"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_autoRollbackConfiguration_delete(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_auto_rollback_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( 
- "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - resource.TestStep{ - Config: test_config_auto_rollback_configuration_delete(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_autoRollbackConfiguration_disable(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_auto_rollback_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - resource.TestStep{ - Config: 
test_config_auto_rollback_configuration_disable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.enabled", "false"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "auto_rollback_configuration.0.events.135881253", "DEPLOYMENT_FAILURE"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_alarmConfiguration_create(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_alarm_configuration_delete(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "0"), - ), - }, - resource.TestStep{ - Config: test_config_alarm_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - 
"aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "false"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_alarmConfiguration_update(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_alarm_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "false"), - ), - }, - resource.TestStep{ - Config: test_config_alarm_configuration_update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - 
"aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "2"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.1996459178", "bar"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "true"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_alarmConfiguration_delete(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_alarm_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "false"), - ), - }, - resource.TestStep{ - Config: test_config_alarm_configuration_delete(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSCodeDeployDeploymentGroup_alarmConfiguration_disable(t *testing.T) { - var group codedeploy.DeploymentGroupInfo - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test_config_alarm_configuration_create(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "true"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "false"), - ), - }, - resource.TestStep{ - Config: test_config_alarm_configuration_disable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo_group", &group), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.enabled", "false"), - resource.TestCheckResourceAttr( - 
"aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.#", "1"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.alarms.2356372769", "foo"), - resource.TestCheckResourceAttr( - "aws_codedeploy_deployment_group.foo_group", "alarm_configuration.0.ignore_poll_alarm_failure", "false"), - ), - }, - }, - }) -} - -func TestValidateAWSCodeDeployTriggerEvent(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "DeploymentStart", - ErrCount: 0, - }, - { - Value: "DeploymentStop", - ErrCount: 0, - }, - { - Value: "DeploymentSuccess", - ErrCount: 0, - }, - { - Value: "DeploymentFailure", - ErrCount: 0, - }, - { - Value: "DeploymentRollback", - ErrCount: 0, - }, - { - Value: "InstanceStart", - ErrCount: 0, - }, - { - Value: "InstanceSuccess", - ErrCount: 0, - }, - { - Value: "InstanceFailure", - ErrCount: 0, - }, - { - Value: "DeploymentStarts", - ErrCount: 1, - }, - { - Value: "InstanceFail", - ErrCount: 1, - }, - { - Value: "Foo", - ErrCount: 1, - }, - { - Value: "", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateTriggerEvent(tc.Value, "trigger_event") - if len(errors) != tc.ErrCount { - t.Fatalf("Trigger event validation failed for event type %q: %q", tc.Value, errors) - } - } -} - -func TestBuildTriggerConfigs(t *testing.T) { - input := []interface{}{ - map[string]interface{}{ - "trigger_events": schema.NewSet(schema.HashString, []interface{}{ - "DeploymentFailure", - }), - "trigger_name": "foo-trigger", - "trigger_target_arn": "arn:aws:sns:us-west-2:123456789012:foo-topic", - }, - } - - expected := []*codedeploy.TriggerConfig{ - &codedeploy.TriggerConfig{ - TriggerEvents: []*string{ - aws.String("DeploymentFailure"), - }, - TriggerName: aws.String("foo-trigger"), - TriggerTargetArn: aws.String("arn:aws:sns:us-west-2:123456789012:foo-topic"), - }, - } - - actual := buildTriggerConfigs(input) - - if !reflect.DeepEqual(actual, expected) { 
- t.Fatalf("buildTriggerConfigs output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func TestTriggerConfigsToMap(t *testing.T) { - input := []*codedeploy.TriggerConfig{ - &codedeploy.TriggerConfig{ - TriggerEvents: []*string{ - aws.String("DeploymentFailure"), - aws.String("InstanceFailure"), - }, - TriggerName: aws.String("bar-trigger"), - TriggerTargetArn: aws.String("arn:aws:sns:us-west-2:123456789012:bar-topic"), - }, - } - - expected := map[string]interface{}{ - "trigger_events": schema.NewSet(schema.HashString, []interface{}{ - "DeploymentFailure", - "InstanceFailure", - }), - "trigger_name": "bar-trigger", - "trigger_target_arn": "arn:aws:sns:us-west-2:123456789012:bar-topic", - } - - actual := triggerConfigsToMap(input)[0] - - fatal := false - - if actual["trigger_name"] != expected["trigger_name"] { - fatal = true - } - - if actual["trigger_target_arn"] != expected["trigger_target_arn"] { - fatal = true - } - - actualEvents := actual["trigger_events"].(*schema.Set) - expectedEvents := expected["trigger_events"].(*schema.Set) - if !actualEvents.Equal(expectedEvents) { - fatal = true - } - - if fatal { - t.Fatalf("triggerConfigsToMap output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func TestBuildAutoRollbackConfig(t *testing.T) { - input := []interface{}{ - map[string]interface{}{ - "events": schema.NewSet(schema.HashString, []interface{}{ - "DEPLOYMENT_FAILURE", - }), - "enabled": true, - }, - } - - expected := &codedeploy.AutoRollbackConfiguration{ - Events: []*string{ - aws.String("DEPLOYMENT_FAILURE"), - }, - Enabled: aws.Bool(true), - } - - actual := buildAutoRollbackConfig(input) - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("buildAutoRollbackConfig output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func TestAutoRollbackConfigToMap(t *testing.T) { - input := &codedeploy.AutoRollbackConfiguration{ - Events: []*string{ - 
aws.String("DEPLOYMENT_FAILURE"), - aws.String("DEPLOYMENT_STOP_ON_ALARM"), - }, - Enabled: aws.Bool(false), - } - - expected := map[string]interface{}{ - "events": schema.NewSet(schema.HashString, []interface{}{ - "DEPLOYMENT_FAILURE", - "DEPLOYMENT_STOP_ON_ALARM", - }), - "enabled": false, - } - - actual := autoRollbackConfigToMap(input)[0] - - fatal := false - - if actual["enabled"] != expected["enabled"] { - fatal = true - } - - actualEvents := actual["events"].(*schema.Set) - expectedEvents := expected["events"].(*schema.Set) - if !actualEvents.Equal(expectedEvents) { - fatal = true - } - - if fatal { - t.Fatalf("autoRollbackConfigToMap output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func TestBuildAlarmConfig(t *testing.T) { - input := []interface{}{ - map[string]interface{}{ - "alarms": schema.NewSet(schema.HashString, []interface{}{ - "foo-alarm", - }), - "enabled": true, - "ignore_poll_alarm_failure": false, - }, - } - - expected := &codedeploy.AlarmConfiguration{ - Alarms: []*codedeploy.Alarm{ - { - Name: aws.String("foo-alarm"), - }, - }, - Enabled: aws.Bool(true), - IgnorePollAlarmFailure: aws.Bool(false), - } - - actual := buildAlarmConfig(input) - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("buildAlarmConfig output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func TestAlarmConfigToMap(t *testing.T) { - input := &codedeploy.AlarmConfiguration{ - Alarms: []*codedeploy.Alarm{ - { - Name: aws.String("bar-alarm"), - }, - { - Name: aws.String("foo-alarm"), - }, - }, - Enabled: aws.Bool(false), - IgnorePollAlarmFailure: aws.Bool(true), - } - - expected := map[string]interface{}{ - "alarms": schema.NewSet(schema.HashString, []interface{}{ - "bar-alarm", - "foo-alarm", - }), - "enabled": false, - "ignore_poll_alarm_failure": true, - } - - actual := alarmConfigToMap(input)[0] - - fatal := false - - if actual["enabled"] != expected["enabled"] { - fatal = true - } - - if 
actual["ignore_poll_alarm_failure"] != expected["ignore_poll_alarm_failure"] { - fatal = true - } - - actualAlarms := actual["alarms"].(*schema.Set) - expectedAlarms := expected["alarms"].(*schema.Set) - if !actualAlarms.Equal(expectedAlarms) { - fatal = true - } - - if fatal { - t.Fatalf("alarmConfigToMap output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", - actual, expected) - } -} - -func testAccCheckTriggerEvents(group *codedeploy.DeploymentGroupInfo, triggerName string, expectedEvents []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - for _, actual := range group.TriggerConfigurations { - if *actual.TriggerName == triggerName { - - numberOfEvents := len(actual.TriggerEvents) - if numberOfEvents != len(expectedEvents) { - return fmt.Errorf("Trigger events do not match. Expected: %d. Got: %d.", - len(expectedEvents), numberOfEvents) - } - - actualEvents := make([]string, 0, numberOfEvents) - for _, event := range actual.TriggerEvents { - actualEvents = append(actualEvents, *event) - } - sort.Strings(actualEvents) - - if !reflect.DeepEqual(actualEvents, expectedEvents) { - return fmt.Errorf("Trigger events do not match.\nExpected: %v\nGot: %v\n", - expectedEvents, actualEvents) - } - break - } - } - return nil - } -} - -func testAccCheckTriggerTargetArn(group *codedeploy.DeploymentGroupInfo, triggerName string, r *regexp.Regexp) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, actual := range group.TriggerConfigurations { - if *actual.TriggerName == triggerName { - if !r.MatchString(*actual.TriggerTargetArn) { - return fmt.Errorf("Trigger target arn does not match regular expression.\nRegex: %v\nTriggerTargetArn: %v\n", - r, *actual.TriggerTargetArn) - } - break - } - } - return nil - } -} - -func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != 
"aws_codedeploy_deployment_group" { - continue - } - - resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(rs.Primary.Attributes["app_name"]), - DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" { - continue - } - - if err == nil { - if resp.DeploymentGroupInfo.DeploymentGroupName != nil { - return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName) - } - } - - return err - } - - return nil -} - -func testAccAWSCodeDeployDeploymentGroupDisappears(group *codedeploy.DeploymentGroupInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - opts := &codedeploy.DeleteDeploymentGroupInput{ - ApplicationName: group.ApplicationName, - DeploymentGroupName: group.DeploymentGroupName, - } - if _, err := conn.DeleteDeploymentGroup(opts); err != nil { - return err - } - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &codedeploy.GetDeploymentGroupInput{ - ApplicationName: group.ApplicationName, - DeploymentGroupName: group.DeploymentGroupName, - } - _, err := conn.GetDeploymentGroup(opts) - if err != nil { - codedeploy, ok := err.(awserr.Error) - if ok && codedeploy.Code() == "DeploymentGroupDoesNotExistException" { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("Error retrieving CodeDeploy Deployment Group: %s", err)) - } - return resource.RetryableError(fmt.Errorf( - "Waiting for CodeDeploy Deployment Group: %v", group.DeploymentGroupName)) - }) - } -} - -func testAccCheckAWSCodeDeployDeploymentGroupExists(name string, group *codedeploy.DeploymentGroupInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - 
} - - conn := testAccProvider.Meta().(*AWSClient).codedeployconn - - resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(rs.Primary.Attributes["app_name"]), - DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), - }) - - if err != nil { - return err - } - - *group = *resp.DeploymentGroupInfo - - return nil - } -} - -func testAccAWSCodeDeployDeploymentGroup(rName string) string { - return fmt.Sprintf(` -resource "aws_codedeploy_app" "foo_app" { - name = "foo_app_%s" -} - -resource "aws_iam_role_policy" "foo_policy" { - name = "foo_policy_%s" - role = "${aws_iam_role.foo_role.id}" - policy = < 0 { - vk := tek[0].(map[string]interface{}) - ek := codepipeline.EncryptionKey{ - Type: aws.String(vk["type"].(string)), - Id: aws.String(vk["id"].(string)), - } - pipelineArtifactStore.EncryptionKey = &ek - } - return &pipelineArtifactStore -} - -func flattenAwsCodePipelineArtifactStore(artifactStore *codepipeline.ArtifactStore) []interface{} { - values := map[string]interface{}{} - values["type"] = *artifactStore.Type - values["location"] = *artifactStore.Location - if artifactStore.EncryptionKey != nil { - as := map[string]interface{}{ - "id": *artifactStore.EncryptionKey.Id, - "type": *artifactStore.EncryptionKey.Type, - } - values["encryption_key"] = []interface{}{as} - } - return []interface{}{values} -} - -func expandAwsCodePipelineStages(d *schema.ResourceData) []*codepipeline.StageDeclaration { - configs := d.Get("stage").([]interface{}) - pipelineStages := []*codepipeline.StageDeclaration{} - - for _, stage := range configs { - data := stage.(map[string]interface{}) - a := data["action"].([]interface{}) - actions := expandAwsCodePipelineActions(a) - pipelineStages = append(pipelineStages, &codepipeline.StageDeclaration{ - Name: aws.String(data["name"].(string)), - Actions: actions, - }) - } - return pipelineStages -} - -func flattenAwsCodePipelineStages(stages 
[]*codepipeline.StageDeclaration) []interface{} { - stagesList := []interface{}{} - for _, stage := range stages { - values := map[string]interface{}{} - values["name"] = *stage.Name - values["action"] = flattenAwsCodePipelineStageActions(stage.Actions) - stagesList = append(stagesList, values) - } - return stagesList - -} - -func expandAwsCodePipelineActions(s []interface{}) []*codepipeline.ActionDeclaration { - actions := []*codepipeline.ActionDeclaration{} - for _, config := range s { - data := config.(map[string]interface{}) - - conf := expandAwsCodePipelineStageActionConfiguration(data["configuration"].(map[string]interface{})) - if data["provider"].(string) == "GitHub" { - githubToken := os.Getenv("GITHUB_TOKEN") - if githubToken != "" { - conf["OAuthToken"] = aws.String(githubToken) - } - - } - - action := codepipeline.ActionDeclaration{ - ActionTypeId: &codepipeline.ActionTypeId{ - Category: aws.String(data["category"].(string)), - Owner: aws.String(data["owner"].(string)), - - Provider: aws.String(data["provider"].(string)), - Version: aws.String(data["version"].(string)), - }, - Name: aws.String(data["name"].(string)), - Configuration: conf, - } - - oa := data["output_artifacts"].([]interface{}) - if len(oa) > 0 { - outputArtifacts := expandAwsCodePipelineActionsOutputArtifacts(oa) - action.OutputArtifacts = outputArtifacts - - } - ia := data["input_artifacts"].([]interface{}) - if len(ia) > 0 { - inputArtifacts := expandAwsCodePipelineActionsInputArtifacts(ia) - action.InputArtifacts = inputArtifacts - - } - ra := data["role_arn"].(string) - if ra != "" { - action.RoleArn = aws.String(ra) - } - ro := data["run_order"].(int) - if ro > 0 { - action.RunOrder = aws.Int64(int64(ro)) - } - actions = append(actions, &action) - } - return actions -} - -func flattenAwsCodePipelineStageActions(actions []*codepipeline.ActionDeclaration) []interface{} { - actionsList := []interface{}{} - for _, action := range actions { - values := map[string]interface{}{ - 
"category": *action.ActionTypeId.Category, - "owner": *action.ActionTypeId.Owner, - "provider": *action.ActionTypeId.Provider, - "version": *action.ActionTypeId.Version, - "name": *action.Name, - } - if action.Configuration != nil { - config := flattenAwsCodePipelineStageActionConfiguration(action.Configuration) - _, ok := config["OAuthToken"] - actionProvider := *action.ActionTypeId.Provider - if ok && actionProvider == "GitHub" { - delete(config, "OAuthToken") - } - values["configuration"] = config - } - - if len(action.OutputArtifacts) > 0 { - values["output_artifacts"] = flattenAwsCodePipelineActionsOutputArtifacts(action.OutputArtifacts) - } - - if len(action.InputArtifacts) > 0 { - values["input_artifacts"] = flattenAwsCodePipelineActionsInputArtifacts(action.InputArtifacts) - } - - if action.RoleArn != nil { - values["role_arn"] = *action.RoleArn - } - - if action.RunOrder != nil { - values["run_order"] = int(*action.RunOrder) - } - - actionsList = append(actionsList, values) - } - return actionsList -} - -func expandAwsCodePipelineStageActionConfiguration(config map[string]interface{}) map[string]*string { - m := map[string]*string{} - for k, v := range config { - s := v.(string) - m[k] = &s - } - return m -} - -func flattenAwsCodePipelineStageActionConfiguration(config map[string]*string) map[string]string { - m := map[string]string{} - for k, v := range config { - m[k] = *v - } - return m -} - -func expandAwsCodePipelineActionsOutputArtifacts(s []interface{}) []*codepipeline.OutputArtifact { - outputArtifacts := []*codepipeline.OutputArtifact{} - for _, artifact := range s { - outputArtifacts = append(outputArtifacts, &codepipeline.OutputArtifact{ - Name: aws.String(artifact.(string)), - }) - } - return outputArtifacts -} - -func flattenAwsCodePipelineActionsOutputArtifacts(artifacts []*codepipeline.OutputArtifact) []string { - values := []string{} - for _, artifact := range artifacts { - values = append(values, *artifact.Name) - } - return values -} - 
-func expandAwsCodePipelineActionsInputArtifacts(s []interface{}) []*codepipeline.InputArtifact { - outputArtifacts := []*codepipeline.InputArtifact{} - for _, artifact := range s { - outputArtifacts = append(outputArtifacts, &codepipeline.InputArtifact{ - Name: aws.String(artifact.(string)), - }) - } - return outputArtifacts -} - -func flattenAwsCodePipelineActionsInputArtifacts(artifacts []*codepipeline.InputArtifact) []string { - values := []string{} - for _, artifact := range artifacts { - values = append(values, *artifact.Name) - } - return values -} - -func resourceAwsCodePipelineRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codepipelineconn - resp, err := conn.GetPipeline(&codepipeline.GetPipelineInput{ - Name: aws.String(d.Id()), - }) - - if err != nil { - pipelineerr, ok := err.(awserr.Error) - if ok && pipelineerr.Code() == "PipelineNotFoundException" { - log.Printf("[INFO] Codepipeline %q not found", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("[ERROR] Error retreiving Pipeline: %q", err) - } - pipeline := resp.Pipeline - - if err := d.Set("artifact_store", flattenAwsCodePipelineArtifactStore(pipeline.ArtifactStore)); err != nil { - return err - } - - if err := d.Set("stage", flattenAwsCodePipelineStages(pipeline.Stages)); err != nil { - return err - } - - d.Set("name", pipeline.Name) - d.Set("role_arn", pipeline.RoleArn) - return nil -} - -func resourceAwsCodePipelineUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).codepipelineconn - - pipeline := expandAwsCodePipeline(d) - params := &codepipeline.UpdatePipelineInput{ - Pipeline: pipeline, - } - _, err := conn.UpdatePipeline(params) - - if err != nil { - return fmt.Errorf( - "[ERROR] Error updating CodePipeline (%s): %s", - d.Id(), err) - } - - return resourceAwsCodePipelineRead(d, meta) -} - -func resourceAwsCodePipelineDelete(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).codepipelineconn - - _, err := conn.DeletePipeline(&codepipeline.DeletePipelineInput{ - Name: aws.String(d.Id()), - }) - - if err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_codepipeline_test.go b/builtin/providers/aws/resource_aws_codepipeline_test.go deleted file mode 100644 index a377f5ac7..000000000 --- a/builtin/providers/aws/resource_aws_codepipeline_test.go +++ /dev/null @@ -1,530 +0,0 @@ -package aws - -import ( - "fmt" - "os" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codepipeline" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCodePipeline_basic(t *testing.T) { - if os.Getenv("GITHUB_TOKEN") == "" { - t.Skip("Environment variable GITHUB_TOKEN is not set") - } - - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodePipelineDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodePipelineConfig_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.id", "1234"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.type", "KMS"), - ), - }, - { - Config: testAccAWSCodePipelineConfig_basicUpdated(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.id", "4567"), - 
resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.type", "KMS"), - ), - }, - }, - }) -} - -func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { - if os.Getenv("GITHUB_TOKEN") == "" { - t.Skip("Environment variable GITHUB_TOKEN is not set") - } - - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodePipelineDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodePipelineConfig_deployWithServiceRole(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.2.name", "Deploy"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.2.action.0.category", "Deploy"), - resource.TestMatchResourceAttr( - "aws_codepipeline.bar", "stage.2.action.0.role_arn", - regexp.MustCompile("^arn:aws:iam::[0-9]{12}:role/codepipeline-action-role.*")), - ), - }, - }, - }) -} - -func testAccCheckAWSCodePipelineExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No CodePipeline ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).codepipelineconn - - _, err := conn.GetPipeline(&codepipeline.GetPipelineInput{ - Name: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - return nil - } -} - -func testAccCheckAWSCodePipelineDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).codepipelineconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_codepipeline" { - continue - } - - _, err := conn.GetPipeline(&codepipeline.GetPipelineInput{ - Name: aws.String(rs.Primary.ID), - }) - - if err == nil { - return fmt.Errorf("Expected AWS 
CodePipeline to be gone, but was still found") - } - return nil - } - - return fmt.Errorf("Default error in CodePipeline Test") -} - -func testAccAWSCodePipelineConfig_basic(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "foo" { - bucket = "tf-test-pipeline-%s" - acl = "private" -} - -resource "aws_iam_role" "codepipeline_role" { - name = "codepipeline-role-%s" - - assume_role_policy = < 0 { - ruleInput.Scope = expandConfigRuleScope(scopes[0].(map[string]interface{})) - } - - if v, ok := d.GetOk("description"); ok { - ruleInput.Description = aws.String(v.(string)) - } - if v, ok := d.GetOk("input_parameters"); ok { - ruleInput.InputParameters = aws.String(v.(string)) - } - if v, ok := d.GetOk("maximum_execution_frequency"); ok { - ruleInput.MaximumExecutionFrequency = aws.String(v.(string)) - } - - input := configservice.PutConfigRuleInput{ - ConfigRule: &ruleInput, - } - log.Printf("[DEBUG] Creating AWSConfig config rule: %s", input) - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err := conn.PutConfigRule(&input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InsufficientPermissionsException" { - // IAM is eventually consistent - return resource.RetryableError(err) - } - } - - return resource.NonRetryableError(fmt.Errorf("Failed to create AWSConfig rule: %s", err)) - } - - return nil - }) - if err != nil { - return err - } - - d.SetId(name) - - log.Printf("[DEBUG] AWSConfig config rule %q created", name) - - return resourceAwsConfigConfigRuleRead(d, meta) -} - -func resourceAwsConfigConfigRuleRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - - out, err := conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ - ConfigRuleNames: []*string{aws.String(d.Id())}, - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchConfigRuleException" { - log.Printf("[WARN] Config Rule %q is gone 
(NoSuchConfigRuleException)", d.Id()) - d.SetId("") - return nil - } - return err - } - - numberOfRules := len(out.ConfigRules) - if numberOfRules < 1 { - log.Printf("[WARN] Config Rule %q is gone (no rules found)", d.Id()) - d.SetId("") - return nil - } - - if numberOfRules > 1 { - return fmt.Errorf("Expected exactly 1 Config Rule, received %d: %#v", - numberOfRules, out.ConfigRules) - } - - log.Printf("[DEBUG] AWS Config config rule received: %s", out) - - rule := out.ConfigRules[0] - d.Set("arn", rule.ConfigRuleArn) - d.Set("rule_id", rule.ConfigRuleId) - d.Set("name", rule.ConfigRuleName) - d.Set("description", rule.Description) - d.Set("input_parameters", rule.InputParameters) - d.Set("maximum_execution_frequency", rule.MaximumExecutionFrequency) - - if rule.Scope != nil { - d.Set("scope", flattenConfigRuleScope(rule.Scope)) - } - - d.Set("source", flattenConfigRuleSource(rule.Source)) - - return nil -} - -func resourceAwsConfigConfigRuleDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting AWS Config config rule %q", name) - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteConfigRule(&configservice.DeleteConfigRuleInput{ - ConfigRuleName: aws.String(name), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceInUseException" { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Deleting Config Rule failed: %s", err) - } - - conf := resource.StateChangeConf{ - Pending: []string{ - configservice.ConfigRuleStateActive, - configservice.ConfigRuleStateDeleting, - configservice.ConfigRuleStateDeletingResults, - configservice.ConfigRuleStateEvaluating, - }, - Target: []string{""}, - Timeout: 5 * time.Minute, - Refresh: func() (interface{}, string, error) { - out, err := 
conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ - ConfigRuleNames: []*string{aws.String(d.Id())}, - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchConfigRuleException" { - return 42, "", nil - } - return 42, "", fmt.Errorf("Failed to describe config rule %q: %s", d.Id(), err) - } - if len(out.ConfigRules) < 1 { - return 42, "", nil - } - rule := out.ConfigRules[0] - return out, *rule.ConfigRuleState, nil - }, - } - _, err = conf.WaitForState() - if err != nil { - return err - } - - log.Printf("[DEBUG] AWS Config config rule %q deleted", name) - - d.SetId("") - return nil -} - -func configRuleSourceDetailsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["message_type"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["event_source"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["maximum_execution_frequency"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_config_config_rule_test.go b/builtin/providers/aws/resource_aws_config_config_rule_test.go deleted file mode 100644 index 42f3047bd..000000000 --- a/builtin/providers/aws/resource_aws_config_config_rule_test.go +++ /dev/null @@ -1,473 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/configservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccConfigConfigRule_basic(t *testing.T) { - var cr configservice.ConfigRule - rInt := acctest.RandInt() - expectedName := fmt.Sprintf("tf-acc-test-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckConfigConfigRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigConfigRuleConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigRuleExists("aws_config_config_rule.foo", &cr), - testAccCheckConfigConfigRuleName("aws_config_config_rule.foo", expectedName, &cr), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "name", expectedName), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.owner", "AWS"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_identifier", "S3_BUCKET_VERSIONING_ENABLED"), - ), - }, - }, - }) -} - -func testAccConfigConfigRule_ownerAws(t *testing.T) { - var cr configservice.ConfigRule - rInt := acctest.RandInt() - expectedName := fmt.Sprintf("tf-acc-test-%d", rInt) - expectedArn := regexp.MustCompile("arn:aws:config:[a-z0-9-]+:[0-9]{12}:config-rule/config-rule-([a-z0-9]+)") - expectedRuleId := regexp.MustCompile("config-rule-[a-z0-9]+") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigConfigRuleConfig_ownerAws(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigRuleExists("aws_config_config_rule.foo", &cr), - testAccCheckConfigConfigRuleName("aws_config_config_rule.foo", expectedName, &cr), - resource.TestMatchResourceAttr("aws_config_config_rule.foo", "arn", expectedArn), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "name", expectedName), - resource.TestMatchResourceAttr("aws_config_config_rule.foo", "rule_id", expectedRuleId), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "description", "Terraform Acceptance tests"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.#", "1"), - 
resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.owner", "AWS"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_identifier", "REQUIRED_TAGS"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_detail.#", "0"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.0.compliance_resource_id", "blablah"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.0.compliance_resource_types.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.0.compliance_resource_types.3865728585", "AWS::EC2::Instance"), - ), - }, - }, - }) -} - -func testAccConfigConfigRule_customlambda(t *testing.T) { - var cr configservice.ConfigRule - rInt := acctest.RandInt() - - expectedName := fmt.Sprintf("tf-acc-test-%d", rInt) - path := "test-fixtures/lambdatest.zip" - expectedArn := regexp.MustCompile("arn:aws:config:[a-z0-9-]+:[0-9]{12}:config-rule/config-rule-([a-z0-9]+)") - expectedFunctionArn := regexp.MustCompile(fmt.Sprintf("arn:aws:lambda:[a-z0-9-]+:[0-9]{12}:function:tf_acc_lambda_awsconfig_%d", rInt)) - expectedRuleId := regexp.MustCompile("config-rule-[a-z0-9]+") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigConfigRuleConfig_customLambda(rInt, path), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigRuleExists("aws_config_config_rule.foo", &cr), - testAccCheckConfigConfigRuleName("aws_config_config_rule.foo", expectedName, &cr), - resource.TestMatchResourceAttr("aws_config_config_rule.foo", "arn", expectedArn), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "name", expectedName), - resource.TestMatchResourceAttr("aws_config_config_rule.foo", "rule_id", 
expectedRuleId), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "description", "Terraform Acceptance tests"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "maximum_execution_frequency", "Six_Hours"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.owner", "CUSTOM_LAMBDA"), - resource.TestMatchResourceAttr("aws_config_config_rule.foo", "source.0.source_identifier", expectedFunctionArn), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_detail.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_detail.3026922761.event_source", "aws.config"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_detail.3026922761.message_type", "ConfigurationSnapshotDeliveryCompleted"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "source.0.source_detail.3026922761.maximum_execution_frequency", ""), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.#", "1"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.0.tag_key", "IsTemporary"), - resource.TestCheckResourceAttr("aws_config_config_rule.foo", "scope.0.tag_value", "yes"), - ), - }, - }, - }) -} - -func testAccConfigConfigRule_importAws(t *testing.T) { - resourceName := "aws_config_config_rule.foo" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConfigConfigRuleConfig_ownerAws(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccConfigConfigRule_importLambda(t *testing.T) { - resourceName := "aws_config_config_rule.foo" - rInt := 
acctest.RandInt() - - path := "test-fixtures/lambdatest.zip" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConfigConfigRuleConfig_customLambda(rInt, path), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckConfigConfigRuleName(n, desired string, obj *configservice.ConfigRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != *obj.ConfigRuleName { - return fmt.Errorf("Expected name: %q, given: %q", desired, *obj.ConfigRuleName) - } - return nil - } -} - -func testAccCheckConfigConfigRuleExists(n string, obj *configservice.ConfigRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No config rule ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).configconn - out, err := conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ - ConfigRuleNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - if err != nil { - return fmt.Errorf("Failed to describe config rule: %s", err) - } - if len(out.ConfigRules) < 1 { - return fmt.Errorf("No config rule found when describing %q", rs.Primary.Attributes["name"]) - } - - cr := out.ConfigRules[0] - *obj = *cr - - return nil - } -} - -func testAccCheckConfigConfigRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).configconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_config_config_rule" { - continue - } - - resp, err := 
conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ - ConfigRuleNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - - if err == nil { - if len(resp.ConfigRules) != 0 && - *resp.ConfigRules[0].ConfigRuleName == rs.Primary.Attributes["name"] { - return fmt.Errorf("config rule still exists: %s", rs.Primary.Attributes["name"]) - } - } - } - - return nil -} - -func testAccConfigConfigRuleConfig_basic(randInt int) string { - return fmt.Sprintf(` -resource "aws_config_config_rule" "foo" { - name = "tf-acc-test-%d" - source { - owner = "AWS" - source_identifier = "S3_BUCKET_VERSIONING_ENABLED" - } - depends_on = ["aws_config_configuration_recorder.foo"] -} - -resource "aws_config_configuration_recorder" "foo" { - name = "tf-acc-test-%d" - role_arn = "${aws_iam_role.r.arn}" -} - -resource "aws_iam_role" "r" { - name = "tf-acc-test-awsconfig-%d" - assume_role_policy = < 1 { - return fmt.Errorf("Expected exactly 1 Configuration Recorder, received %d: %#v", - numberOfRecorders, out.ConfigurationRecorders) - } - - recorder := out.ConfigurationRecorders[0] - - d.Set("name", recorder.Name) - d.Set("role_arn", recorder.RoleARN) - - if recorder.RecordingGroup != nil { - flattened := flattenConfigRecordingGroup(recorder.RecordingGroup) - err = d.Set("recording_group", flattened) - if err != nil { - return fmt.Errorf("Failed to set recording_group: %s", err) - } - } - - return nil -} - -func resourceAwsConfigConfigurationRecorderDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - input := configservice.DeleteConfigurationRecorderInput{ - ConfigurationRecorderName: aws.String(d.Id()), - } - _, err := conn.DeleteConfigurationRecorder(&input) - if err != nil { - return fmt.Errorf("Deleting Configuration Recorder failed: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_config_configuration_recorder_status.go 
b/builtin/providers/aws/resource_aws_config_configuration_recorder_status.go deleted file mode 100644 index a2ba85b5d..000000000 --- a/builtin/providers/aws/resource_aws_config_configuration_recorder_status.go +++ /dev/null @@ -1,122 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/configservice" -) - -func resourceAwsConfigConfigurationRecorderStatus() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsConfigConfigurationRecorderStatusPut, - Read: resourceAwsConfigConfigurationRecorderStatusRead, - Update: resourceAwsConfigConfigurationRecorderStatusPut, - Delete: resourceAwsConfigConfigurationRecorderStatusDelete, - - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil - }, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "is_enabled": { - Type: schema.TypeBool, - Required: true, - }, - }, - } -} - -func resourceAwsConfigConfigurationRecorderStatusPut(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - - name := d.Get("name").(string) - d.SetId(name) - - if d.HasChange("is_enabled") { - isEnabled := d.Get("is_enabled").(bool) - if isEnabled { - log.Printf("[DEBUG] Starting AWSConfig Configuration recorder %q", name) - startInput := configservice.StartConfigurationRecorderInput{ - ConfigurationRecorderName: aws.String(name), - } - _, err := conn.StartConfigurationRecorder(&startInput) - if err != nil { - return fmt.Errorf("Failed to start Configuration Recorder: %s", err) - } - } else { - log.Printf("[DEBUG] Stopping AWSConfig Configuration recorder %q", name) - stopInput := configservice.StopConfigurationRecorderInput{ - ConfigurationRecorderName: 
aws.String(name), - } - _, err := conn.StopConfigurationRecorder(&stopInput) - if err != nil { - return fmt.Errorf("Failed to stop Configuration Recorder: %s", err) - } - } - } - - return resourceAwsConfigConfigurationRecorderStatusRead(d, meta) -} - -func resourceAwsConfigConfigurationRecorderStatusRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - - name := d.Id() - statusInput := configservice.DescribeConfigurationRecorderStatusInput{ - ConfigurationRecorderNames: []*string{aws.String(name)}, - } - statusOut, err := conn.DescribeConfigurationRecorderStatus(&statusInput) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NoSuchConfigurationRecorderException" { - log.Printf("[WARN] Configuration Recorder (status) %q is gone (NoSuchConfigurationRecorderException)", name) - d.SetId("") - return nil - } - } - return fmt.Errorf("Failed describing Configuration Recorder %q status: %s", - name, err) - } - - numberOfStatuses := len(statusOut.ConfigurationRecordersStatus) - if numberOfStatuses < 1 { - log.Printf("[WARN] Configuration Recorder (status) %q is gone (no recorders found)", name) - d.SetId("") - return nil - } - - if numberOfStatuses > 1 { - return fmt.Errorf("Expected exactly 1 Configuration Recorder (status), received %d: %#v", - numberOfStatuses, statusOut.ConfigurationRecordersStatus) - } - - d.Set("is_enabled", statusOut.ConfigurationRecordersStatus[0].Recording) - - return nil -} - -func resourceAwsConfigConfigurationRecorderStatusDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).configconn - input := configservice.StopConfigurationRecorderInput{ - ConfigurationRecorderName: aws.String(d.Get("name").(string)), - } - _, err := conn.StopConfigurationRecorder(&input) - if err != nil { - return fmt.Errorf("Stopping Configuration Recorder failed: %s", err) - } - - d.SetId("") - return nil -} diff --git 
a/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go b/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go deleted file mode 100644 index ded0d16b4..000000000 --- a/builtin/providers/aws/resource_aws_config_configuration_recorder_status_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/configservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccConfigConfigurationRecorderStatus_basic(t *testing.T) { - var cr configservice.ConfigurationRecorder - var crs configservice.ConfigurationRecorderStatus - rInt := acctest.RandInt() - expectedName := fmt.Sprintf("tf-acc-test-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigurationRecorderStatusDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigConfigurationRecorderStatusConfig(rInt, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigurationRecorderExists("aws_config_configuration_recorder.foo", &cr), - testAccCheckConfigConfigurationRecorderStatusExists("aws_config_configuration_recorder_status.foo", &crs), - testAccCheckConfigConfigurationRecorderStatus("aws_config_configuration_recorder_status.foo", false, &crs), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "is_enabled", "false"), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "name", expectedName), - ), - }, - }, - }) -} - -func testAccConfigConfigurationRecorderStatus_startEnabled(t *testing.T) { - var cr configservice.ConfigurationRecorder - var crs configservice.ConfigurationRecorderStatus - rInt := acctest.RandInt() - expectedName := fmt.Sprintf("tf-acc-test-%d", rInt) - 
- resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigurationRecorderStatusDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigConfigurationRecorderStatusConfig(rInt, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigurationRecorderExists("aws_config_configuration_recorder.foo", &cr), - testAccCheckConfigConfigurationRecorderStatusExists("aws_config_configuration_recorder_status.foo", &crs), - testAccCheckConfigConfigurationRecorderStatus("aws_config_configuration_recorder_status.foo", true, &crs), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "is_enabled", "true"), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "name", expectedName), - ), - }, - { - Config: testAccConfigConfigurationRecorderStatusConfig(rInt, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigurationRecorderExists("aws_config_configuration_recorder.foo", &cr), - testAccCheckConfigConfigurationRecorderStatusExists("aws_config_configuration_recorder_status.foo", &crs), - testAccCheckConfigConfigurationRecorderStatus("aws_config_configuration_recorder_status.foo", false, &crs), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "is_enabled", "false"), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "name", expectedName), - ), - }, - { - Config: testAccConfigConfigurationRecorderStatusConfig(rInt, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigConfigurationRecorderExists("aws_config_configuration_recorder.foo", &cr), - testAccCheckConfigConfigurationRecorderStatusExists("aws_config_configuration_recorder_status.foo", &crs), - testAccCheckConfigConfigurationRecorderStatus("aws_config_configuration_recorder_status.foo", true, &crs), - 
resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "is_enabled", "true"), - resource.TestCheckResourceAttr("aws_config_configuration_recorder_status.foo", "name", expectedName), - ), - }, - }, - }) -} - -func testAccConfigConfigurationRecorderStatus_importBasic(t *testing.T) { - resourceName := "aws_config_configuration_recorder_status.foo" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigConfigurationRecorderStatusDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConfigConfigurationRecorderStatusConfig(rInt, true), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckConfigConfigurationRecorderStatusExists(n string, obj *configservice.ConfigurationRecorderStatus) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).configconn - out, err := conn.DescribeConfigurationRecorderStatus(&configservice.DescribeConfigurationRecorderStatusInput{ - ConfigurationRecorderNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - if err != nil { - return fmt.Errorf("Failed to describe status of configuration recorder: %s", err) - } - if len(out.ConfigurationRecordersStatus) < 1 { - return fmt.Errorf("Configuration Recorder %q not found", rs.Primary.Attributes["name"]) - } - - status := out.ConfigurationRecordersStatus[0] - *obj = *status - - return nil - } -} - -func testAccCheckConfigConfigurationRecorderStatus(n string, desired bool, obj *configservice.ConfigurationRecorderStatus) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if 
*obj.Recording != desired { - return fmt.Errorf("Expected configuration recorder %q recording to be %t, given: %t", - n, desired, *obj.Recording) - } - - return nil - } -} - -func testAccCheckConfigConfigurationRecorderStatusDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).configconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_config_configuration_recorder_status" { - continue - } - - resp, err := conn.DescribeConfigurationRecorderStatus(&configservice.DescribeConfigurationRecorderStatusInput{ - ConfigurationRecorderNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - - if err == nil { - if len(resp.ConfigurationRecordersStatus) != 0 && - *resp.ConfigurationRecordersStatus[0].Name == rs.Primary.Attributes["name"] && - *resp.ConfigurationRecordersStatus[0].Recording { - return fmt.Errorf("Configuration recorder is still recording: %s", rs.Primary.Attributes["name"]) - } - } - } - - return nil -} - -func testAccConfigConfigurationRecorderStatusConfig(randInt int, enabled bool) string { - return fmt.Sprintf(` -resource "aws_config_configuration_recorder" "foo" { - name = "tf-acc-test-%d" - role_arn = "${aws_iam_role.r.arn}" -} - -resource "aws_iam_role" "r" { - name = "tf-acc-test-awsconfig-%d" - assume_role_policy = < 1 { - return fmt.Errorf("Received %d delivery channels under %s (expected exactly 1): %s", - len(out.DeliveryChannels), d.Id(), out.DeliveryChannels) - } - - channel := out.DeliveryChannels[0] - - d.Set("name", channel.Name) - d.Set("s3_bucket_name", channel.S3BucketName) - d.Set("s3_key_prefix", channel.S3KeyPrefix) - d.Set("sns_topic_arn", channel.SnsTopicARN) - - if channel.ConfigSnapshotDeliveryProperties != nil { - d.Set("snapshot_delivery_properties", flattenConfigSnapshotDeliveryProperties(channel.ConfigSnapshotDeliveryProperties)) - } - - return nil -} - -func resourceAwsConfigDeliveryChannelDelete(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).configconn - input := configservice.DeleteDeliveryChannelInput{ - DeliveryChannelName: aws.String(d.Id()), - } - _, err := conn.DeleteDeliveryChannel(&input) - if err != nil { - return fmt.Errorf("Unable to delete delivery channel: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_config_delivery_channel_test.go b/builtin/providers/aws/resource_aws_config_delivery_channel_test.go deleted file mode 100644 index 098465f9d..000000000 --- a/builtin/providers/aws/resource_aws_config_delivery_channel_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/configservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccConfigDeliveryChannel_basic(t *testing.T) { - var dc configservice.DeliveryChannel - rInt := acctest.RandInt() - expectedName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt) - expectedBucketName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigDeliveryChannelDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigDeliveryChannelConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigDeliveryChannelExists("aws_config_delivery_channel.foo", &dc), - testAccCheckConfigDeliveryChannelName("aws_config_delivery_channel.foo", expectedName, &dc), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "name", expectedName), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "s3_bucket_name", expectedBucketName), - ), - }, - }, - }) -} - -func testAccConfigDeliveryChannel_allParams(t *testing.T) { - var dc configservice.DeliveryChannel - rInt := acctest.RandInt() - 
expectedName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt) - expectedBucketName := fmt.Sprintf("tf-acc-test-awsconfig-%d", rInt) - expectedSnsTopicArn := regexp.MustCompile(fmt.Sprintf("arn:aws:sns:[a-z0-9-]+:[0-9]{12}:tf-acc-test-%d", rInt)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigDeliveryChannelDestroy, - Steps: []resource.TestStep{ - { - Config: testAccConfigDeliveryChannelConfig_allParams(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckConfigDeliveryChannelExists("aws_config_delivery_channel.foo", &dc), - testAccCheckConfigDeliveryChannelName("aws_config_delivery_channel.foo", expectedName, &dc), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "name", expectedName), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "s3_bucket_name", expectedBucketName), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "s3_key_prefix", "one/two/three"), - resource.TestMatchResourceAttr("aws_config_delivery_channel.foo", "sns_topic_arn", expectedSnsTopicArn), - resource.TestCheckResourceAttr("aws_config_delivery_channel.foo", "snapshot_delivery_properties.0.delivery_frequency", "Six_Hours"), - ), - }, - }, - }) -} - -func testAccConfigDeliveryChannel_importBasic(t *testing.T) { - resourceName := "aws_config_delivery_channel.foo" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConfigDeliveryChannelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConfigDeliveryChannelConfig_basic(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckConfigDeliveryChannelName(n, desired string, obj *configservice.DeliveryChannel) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - _, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if *obj.Name != desired { - return fmt.Errorf("Expected name: %q, given: %q", desired, *obj.Name) - } - return nil - } -} - -func testAccCheckConfigDeliveryChannelExists(n string, obj *configservice.DeliveryChannel) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No delivery channel ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).configconn - out, err := conn.DescribeDeliveryChannels(&configservice.DescribeDeliveryChannelsInput{ - DeliveryChannelNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - if err != nil { - return fmt.Errorf("Failed to describe delivery channel: %s", err) - } - if len(out.DeliveryChannels) < 1 { - return fmt.Errorf("No delivery channel found when describing %q", rs.Primary.Attributes["name"]) - } - - dc := out.DeliveryChannels[0] - *obj = *dc - - return nil - } -} - -func testAccCheckConfigDeliveryChannelDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).configconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_config_delivery_channel" { - continue - } - - resp, err := conn.DescribeDeliveryChannels(&configservice.DescribeDeliveryChannelsInput{ - DeliveryChannelNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - - if err == nil { - if len(resp.DeliveryChannels) != 0 && - *resp.DeliveryChannels[0].Name == rs.Primary.Attributes["name"] { - return fmt.Errorf("Delivery Channel still exists: %s", rs.Primary.Attributes["name"]) - } - } - } - - return nil -} - -func testAccConfigDeliveryChannelConfig_basic(randInt int) string { - return fmt.Sprintf(` -resource "aws_config_configuration_recorder" "foo" { - name = "tf-acc-test-%d" - role_arn = "${aws_iam_role.r.arn}" -} 
- -resource "aws_iam_role" "r" { - name = "tf-acc-test-awsconfig-%d" - assume_role_policy = < 0 && *resp.CustomerGateways[0].State != "deleted" { - return true, nil - } - - return false, nil -} - -func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - gatewayFilter := &ec2.Filter{ - Name: aws.String("customer-gateway-id"), - Values: []*string{aws.String(d.Id())}, - } - - resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{gatewayFilter}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error finding CustomerGateway: %s", err) - return err - } - } - - if len(resp.CustomerGateways) != 1 { - return fmt.Errorf("[ERROR] Error finding CustomerGateway: %s", d.Id()) - } - - if *resp.CustomerGateways[0].State == "deleted" { - log.Printf("[INFO] Customer Gateway is in `deleted` state: %s", d.Id()) - d.SetId("") - return nil - } - - customerGateway := resp.CustomerGateways[0] - d.Set("ip_address", customerGateway.IpAddress) - d.Set("type", customerGateway.Type) - d.Set("tags", tagsToMap(customerGateway.Tags)) - - if *customerGateway.BgpAsn != "" { - val, err := strconv.ParseInt(*customerGateway.BgpAsn, 0, 0) - if err != nil { - return fmt.Errorf("error parsing bgp_asn: %s", err) - } - - d.Set("bgp_asn", int(val)) - } - - return nil -} - -func resourceAwsCustomerGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Update tags if required. 
- if err := setTags(conn, d); err != nil { - return err - } - - d.SetPartial("tags") - - return resourceAwsCustomerGatewayRead(d, meta) -} - -func resourceAwsCustomerGatewayDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteCustomerGateway(&ec2.DeleteCustomerGatewayInput{ - CustomerGatewayId: aws.String(d.Id()), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error deleting CustomerGateway: %s", err) - return err - } - } - - gatewayFilter := &ec2.Filter{ - Name: aws.String("customer-gateway-id"), - Values: []*string{aws.String(d.Id())}, - } - - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{gatewayFilter}, - }) - - if err != nil { - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidCustomerGatewayID.NotFound" { - return nil - } - return resource.NonRetryableError(err) - } - - if len(resp.CustomerGateways) != 1 { - return resource.RetryableError(fmt.Errorf("[ERROR] Error finding CustomerGateway for delete: %s", d.Id())) - } - - switch *resp.CustomerGateways[0].State { - case "pending", "available", "deleting": - return resource.RetryableError(fmt.Errorf("[DEBUG] Gateway (%s) in state (%s), retrying", d.Id(), *resp.CustomerGateways[0].State)) - case "deleted": - return nil - default: - return resource.RetryableError(fmt.Errorf("[DEBUG] Unrecognized state (%s) for Customer Gateway delete on (%s)", *resp.CustomerGateways[0].State, d.Id())) - } - }) - - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_customer_gateway_test.go b/builtin/providers/aws/resource_aws_customer_gateway_test.go deleted file mode 100644 index 9606a4557..000000000 --- 
a/builtin/providers/aws/resource_aws_customer_gateway_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSCustomerGateway_basic(t *testing.T) { - var gateway ec2.CustomerGateway - rBgpAsn := acctest.RandIntRange(64512, 65534) - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_customer_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckCustomerGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), - ), - }, - { - Config: testAccCustomerGatewayConfigUpdateTags(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), - ), - }, - { - Config: testAccCustomerGatewayConfigForceReplace(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), - ), - }, - }, - }) -} - -func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) { - var gateway ec2.CustomerGateway - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_customer_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckCustomerGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), - ), - 
}, - { - Config: testAccCustomerGatewayConfigIdentical(rInt, rBgpAsn), - ExpectError: regexp.MustCompile("An existing customer gateway"), - }, - }, - }) -} - -func TestAccAWSCustomerGateway_disappears(t *testing.T) { - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - var gateway ec2.CustomerGateway - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCustomerGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), - testAccAWSCustomerGatewayDisappears(&gateway), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccAWSCustomerGatewayDisappears(gateway *ec2.CustomerGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - opts := &ec2.DeleteCustomerGatewayInput{ - CustomerGatewayId: gateway.CustomerGatewayId, - } - if _, err := conn.DeleteCustomerGateway(opts); err != nil { - return err - } - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &ec2.DescribeCustomerGatewaysInput{ - CustomerGatewayIds: []*string{gateway.CustomerGatewayId}, - } - resp, err := conn.DescribeCustomerGateways(opts) - if err != nil { - cgw, ok := err.(awserr.Error) - if ok && cgw.Code() == "InvalidCustomerGatewayID.NotFound" { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("Error retrieving Customer Gateway: %s", err)) - } - if *resp.CustomerGateways[0].State == "deleted" { - return nil - } - return resource.RetryableError(fmt.Errorf( - "Waiting for Customer Gateway: %v", gateway.CustomerGatewayId)) - }) - } -} - -func testAccCheckCustomerGatewayDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type 
!= "aws_customer_gatewah" { - continue - } - - gatewayFilter := &ec2.Filter{ - Name: aws.String("customer-gateway-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{gatewayFilter}, - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidCustomerGatewayID.NotFound" { - continue - } - - if err == nil { - if len(resp.CustomerGateways) > 0 { - return fmt.Errorf("Customer gateway still exists: %v", resp.CustomerGateways) - } - - if *resp.CustomerGateways[0].State == "deleted" { - continue - } - } - - return err - } - - return nil -} - -func testAccCheckCustomerGateway(gatewayResource string, cgw *ec2.CustomerGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[gatewayResource] - if !ok { - return fmt.Errorf("Not found: %s", gatewayResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - gateway, ok := s.RootModule().Resources[gatewayResource] - if !ok { - return fmt.Errorf("Not found: %s", gatewayResource) - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - gatewayFilter := &ec2.Filter{ - Name: aws.String("customer-gateway-id"), - Values: []*string{aws.String(gateway.Primary.ID)}, - } - - resp, err := ec2conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{gatewayFilter}, - }) - - if err != nil { - return err - } - - respGateway := resp.CustomerGateways[0] - *cgw = *respGateway - - return nil - } -} - -func testAccCustomerGatewayConfig(rInt, rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_customer_gateway" "foo" { - bgp_asn = %d - ip_address = "172.0.0.1" - type = "ipsec.1" - tags { - Name = "foo-gateway-%d" - } - } - `, rBgpAsn, rInt) -} - -func testAccCustomerGatewayConfigIdentical(randInt, rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_customer_gateway" "foo" { - bgp_asn = %d 
- ip_address = "172.0.0.1" - type = "ipsec.1" - tags { - Name = "foo-gateway-%d" - } - } - resource "aws_customer_gateway" "identical" { - bgp_asn = %d - ip_address = "172.0.0.1" - type = "ipsec.1" - tags { - Name = "foo-gateway-identical-%d" - } - } - `, rBgpAsn, randInt, rBgpAsn, randInt) -} - -// Add the Another: "tag" tag. -func testAccCustomerGatewayConfigUpdateTags(rInt, rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_customer_gateway" "foo" { - bgp_asn = %d - ip_address = "172.0.0.1" - type = "ipsec.1" - tags { - Name = "foo-gateway-%d" - Another = "tag" - } - } - `, rBgpAsn, rInt) -} - -// Change the ip_address. -func testAccCustomerGatewayConfigForceReplace(rInt, rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_customer_gateway" "foo" { - bgp_asn = %d - ip_address = "172.10.10.1" - type = "ipsec.1" - tags { - Name = "foo-gateway-%d" - Another = "tag" - } - } - `, rBgpAsn, rInt) -} diff --git a/builtin/providers/aws/resource_aws_db_event_subscription.go b/builtin/providers/aws/resource_aws_db_event_subscription.go deleted file mode 100644 index 9e725ce2d..000000000 --- a/builtin/providers/aws/resource_aws_db_event_subscription.go +++ /dev/null @@ -1,385 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDbEventSubscription() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbEventSubscriptionCreate, - Read: resourceAwsDbEventSubscriptionRead, - Update: resourceAwsDbEventSubscriptionUpdate, - Delete: resourceAwsDbEventSubscriptionDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsDbEventSubscriptionImport, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: 
validateDbEventSubscriptionName, - }, - "sns_topic": { - Type: schema.TypeString, - Required: true, - }, - "event_categories": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "source_ids": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - // ValidateFunc: validateDbEventSubscriptionSourceIds, - // requires source_type to be set, does not seem to be a way to validate this - }, - "source_type": { - Type: schema.TypeString, - Optional: true, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "customer_aws_id": { - Type: schema.TypeString, - Computed: true, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDbEventSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - name := d.Get("name").(string) - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - sourceIdsSet := d.Get("source_ids").(*schema.Set) - sourceIds := make([]*string, sourceIdsSet.Len()) - for i, sourceId := range sourceIdsSet.List() { - sourceIds[i] = aws.String(sourceId.(string)) - } - - eventCategoriesSet := d.Get("event_categories").(*schema.Set) - eventCategories := make([]*string, eventCategoriesSet.Len()) - for i, eventCategory := range eventCategoriesSet.List() { - eventCategories[i] = aws.String(eventCategory.(string)) - } - - request := &rds.CreateEventSubscriptionInput{ - SubscriptionName: aws.String(name), - SnsTopicArn: aws.String(d.Get("sns_topic").(string)), - Enabled: aws.Bool(d.Get("enabled").(bool)), - SourceIds: sourceIds, - SourceType: aws.String(d.Get("source_type").(string)), - EventCategories: eventCategories, - Tags: tags, - } - - log.Println("[DEBUG] Create RDS Event Subscription:", request) - - _, err := rdsconn.CreateEventSubscription(request) - if err != nil { - return fmt.Errorf("Error creating RDS Event Subscription 
%s: %s", name, err) - } - - log.Println( - "[INFO] Waiting for RDS Event Subscription to be ready") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"active"}, - Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Creating RDS Event Subscription %s failed: %s", d.Id(), err) - } - - return resourceAwsDbEventSubscriptionRead(d, meta) -} - -func resourceAwsDbEventSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - sub, err := resourceAwsDbEventSubscriptionRetrieve(d.Get("name").(string), meta.(*AWSClient).rdsconn) - if err != nil { - return fmt.Errorf("Error retrieving RDS Event Subscription %s: %s", d.Id(), err) - } - if sub == nil { - d.SetId("") - return nil - } - - d.SetId(*sub.CustSubscriptionId) - if err := d.Set("name", sub.CustSubscriptionId); err != nil { - return err - } - if err := d.Set("sns_topic", sub.SnsTopicArn); err != nil { - return err - } - if err := d.Set("source_type", sub.SourceType); err != nil { - return err - } - if err := d.Set("enabled", sub.Enabled); err != nil { - return err - } - if err := d.Set("source_ids", flattenStringList(sub.SourceIdsList)); err != nil { - return err - } - if err := d.Set("event_categories", flattenStringList(sub.EventCategoriesList)); err != nil { - return err - } - if err := d.Set("customer_aws_id", sub.CustomerAwsId); err != nil { - return err - } - - // list tags for resource - // set tags - conn := meta.(*AWSClient).rdsconn - if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err != nil { - log.Printf("[DEBUG] Error building ARN for RDS Event Subscription, not setting Tags for Event Subscription %s", 
*sub.CustSubscriptionId) - } else { - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func resourceAwsDbEventSubscriptionRetrieve( - name string, rdsconn *rds.RDS) (*rds.EventSubscription, error) { - - request := &rds.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(name), - } - - describeResp, err := rdsconn.DescribeEventSubscriptions(request) - if err != nil { - if rdserr, ok := err.(awserr.Error); ok && rdserr.Code() == "SubscriptionNotFound" { - log.Printf("[WARN] No RDS Event Subscription by name (%s) found", name) - return nil, nil - } - return nil, fmt.Errorf("Error reading RDS Event Subscription %s: %s", name, err) - } - - if len(describeResp.EventSubscriptionsList) != 1 { - return nil, fmt.Errorf("Unable to find RDS Event Subscription: %#v", describeResp.EventSubscriptionsList) - } - - return describeResp.EventSubscriptionsList[0], nil -} - -func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - d.Partial(true) - requestUpdate := false - - req := &rds.ModifyEventSubscriptionInput{ - SubscriptionName: aws.String(d.Id()), - } - - if d.HasChange("event_categories") { - eventCategoriesSet := d.Get("event_categories").(*schema.Set) - req.EventCategories = make([]*string, eventCategoriesSet.Len()) - for i, eventCategory := range eventCategoriesSet.List() { - req.EventCategories[i] = aws.String(eventCategory.(string)) - } - requestUpdate = true - } - - if d.HasChange("enabled") { - req.Enabled = aws.Bool(d.Get("enabled").(bool)) - requestUpdate = true - } - - if d.HasChange("sns_topic") { - req.SnsTopicArn = aws.String(d.Get("sns_topic").(string)) - requestUpdate = true - } - - if 
d.HasChange("source_type") { - req.SourceType = aws.String(d.Get("source_type").(string)) - requestUpdate = true - } - - log.Printf("[DEBUG] Send RDS Event Subscription modification request: %#v", requestUpdate) - if requestUpdate { - log.Printf("[DEBUG] RDS Event Subscription modification request: %#v", req) - _, err := rdsconn.ModifyEventSubscription(req) - if err != nil { - return fmt.Errorf("Modifying RDS Event Subscription %s failed: %s", d.Id(), err) - } - - log.Println( - "[INFO] Waiting for RDS Event Subscription modification to finish") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"modifying"}, - Target: []string{"active"}, - Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Modifying RDS Event Subscription %s failed: %s", d.Id(), err) - } - d.SetPartial("event_categories") - d.SetPartial("enabled") - d.SetPartial("sns_topic") - d.SetPartial("source_type") - } - - if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(rdsconn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - if d.HasChange("source_ids") { - o, n := d.GetChange("source_ids") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if len(remove) > 0 { - for _, removing := range remove { - log.Printf("[INFO] Removing %s as a Source Identifier from %q", *removing, d.Id()) - _, err := 
rdsconn.RemoveSourceIdentifierFromSubscription(&rds.RemoveSourceIdentifierFromSubscriptionInput{ - SourceIdentifier: removing, - SubscriptionName: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - } - - if len(add) > 0 { - for _, adding := range add { - log.Printf("[INFO] Adding %s as a Source Identifier to %q", *adding, d.Id()) - _, err := rdsconn.AddSourceIdentifierToSubscription(&rds.AddSourceIdentifierToSubscriptionInput{ - SourceIdentifier: adding, - SubscriptionName: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - } - d.SetPartial("source_ids") - } - - d.Partial(false) - - return nil -} - -func resourceAwsDbEventSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - deleteOpts := rds.DeleteEventSubscriptionInput{ - SubscriptionName: aws.String(d.Id()), - } - - if _, err := rdsconn.DeleteEventSubscription(&deleteOpts); err != nil { - rdserr, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) - } - - if rdserr.Code() != "DBEventSubscriptionNotFoundFault" { - log.Printf("[WARN] RDS Event Subscription %s missing during delete", d.Id()) - return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) - } - return err -} - -func resourceAwsDbEventSubscriptionRefreshFunc( - d *schema.ResourceData, - rdsconn *rds.RDS) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - sub, err := 
resourceAwsDbEventSubscriptionRetrieve(d.Get("name").(string), rdsconn) - - if err != nil { - log.Printf("Error on retrieving DB Event Subscription when waiting: %s", err) - return nil, "", err - } - - if sub == nil { - return nil, "", nil - } - - if sub.Status != nil { - log.Printf("[DEBUG] DB Event Subscription status for %s: %s", d.Id(), *sub.Status) - } - - return sub, *sub.Status, nil - } -} - -func buildRDSEventSubscriptionARN(customerAwsId, subscriptionId, partition, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:es:%s", partition, region, customerAwsId, subscriptionId) - return arn, nil -} diff --git a/builtin/providers/aws/resource_aws_db_event_subscription_test.go b/builtin/providers/aws/resource_aws_db_event_subscription_test.go deleted file mode 100644 index c6dfde773..000000000 --- a/builtin/providers/aws/resource_aws_db_event_subscription_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDBEventSubscription_basicUpdate(t *testing.T) { - var v rds.EventSubscription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBEventSubscriptionConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), - resource.TestCheckResourceAttr( - 
"aws_db_event_subscription.bar", "source_type", "db-instance"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "tags.Name", "name"), - ), - }, - { - Config: testAccAWSDBEventSubscriptionConfigUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "false"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-parameter-group"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "tags.Name", "new-name"), - ), - }, - }, - }) -} - -func TestAccAWSDBEventSubscription_withSourceIds(t *testing.T) { - var v rds.EventSubscription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBEventSubscriptionConfigWithSourceIds(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-parameter-group"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_ids.#", "1"), - ), - }, - { - Config: testAccAWSDBEventSubscriptionConfigUpdateSourceIds(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestCheckResourceAttr( - 
"aws_db_event_subscription.bar", "enabled", "true"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-parameter-group"), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_ids.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckAWSDBEventSubscriptionExists(n string, v *rds.EventSubscription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No RDS Event Subscription is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeEventSubscriptions(&opts) - - if err != nil { - return err - } - - if len(resp.EventSubscriptionsList) != 1 || - *resp.EventSubscriptionsList[0].CustSubscriptionId != rs.Primary.ID { - return fmt.Errorf("RDS Event Subscription not found") - } - - *v = *resp.EventSubscriptionsList[0] - return nil - } -} - -func testAccCheckAWSDBEventSubscriptionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_event_subscription" { - continue - } - - var err error - resp, err := conn.DescribeEventSubscriptions( - &rds.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "SubscriptionNotFound" { - continue - } - - if err == nil { - if len(resp.EventSubscriptionsList) != 0 && - *resp.EventSubscriptionsList[0].CustSubscriptionId == rs.Primary.ID { - return fmt.Errorf("Event Subscription still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - 
if !ok { - return err - } - if newerr.Code() != "SubscriptionNotFound" { - return err - } - } - - return nil -} - -func testAccAWSDBEventSubscriptionConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "tf-acc-test-rds-event-subs-sns-topic-%d" -} - -resource "aws_db_event_subscription" "bar" { - name = "tf-acc-test-rds-event-subs-%d" - sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" - source_type = "db-instance" - event_categories = [ - "availability", - "backup", - "creation", - "deletion", - "maintenance" - ] - tags { - Name = "name" - } -}`, rInt, rInt) -} - -func testAccAWSDBEventSubscriptionConfigUpdate(rInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "tf-acc-test-rds-event-subs-sns-topic-%d" -} - -resource "aws_db_event_subscription" "bar" { - name = "tf-acc-test-rds-event-subs-%d" - sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" - enabled = false - source_type = "db-parameter-group" - event_categories = [ - "configuration change" - ] - tags { - Name = "new-name" - } -}`, rInt, rInt) -} - -func testAccAWSDBEventSubscriptionConfigWithSourceIds(rInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "tf-acc-test-rds-event-subs-sns-topic-%d" -} - -resource "aws_db_parameter_group" "bar" { - name = "db-parameter-group-event-%d" - family = "mysql5.6" - description = "Test parameter group for terraform" -} - -resource "aws_db_event_subscription" "bar" { - name = "tf-acc-test-rds-event-subs-with-ids-%d" - sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" - source_type = "db-parameter-group" - source_ids = ["${aws_db_parameter_group.bar.id}"] - event_categories = [ - "configuration change" - ] - tags { - Name = "name" - } -}`, rInt, rInt, rInt) -} - -func testAccAWSDBEventSubscriptionConfigUpdateSourceIds(rInt int) string { - return fmt.Sprintf(` - resource "aws_sns_topic" "aws_sns_topic" { - name = 
"tf-acc-test-rds-event-subs-sns-topic-%d" - } - - resource "aws_db_parameter_group" "bar" { - name = "db-parameter-group-event-%d" - family = "mysql5.6" - description = "Test parameter group for terraform" - } - - resource "aws_db_parameter_group" "foo" { - name = "db-parameter-group-event-2-%d" - family = "mysql5.6" - description = "Test parameter group for terraform" - } - - resource "aws_db_event_subscription" "bar" { - name = "tf-acc-test-rds-event-subs-with-ids-%d" - sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" - source_type = "db-parameter-group" - source_ids = ["${aws_db_parameter_group.bar.id}","${aws_db_parameter_group.foo.id}"] - event_categories = [ - "configuration change" - ] - tags { - Name = "name" - } - }`, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go deleted file mode 100644 index 05621fb5d..000000000 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ /dev/null @@ -1,1152 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDbInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbInstanceCreate, - Read: resourceAwsDbInstanceRead, - Update: resourceAwsDbInstanceUpdate, - Delete: resourceAwsDbInstanceDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsDbInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(80 * time.Minute), - Delete: schema.DefaultTimeout(40 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "arn": { - Type: 
schema.TypeString, - Computed: true, - }, - - "username": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - - "engine": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - value := v.(string) - return strings.ToLower(value) - }, - }, - - "engine_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: suppressAwsDbEngineVersionDiffs, - }, - - "character_set_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "storage_encrypted": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "allocated_storage": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "storage_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "identifier": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"identifier_prefix"}, - ValidateFunc: validateRdsIdentifier, - }, - "identifier_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateRdsIdentifierPrefix, - }, - - "instance_class": { - Type: schema.TypeString, - Required: true, - }, - - "availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "backup_retention_period": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "backup_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateOnceADayWindowFormat, - }, - - "iops": { - Type: schema.TypeInt, - Optional: true, - }, - - "license_model": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "maintenance_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: 
func(v interface{}) string { - if v != nil { - value := v.(string) - return strings.ToLower(value) - } - return "" - }, - ValidateFunc: validateOnceAWeekWindowFormat, - }, - - "multi_az": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "port": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "publicly_accessible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "vpc_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "security_group_names": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "final_snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) - } - return - }, - }, - - "skip_final_snapshot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "copy_tags_to_snapshot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "db_subnet_group_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "parameter_group_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "address": { - Type: schema.TypeString, - Computed: true, - }, - - "endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "hosted_zone_id": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: 
schema.TypeString, - Computed: true, - }, - - // apply_immediately is used to determine when the update modifications - // take place. - // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "replicate_source_db": { - Type: schema.TypeString, - Optional: true, - }, - - "replicas": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "snapshot_identifier": { - Type: schema.TypeString, - Computed: false, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "auto_minor_version_upgrade": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "allow_major_version_upgrade": { - Type: schema.TypeBool, - Computed: false, - Optional: true, - }, - - "monitoring_role_arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "monitoring_interval": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - - "option_group_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - - "timezone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "iam_database_authentication_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - var identifier string - if v, ok := d.GetOk("identifier"); ok { - identifier = v.(string) - } else { - if v, ok := d.GetOk("identifier_prefix"); ok { - identifier = 
resource.PrefixedUniqueId(v.(string)) - } else { - identifier = resource.UniqueId() - } - - // SQL Server identifier size is max 15 chars, so truncate - if engine := d.Get("engine").(string); engine != "" { - if strings.Contains(strings.ToLower(engine), "sqlserver") { - identifier = identifier[:15] - } - } - d.Set("identifier", identifier) - } - - if v, ok := d.GetOk("replicate_source_db"); ok { - opts := rds.CreateDBInstanceReadReplicaInput{ - SourceDBInstanceIdentifier: aws.String(v.(string)), - CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), - DBInstanceClass: aws.String(d.Get("instance_class").(string)), - DBInstanceIdentifier: aws.String(identifier), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - Tags: tags, - } - if attr, ok := d.GetOk("iops"); ok { - opts.Iops = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("availability_zone"); ok { - opts.AvailabilityZone = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("storage_type"); ok { - opts.StorageType = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_role_arn"); ok { - opts.MonitoringRoleArn = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_interval"); ok { - opts.MonitoringInterval = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("option_group_name"); ok { - opts.OptionGroupName = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts) - _, err := conn.CreateDBInstanceReadReplica(&opts) - if err != nil { - return fmt.Errorf("Error creating DB Instance: %s", err) - } - } else if _, ok := d.GetOk("snapshot_identifier"); ok { - opts := rds.RestoreDBInstanceFromDBSnapshotInput{ - DBInstanceClass: aws.String(d.Get("instance_class").(string)), - 
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)), - DBSnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), - AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - Tags: tags, - CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), - } - - if attr, ok := d.GetOk("name"); ok { - // "Note: This parameter [DBName] doesn't apply to the MySQL, PostgreSQL, or MariaDB engines." - // https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBInstanceFromDBSnapshot.html - switch strings.ToLower(d.Get("engine").(string)) { - case "mysql", "postgres", "mariadb": - // skip - default: - opts.DBName = aws.String(attr.(string)) - } - } - - if attr, ok := d.GetOk("availability_zone"); ok { - opts.AvailabilityZone = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("engine"); ok { - opts.Engine = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("iops"); ok { - opts.Iops = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("license_model"); ok { - opts.LicenseModel = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("multi_az"); ok { - opts.MultiAZ = aws.Bool(attr.(bool)) - } - - if attr, ok := d.GetOk("option_group_name"); ok { - opts.OptionGroupName = aws.String(attr.(string)) - - } - - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("tde_credential_arn"); ok { - opts.TdeCredentialArn = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("storage_type"); ok { - opts.StorageType = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] DB Instance restore from snapshot configuration: %s", opts) - _, err := conn.RestoreDBInstanceFromDBSnapshot(&opts) - if err != nil { - return fmt.Errorf("Error creating DB Instance: %s", 
err) - } - - var sgUpdate bool - var passwordUpdate bool - - if _, ok := d.GetOk("password"); ok { - passwordUpdate = true - } - - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - sgUpdate = true - } - if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { - sgUpdate = true - } - if sgUpdate || passwordUpdate { - log.Printf("[INFO] DB is restoring from snapshot with default security, but custom security should be set, will now update after snapshot is restored!") - - // wait for instance to get up and then modify security - d.SetId(d.Get("identifier").(string)) - - log.Printf("[INFO] DB Instance ID: %s", d.Id()) - - log.Println( - "[INFO] Waiting for DB Instance to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", - "maintenance", "renaming", "rebooting", "upgrading"}, - Target: []string{"available"}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - err = resourceAwsDbInstanceUpdate(d, meta) - if err != nil { - return err - } - - } - } else { - if _, ok := d.GetOk("allocated_storage"); !ok { - return fmt.Errorf(`provider.aws: aws_db_instance: %s: "allocated_storage": required field is not set`, d.Get("name").(string)) - } - if _, ok := d.GetOk("engine"); !ok { - return fmt.Errorf(`provider.aws: aws_db_instance: %s: "engine": required field is not set`, d.Get("name").(string)) - } - if _, ok := d.GetOk("password"); !ok { - return fmt.Errorf(`provider.aws: aws_db_instance: %s: "password": required field is not set`, d.Get("name").(string)) - } - if _, ok := d.GetOk("username"); !ok { - return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is 
not set`, d.Get("name").(string)) - } - opts := rds.CreateDBInstanceInput{ - AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))), - DBName: aws.String(d.Get("name").(string)), - DBInstanceClass: aws.String(d.Get("instance_class").(string)), - DBInstanceIdentifier: aws.String(d.Get("identifier").(string)), - MasterUsername: aws.String(d.Get("username").(string)), - MasterUserPassword: aws.String(d.Get("password").(string)), - Engine: aws.String(d.Get("engine").(string)), - EngineVersion: aws.String(d.Get("engine_version").(string)), - StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), - AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - Tags: tags, - CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), - } - - attr := d.Get("backup_retention_period") - opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) - if attr, ok := d.GetOk("multi_az"); ok { - opts.MultiAZ = aws.Bool(attr.(bool)) - - } - - if attr, ok := d.GetOk("character_set_name"); ok { - opts.CharacterSetName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("timezone"); ok { - opts.Timezone = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("maintenance_window"); ok { - opts.PreferredMaintenanceWindow = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("backup_window"); ok { - opts.PreferredBackupWindow = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("license_model"); ok { - opts.LicenseModel = aws.String(attr.(string)) - } - if attr, ok := d.GetOk("parameter_group_name"); ok { - opts.DBParameterGroupName = aws.String(attr.(string)) - } - - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - var s []*string - for _, v := range attr.List() { - s = append(s, aws.String(v.(string))) - } - opts.VpcSecurityGroupIds = s - } - - if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { - var s 
[]*string - for _, v := range attr.List() { - s = append(s, aws.String(v.(string))) - } - opts.DBSecurityGroups = s - } - if attr, ok := d.GetOk("storage_type"); ok { - opts.StorageType = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("iops"); ok { - opts.Iops = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("availability_zone"); ok { - opts.AvailabilityZone = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_role_arn"); ok { - opts.MonitoringRoleArn = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_interval"); ok { - opts.MonitoringInterval = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("option_group_name"); ok { - opts.OptionGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("kms_key_id"); ok { - opts.KmsKeyId = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { - opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) - } - - log.Printf("[DEBUG] DB Instance create configuration: %#v", opts) - var err error - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err = conn.CreateDBInstance(&opts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "ENHANCED_MONITORING") { - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Error creating DB Instance: %s", err) - } - } - - d.SetId(d.Get("identifier").(string)) - - log.Printf("[INFO] DB Instance ID: %s", d.Id()) - - log.Println( - "[INFO] Waiting for DB Instance to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", 
"backing-up", "modifying", "resetting-master-credentials", - "maintenance", "renaming", "rebooting", "upgrading", "configuring-enhanced-monitoring"}, - Target: []string{"available"}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDbInstanceRead(d, meta) -} - -func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { - v, err := resourceAwsDbInstanceRetrieve(d, meta) - - if err != nil { - return err - } - if v == nil { - d.SetId("") - return nil - } - - d.Set("name", v.DBName) - d.Set("identifier", v.DBInstanceIdentifier) - d.Set("resource_id", v.DbiResourceId) - d.Set("username", v.MasterUsername) - d.Set("engine", v.Engine) - d.Set("engine_version", v.EngineVersion) - d.Set("allocated_storage", v.AllocatedStorage) - d.Set("iops", v.Iops) - d.Set("copy_tags_to_snapshot", v.CopyTagsToSnapshot) - d.Set("auto_minor_version_upgrade", v.AutoMinorVersionUpgrade) - d.Set("storage_type", v.StorageType) - d.Set("instance_class", v.DBInstanceClass) - d.Set("availability_zone", v.AvailabilityZone) - d.Set("backup_retention_period", v.BackupRetentionPeriod) - d.Set("backup_window", v.PreferredBackupWindow) - d.Set("license_model", v.LicenseModel) - d.Set("maintenance_window", v.PreferredMaintenanceWindow) - d.Set("publicly_accessible", v.PubliclyAccessible) - d.Set("multi_az", v.MultiAZ) - d.Set("kms_key_id", v.KmsKeyId) - d.Set("port", v.DbInstancePort) - d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled) - if v.DBSubnetGroup != nil { - d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName) - } - - if v.CharacterSetName != nil { - d.Set("character_set_name", v.CharacterSetName) - } - - d.Set("timezone", v.Timezone) - - if 
len(v.DBParameterGroups) > 0 { - d.Set("parameter_group_name", v.DBParameterGroups[0].DBParameterGroupName) - } - - if v.Endpoint != nil { - d.Set("port", v.Endpoint.Port) - d.Set("address", v.Endpoint.Address) - d.Set("hosted_zone_id", v.Endpoint.HostedZoneId) - if v.Endpoint.Address != nil && v.Endpoint.Port != nil { - d.Set("endpoint", - fmt.Sprintf("%s:%d", *v.Endpoint.Address, *v.Endpoint.Port)) - } - } - - d.Set("status", v.DBInstanceStatus) - d.Set("storage_encrypted", v.StorageEncrypted) - if v.OptionGroupMemberships != nil { - d.Set("option_group_name", v.OptionGroupMemberships[0].OptionGroupName) - } - - if v.MonitoringInterval != nil { - d.Set("monitoring_interval", v.MonitoringInterval) - } - - if v.MonitoringRoleArn != nil { - d.Set("monitoring_role_arn", v.MonitoringRoleArn) - } - - // list tags for resource - // set tags - conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - name := "" - if v.DBName != nil && *v.DBName != "" { - name = *v.DBName - } - log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name) - } else { - d.Set("arn", arn) - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - // Create an empty schema.Set to hold all vpc security group ids - ids := &schema.Set{ - F: schema.HashString, - } - for _, v := range v.VpcSecurityGroups { - ids.Add(*v.VpcSecurityGroupId) - } - d.Set("vpc_security_group_ids", ids) - - // Create an empty schema.Set to hold all security group names - sgn := &schema.Set{ - F: schema.HashString, - } - for _, v := range v.DBSecurityGroups { - sgn.Add(*v.DBSecurityGroupName) - } - d.Set("security_group_names", 
sgn) - - // replica things - - var replicas []string - for _, v := range v.ReadReplicaDBInstanceIdentifiers { - replicas = append(replicas, *v) - } - if err := d.Set("replicas", replicas); err != nil { - return fmt.Errorf("[DEBUG] Error setting replicas attribute: %#v, error: %#v", replicas, err) - } - - d.Set("replicate_source_db", v.ReadReplicaSourceDBInstanceIdentifier) - - return nil -} - -func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - log.Printf("[DEBUG] DB Instance destroy: %v", d.Id()) - - opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} - - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) - - if skipFinalSnapshot == false { - if name, present := d.GetOk("final_snapshot_identifier"); present { - opts.FinalDBSnapshotIdentifier = aws.String(name.(string)) - } else { - return fmt.Errorf("DB Instance FinalSnapshotIdentifier is required when a final snapshot is required") - } - } - - log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts) - if _, err := conn.DeleteDBInstance(&opts); err != nil { - return err - } - - log.Println( - "[INFO] Waiting for DB Instance to be destroyed") - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", - "modifying", "deleting", "available"}, - Target: []string{}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - if _, err := stateConf.WaitForState(); err != nil { - return err - } - - return nil -} - -func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - d.Partial(true) - - req := &rds.ModifyDBInstanceInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBInstanceIdentifier: 
aws.String(d.Id()), - } - d.SetPartial("apply_immediately") - - if !d.Get("apply_immediately").(bool) { - log.Println("[INFO] Only settings updating, instance changes will be applied in next maintenance window") - } - - requestUpdate := false - if d.HasChange("allocated_storage") || d.HasChange("iops") { - d.SetPartial("allocated_storage") - d.SetPartial("iops") - req.Iops = aws.Int64(int64(d.Get("iops").(int))) - req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int))) - requestUpdate = true - } - if d.HasChange("allow_major_version_upgrade") { - d.SetPartial("allow_major_version_upgrade") - req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) - requestUpdate = true - } - if d.HasChange("backup_retention_period") { - d.SetPartial("backup_retention_period") - req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) - requestUpdate = true - } - if d.HasChange("copy_tags_to_snapshot") { - d.SetPartial("copy_tags_to_snapshot") - req.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) - requestUpdate = true - } - if d.HasChange("instance_class") { - d.SetPartial("instance_class") - req.DBInstanceClass = aws.String(d.Get("instance_class").(string)) - requestUpdate = true - } - if d.HasChange("parameter_group_name") { - d.SetPartial("parameter_group_name") - req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) - requestUpdate = true - } - if d.HasChange("engine_version") { - d.SetPartial("engine_version") - req.EngineVersion = aws.String(d.Get("engine_version").(string)) - req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) - requestUpdate = true - } - if d.HasChange("backup_window") { - d.SetPartial("backup_window") - req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string)) - requestUpdate = true - } - if d.HasChange("maintenance_window") { - d.SetPartial("maintenance_window") - 
req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) - requestUpdate = true - } - if d.HasChange("password") { - d.SetPartial("password") - req.MasterUserPassword = aws.String(d.Get("password").(string)) - requestUpdate = true - } - if d.HasChange("multi_az") { - d.SetPartial("multi_az") - req.MultiAZ = aws.Bool(d.Get("multi_az").(bool)) - requestUpdate = true - } - if d.HasChange("publicly_accessible") { - d.SetPartial("publicly_accessible") - req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) - requestUpdate = true - } - if d.HasChange("storage_type") { - d.SetPartial("storage_type") - req.StorageType = aws.String(d.Get("storage_type").(string)) - requestUpdate = true - - if *req.StorageType == "io1" { - req.Iops = aws.Int64(int64(d.Get("iops").(int))) - } - } - if d.HasChange("auto_minor_version_upgrade") { - d.SetPartial("auto_minor_version_upgrade") - req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) - requestUpdate = true - } - - if d.HasChange("monitoring_role_arn") { - d.SetPartial("monitoring_role_arn") - req.MonitoringRoleArn = aws.String(d.Get("monitoring_role_arn").(string)) - requestUpdate = true - } - - if d.HasChange("monitoring_interval") { - d.SetPartial("monitoring_interval") - req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int))) - requestUpdate = true - } - - if d.HasChange("vpc_security_group_ids") { - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - var s []*string - for _, v := range attr.List() { - s = append(s, aws.String(v.(string))) - } - req.VpcSecurityGroupIds = s - } - requestUpdate = true - } - - if d.HasChange("security_group_names") { - if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { - var s []*string - for _, v := range attr.List() { - s = append(s, aws.String(v.(string))) - } - req.DBSecurityGroups = s - } - requestUpdate = true - } - - if d.HasChange("option_group_name") { 
- d.SetPartial("option_group_name") - req.OptionGroupName = aws.String(d.Get("option_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("port") { - d.SetPartial("port") - req.DBPortNumber = aws.Int64(int64(d.Get("port").(int))) - requestUpdate = true - } - if d.HasChange("db_subnet_group_name") && !d.IsNewResource() { - d.SetPartial("db_subnet_group_name") - req.DBSubnetGroupName = aws.String(d.Get("db_subnet_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("iam_database_authentication_enabled") { - req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) - requestUpdate = true - } - - log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate) - if requestUpdate { - log.Printf("[DEBUG] DB Instance Modification request: %s", req) - _, err := conn.ModifyDBInstance(req) - if err != nil { - return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) - } - - log.Println("[INFO] Waiting for DB Instance to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", - "maintenance", "renaming", "rebooting", "upgrading", "configuring-enhanced-monitoring", "moving-to-vpc"}, - Target: []string{"available"}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, dbStateErr := stateConf.WaitForState() - if dbStateErr != nil { - return dbStateErr - } - } - - // separate request to promote a database - if d.HasChange("replicate_source_db") { - if d.Get("replicate_source_db").(string) == "" { - // promote - opts := rds.PromoteReadReplicaInput{ - DBInstanceIdentifier: aws.String(d.Id()), - } - attr := d.Get("backup_retention_period") - opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) - if attr, ok 
:= d.GetOk("backup_window"); ok { - opts.PreferredBackupWindow = aws.String(attr.(string)) - } - _, err := conn.PromoteReadReplica(&opts) - if err != nil { - return fmt.Errorf("Error promoting database: %#v", err) - } - d.Set("replicate_source_db", "") - } else { - return fmt.Errorf("cannot elect new source database for replication") - } - } - - if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - d.Partial(false) - - return resourceAwsDbInstanceRead(d, meta) -} - -// resourceAwsDbInstanceRetrieve fetches DBInstance information from the AWS -// API. It returns an error if there is a communication problem or unexpected -// error with AWS. When the DBInstance is not found, it returns no error and a -// nil pointer. -func resourceAwsDbInstanceRetrieve( - d *schema.ResourceData, meta interface{}) (*rds.DBInstance, error) { - conn := meta.(*AWSClient).rdsconn - - opts := rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(d.Id()), - } - - log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts) - - resp, err := conn.DescribeDBInstances(&opts) - if err != nil { - dbinstanceerr, ok := err.(awserr.Error) - if ok && dbinstanceerr.Code() == "DBInstanceNotFound" { - return nil, nil - } - return nil, fmt.Errorf("Error retrieving DB Instances: %s", err) - } - - if len(resp.DBInstances) != 1 || - *resp.DBInstances[0].DBInstanceIdentifier != d.Id() { - if err != nil { - return nil, nil - } - } - - return resp.DBInstances[0], nil -} - -func resourceAwsDbInstanceImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required - 
d.Set("skip_final_snapshot", true) - return []*schema.ResourceData{d}, nil -} - -func resourceAwsDbInstanceStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := resourceAwsDbInstanceRetrieve(d, meta) - - if err != nil { - log.Printf("Error on retrieving DB Instance when waiting: %s", err) - return nil, "", err - } - - if v == nil { - return nil, "", nil - } - - if v.DBInstanceStatus != nil { - log.Printf("[DEBUG] DB Instance status for instance %s: %s", d.Id(), *v.DBInstanceStatus) - } - - return v, *v.DBInstanceStatus, nil - } -} - -func buildRDSARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:db:%s", partition, region, accountid, identifier) - return arn, nil -} diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go deleted file mode 100644 index 22ee5df10..000000000 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ /dev/null @@ -1,1336 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - - "math/rand" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" -) - -func TestAccAWSDBInstance_basic(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSDBInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "allocated_storage", "10"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "engine", "mysql"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "license_model", "general-public-license"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "instance_class", "db.t1.micro"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "name", "baz"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "username", "foo"), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "parameter_group_name", "default.mysql5.6"), - resource.TestCheckResourceAttrSet("aws_db_instance.bar", "hosted_zone_id"), - resource.TestCheckResourceAttrSet( - "aws_db_instance.bar", "resource_id"), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_namePrefix(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.test", &v), - testAccCheckAWSDBInstanceAttributes(&v), - resource.TestMatchResourceAttr( - "aws_db_instance.test", "identifier", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_generatedName(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSDBInstanceExists("aws_db_instance.test", &v), - testAccCheckAWSDBInstanceAttributes(&v), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_kmsKey(t *testing.T) { - var v rds.DBInstance - keyRegex := regexp.MustCompile("^arn:aws:kms:") - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - config := fmt.Sprintf(testAccAWSDBInstanceConfigKmsKeyId, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - resource.TestMatchResourceAttr( - "aws_db_instance.bar", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_subnetGroup(t *testing.T) { - var v rds.DBInstance - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigWithSubnetGroup(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "db_subnet_group_name", "foo-"+rName), - ), - }, - { - Config: testAccAWSDBInstanceConfigWithSubnetGroupUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "db_subnet_group_name", "bar-"+rName), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_optionGroup(t *testing.T) { - var v rds.DBInstance - - rName := fmt.Sprintf("tf-option-test-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigWithOptionGroup(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "option_group_name", rName), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_iamAuth(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckAWSDBIAMAuth(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "iam_database_authentication_enabled", "true"), - ), - }, - }, - }) -} - -func TestAccAWSDBInstanceReplica(t *testing.T) { - var s, r rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccReplicaInstanceConfig(rand.New(rand.NewSource(time.Now().UnixNano())).Int()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &s), - testAccCheckAWSDBInstanceExists("aws_db_instance.replica", &r), - testAccCheckAWSDBInstanceReplicaAttributes(&s, &r), - ), - }, - }, - }) -} - -func TestAccAWSDBInstanceNoSnapshot(t *testing.T) { - var snap rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot, - Steps: []resource.TestStep{ - { - Config: testAccSnapshotInstanceConfig(), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSDBInstanceExists("aws_db_instance.snapshot", &snap), - ), - }, - }, - }) -} - -func TestAccAWSDBInstanceSnapshot(t *testing.T) { - var snap rds.DBInstance - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // testAccCheckAWSDBInstanceSnapshot verifies a database snapshot is - // created, and subequently deletes it - CheckDestroy: testAccCheckAWSDBInstanceSnapshot(rInt), - Steps: []resource.TestStep{ - { - Config: testAccSnapshotInstanceConfigWithSnapshot(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.snapshot", &snap), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_enhancedMonitoring(t *testing.T) { - var dbInstance rds.DBInstance - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot, - Steps: []resource.TestStep{ - { - Config: testAccSnapshotInstanceConfig_enhancedMonitoring(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.enhanced_monitoring", &dbInstance), - resource.TestCheckResourceAttr( - "aws_db_instance.enhanced_monitoring", "monitoring_interval", "5"), - ), - }, - }, - }) -} - -// Regression test for https://github.com/hashicorp/terraform/issues/3760 . -// We apply a plan, then change just the iops. 
If the apply succeeds, we -// consider this a pass, as before in 3760 the request would fail -func TestAccAWS_separate_DBInstance_iops_update(t *testing.T) { - var v rds.DBInstance - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSnapshotInstanceConfig_iopsUpdate(rName, 1000), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - ), - }, - - { - Config: testAccSnapshotInstanceConfig_iopsUpdate(rName, 2000), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - testAccCheckAWSDBInstanceAttributes(&v), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_portUpdate(t *testing.T) { - var v rds.DBInstance - - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSnapshotInstanceConfig_mysqlPort(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "port", "3306"), - ), - }, - - { - Config: testAccSnapshotInstanceConfig_updateMysqlPort(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_instance.bar", "port", "3305"), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_MSSQL_TZ(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSDBMSSQL_timezone, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.mssql", &v), - testAccCheckAWSDBInstanceAttributes_MSSQL(&v, ""), - resource.TestCheckResourceAttr( - "aws_db_instance.mssql", "allocated_storage", "20"), - resource.TestCheckResourceAttr( - "aws_db_instance.mssql", "engine", "sqlserver-ex"), - ), - }, - - { - Config: testAccAWSDBMSSQL_timezone_AKST, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.mssql", &v), - testAccCheckAWSDBInstanceAttributes_MSSQL(&v, "Alaskan Standard Time"), - resource.TestCheckResourceAttr( - "aws_db_instance.mssql", "allocated_storage", "20"), - resource.TestCheckResourceAttr( - "aws_db_instance.mssql", "engine", "sqlserver-ex"), - ), - }, - }, - }) -} - -func TestAccAWSDBInstance_MinorVersion(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigAutoMinorVersion, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - ), - }, - }, - }) -} - -// See https://github.com/hashicorp/terraform/issues/11881 -func TestAccAWSDBInstance_diffSuppressInitialState(t *testing.T) { - var v rds.DBInstance - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBInstanceConfigSuppressInitialState(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), - ), - }, - }, - }) -} - -func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - 
if rs.Type != "aws_db_instance" { - continue - } - - // Try to find the Group - var err error - resp, err := conn.DescribeDBInstances( - &rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) - - if ae, ok := err.(awserr.Error); ok && ae.Code() == "DBInstanceNotFound" { - continue - } - - if err == nil { - if len(resp.DBInstances) != 0 && - *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Instance still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "DBInstanceNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSDBInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.Engine != "mysql" { - return fmt.Errorf("bad engine: %#v", *v.Engine) - } - - if *v.EngineVersion == "" { - return fmt.Errorf("bad engine_version: %#v", *v.EngineVersion) - } - - if *v.BackupRetentionPeriod != 0 { - return fmt.Errorf("bad backup_retention_period: %#v", *v.BackupRetentionPeriod) - } - - return nil - } -} - -func testAccCheckAWSDBInstanceAttributes_MSSQL(v *rds.DBInstance, tz string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.Engine != "sqlserver-ex" { - return fmt.Errorf("bad engine: %#v", *v.Engine) - } - - rtz := "" - if v.Timezone != nil { - rtz = *v.Timezone - } - - if tz != rtz { - return fmt.Errorf("Expected (%s) Timezone for MSSQL test, got (%s)", tz, rtz) - } - - return nil - } -} - -func testAccCheckAWSDBInstanceReplicaAttributes(source, replica *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if replica.ReadReplicaSourceDBInstanceIdentifier != nil && *replica.ReadReplicaSourceDBInstanceIdentifier != *source.DBInstanceIdentifier { - return fmt.Errorf("bad source identifier for replica, expected: '%s', got: '%s'", *source.DBInstanceIdentifier, 
*replica.ReadReplicaSourceDBInstanceIdentifier) - } - - return nil - } -} - -func testAccCheckAWSDBInstanceSnapshot(rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_instance" { - continue - } - - awsClient := testAccProvider.Meta().(*AWSClient) - conn := awsClient.rdsconn - - var err error - log.Printf("[INFO] Trying to locate the DBInstance Final Snapshot") - snapshot_identifier := fmt.Sprintf("foobarbaz-test-terraform-final-snapshot-%d", rInt) - _, snapErr := conn.DescribeDBSnapshots( - &rds.DescribeDBSnapshotsInput{ - DBSnapshotIdentifier: aws.String(snapshot_identifier), - }) - - if snapErr != nil { - newerr, _ := snapErr.(awserr.Error) - if newerr.Code() == "DBSnapshotNotFound" { - return fmt.Errorf("Snapshot %s not found", snapshot_identifier) - } - } else { // snapshot was found, - // verify we have the tags copied to the snapshot - instanceARN, err := buildRDSARN(snapshot_identifier, testAccProvider.Meta().(*AWSClient).partition, testAccProvider.Meta().(*AWSClient).accountid, testAccProvider.Meta().(*AWSClient).region) - // tags have a different ARN, just swapping :db: for :snapshot: - tagsARN := strings.Replace(instanceARN, ":db:", ":snapshot:", 1) - if err != nil { - return fmt.Errorf("Error building ARN for tags check with ARN (%s): %s", tagsARN, err) - } - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(tagsARN), - }) - if err != nil { - return fmt.Errorf("Error retrieving tags for ARN (%s): %s", tagsARN, err) - } - - if resp.TagList == nil || len(resp.TagList) == 0 { - return fmt.Errorf("Tag list is nil or zero: %s", resp.TagList) - } - - var found bool - for _, t := range resp.TagList { - if *t.Key == "Name" && *t.Value == "tf-tags-db" { - found = true - } - } - if !found { - return fmt.Errorf("Expected to find tag Name (%s), but wasn't found. 
Tags: %s", "tf-tags-db", resp.TagList) - } - // end tag search - - log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier) - _, snapDeleteErr := conn.DeleteDBSnapshot( - &rds.DeleteDBSnapshotInput{ - DBSnapshotIdentifier: aws.String(snapshot_identifier), - }) - if snapDeleteErr != nil { - return err - } - } // end snapshot was found - - resp, err := conn.DescribeDBInstances( - &rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) - - if err != nil { - newerr, _ := err.(awserr.Error) - if newerr.Code() != "DBInstanceNotFound" { - return err - } - - } else { - if len(resp.DBInstances) != 0 && - *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Instance still exists") - } - } - } - - return nil - } -} - -func testAccCheckAWSDBInstanceNoSnapshot(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_instance" { - continue - } - - var err error - resp, err := conn.DescribeDBInstances( - &rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) - - if err != nil { - newerr, _ := err.(awserr.Error) - if newerr.Code() != "DBInstanceNotFound" { - return err - } - - } else { - if len(resp.DBInstances) != 0 && - *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Instance still exists") - } - } - - snapshot_identifier := "foobarbaz-test-terraform-final-snapshot-2" - _, snapErr := conn.DescribeDBSnapshots( - &rds.DescribeDBSnapshotsInput{ - DBSnapshotIdentifier: aws.String(snapshot_identifier), - }) - - if snapErr != nil { - newerr, _ := snapErr.(awserr.Error) - if newerr.Code() != "DBSnapshotNotFound" { - return fmt.Errorf("Snapshot %s found and it shouldn't have been", snapshot_identifier) - } - } - } - - return nil -} - -func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeDBInstances(&opts) - - if err != nil { - return err - } - - if len(resp.DBInstances) != 1 || - *resp.DBInstances[0].DBInstanceIdentifier != rs.Primary.ID { - return fmt.Errorf("DB Instance not found") - } - - *v = *resp.DBInstances[0] - - return nil - } -} - -// Database names cannot collide, and deletion takes so long, that making the -// name a bit random helps so able we can kill a test that's just waiting for a -// delete and not be blocked on kicking off another one. -var testAccAWSDBInstanceConfig = ` -resource "aws_db_instance" "bar" { - allocated_storage = 10 - engine = "MySQL" - engine_version = "5.6.21" - instance_class = "db.t1.micro" - name = "baz" - password = "barbarbarbar" - username = "foo" - - - # Maintenance Window is stored in lower case in the API, though not strictly - # documented. Terraform will downcase this to match (as opposed to throw a - # validation error). 
- maintenance_window = "Fri:09:00-Fri:09:30" - skip_final_snapshot = true - - backup_retention_period = 0 - - parameter_group_name = "default.mysql5.6" - - timeouts { - create = "30m" - } -}` - -const testAccAWSDBInstanceConfig_namePrefix = ` -resource "aws_db_instance" "test" { - allocated_storage = 10 - engine = "MySQL" - identifier_prefix = "tf-test-" - instance_class = "db.t1.micro" - password = "password" - username = "root" - publicly_accessible = true - skip_final_snapshot = true - - timeouts { - create = "30m" - } -}` - -const testAccAWSDBInstanceConfig_generatedName = ` -resource "aws_db_instance" "test" { - allocated_storage = 10 - engine = "MySQL" - instance_class = "db.t1.micro" - password = "password" - username = "root" - publicly_accessible = true - skip_final_snapshot = true - - timeouts { - create = "30m" - } -}` - -var testAccAWSDBInstanceConfigKmsKeyId = ` -resource "aws_kms_key" "foo" { - description = "Terraform acc test %s" - policy = < 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func optionInList(optionName string, list []*string) bool { - for _, opt := range list { - if *opt == optionName { - return true - } - } - return false -} - -func resourceAwsDbOptionGroupUpdate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - if d.HasChange("option") { - o, n := d.GetChange("option") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - addOptions, addErr := expandOptionConfiguration(ns.Difference(os).List()) - if addErr != nil { - return addErr - } - - addingOptionNames, err := flattenOptionNames(ns.Difference(os).List()) - if err != nil { - return err - } - - removeOptions := []*string{} - opts, err := flattenOptionNames(os.Difference(ns).List()) - if err != nil { - return err - } - - for _, optionName := range opts { - if optionInList(*optionName, addingOptionNames) { - continue - } - 
removeOptions = append(removeOptions, optionName) - } - - modifyOpts := &rds.ModifyOptionGroupInput{ - OptionGroupName: aws.String(d.Id()), - ApplyImmediately: aws.Bool(true), - } - - if len(addOptions) > 0 { - modifyOpts.OptionsToInclude = addOptions - } - - if len(removeOptions) > 0 { - modifyOpts.OptionsToRemove = removeOptions - } - - log.Printf("[DEBUG] Modify DB Option Group: %s", modifyOpts) - _, err = rdsconn.ModifyOptionGroup(modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying DB Option Group: %s", err) - } - d.SetPartial("option") - - } - - if arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(rdsconn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - return resourceAwsDbOptionGroupRead(d, meta) -} - -func resourceAwsDbOptionGroupDelete(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - deleteOpts := &rds.DeleteOptionGroupInput{ - OptionGroupName: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Delete DB Option Group: %#v", deleteOpts) - ret := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - _, err := rdsconn.DeleteOptionGroup(deleteOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidOptionGroupStateFault" { - log.Printf("[DEBUG] AWS believes the RDS Option Group is still in use, retrying") - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if ret != nil { - return fmt.Errorf("Error Deleting DB Option Group: %s", ret) - } - return nil -} - -func flattenOptionNames(configured []interface{}) ([]*string, error) { - var optionNames []*string - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - optionNames = append(optionNames, aws.String(data["option_name"].(string))) - } - - return optionNames, nil 
-} - -func resourceAwsDbOptionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["option_name"].(string))) - if _, ok := m["port"]; ok { - buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) - } - - for _, oRaw := range m["option_settings"].(*schema.Set).List() { - o := oRaw.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", o["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", o["value"].(string))) - } - - for _, vpcRaw := range m["vpc_security_group_memberships"].(*schema.Set).List() { - buf.WriteString(fmt.Sprintf("%s-", vpcRaw.(string))) - } - - for _, sgRaw := range m["db_security_group_memberships"].(*schema.Set).List() { - buf.WriteString(fmt.Sprintf("%s-", sgRaw.(string))) - } - return hashcode.String(buf.String()) -} - -func buildRDSOptionGroupARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:og:%s", partition, region, accountid, identifier) - return arn, nil -} diff --git a/builtin/providers/aws/resource_aws_db_option_group_test.go b/builtin/providers/aws/resource_aws_db_option_group_test.go deleted file mode 100644 index 65148d84b..000000000 --- a/builtin/providers/aws/resource_aws_db_option_group_test.go +++ /dev/null @@ -1,540 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func init() { - resource.AddTestSweepers("aws_db_option_group", 
&resource.Sweeper{ - Name: "aws_db_option_group", - F: testSweepDbOptionGroups, - }) -} - -func testSweepDbOptionGroups(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - - conn := client.(*AWSClient).rdsconn - - opts := rds.DescribeOptionGroupsInput{} - resp, err := conn.DescribeOptionGroups(&opts) - if err != nil { - return fmt.Errorf("error describing DB Option Groups in Sweeper: %s", err) - } - - for _, og := range resp.OptionGroupsList { - var testOptGroup bool - for _, testName := range []string{"option-group-test-terraform-", "tf-test"} { - if strings.HasPrefix(*og.OptionGroupName, testName) { - testOptGroup = true - } - } - - if !testOptGroup { - continue - } - - deleteOpts := &rds.DeleteOptionGroupInput{ - OptionGroupName: og.OptionGroupName, - } - - ret := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteOptionGroup(deleteOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidOptionGroupStateFault" { - log.Printf("[DEBUG] AWS believes the RDS Option Group is still in use, retrying") - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if ret != nil { - return fmt.Errorf("Error Deleting DB Option Group (%s) in Sweeper: %s", *og.OptionGroupName, ret) - } - } - - return nil -} - -func TestAccAWSDBOptionGroup_basic(t *testing.T) { - var v rds.OptionGroup - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - 
testAccCheckAWSDBOptionGroupAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_timeoutBlock(t *testing.T) { - var v rds.OptionGroup - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupBasicConfigTimeoutBlock(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - testAccCheckAWSDBOptionGroupAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_namePrefix(t *testing.T) { - var v rds.OptionGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroup_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v), - testAccCheckAWSDBOptionGroupAttributes(&v), - resource.TestMatchResourceAttr( - "aws_db_option_group.test", "name", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_generatedName(t *testing.T) { - var v rds.OptionGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroup_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v), - testAccCheckAWSDBOptionGroupAttributes(&v), - ), - }, - }, - }) -} - -func 
TestAccAWSDBOptionGroup_defaultDescription(t *testing.T) { - var v rds.OptionGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroup_defaultDescription(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.test", "option_group_description", "Managed by Terraform"), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_basicDestroyWithInstance(t *testing.T) { - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupBasicDestroyConfig(rName), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_OptionSettings(t *testing.T) { - var v rds.OptionGroup - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupOptionSettings(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.#", "1"), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.961211605.option_settings.129825347.value", "UTC"), - ), - }, - { - Config: testAccAWSDBOptionGroupOptionSettings_update(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.#", "1"), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.2422743510.option_settings.1350509764.value", "US/Pacific"), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_sqlServerOptionsUpdate(t *testing.T) { - var v rds.OptionGroup - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupSqlServerEEOptions(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - ), - }, - - { - Config: testAccAWSDBOptionGroupSqlServerEEOptions_update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSDBOptionGroup_multipleOptions(t *testing.T) { - var v rds.OptionGroup - rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBOptionGroupMultipleOptions(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBOptionGroupExists("aws_db_option_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "name", rName), - 
resource.TestCheckResourceAttr( - "aws_db_option_group.bar", "option.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckAWSDBOptionGroupAttributes(v *rds.OptionGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.EngineName != "mysql" { - return fmt.Errorf("bad engine_name: %#v", *v.EngineName) - } - - if *v.MajorEngineVersion != "5.6" { - return fmt.Errorf("bad major_engine_version: %#v", *v.MajorEngineVersion) - } - - if *v.OptionGroupDescription != "Test option group for terraform" { - return fmt.Errorf("bad option_group_description: %#v", *v.OptionGroupDescription) - } - - return nil - } -} - -func testAccCheckAWSDBOptionGroupExists(n string, v *rds.OptionGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Option Group Name is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeOptionGroupsInput{ - OptionGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeOptionGroups(&opts) - - if err != nil { - return err - } - - if len(resp.OptionGroupsList) != 1 || - *resp.OptionGroupsList[0].OptionGroupName != rs.Primary.ID { - return fmt.Errorf("DB Option Group not found") - } - - *v = *resp.OptionGroupsList[0] - - return nil - } -} - -func testAccCheckAWSDBOptionGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_option_group" { - continue - } - - resp, err := conn.DescribeOptionGroups( - &rds.DescribeOptionGroupsInput{ - OptionGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.OptionGroupsList) != 0 && - *resp.OptionGroupsList[0].OptionGroupName == rs.Primary.ID { - return fmt.Errorf("DB Option Group still exists") - } - } - - // Verify the error - newerr, ok := 
err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "OptionGroupNotFoundFault" { - return err - } - } - - return nil -} - -func testAccAWSDBOptionGroupBasicConfigTimeoutBlock(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "mysql" - major_engine_version = "5.6" - - timeouts { - delete = "10m" - } -} -`, r) -} - -func testAccAWSDBOptionGroupBasicConfig(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "mysql" - major_engine_version = "5.6" -} -`, r) -} - -func testAccAWSDBOptionGroupBasicDestroyConfig(r string) string { - return fmt.Sprintf(` -resource "aws_db_instance" "bar" { - allocated_storage = 10 - engine = "MySQL" - engine_version = "5.6.21" - instance_class = "db.t2.micro" - name = "baz" - password = "barbarbarbar" - username = "foo" - - - # Maintenance Window is stored in lower case in the API, though not strictly - # documented. Terraform will downcase this to match (as opposed to throw a - # validation error). 
- maintenance_window = "Fri:09:00-Fri:09:30" - - backup_retention_period = 0 - skip_final_snapshot = true - - option_group_name = "${aws_db_option_group.bar.name}" -} - -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "mysql" - major_engine_version = "5.6" -} -`, r) -} - -func testAccAWSDBOptionGroupOptionSettings(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "oracle-ee" - major_engine_version = "11.2" - - option { - option_name = "Timezone" - option_settings { - name = "TIME_ZONE" - value = "UTC" - } - } -} -`, r) -} - -func testAccAWSDBOptionGroupOptionSettings_update(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "oracle-ee" - major_engine_version = "11.2" - - option { - option_name = "Timezone" - option_settings { - name = "TIME_ZONE" - value = "US/Pacific" - } - } -} -`, r) -} - -func testAccAWSDBOptionGroupSqlServerEEOptions(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "sqlserver-ee" - major_engine_version = "11.00" -} -`, r) -} - -func testAccAWSDBOptionGroupSqlServerEEOptions_update(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "sqlserver-ee" - major_engine_version = "11.00" - - option { - option_name = "Mirroring" - } -} -`, r) -} - -func testAccAWSDBOptionGroupMultipleOptions(r string) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "bar" { - name = "%s" - option_group_description = "Test option group for terraform" - engine_name = "oracle-se" - 
major_engine_version = "11.2" - - option { - option_name = "STATSPACK" - } - - option { - option_name = "XMLDB" - } -} -`, r) -} - -const testAccAWSDBOptionGroup_namePrefix = ` -resource "aws_db_option_group" "test" { - name_prefix = "tf-test-" - option_group_description = "Test option group for terraform" - engine_name = "mysql" - major_engine_version = "5.6" -} -` - -const testAccAWSDBOptionGroup_generatedName = ` -resource "aws_db_option_group" "test" { - option_group_description = "Test option group for terraform" - engine_name = "mysql" - major_engine_version = "5.6" -} -` - -func testAccAWSDBOptionGroup_defaultDescription(n int) string { - return fmt.Sprintf(` -resource "aws_db_option_group" "test" { - name = "tf-test-%d" - engine_name = "mysql" - major_engine_version = "5.6" -} -`, n) -} diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go deleted file mode 100644 index fe935b636..000000000 --- a/builtin/providers/aws/resource_aws_db_parameter_group.go +++ /dev/null @@ -1,293 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" -) - -func resourceAwsDbParameterGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbParameterGroupCreate, - Read: resourceAwsDbParameterGroupRead, - Update: resourceAwsDbParameterGroupUpdate, - Delete: resourceAwsDbParameterGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - 
ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateDbParamGroupName, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateDbParamGroupNamePrefix, - }, - "family": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - "parameter": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "apply_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "immediate", - }, - }, - }, - Set: resourceAwsDbParameterHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - var groupName string - if v, ok := d.GetOk("name"); ok { - groupName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - groupName = resource.PrefixedUniqueId(v.(string)) - } else { - groupName = resource.UniqueId() - } - d.Set("name", groupName) - - createOpts := rds.CreateDBParameterGroupInput{ - DBParameterGroupName: aws.String(groupName), - DBParameterGroupFamily: aws.String(d.Get("family").(string)), - Description: aws.String(d.Get("description").(string)), - Tags: tags, - } - - log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts) - _, err := rdsconn.CreateDBParameterGroup(&createOpts) - if err != nil { - return fmt.Errorf("Error creating DB Parameter Group: %s", err) - } - - d.Partial(true) - d.SetPartial("name") - d.SetPartial("family") - d.SetPartial("description") 
- d.Partial(false) - - d.SetId(*createOpts.DBParameterGroupName) - log.Printf("[INFO] DB Parameter Group ID: %s", d.Id()) - - return resourceAwsDbParameterGroupUpdate(d, meta) -} - -func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - describeOpts := rds.DescribeDBParameterGroupsInput{ - DBParameterGroupName: aws.String(d.Id()), - } - - describeResp, err := rdsconn.DescribeDBParameterGroups(&describeOpts) - if err != nil { - return err - } - - if len(describeResp.DBParameterGroups) != 1 || - *describeResp.DBParameterGroups[0].DBParameterGroupName != d.Id() { - return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.DBParameterGroups) - } - - d.Set("name", describeResp.DBParameterGroups[0].DBParameterGroupName) - d.Set("family", describeResp.DBParameterGroups[0].DBParameterGroupFamily) - d.Set("description", describeResp.DBParameterGroups[0].Description) - - // Only include user customized parameters as there's hundreds of system/default ones - describeParametersOpts := rds.DescribeDBParametersInput{ - DBParameterGroupName: aws.String(d.Id()), - Source: aws.String("user"), - } - - describeParametersResp, err := rdsconn.DescribeDBParameters(&describeParametersOpts) - if err != nil { - return err - } - - d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) - - paramGroup := describeResp.DBParameterGroups[0] - arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - name := "" - if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" { - name = *paramGroup.DBParameterGroupName - } - log.Printf("[DEBUG] Error building ARN for DB Parameter Group, not setting Tags for Param Group %s", name) - } else { - d.Set("arn", arn) - resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - 
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - d.Partial(true) - - if d.HasChange("parameter") { - o, n := d.GetChange("parameter") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter - parameters, err := expandParameters(ns.Difference(os).List()) - if err != nil { - return err - } - - if len(parameters) > 0 { - // We can only modify 20 parameters at a time, so walk them until - // we've got them all. - maxParams := 20 - for parameters != nil { - paramsToModify := make([]*rds.Parameter, 0) - if len(parameters) <= maxParams { - paramsToModify, parameters = parameters[:], nil - } else { - paramsToModify, parameters = parameters[:maxParams], parameters[maxParams:] - } - modifyOpts := rds.ModifyDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Get("name").(string)), - Parameters: paramsToModify, - } - - log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts) - _, err = rdsconn.ModifyDBParameterGroup(&modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying DB Parameter Group: %s", err) - } - } - d.SetPartial("parameter") - } - } - - if arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(rdsconn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - d.Partial(false) - - return resourceAwsDbParameterGroupRead(d, meta) -} - -func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - return resource.Retry(3*time.Minute, 
func() *resource.RetryError { - deleteOpts := rds.DeleteDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Id()), - } - - _, err := conn.DeleteDBParameterGroup(&deleteOpts) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "DBParameterGroupNotFoundFault" { - return resource.RetryableError(err) - } - if ok && awsErr.Code() == "InvalidDBParameterGroupState" { - return resource.RetryableError(err) - } - } - return resource.NonRetryableError(err) - }) -} - -func resourceAwsDbParameterHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - // Store the value as a lower case string, to match how we store them in flattenParameters - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string)))) - - return hashcode.String(buf.String()) -} - -func buildRDSPGARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:pg:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_db_parameter_group_test.go b/builtin/providers/aws/resource_aws_db_parameter_group_test.go deleted file mode 100644 index 1d330bfc7..000000000 --- a/builtin/providers/aws/resource_aws_db_parameter_group_test.go +++ /dev/null @@ -1,735 +0,0 @@ -package aws - -import ( - "fmt" - "math/rand" - "regexp" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDBParameterGroup_limit(t 
*testing.T) { - var v rds.DBParameterGroup - - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: createAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "RDS default parameter group: Exceed default AWS parameter group limit of twenty"), - - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", "max_heap_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", "performance_schema_users_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", 
"table_open_cache"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", "character_set_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", 
"parameter.2475346812.name", "character_set_database"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.name", "character_set_filesystem"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "on"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "barracuda"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "file"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.name", "query_cache_min_res_unit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "repeatable-read"), - ), - }, - resource.TestStep{ - Config: updateAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty"), - - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", "max_heap_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", "performance_schema_users_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", "table_open_cache"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), 
- resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", "character_set_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.name", "character_set_database"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", 
"parameter.2475346812.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.name", "character_set_filesystem"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "on"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "barracuda"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "file"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.name", "query_cache_min_res_unit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "repeatable-read"), - ), - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_basic(t *testing.T) { - var v rds.DBParameterGroup - - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBParameterGroupConfig(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "tags.%", "1"), - ), - }, - resource.TestStep{ - Config: testAccAWSDBParameterGroupAddParametersConfig(groupName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Test parameter group for terraform"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1706463059.name", "collation_connection"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1706463059.value", "utf8_unicode_ci"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2475805061.name", "collation_server"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2475805061.value", "utf8_unicode_ci"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "tags.%", "2"), - ), - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_namePrefix(t *testing.T) { - var v rds.DBParameterGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccDBParameterGroupConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.test", &v), - resource.TestMatchResourceAttr( - "aws_db_parameter_group.test", "name", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_generatedName(t *testing.T) { - var v rds.DBParameterGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBParameterGroupConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.test", &v), - ), - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_withApplyMethod(t *testing.T) { - var v rds.DBParameterGroup - - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBParameterGroupConfigWithApplyMethod(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr( - 
"aws_db_parameter_group.bar", "parameter.2421266705.apply_method", "immediate"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.apply_method", "pending-reboot"), - ), - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_Only(t *testing.T) { - var v rds.DBParameterGroup - - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBParameterGroupOnlyConfig(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), - resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), - ), - }, - }, - }) -} - -func TestResourceAWSDBParameterGroupName_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting123", - ErrCount: 1, - }, - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "1testing123", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbParamGroupName(tc.Value, "aws_db_parameter_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DB Parameter Group Name to trigger a validation error") - } - } -} - -func testAccCheckAWSDBParameterGroupDestroy(s 
*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_parameter_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeDBParameterGroups( - &rds.DescribeDBParameterGroupsInput{ - DBParameterGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBParameterGroups) != 0 && - *resp.DBParameterGroups[0].DBParameterGroupName == rs.Primary.ID { - return fmt.Errorf("DB Parameter Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "DBParameterGroupNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSDBParameterGroupAttributes(v *rds.DBParameterGroup, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.DBParameterGroupName != name { - return fmt.Errorf("Bad Parameter Group name, expected (%s), got (%s)", name, *v.DBParameterGroupName) - } - - if *v.DBParameterGroupFamily != "mysql5.6" { - return fmt.Errorf("bad family: %#v", v.DBParameterGroupFamily) - } - - return nil - } -} - -func testAccCheckAWSDBParameterGroupExists(n string, v *rds.DBParameterGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Parameter Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeDBParameterGroupsInput{ - DBParameterGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeDBParameterGroups(&opts) - - if err != nil { - return err - } - - if len(resp.DBParameterGroups) != 1 || - *resp.DBParameterGroups[0].DBParameterGroupName != rs.Primary.ID { - return fmt.Errorf("DB Parameter Group not found") - } - - *v = *resp.DBParameterGroups[0] - - return nil - } -} - -func 
randomString(strlen int) string { - rand.Seed(time.Now().UTC().UnixNano()) - const chars = "abcdefghijklmnopqrstuvwxyz" - result := make([]byte, strlen) - for i := 0; i < strlen; i++ { - result[i] = chars[rand.Intn(len(chars))] - } - return string(result) -} - -func testAccAWSDBParameterGroupConfig(n string) string { - return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { - name = "%s" - family = "mysql5.6" - parameter { - name = "character_set_server" - value = "utf8" - } - parameter { - name = "character_set_client" - value = "utf8" - } - parameter{ - name = "character_set_results" - value = "utf8" - } - tags { - foo = "bar" - } -}`, n) -} - -func testAccAWSDBParameterGroupConfigWithApplyMethod(n string) string { - return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { - name = "%s" - family = "mysql5.6" - parameter { - name = "character_set_server" - value = "utf8" - } - parameter { - name = "character_set_client" - value = "utf8" - apply_method = "pending-reboot" - } - tags { - foo = "bar" - } -}`, n) -} - -func testAccAWSDBParameterGroupAddParametersConfig(n string) string { - return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { - name = "%s" - family = "mysql5.6" - description = "Test parameter group for terraform" - parameter { - name = "character_set_server" - value = "utf8" - } - parameter { - name = "character_set_client" - value = "utf8" - } - parameter{ - name = "character_set_results" - value = "utf8" - } - parameter { - name = "collation_server" - value = "utf8_unicode_ci" - } - parameter { - name = "collation_connection" - value = "utf8_unicode_ci" - } - tags { - foo = "bar" - baz = "foo" - } -}`, n) -} - -func testAccAWSDBParameterGroupOnlyConfig(n string) string { - return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { - name = "%s" - family = "mysql5.6" - description = "Test parameter group for terraform" -}`, n) -} - -func createAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { - return 
fmt.Sprintf(` -resource "aws_db_parameter_group" "large" { - name = "%s" - family = "mysql5.6" - description = "RDS default parameter group: Exceed default AWS parameter group limit of twenty" - - parameter { name = "binlog_cache_size" value = 131072 } - parameter { name = "character_set_client" value = "utf8" } - parameter { name = "character_set_connection" value = "utf8" } - parameter { name = "character_set_database" value = "utf8" } - parameter { name = "character_set_filesystem" value = "utf8" } - parameter { name = "character_set_results" value = "utf8" } - parameter { name = "character_set_server" value = "utf8" } - parameter { name = "collation_connection" value = "utf8_general_ci" } - parameter { name = "collation_server" value = "utf8_general_ci" } - parameter { name = "event_scheduler" value = "ON" } - parameter { name = "innodb_buffer_pool_dump_at_shutdown" value = 1 } - parameter { name = "innodb_file_format" value = "Barracuda" } - parameter { name = "innodb_flush_log_at_trx_commit" value = 0 } - parameter { name = "innodb_io_capacity" value = 2000 } - parameter { name = "innodb_io_capacity_max" value = 3000 } - parameter { name = "innodb_lock_wait_timeout" value = 120 } - parameter { name = "innodb_max_dirty_pages_pct" value = 90 } - parameter { name = "innodb_open_files" value = 4000 apply_method = "pending-reboot" } - parameter { name = "innodb_read_io_threads" value = 64 apply_method = "pending-reboot" } - parameter { name = "innodb_thread_concurrency" value = 0 } - parameter { name = "innodb_write_io_threads" value = 64 apply_method = "pending-reboot" } - parameter { name = "join_buffer_size" value = 16777216 } - parameter { name = "key_buffer_size" value = 67108864 } - parameter { name = "log_bin_trust_function_creators" value = 1 } - parameter { name = "log_warnings" value = 2 } - parameter { name = "log_output" value = "FILE" } - parameter { name = "max_allowed_packet" value = 1073741824 } - parameter { name = "max_connect_errors" value = 100 
} - parameter { name = "max_connections" value = 3200 } - parameter { name = "max_heap_table_size" value = 67108864 } - parameter { name = "performance_schema" value = 1 apply_method = "pending-reboot" } - parameter { name = "performance_schema_users_size" value = 1048576 apply_method = "pending-reboot" } - parameter { name = "query_cache_limit" value = 2097152 } - parameter { name = "query_cache_min_res_unit" value = 512 } - parameter { name = "query_cache_size" value = 67108864 } - parameter { name = "slow_query_log" value = 1 } - parameter { name = "sort_buffer_size" value = 16777216 } - parameter { name = "sync_binlog" value = 0 } - parameter { name = "table_open_cache" value = 4096 } - parameter { name = "tmp_table_size" value = 67108864 } - parameter { name = "tx_isolation" value = "REPEATABLE-READ" } -}`, n) -} - -func updateAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { - return fmt.Sprintf(` -resource "aws_db_parameter_group" "large" { - name = "%s" - family = "mysql5.6" - description = "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty" - parameter { name = "binlog_cache_size" value = 131072 } - parameter { name = "character_set_client" value = "utf8" } - parameter { name = "character_set_connection" value = "utf8" } - parameter { name = "character_set_database" value = "utf8" } - parameter { name = "character_set_filesystem" value = "utf8" } - parameter { name = "character_set_results" value = "utf8" } - parameter { name = "character_set_server" value = "utf8" } - parameter { name = "collation_connection" value = "utf8_general_ci" } - parameter { name = "collation_server" value = "utf8_general_ci" } - parameter { name = "event_scheduler" value = "ON" } - parameter { name = "innodb_buffer_pool_dump_at_shutdown" value = 1 } - parameter { name = "innodb_file_format" value = "Barracuda" } - parameter { name = "innodb_flush_log_at_trx_commit" value = 0 } - parameter { name = "innodb_io_capacity" value = 2000 
} - parameter { name = "innodb_io_capacity_max" value = 3000 } - parameter { name = "innodb_lock_wait_timeout" value = 120 } - parameter { name = "innodb_max_dirty_pages_pct" value = 90 } - parameter { name = "innodb_open_files" value = 4000 apply_method = "pending-reboot" } - parameter { name = "innodb_read_io_threads" value = 64 apply_method = "pending-reboot" } - parameter { name = "innodb_thread_concurrency" value = 0 } - parameter { name = "innodb_write_io_threads" value = 64 apply_method = "pending-reboot" } - parameter { name = "join_buffer_size" value = 16777216 } - parameter { name = "key_buffer_size" value = 67108864 } - parameter { name = "log_bin_trust_function_creators" value = 1 } - parameter { name = "log_warnings" value = 2 } - parameter { name = "log_output" value = "FILE" } - parameter { name = "max_allowed_packet" value = 1073741824 } - parameter { name = "max_connect_errors" value = 100 } - parameter { name = "max_connections" value = 3200 } - parameter { name = "max_heap_table_size" value = 67108864 } - parameter { name = "performance_schema" value = 1 apply_method = "pending-reboot" } - parameter { name = "performance_schema_users_size" value = 1048576 apply_method = "pending-reboot" } - parameter { name = "query_cache_limit" value = 2097152 } - parameter { name = "query_cache_min_res_unit" value = 512 } - parameter { name = "query_cache_size" value = 67108864 } - parameter { name = "slow_query_log" value = 1 } - parameter { name = "sort_buffer_size" value = 16777216 } - parameter { name = "sync_binlog" value = 0 } - parameter { name = "table_open_cache" value = 4096 } - parameter { name = "tmp_table_size" value = 67108864 } - parameter { name = "tx_isolation" value = "REPEATABLE-READ" } -}`, n) -} - -const testAccDBParameterGroupConfig_namePrefix = ` -resource "aws_db_parameter_group" "test" { - name_prefix = "tf-test-" - family = "mysql5.6" - - parameter { - name = "sync_binlog" - value = 0 - } -} -` - -const 
testAccDBParameterGroupConfig_generatedName = ` -resource "aws_db_parameter_group" "test" { - family = "mysql5.6" - - parameter { - name = "sync_binlog" - value = 0 - } -} -` diff --git a/builtin/providers/aws/resource_aws_db_security_group.go b/builtin/providers/aws/resource_aws_db_security_group.go deleted file mode 100644 index b9e73f2fb..000000000 --- a/builtin/providers/aws/resource_aws_db_security_group.go +++ /dev/null @@ -1,434 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDbSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbSecurityGroupCreate, - Read: resourceAwsDbSecurityGroupRead, - Update: resourceAwsDbSecurityGroupUpdate, - Delete: resourceAwsDbSecurityGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - - "ingress": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "security_group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "security_group_owner_id": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: resourceAwsDbSecurityGroupIngressHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - var err error - var errs []error - - opts := rds.CreateDBSecurityGroupInput{ - DBSecurityGroupName: aws.String(d.Get("name").(string)), - DBSecurityGroupDescription: aws.String(d.Get("description").(string)), - Tags: tags, - } - - log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts) - _, err = conn.CreateDBSecurityGroup(&opts) - if err != nil { - return fmt.Errorf("Error creating DB Security Group: %s", err) - } - - d.SetId(d.Get("name").(string)) - - log.Printf("[INFO] DB Security Group ID: %s", d.Id()) - - sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) - if err != nil { - return err - } - - ingresses := d.Get("ingress").(*schema.Set) - for _, ing := range ingresses.List() { - err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn) - if err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - log.Println( - "[INFO] Waiting for Ingress Authorizations to be authorized") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"authorizing"}, - Target: []string{"authorized"}, - Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta), - Timeout: 10 * time.Minute, - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDbSecurityGroupRead(d, meta) -} - -func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) - if err != nil { - return err - } - - d.Set("name", *sg.DBSecurityGroupName) - d.Set("description", 
*sg.DBSecurityGroupDescription) - - // Create an empty schema.Set to hold all ingress rules - rules := &schema.Set{ - F: resourceAwsDbSecurityGroupIngressHash, - } - - for _, v := range sg.IPRanges { - rule := map[string]interface{}{"cidr": *v.CIDRIP} - rules.Add(rule) - } - - for _, g := range sg.EC2SecurityGroups { - rule := map[string]interface{}{} - if g.EC2SecurityGroupId != nil { - rule["security_group_id"] = *g.EC2SecurityGroupId - } - if g.EC2SecurityGroupName != nil { - rule["security_group_name"] = *g.EC2SecurityGroupName - } - if g.EC2SecurityGroupOwnerId != nil { - rule["security_group_owner_id"] = *g.EC2SecurityGroupOwnerId - } - rules.Add(rule) - } - - d.Set("ingress", rules) - - conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - name := "" - if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" { - name = *sg.DBSecurityGroupName - } - log.Printf("[DEBUG] Error building ARN for DB Security Group, not setting Tags for DB Security Group %s", name) - } else { - d.Set("arn", arn) - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - d.Partial(true) - if arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - if d.HasChange("ingress") { - sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) - if err != nil { - return err 
- } - - oi, ni := d.GetChange("ingress") - if oi == nil { - oi = new(schema.Set) - } - if ni == nil { - ni = new(schema.Set) - } - - ois := oi.(*schema.Set) - nis := ni.(*schema.Set) - removeIngress := ois.Difference(nis).List() - newIngress := nis.Difference(ois).List() - - // DELETE old Ingress rules - for _, ing := range removeIngress { - err := resourceAwsDbSecurityGroupRevokeRule(ing, *sg.DBSecurityGroupName, conn) - if err != nil { - return err - } - } - - // ADD new/updated Ingress rules - for _, ing := range newIngress { - err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn) - if err != nil { - return err - } - } - } - d.Partial(false) - - return resourceAwsDbSecurityGroupRead(d, meta) -} - -func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - log.Printf("[DEBUG] DB Security Group destroy: %v", d.Id()) - - opts := rds.DeleteDBSecurityGroupInput{DBSecurityGroupName: aws.String(d.Id())} - - log.Printf("[DEBUG] DB Security Group destroy configuration: %v", opts) - _, err := conn.DeleteDBSecurityGroup(&opts) - - if err != nil { - newerr, ok := err.(awserr.Error) - if ok && newerr.Code() == "InvalidDBSecurityGroup.NotFound" { - return nil - } - return err - } - - return nil -} - -func resourceAwsDbSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*rds.DBSecurityGroup, error) { - conn := meta.(*AWSClient).rdsconn - - opts := rds.DescribeDBSecurityGroupsInput{ - DBSecurityGroupName: aws.String(d.Id()), - } - - log.Printf("[DEBUG] DB Security Group describe configuration: %#v", opts) - - resp, err := conn.DescribeDBSecurityGroups(&opts) - - if err != nil { - return nil, fmt.Errorf("Error retrieving DB Security Groups: %s", err) - } - - if len(resp.DBSecurityGroups) != 1 || - *resp.DBSecurityGroups[0].DBSecurityGroupName != d.Id() { - return nil, fmt.Errorf("Unable to find DB Security Group: %#v", resp.DBSecurityGroups) - } - - return 
resp.DBSecurityGroups[0], nil -} - -// Authorizes the ingress rule on the db security group -func resourceAwsDbSecurityGroupAuthorizeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error { - ing := ingress.(map[string]interface{}) - - opts := rds.AuthorizeDBSecurityGroupIngressInput{ - DBSecurityGroupName: aws.String(dbSecurityGroupName), - } - - if attr, ok := ing["cidr"]; ok && attr != "" { - opts.CIDRIP = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_name"]; ok && attr != "" { - opts.EC2SecurityGroupName = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_id"]; ok && attr != "" { - opts.EC2SecurityGroupId = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { - opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts) - - _, err := conn.AuthorizeDBSecurityGroupIngress(&opts) - - if err != nil { - return fmt.Errorf("Error authorizing security group ingress: %s", err) - } - - return nil -} - -// Revokes the ingress rule on the db security group -func resourceAwsDbSecurityGroupRevokeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error { - ing := ingress.(map[string]interface{}) - - opts := rds.RevokeDBSecurityGroupIngressInput{ - DBSecurityGroupName: aws.String(dbSecurityGroupName), - } - - if attr, ok := ing["cidr"]; ok && attr != "" { - opts.CIDRIP = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_name"]; ok && attr != "" { - opts.EC2SecurityGroupName = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_id"]; ok && attr != "" { - opts.EC2SecurityGroupId = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { - opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] Revoking ingress rule configuration: %#v", opts) - - _, err := 
conn.RevokeDBSecurityGroupIngress(&opts) - - if err != nil { - return fmt.Errorf("Error revoking security group ingress: %s", err) - } - - return nil -} - -func resourceAwsDbSecurityGroupIngressHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["cidr"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["security_group_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["security_group_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["security_group_owner_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAwsDbSecurityGroupStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := resourceAwsDbSecurityGroupRetrieve(d, meta) - - if err != nil { - log.Printf("Error on retrieving DB Security Group when waiting: %s", err) - return nil, "", err - } - - statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges)) - for _, ec2g := range v.EC2SecurityGroups { - statuses = append(statuses, *ec2g.Status) - } - for _, ips := range v.IPRanges { - statuses = append(statuses, *ips.Status) - } - - for _, stat := range statuses { - // Not done - if stat != "authorized" { - return nil, "authorizing", nil - } - } - - return v, "authorized", nil - } -} - -func buildRDSSecurityGroupARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:secgrp:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_db_security_group_test.go 
b/builtin/providers/aws/resource_aws_db_security_group_test.go deleted file mode 100644 index cfafad50c..000000000 --- a/builtin/providers/aws/resource_aws_db_security_group_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDBSecurityGroup_basic(t *testing.T) { - var v rds.DBSecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDBSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBSecurityGroupExists("aws_db_security_group.bar", &v), - testAccCheckAWSDBSecurityGroupAttributes(&v), - resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "name", "secgroup-terraform"), - resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "ingress.3363517775.cidr", "10.0.0.1/24"), - resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "ingress.#", "1"), - resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "tags.%", "1"), - ), - }, - }, - }) -} - -func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_security_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeDBSecurityGroups( - &rds.DescribeDBSecurityGroupsInput{ - DBSecurityGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBSecurityGroups) != 0 && - *resp.DBSecurityGroups[0].DBSecurityGroupName == 
rs.Primary.ID { - return fmt.Errorf("DB Security Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "DBSecurityGroupNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSDBSecurityGroupAttributes(group *rds.DBSecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(group.IPRanges) == 0 { - return fmt.Errorf("no cidr: %#v", group.IPRanges) - } - - if *group.IPRanges[0].CIDRIP != "10.0.0.1/24" { - return fmt.Errorf("bad cidr: %#v", group.IPRanges) - } - - statuses := make([]string, 0, len(group.IPRanges)) - for _, ips := range group.IPRanges { - statuses = append(statuses, *ips.Status) - } - - if statuses[0] != "authorized" { - return fmt.Errorf("bad status: %#v", statuses) - } - - if *group.DBSecurityGroupName != "secgroup-terraform" { - return fmt.Errorf("bad name: %#v", *group.DBSecurityGroupName) - } - - return nil - } -} - -func testAccCheckAWSDBSecurityGroupExists(n string, v *rds.DBSecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Security Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeDBSecurityGroupsInput{ - DBSecurityGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeDBSecurityGroups(&opts) - - if err != nil { - return err - } - - if len(resp.DBSecurityGroups) != 1 || - *resp.DBSecurityGroups[0].DBSecurityGroupName != rs.Primary.ID { - return fmt.Errorf("DB Security Group not found") - } - - *v = *resp.DBSecurityGroups[0] - - return nil - } -} - -const testAccAWSDBSecurityGroupConfig = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_db_security_group" "bar" { - name = "secgroup-terraform" - - ingress { - cidr = "10.0.0.1/24" - } - - tags { - foo = 
"bar" - } -} -` diff --git a/builtin/providers/aws/resource_aws_db_snapshot.go b/builtin/providers/aws/resource_aws_db_snapshot.go deleted file mode 100644 index f2ab24c4a..000000000 --- a/builtin/providers/aws/resource_aws_db_snapshot.go +++ /dev/null @@ -1,216 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDbSnapshot() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbSnapshotCreate, - Read: resourceAwsDbSnapshotRead, - Delete: resourceAwsDbSnapshotDelete, - - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "db_snapshot_identifier": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "db_instance_identifier": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "allocated_storage": { - Type: schema.TypeInt, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - "db_snapshot_arn": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - "engine": { - Type: schema.TypeString, - Computed: true, - }, - "engine_version": { - Type: schema.TypeString, - Computed: true, - }, - "iops": { - Type: schema.TypeInt, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "license_model": { - Type: schema.TypeString, - Computed: true, - }, - "option_group_name": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - "source_db_snapshot_identifier": { - Type: schema.TypeString, - Computed: true, - }, - "source_region": { - Type: schema.TypeString, - Computed: true, - }, - "snapshot_type": { - 
Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "storage_type": { - Type: schema.TypeString, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsDbSnapshotCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - params := &rds.CreateDBSnapshotInput{ - DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), - DBSnapshotIdentifier: aws.String(d.Get("db_snapshot_identifier").(string)), - } - - _, err := conn.CreateDBSnapshot(params) - if err != nil { - return err - } - d.SetId(d.Get("db_snapshot_identifier").(string)) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"available"}, - Refresh: resourceAwsDbSnapshotStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutRead), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDbSnapshotRead(d, meta) -} - -func resourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - params := &rds.DescribeDBSnapshotsInput{ - DBSnapshotIdentifier: aws.String(d.Id()), - } - resp, err := conn.DescribeDBSnapshots(params) - if err != nil { - return err - } - - snapshot := resp.DBSnapshots[0] - - d.Set("allocated_storage", snapshot.AllocatedStorage) - d.Set("availability_zone", snapshot.AvailabilityZone) - d.Set("db_snapshot_arn", snapshot.DBSnapshotArn) - d.Set("encrypted", snapshot.Encrypted) - d.Set("engine", snapshot.Engine) - d.Set("engine_version", snapshot.EngineVersion) - d.Set("iops", snapshot.Iops) - d.Set("kms_key_id", snapshot.KmsKeyId) - d.Set("license_model", snapshot.LicenseModel) - d.Set("option_group_name", snapshot.OptionGroupName) - d.Set("port", 
snapshot.Port) - d.Set("source_db_snapshot_identifier", snapshot.SourceDBSnapshotIdentifier) - d.Set("source_region", snapshot.SourceRegion) - d.Set("snapshot_type", snapshot.SnapshotType) - d.Set("status", snapshot.Status) - d.Set("vpc_id", snapshot.VpcId) - - return nil -} - -func resourceAwsDbSnapshotDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - params := &rds.DeleteDBSnapshotInput{ - DBSnapshotIdentifier: aws.String(d.Id()), - } - _, err := conn.DeleteDBSnapshot(params) - if err != nil { - return err - } - - return nil -} - -func resourceAwsDbSnapshotStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).rdsconn - - opts := &rds.DescribeDBSnapshotsInput{ - DBSnapshotIdentifier: aws.String(d.Id()), - } - - log.Printf("[DEBUG] DB Snapshot describe configuration: %#v", opts) - - resp, err := conn.DescribeDBSnapshots(opts) - if err != nil { - snapshoterr, ok := err.(awserr.Error) - if ok && snapshoterr.Code() == "DBSnapshotNotFound" { - return nil, "", nil - } - return nil, "", fmt.Errorf("Error retrieving DB Snapshots: %s", err) - } - - if len(resp.DBSnapshots) != 1 { - return nil, "", fmt.Errorf("No snapshots returned for %s", d.Id()) - } - - snapshot := resp.DBSnapshots[0] - - return resp, *snapshot.Status, nil - } -} diff --git a/builtin/providers/aws/resource_aws_db_snapshot_test.go b/builtin/providers/aws/resource_aws_db_snapshot_test.go deleted file mode 100644 index 2b33a2389..000000000 --- a/builtin/providers/aws/resource_aws_db_snapshot_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDBSnapshot_basic(t *testing.T) { - var 
v rds.DBSnapshot - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsDbSnapshotConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDbSnapshotExists("aws_db_snapshot.test", &v), - ), - }, - }, - }) -} - -func testAccCheckDbSnapshotExists(n string, v *rds.DBSnapshot) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - request := &rds.DescribeDBSnapshotsInput{ - DBSnapshotIdentifier: aws.String(rs.Primary.ID), - } - - response, err := conn.DescribeDBSnapshots(request) - if err == nil { - if response.DBSnapshots != nil && len(response.DBSnapshots) > 0 { - *v = *response.DBSnapshots[0] - return nil - } - } - return fmt.Errorf("Error finding RDS DB Snapshot %s", rs.Primary.ID) - } -} - -func testAccAwsDbSnapshotConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_db_instance" "bar" { - allocated_storage = 10 - engine = "MySQL" - engine_version = "5.6.21" - instance_class = "db.t1.micro" - name = "baz" - password = "barbarbarbar" - username = "foo" - - maintenance_window = "Fri:09:00-Fri:09:30" - - backup_retention_period = 0 - - parameter_group_name = "default.mysql5.6" - - skip_final_snapshot = true -} - -resource "aws_db_snapshot" "test" { - db_instance_identifier = "${aws_db_instance.bar.id}" - db_snapshot_identifier = "testsnapshot%d" -}`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go deleted file mode 100644 index c4e437bee..000000000 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ /dev/null @@ -1,257 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - 
"time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDbSubnetGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDbSubnetGroupCreate, - Read: resourceAwsDbSubnetGroupRead, - Update: resourceAwsDbSubnetGroupUpdate, - Delete: resourceAwsDbSubnetGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateDbSubnetGroupName, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateDbSubnetGroupNamePrefix, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - - "subnet_ids": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - subnetIdsSet := d.Get("subnet_ids").(*schema.Set) - subnetIds := make([]*string, subnetIdsSet.Len()) - for i, subnetId := range subnetIdsSet.List() { - subnetIds[i] = aws.String(subnetId.(string)) - } - - var groupName string - if v, ok := d.GetOk("name"); ok { - groupName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - groupName = resource.PrefixedUniqueId(v.(string)) - } else { - groupName = resource.UniqueId() - } - - 
createOpts := rds.CreateDBSubnetGroupInput{ - DBSubnetGroupName: aws.String(groupName), - DBSubnetGroupDescription: aws.String(d.Get("description").(string)), - SubnetIds: subnetIds, - Tags: tags, - } - - log.Printf("[DEBUG] Create DB Subnet Group: %#v", createOpts) - _, err := rdsconn.CreateDBSubnetGroup(&createOpts) - if err != nil { - return fmt.Errorf("Error creating DB Subnet Group: %s", err) - } - - d.SetId(*createOpts.DBSubnetGroupName) - log.Printf("[INFO] DB Subnet Group ID: %s", d.Id()) - return resourceAwsDbSubnetGroupRead(d, meta) -} - -func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - describeOpts := rds.DescribeDBSubnetGroupsInput{ - DBSubnetGroupName: aws.String(d.Id()), - } - - describeResp, err := rdsconn.DescribeDBSubnetGroups(&describeOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "DBSubnetGroupNotFoundFault" { - // Update state to indicate the db subnet no longer exists. - d.SetId("") - return nil - } - return err - } - - if len(describeResp.DBSubnetGroups) == 0 { - return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups) - } - - var subnetGroup *rds.DBSubnetGroup - for _, s := range describeResp.DBSubnetGroups { - // AWS is down casing the name provided, so we compare lower case versions - // of the names. We lower case both our name and their name in the check, - // incase they change that someday. 
- if strings.ToLower(d.Id()) == strings.ToLower(*s.DBSubnetGroupName) { - subnetGroup = describeResp.DBSubnetGroups[0] - } - } - - if subnetGroup.DBSubnetGroupName == nil { - return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups) - } - - d.Set("name", subnetGroup.DBSubnetGroupName) - d.Set("description", subnetGroup.DBSubnetGroupDescription) - - subnets := make([]string, 0, len(subnetGroup.Subnets)) - for _, s := range subnetGroup.Subnets { - subnets = append(subnets, *s.SubnetIdentifier) - } - d.Set("subnet_ids", subnets) - - // list tags for resource - // set tags - conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) - } else { - d.Set("arn", arn) - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - if d.HasChange("subnet_ids") || d.HasChange("description") { - _, n := d.GetChange("subnet_ids") - if n == nil { - n = new(schema.Set) - } - ns := n.(*schema.Set) - - var sIds []*string - for _, s := range ns.List() { - sIds = append(sIds, aws.String(s.(string))) - } - - _, err := conn.ModifyDBSubnetGroup(&rds.ModifyDBSubnetGroupInput{ - DBSubnetGroupName: aws.String(d.Id()), - DBSubnetGroupDescription: aws.String(d.Get("description").(string)), - SubnetIds: sIds, - }) - - if err != nil { - return err - } - } - - if arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, 
meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - return resourceAwsDbSubnetGroupRead(d, meta) -} - -func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsDbSubnetGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - -func resourceAwsDbSubnetGroupDeleteRefreshFunc( - d *schema.ResourceData, - meta interface{}) resource.StateRefreshFunc { - rdsconn := meta.(*AWSClient).rdsconn - - return func() (interface{}, string, error) { - - deleteOpts := rds.DeleteDBSubnetGroupInput{ - DBSubnetGroupName: aws.String(d.Id()), - } - - if _, err := rdsconn.DeleteDBSubnetGroup(&deleteOpts); err != nil { - rdserr, ok := err.(awserr.Error) - if !ok { - return d, "error", err - } - - if rdserr.Code() != "DBSubnetGroupNotFoundFault" { - return d, "error", err - } - } - - return d, "destroyed", nil - } -} - -func buildRDSsubgrpARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:subgrp:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_db_subnet_group_test.go b/builtin/providers/aws/resource_aws_db_subnet_group_test.go deleted file mode 100644 index 0fe175038..000000000 --- a/builtin/providers/aws/resource_aws_db_subnet_group_test.go +++ /dev/null @@ -1,367 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" -) - -func TestAccAWSDBSubnetGroup_basic(t *testing.T) { - var v rds.DBSubnetGroup - - testCheck := func(*terraform.State) error { - return nil - } - - rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "name", rName), - resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "Managed by Terraform"), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSDBSubnetGroup_namePrefix(t *testing.T) { - var v rds.DBSubnetGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.test", &v), - resource.TestMatchResourceAttr( - "aws_db_subnet_group.test", "name", regexp.MustCompile("^tf_test-")), - ), - }, - }, - }) -} - -func TestAccAWSDBSubnetGroup_generatedName(t *testing.T) { - var v rds.DBSubnetGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig_generatedName, 
- Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.test", &v), - ), - }, - }, - }) -} - -// Regression test for https://github.com/hashicorp/terraform/issues/2603 and -// https://github.com/hashicorp/terraform/issues/2664 -func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) { - var v rds.DBSubnetGroup - - testCheck := func(*terraform.State) error { - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig_withUnderscoresAndPeriodsAndSpaces, - Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.underscores", &v), - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.periods", &v), - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.spaces", &v), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSDBSubnetGroup_updateDescription(t *testing.T) { - var v rds.DBSubnetGroup - - rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDBSubnetGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "Managed by Terraform"), - ), - }, - - resource.TestStep{ - Config: testAccDBSubnetGroupConfig_updatedDescription(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "foo description updated"), - ), - }, - }, - }) -} - -func 
testAccCheckDBSubnetGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_db_subnet_group" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeDBSubnetGroups( - &rds.DescribeDBSubnetGroupsInput{DBSubnetGroupName: aws.String(rs.Primary.ID)}) - if err == nil { - if len(resp.DBSubnetGroups) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - // Verify the error is what we want - rdserr, ok := err.(awserr.Error) - if !ok { - return err - } - if rdserr.Code() != "DBSubnetGroupNotFoundFault" { - return err - } - } - - return nil -} - -func testAccCheckDBSubnetGroupExists(n string, v *rds.DBSubnetGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - resp, err := conn.DescribeDBSubnetGroups( - &rds.DescribeDBSubnetGroupsInput{DBSubnetGroupName: aws.String(rs.Primary.ID)}) - if err != nil { - return err - } - if len(resp.DBSubnetGroups) == 0 { - return fmt.Errorf("DbSubnetGroup not found") - } - - *v = *resp.DBSubnetGroups[0] - - return nil - } -} - -func testAccDBSubnetGroupConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccDBSubnetGroupConfig" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_db_subnet_group" "foo" { - name = "%s" - subnet_ids = ["${aws_subnet.foo.id}", 
"${aws_subnet.bar.id}"] - tags { - Name = "tf-dbsubnet-group-test" - } -}`, rName) -} - -func testAccDBSubnetGroupConfig_updatedDescription(rName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccDBSubnetGroupConfig_updatedDescription" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_db_subnet_group" "foo" { - name = "%s" - description = "foo description updated" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - tags { - Name = "tf-dbsubnet-group-test" - } -}`, rName) -} - -const testAccDBSubnetGroupConfig_namePrefix = ` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccDBSubnetGroupConfig_namePrefix" - } -} - -resource "aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - name_prefix = "tf_test-" - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] -}` - -const testAccDBSubnetGroupConfig_generatedName = ` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccDBSubnetGroupConfig_generatedName" - } -} - -resource "aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] 
-}` - -const testAccDBSubnetGroupConfig_withUnderscoresAndPeriodsAndSpaces = ` -resource "aws_vpc" "main" { - cidr_block = "192.168.0.0/16" - tags { - Name = "testAccDBSubnetGroupConfig_withUnderscoresAndPeriodsAndSpaces" - } -} - -resource "aws_subnet" "frontend" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "192.168.1.0/24" -} - -resource "aws_subnet" "backend" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2c" - cidr_block = "192.168.2.0/24" -} - -resource "aws_db_subnet_group" "underscores" { - name = "with_underscores" - description = "Our main group of subnets" - subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"] -} - -resource "aws_db_subnet_group" "periods" { - name = "with.periods" - description = "Our main group of subnets" - subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"] -} - -resource "aws_db_subnet_group" "spaces" { - name = "with spaces" - description = "Our main group of subnets" - subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"] -} -` diff --git a/builtin/providers/aws/resource_aws_default_network_acl.go b/builtin/providers/aws/resource_aws_default_network_acl.go deleted file mode 100644 index 419972b18..000000000 --- a/builtin/providers/aws/resource_aws_default_network_acl.go +++ /dev/null @@ -1,287 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -// ACL Network ACLs all contain explicit deny-all rules that cannot be -// destroyed or changed by users. This rules are numbered very high to be a -// catch-all. 
-// See http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html#default-network-acl -const ( - awsDefaultAclRuleNumberIpv4 = 32767 - awsDefaultAclRuleNumberIpv6 = 32768 -) - -func resourceAwsDefaultNetworkAcl() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDefaultNetworkAclCreate, - // We reuse aws_network_acl's read method, the operations are the same - Read: resourceAwsNetworkAclRead, - Delete: resourceAwsDefaultNetworkAclDelete, - Update: resourceAwsDefaultNetworkAclUpdate, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "default_network_acl_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Computed: false, - }, - // We want explicit management of Subnets here, so we do not allow them to be - // computed. Instead, an empty config will enforce just that; removal of the - // any Subnets that have been assigned to the Default Network ACL. Because we - // can't actually remove them, this will be a continual plan until the - // Subnets are themselves destroyed or reassigned to a different Network - // ACL - "subnet_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - // We want explicit management of Rules here, so we do not allow them to be - // computed. 
Instead, an empty config will enforce just that; removal of the - // rules - "ingress": &schema.Schema{ - Type: schema.TypeSet, - Required: false, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "to_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "rule_no": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cidr_block": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "icmp_code": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - Set: resourceAwsNetworkAclEntryHash, - }, - "egress": &schema.Schema{ - Type: schema.TypeSet, - Required: false, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "to_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "rule_no": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cidr_block": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "icmp_code": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - Set: resourceAwsNetworkAclEntryHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDefaultNetworkAclCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(d.Get("default_network_acl_id").(string)) - - // revoke all default and pre-existing rules on the default network acl. 
- // In the UPDATE method, we'll apply only the rules in the configuration. - log.Printf("[DEBUG] Revoking default ingress and egress rules for Default Network ACL for %s", d.Id()) - err := revokeAllNetworkACLEntries(d.Id(), meta) - if err != nil { - return err - } - - return resourceAwsDefaultNetworkAclUpdate(d, meta) -} - -func resourceAwsDefaultNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - d.Partial(true) - - if d.HasChange("ingress") { - err := updateNetworkAclEntries(d, "ingress", conn) - if err != nil { - return err - } - } - - if d.HasChange("egress") { - err := updateNetworkAclEntries(d, "egress", conn) - if err != nil { - return err - } - } - - if d.HasChange("subnet_ids") { - o, n := d.GetChange("subnet_ids") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - if len(remove) > 0 { - // - // NO-OP - // - // Subnets *must* belong to a Network ACL. Subnets are not "removed" from - // Network ACLs, instead their association is replaced. In a normal - // Network ACL, any removal of a Subnet is done by replacing the - // Subnet/ACL association with an association between the Subnet and the - // Default Network ACL. Because we're managing the default here, we cannot - // do that, so we simply log a NO-OP. In order to remove the Subnet here, - // it must be destroyed, or assigned to different Network ACL. Those - // operations are not handled here - log.Printf("[WARN] Cannot remove subnets from the Default Network ACL. 
They must be re-assigned or destroyed") - } - - if len(add) > 0 { - for _, a := range add { - association, err := findNetworkAclAssociation(a.(string), conn) - if err != nil { - return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), a, err) - } - log.Printf("[DEBUG] Updating Network Association for Default Network ACL (%s) and Subnet (%s)", d.Id(), a.(string)) - _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: association.NetworkAclAssociationId, - NetworkAclId: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - } - } - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - // Re-use the exiting Network ACL Resources READ method - return resourceAwsNetworkAclRead(d, meta) -} - -func resourceAwsDefaultNetworkAclDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default Network ACL. Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} - -// revokeAllNetworkACLEntries revoke all ingress and egress rules that the Default -// Network ACL currently has -func revokeAllNetworkACLEntries(netaclId string, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(netaclId)}, - }) - - if err != nil { - log.Printf("[DEBUG] Error looking up Network ACL: %s", err) - return err - } - - if resp == nil { - return fmt.Errorf("[ERR] Error looking up Default Network ACL Entries: No results") - } - - networkAcl := resp.NetworkAcls[0] - for _, e := range networkAcl.Entries { - // Skip the default rules added by AWS. They can be neither - // configured or deleted by users. 
See http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html#default-network-acl - if *e.RuleNumber == awsDefaultAclRuleNumberIpv4 || - *e.RuleNumber == awsDefaultAclRuleNumberIpv6 { - continue - } - - // track if this is an egress or ingress rule, for logging purposes - rt := "ingress" - if *e.Egress == true { - rt = "egress" - } - - log.Printf("[DEBUG] Destroying Network ACL (%s) Entry number (%d)", rt, int(*e.RuleNumber)) - _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ - NetworkAclId: aws.String(netaclId), - RuleNumber: e.RuleNumber, - Egress: e.Egress, - }) - if err != nil { - return fmt.Errorf("Error deleting entry (%s): %s", e, err) - } - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_network_acl_test.go b/builtin/providers/aws/resource_aws_default_network_acl_test.go deleted file mode 100644 index c5f9e02d1..000000000 --- a/builtin/providers/aws/resource_aws_default_network_acl_test.go +++ /dev/null @@ -1,470 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var defaultEgressAcl = &ec2.NetworkAclEntry{ - CidrBlock: aws.String("0.0.0.0/0"), - Egress: aws.Bool(true), - Protocol: aws.String("-1"), - RuleAction: aws.String("allow"), - RuleNumber: aws.Int64(100), -} -var defaultIngressAcl = &ec2.NetworkAclEntry{ - CidrBlock: aws.String("0.0.0.0/0"), - Egress: aws.Bool(false), - Protocol: aws.String("-1"), - RuleAction: aws.String("allow"), - RuleNumber: aws.Int64(100), -} - -func TestAccAWSDefaultNetworkAcl_basic(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultNetworkAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccAWSDefaultNetworkConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 0, 2), - ), - }, - }, - }) -} - -func TestAccAWSDefaultNetworkAcl_basicIpv6Vpc(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultNetworkAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_basicIpv6Vpc, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 0, 4), - ), - }, - }, - }) -} - -func TestAccAWSDefaultNetworkAcl_deny_ingress(t *testing.T) { - // TestAccAWSDefaultNetworkAcl_deny_ingress will deny all Ingress rules, but - // not Egress. We then expect there to be 3 rules, 2 AWS defaults and 1 - // additional Egress. 
- var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultNetworkAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_deny_ingress, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{defaultEgressAcl}, 0, 2), - ), - }, - }, - }) -} - -func TestAccAWSDefaultNetworkAcl_SubnetRemoval(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultNetworkAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_Subnets, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 2, 2), - ), - }, - - // Here the Subnets have been removed from the Default Network ACL Config, - // but have not been reassigned. 
The result is that the Subnets are still - // there, and we have a non-empty plan - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_Subnets_remove, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 2, 2), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSDefaultNetworkAcl_SubnetReassign(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultNetworkAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_Subnets, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 2, 2), - ), - }, - - // Here we've reassigned the subnets to a different ACL. - // Without any otherwise association between the `aws_network_acl` and - // `aws_default_network_acl` resources, we cannot guarantee that the - // reassignment of the two subnets to the `aws_network_acl` will happen - // before the update/read on the `aws_default_network_acl` resource. - // Because of this, there could be a non-empty plan if a READ is done on - // the default before the reassignment occurs on the other resource. 
- // - // For the sake of testing, here we introduce a depends_on attribute from - // the default resource to the other acl resource, to ensure the latter's - // update occurs first, and the former's READ will correctly read zero - // subnets - resource.TestStep{ - Config: testAccAWSDefaultNetworkConfig_Subnets_move, - Check: resource.ComposeTestCheckFunc( - testAccGetAWSDefaultNetworkAcl("aws_default_network_acl.default", &networkAcl), - testAccCheckAWSDefaultACLAttributes(&networkAcl, []*ec2.NetworkAclEntry{}, 0, 2), - ), - }, - }, - }) -} - -func testAccCheckAWSDefaultNetworkAclDestroy(s *terraform.State) error { - // We can't destroy this resource; it comes and goes with the VPC itself. - return nil -} - -func testAccCheckAWSDefaultACLAttributes(acl *ec2.NetworkAcl, rules []*ec2.NetworkAclEntry, subnetCount int, hiddenRuleCount int) resource.TestCheckFunc { - return func(s *terraform.State) error { - - aclEntriesCount := len(acl.Entries) - ruleCount := len(rules) - - // Default ACL has hidden rules we can't do anything about - ruleCount = ruleCount + hiddenRuleCount - - if ruleCount != aclEntriesCount { - return fmt.Errorf("Expected (%d) Rules, got (%d)", ruleCount, aclEntriesCount) - } - - if len(acl.Associations) != subnetCount { - return fmt.Errorf("Expected (%d) Subnets, got (%d)", subnetCount, len(acl.Associations)) - } - - return nil - } -} - -func testAccGetAWSDefaultNetworkAcl(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Network ACL is set") - } - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - - if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID 
{ - *networkAcl = *resp.NetworkAcls[0] - return nil - } - - return fmt.Errorf("Network Acls not found") - } -} - -const testAccAWSDefaultNetworkConfig_basic = ` -resource "aws_vpc" "tftestvpc" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.tftestvpc.default_network_acl_id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} -` - -const testAccAWSDefaultNetworkConfig_basicDefaultRules = ` -resource "aws_vpc" "tftestvpc" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.tftestvpc.default_network_acl_id}" - - ingress { - protocol = -1 - rule_no = 100 - action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 0 - to_port = 0 - } - - egress { - protocol = -1 - rule_no = 100 - action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 0 - to_port = 0 - } - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} -` - -const testAccAWSDefaultNetworkConfig_deny = ` -resource "aws_vpc" "tftestvpc" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.tftestvpc.default_network_acl_id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} -` - -const testAccAWSDefaultNetworkConfig_deny_ingress = ` -resource "aws_vpc" "tftestvpc" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.tftestvpc.default_network_acl_id}" - - egress { - protocol = -1 - rule_no = 100 - action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 0 - to_port = 0 - } - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basic" - } -} -` - -const 
testAccAWSDefaultNetworkConfig_Subnets = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "one" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "two" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.foo.default_network_acl_id}" - - subnet_ids = ["${aws_subnet.one.id}", "${aws_subnet.two.id}"] - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} -` - -const testAccAWSDefaultNetworkConfig_Subnets_remove = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "one" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "two" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.foo.default_network_acl_id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} -` - -const testAccAWSDefaultNetworkConfig_Subnets_move = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "one" { - cidr_block = "10.1.111.0/24" - 
vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_subnet" "two" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - - subnet_ids = ["${aws_subnet.one.id}", "${aws_subnet.two.id}"] - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.foo.default_network_acl_id}" - - depends_on = ["aws_network_acl.bar"] - - tags { - Name = "TestAccAWSDefaultNetworkAcl_SubnetRemoval" - } -} -` - -const testAccAWSDefaultNetworkConfig_basicIpv6Vpc = ` -provider "aws" { - region = "us-east-2" -} - -resource "aws_vpc" "tftestvpc" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basicIpv6Vpc" - } -} - -resource "aws_default_network_acl" "default" { - default_network_acl_id = "${aws_vpc.tftestvpc.default_network_acl_id}" - - tags { - Name = "TestAccAWSDefaultNetworkAcl_basicIpv6Vpc" - } -} -` diff --git a/builtin/providers/aws/resource_aws_default_route_table.go b/builtin/providers/aws/resource_aws_default_route_table.go deleted file mode 100644 index 987dd4a7d..000000000 --- a/builtin/providers/aws/resource_aws_default_route_table.go +++ /dev/null @@ -1,236 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDefaultRouteTable() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDefaultRouteTableCreate, - Read: resourceAwsDefaultRouteTableRead, - Update: resourceAwsRouteTableUpdate, - Delete: resourceAwsDefaultRouteTableDelete, - - Schema: map[string]*schema.Schema{ - "default_route_table_id": { - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - }, - - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - - "propagating_vgws": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "route": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - - "egress_only_gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "instance_id": { - Type: schema.TypeString, - Optional: true, - }, - - "nat_gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "vpc_peering_connection_id": { - Type: schema.TypeString, - Optional: true, - }, - - "network_interface_id": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAwsRouteTableHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDefaultRouteTableCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(d.Get("default_route_table_id").(string)) - - conn := meta.(*AWSClient).ec2conn - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if rtRaw == nil { - log.Printf("[WARN] Default Route Table not found") - d.SetId("") - return nil - } - - rt := rtRaw.(*ec2.RouteTable) - - d.Set("vpc_id", rt.VpcId) - - // revoke all default and pre-existing routes on the default route table. - // In the UPDATE method, we'll apply only the rules in the configuration. 
- log.Printf("[DEBUG] Revoking default routes for Default Route Table for %s", d.Id()) - if err := revokeAllRouteTableRules(d.Id(), meta); err != nil { - return err - } - - return resourceAwsRouteTableUpdate(d, meta) -} - -func resourceAwsDefaultRouteTableRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - // look up default route table for VPC - filter1 := &ec2.Filter{ - Name: aws.String("association.main"), - Values: []*string{aws.String("true")}, - } - filter2 := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(d.Get("vpc_id").(string))}, - } - - findOpts := &ec2.DescribeRouteTablesInput{ - Filters: []*ec2.Filter{filter1, filter2}, - } - - resp, err := conn.DescribeRouteTables(findOpts) - if err != nil { - return err - } - - if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { - return fmt.Errorf("Default Route table not found") - } - - rt := resp.RouteTables[0] - - d.Set("default_route_table_id", rt.RouteTableId) - d.SetId(*rt.RouteTableId) - - // re-use regular AWS Route Table READ. This is an extra API call but saves us - // from trying to manually keep parity - return resourceAwsRouteTableRead(d, meta) -} - -func resourceAwsDefaultRouteTableDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default Route Table. 
Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} - -// revokeAllRouteTableRules revoke all routes on the Default Route Table -// This should only be ran once at creation time of this resource -func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - log.Printf("\n***\nrevokeAllRouteTableRules\n***\n") - - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(defaultRouteTableId)}, - }) - if err != nil { - return err - } - - if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { - return fmt.Errorf("Default Route table not found") - } - - rt := resp.RouteTables[0] - - // Remove all Gateway association - for _, r := range rt.PropagatingVgws { - log.Printf( - "[INFO] Deleting VGW propagation from %s: %s", - defaultRouteTableId, *r.GatewayId) - _, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{ - RouteTableId: aws.String(defaultRouteTableId), - GatewayId: r.GatewayId, - }) - if err != nil { - return err - } - } - - // Delete all routes - for _, r := range rt.Routes { - // you cannot delete the local route - if r.GatewayId != nil && *r.GatewayId == "local" { - continue - } - if r.DestinationPrefixListId != nil { - // Skipping because VPC endpoint routes are handled separately - // See aws_vpc_endpoint - continue - } - - if r.DestinationCidrBlock != nil { - log.Printf( - "[INFO] Deleting route from %s: %s", - defaultRouteTableId, *r.DestinationCidrBlock) - _, err := conn.DeleteRoute(&ec2.DeleteRouteInput{ - RouteTableId: aws.String(defaultRouteTableId), - DestinationCidrBlock: r.DestinationCidrBlock, - }) - if err != nil { - return err - } - } - - if r.DestinationIpv6CidrBlock != nil { - log.Printf( - "[INFO] Deleting route from %s: %s", - defaultRouteTableId, *r.DestinationIpv6CidrBlock) - _, err := conn.DeleteRoute(&ec2.DeleteRouteInput{ - 
RouteTableId: aws.String(defaultRouteTableId), - DestinationIpv6CidrBlock: r.DestinationIpv6CidrBlock, - }) - if err != nil { - return err - } - } - - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_route_table_test.go b/builtin/providers/aws/resource_aws_default_route_table_test.go deleted file mode 100644 index dd67db0ff..000000000 --- a/builtin/providers/aws/resource_aws_default_route_table_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDefaultRouteTable_basic(t *testing.T) { - var v ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_default_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckDefaultRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDefaultRouteTableConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_default_route_table.foo", &v), - ), - }, - }, - }) -} - -func TestAccAWSDefaultRouteTable_swap(t *testing.T) { - var v ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_default_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckDefaultRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDefaultRouteTable_change, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_default_route_table.foo", &v), - ), - }, - - // This config will swap out the original Default Route Table and replace - // it with the custom route table. 
While this is not advised, it's a - // behavior that may happen, in which case a follow up plan will show (in - // this case) a diff as the table now needs to be updated to match the - // config - { - Config: testAccDefaultRouteTable_change_mod, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_default_route_table.foo", &v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) { - var v ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_default_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckDefaultRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDefaultRouteTable_vpc_endpoint, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_default_route_table.foo", &v), - ), - }, - }, - }) -} - -func testAccCheckDefaultRouteTableDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_default_route_table" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.RouteTables) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidRouteTableID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckDefaultRouteTableExists(s *terraform.State) error { - // We can't destroy this resource; it comes and goes with the VPC itself. 
- return nil -} - -const testAccDefaultRouteTableConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "tf-default-route-table-test" - } -} - -resource "aws_default_route_table" "foo" { - default_route_table_id = "${aws_vpc.foo.default_route_table_id}" - - route { - cidr_block = "10.0.1.0/32" - gateway_id = "${aws_internet_gateway.gw.id}" - } - - tags { - Name = "tf-default-route-table-test" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "tf-default-route-table-test" - } -}` - -const testAccDefaultRouteTable_change = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "tf-default-route-table" - } -} - -resource "aws_default_route_table" "foo" { - default_route_table_id = "${aws_vpc.foo.default_route_table_id}" - - route { - cidr_block = "10.0.1.0/32" - gateway_id = "${aws_internet_gateway.gw.id}" - } - - tags { - Name = "this was the first main" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "main-igw" - } -} - -# Thing to help testing changes -resource "aws_route_table" "r" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.0.1.0/24" - gateway_id = "${aws_internet_gateway.gw.id}" - } - - tags { - Name = "other" - } -} -` - -const testAccDefaultRouteTable_change_mod = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "tf-default-route-table" - } -} - -resource "aws_default_route_table" "foo" { - default_route_table_id = "${aws_vpc.foo.default_route_table_id}" - - route { - cidr_block = "10.0.1.0/32" - gateway_id = "${aws_internet_gateway.gw.id}" - } - - tags { - Name = "this was the first main" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" - - tags 
{ - Name = "main-igw" - } -} - -# Thing to help testing changes -resource "aws_route_table" "r" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.0.1.0/24" - gateway_id = "${aws_internet_gateway.gw.id}" - } - - tags { - Name = "other" - } -} - -resource "aws_main_route_table_association" "a" { - vpc_id = "${aws_vpc.foo.id}" - route_table_id = "${aws_route_table.r.id}" -} -` - -const testAccDefaultRouteTable_vpc_endpoint = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "test" - } -} - -resource "aws_internet_gateway" "igw" { - vpc_id = "${aws_vpc.test.id}" - - tags { - Name = "test" - } -} - -resource "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.test.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = [ - "${aws_vpc.test.default_route_table_id}" - ] -} - -resource "aws_default_route_table" "foo" { - default_route_table_id = "${aws_vpc.test.default_route_table_id}" - - tags { - Name = "test" - } - - route { - cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.igw.id}" - } -} -` diff --git a/builtin/providers/aws/resource_aws_default_security_group.go b/builtin/providers/aws/resource_aws_default_security_group.go deleted file mode 100644 index f4fb748bb..000000000 --- a/builtin/providers/aws/resource_aws_default_security_group.go +++ /dev/null @@ -1,149 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDefaultSecurityGroup() *schema.Resource { - // reuse aws_security_group_rule schema, and methods for READ, UPDATE - dsg := resourceAwsSecurityGroup() - dsg.Create = resourceAwsDefaultSecurityGroupCreate - dsg.Delete = resourceAwsDefaultSecurityGroupDelete - - // Descriptions cannot be updated - delete(dsg.Schema, "description") - - // name is a computed value for 
Default Security Groups and cannot be changed - delete(dsg.Schema, "name_prefix") - dsg.Schema["name"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - // We want explicit management of Rules here, so we do not allow them to be - // computed. Instead, an empty config will enforce just that; removal of the - // rules - dsg.Schema["ingress"].Computed = false - dsg.Schema["egress"].Computed = false - return dsg -} - -func resourceAwsDefaultSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - securityGroupOpts := &ec2.DescribeSecurityGroupsInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("group-name"), - Values: []*string{aws.String("default")}, - }, - }, - } - - var vpcId string - if v, ok := d.GetOk("vpc_id"); ok { - vpcId = v.(string) - securityGroupOpts.Filters = append(securityGroupOpts.Filters, &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(vpcId)}, - }) - } - - var err error - log.Printf("[DEBUG] Commandeer Default Security Group: %s", securityGroupOpts) - resp, err := conn.DescribeSecurityGroups(securityGroupOpts) - if err != nil { - return fmt.Errorf("Error creating Default Security Group: %s", err) - } - - var g *ec2.SecurityGroup - if vpcId != "" { - // if vpcId contains a value, then we expect just a single Security Group - // returned, as default is a protected name for each VPC, and for each - // Region on EC2 Classic - if len(resp.SecurityGroups) != 1 { - return fmt.Errorf("[ERR] Error finding default security group; found (%d) groups: %s", len(resp.SecurityGroups), resp) - } - g = resp.SecurityGroups[0] - } else { - // we need to filter through any returned security groups for the group - // named "default", and does not belong to a VPC - for _, sg := range resp.SecurityGroups { - if sg.VpcId == nil && *sg.GroupName == "default" { - g = sg - } - } - } - - if g == nil { - return fmt.Errorf("[ERR] Error finding default security group: 
no matching group found") - } - - d.SetId(*g.GroupId) - - log.Printf("[INFO] Default Security Group ID: %s", d.Id()) - - if err := setTags(conn, d); err != nil { - return err - } - - if err := revokeDefaultSecurityGroupRules(meta, g); err != nil { - return errwrap.Wrapf("{{err}}", err) - } - - return resourceAwsSecurityGroupUpdate(d, meta) -} - -func resourceAwsDefaultSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default Security Group. Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} - -func revokeDefaultSecurityGroupRules(meta interface{}, g *ec2.SecurityGroup) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[WARN] Removing all ingress and egress rules found on Default Security Group (%s)", *g.GroupId) - if len(g.IpPermissionsEgress) > 0 { - req := &ec2.RevokeSecurityGroupEgressInput{ - GroupId: g.GroupId, - IpPermissions: g.IpPermissionsEgress, - } - - log.Printf("[DEBUG] Revoking default egress rules for Default Security Group for %s", *g.GroupId) - if _, err := conn.RevokeSecurityGroupEgress(req); err != nil { - return fmt.Errorf( - "Error revoking default egress rules for Default Security Group (%s): %s", - *g.GroupId, err) - } - } - if len(g.IpPermissions) > 0 { - // a limitation in EC2 Classic is that a call to RevokeSecurityGroupIngress - // cannot contain both the GroupName and the GroupId - for _, p := range g.IpPermissions { - for _, uigp := range p.UserIdGroupPairs { - if uigp.GroupId != nil && uigp.GroupName != nil { - uigp.GroupName = nil - } - } - } - req := &ec2.RevokeSecurityGroupIngressInput{ - GroupId: g.GroupId, - IpPermissions: g.IpPermissions, - } - - log.Printf("[DEBUG] Revoking default ingress rules for Default Security Group for (%s): %s", *g.GroupId, req) - if _, err := conn.RevokeSecurityGroupIngress(req); err != nil { - return fmt.Errorf( - "Error revoking default ingress rules for Default 
Security Group (%s): %s", - *g.GroupId, err) - } - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_security_group_test.go b/builtin/providers/aws/resource_aws_default_security_group_test.go deleted file mode 100644 index b31a5d440..000000000 --- a/builtin/providers/aws/resource_aws_default_security_group_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDefaultSecurityGroup_basic(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_default_security_group.web", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDefaultSecurityGroupExists("aws_default_security_group.web", &group), - testAccCheckAWSDefaultSecurityGroupAttributes(&group), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "name", "default"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.protocol", "tcp"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.from_port", "80"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.to_port", "8000"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.cidr_blocks.#", "1"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.cidr_blocks.0", "10.0.0.0/8"), - ), - }, - }, - }) -} - -func TestAccAWSDefaultSecurityGroup_classic(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_default_security_group.web", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSDefaultSecurityGroupConfig_classic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDefaultSecurityGroupExists("aws_default_security_group.web", &group), - testAccCheckAWSDefaultSecurityGroupAttributes(&group), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "name", "default"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.protocol", "tcp"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.from_port", "80"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.to_port", "8000"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.cidr_blocks.#", "1"), - resource.TestCheckResourceAttr( - "aws_default_security_group.web", "ingress.3629188364.cidr_blocks.0", "10.0.0.0/8"), - ), - }, - }, - }) -} - -func testAccCheckAWSDefaultSecurityGroupDestroy(s *terraform.State) error { - // We expect Security Group to still exist - return nil -} - -func testAccCheckAWSDefaultSecurityGroupExists(n string, group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - return err - } - - if len(resp.SecurityGroups) > 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - *group = *resp.SecurityGroups[0] - 
return nil - } - - return fmt.Errorf("Security Group not found") - } -} - -func testAccCheckAWSDefaultSecurityGroupAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - p := &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, - } - - if *group.GroupName != "default" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - - if len(group.IpPermissions) == 0 { - return fmt.Errorf("No IPPerms") - } - - // Compare our ingress - if !reflect.DeepEqual(group.IpPermissions[0], p) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.IpPermissions[0], - p) - } - - return nil - } -} - -const testAccAWSDefaultSecurityGroupConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccAWSDefaultSecurityGroupConfig" - } -} - -resource "aws_default_security_group" "web" { - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "6" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -const testAccAWSDefaultSecurityGroupConfig_classic = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_default_security_group" "web" { - ingress { - protocol = "6" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -}` diff --git a/builtin/providers/aws/resource_aws_default_subnet.go b/builtin/providers/aws/resource_aws_default_subnet.go deleted file mode 100644 index fc10723db..000000000 --- a/builtin/providers/aws/resource_aws_default_subnet.go +++ /dev/null @@ -1,85 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - 
"github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDefaultSubnet() *schema.Resource { - // reuse aws_subnet schema, and methods for READ, UPDATE - dsubnet := resourceAwsSubnet() - dsubnet.Create = resourceAwsDefaultSubnetCreate - dsubnet.Delete = resourceAwsDefaultSubnetDelete - - // vpc_id is a required value for Default Subnets - dsubnet.Schema["availability_zone"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - // vpc_id is a computed value for Default Subnets - dsubnet.Schema["vpc_id"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // cidr_block is a computed value for Default Subnets - dsubnet.Schema["cidr_block"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // ipv6_cidr_block is a computed value for Default Subnets - dsubnet.Schema["ipv6_cidr_block"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // map_public_ip_on_launch is a computed value for Default Subnets - dsubnet.Schema["map_public_ip_on_launch"] = &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - } - // assign_ipv6_address_on_creation is a computed value for Default Subnets - dsubnet.Schema["assign_ipv6_address_on_creation"] = &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - } - - return dsubnet -} - -func resourceAwsDefaultSubnetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - req := &ec2.DescribeSubnetsInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("availabilityZone"), - Values: aws.StringSlice([]string{d.Get("availability_zone").(string)}), - }, - &ec2.Filter{ - Name: aws.String("defaultForAz"), - Values: aws.StringSlice([]string{"true"}), - }, - }, - } - - resp, err := conn.DescribeSubnets(req) - if err != nil { - return err - } - - if len(resp.Subnets) != 1 || resp.Subnets[0] == nil { - return fmt.Errorf("Default subnet not found") - } - - d.SetId(aws.StringValue(resp.Subnets[0].SubnetId)) - - return 
resourceAwsSubnetUpdate(d, meta) -} - -func resourceAwsDefaultSubnetDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default Subnet. Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_subnet_test.go b/builtin/providers/aws/resource_aws_default_subnet_test.go deleted file mode 100644 index 0c12b6b94..000000000 --- a/builtin/providers/aws/resource_aws_default_subnet_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAWSDefaultVpc_' -package aws - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDefaultSubnet_basic(t *testing.T) { - var v ec2.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultSubnetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDefaultSubnetConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists("aws_default_subnet.foo", &v), - resource.TestCheckResourceAttr( - "aws_default_subnet.foo", "availability_zone", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_default_subnet.foo", "map_public_ip_on_launch", "true"), - resource.TestCheckResourceAttr( - "aws_default_subnet.foo", "assign_ipv6_address_on_creation", "false"), - resource.TestCheckResourceAttr( - "aws_default_subnet.foo", "tags.%", "1"), - resource.TestCheckResourceAttr( - "aws_default_subnet.foo", "tags.Name", "Default subnet for us-west-2a"), - ), - }, - }, - }) -} - -func testAccCheckAWSDefaultSubnetDestroy(s *terraform.State) error { - // We expect subnet to still exist - return nil -} - -const testAccAWSDefaultSubnetConfigBasic = ` -provider "aws" { - region = "us-west-2" -} - -resource 
"aws_default_subnet" "foo" { - availability_zone = "us-west-2a" - tags { - Name = "Default subnet for us-west-2a" - } -} -` diff --git a/builtin/providers/aws/resource_aws_default_vpc.go b/builtin/providers/aws/resource_aws_default_vpc.go deleted file mode 100644 index 8953534a0..000000000 --- a/builtin/providers/aws/resource_aws_default_vpc.go +++ /dev/null @@ -1,66 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDefaultVpc() *schema.Resource { - // reuse aws_vpc schema, and methods for READ, UPDATE - dvpc := resourceAwsVpc() - dvpc.Create = resourceAwsDefaultVpcCreate - dvpc.Delete = resourceAwsDefaultVpcDelete - - // cidr_block is a computed value for Default VPCs - dvpc.Schema["cidr_block"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // instance_tenancy is a computed value for Default VPCs - dvpc.Schema["instance_tenancy"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // assign_generated_ipv6_cidr_block is a computed value for Default VPCs - dvpc.Schema["assign_generated_ipv6_cidr_block"] = &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - } - - return dvpc -} - -func resourceAwsDefaultVpcCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - req := &ec2.DescribeVpcsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("isDefault"), - Values: aws.StringSlice([]string{"true"}), - }, - }, - } - - resp, err := conn.DescribeVpcs(req) - if err != nil { - return err - } - - if resp.Vpcs == nil || len(resp.Vpcs) == 0 { - return fmt.Errorf("No default VPC found in this region.") - } - - d.SetId(aws.StringValue(resp.Vpcs[0].VpcId)) - - return resourceAwsVpcUpdate(d, meta) -} - -func resourceAwsDefaultVpcDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default VPC. 
Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_vpc_dhcp_options.go b/builtin/providers/aws/resource_aws_default_vpc_dhcp_options.go deleted file mode 100644 index cb433ff4b..000000000 --- a/builtin/providers/aws/resource_aws_default_vpc_dhcp_options.go +++ /dev/null @@ -1,90 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDefaultVpcDhcpOptions() *schema.Resource { - // reuse aws_vpc_dhcp_options schema, and methods for READ, UPDATE - dvpc := resourceAwsVpcDhcpOptions() - dvpc.Create = resourceAwsDefaultVpcDhcpOptionsCreate - dvpc.Delete = resourceAwsDefaultVpcDhcpOptionsDelete - - // domain_name is a computed value for Default Default DHCP Options Sets - dvpc.Schema["domain_name"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // domain_name_servers is a computed value for Default Default DHCP Options Sets - dvpc.Schema["domain_name_servers"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - // ntp_servers is a computed value for Default Default DHCP Options Sets - dvpc.Schema["ntp_servers"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - return dvpc -} - -func resourceAwsDefaultVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - var domainName string - awsRegion := meta.(*AWSClient).region - if awsRegion == "us-east-1" { - domainName = "ec2.internal" - } else { - domainName = awsRegion + ".compute.internal" - } - req := &ec2.DescribeDhcpOptionsInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("key"), - Values: aws.StringSlice([]string{"domain-name"}), - }, - &ec2.Filter{ - Name: aws.String("value"), - Values: aws.StringSlice([]string{domainName}), - }, - 
&ec2.Filter{ - Name: aws.String("key"), - Values: aws.StringSlice([]string{"domain-name-servers"}), - }, - &ec2.Filter{ - Name: aws.String("value"), - Values: aws.StringSlice([]string{"AmazonProvidedDNS"}), - }, - }, - } - - resp, err := conn.DescribeDhcpOptions(req) - if err != nil { - return err - } - - if len(resp.DhcpOptions) != 1 || resp.DhcpOptions[0] == nil { - return fmt.Errorf("Default DHCP Options Set not found") - } - - d.SetId(aws.StringValue(resp.DhcpOptions[0].DhcpOptionsId)) - - if err := resourceAwsVpcDhcpOptionsUpdate(d, meta); err != nil { - return err - } - - return resourceAwsVpcDhcpOptionsRead(d, meta) -} - -func resourceAwsDefaultVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Cannot destroy Default DHCP Options Set. Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_default_vpc_dhcp_options_test.go b/builtin/providers/aws/resource_aws_default_vpc_dhcp_options_test.go deleted file mode 100644 index 8149d245f..000000000 --- a/builtin/providers/aws/resource_aws_default_vpc_dhcp_options_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAWSDefaultVpc_' -package aws - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDefaultVpcDhcpOptions_basic(t *testing.T) { - var d ec2.DhcpOptions - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultVpcDhcpOptionsDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDefaultVpcDhcpOptionsConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDHCPOptionsExists("aws_default_vpc_dhcp_options.foo", &d), - resource.TestCheckResourceAttr( - 
"aws_default_vpc_dhcp_options.foo", "domain_name", "us-west-2.compute.internal"), - resource.TestCheckResourceAttr( - "aws_default_vpc_dhcp_options.foo", "domain_name_servers", "AmazonProvidedDNS"), - resource.TestCheckResourceAttr( - "aws_default_vpc_dhcp_options.foo", "tags.%", "1"), - resource.TestCheckResourceAttr( - "aws_default_vpc_dhcp_options.foo", "tags.Name", "Default DHCP Option Set"), - ), - }, - }, - }) -} - -func testAccCheckAWSDefaultVpcDhcpOptionsDestroy(s *terraform.State) error { - // We expect DHCP Options Set to still exist - return nil -} - -const testAccAWSDefaultVpcDhcpOptionsConfigBasic = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_default_vpc_dhcp_options" "foo" { - tags { - Name = "Default DHCP Option Set" - } -} -` diff --git a/builtin/providers/aws/resource_aws_default_vpc_test.go b/builtin/providers/aws/resource_aws_default_vpc_test.go deleted file mode 100644 index 1f14b6469..000000000 --- a/builtin/providers/aws/resource_aws_default_vpc_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAWSDefaultVpc_' -package aws - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDefaultVpc_basic(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDefaultVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDefaultVpcConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_default_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "172.31.0.0/16"), - resource.TestCheckResourceAttr( - "aws_default_vpc.foo", "cidr_block", "172.31.0.0/16"), - resource.TestCheckResourceAttr( - "aws_default_vpc.foo", "tags.%", "1"), - resource.TestCheckResourceAttr( - "aws_default_vpc.foo", "tags.Name", 
"Default VPC"), - resource.TestCheckNoResourceAttr( - "aws_default_vpc.foo", "assign_generated_ipv6_cidr_block"), - resource.TestCheckNoResourceAttr( - "aws_default_vpc.foo", "ipv6_association_id"), - resource.TestCheckNoResourceAttr( - "aws_default_vpc.foo", "ipv6_cidr_block"), - ), - }, - }, - }) -} - -func testAccCheckAWSDefaultVpcDestroy(s *terraform.State) error { - // We expect VPC to still exist - return nil -} - -const testAccAWSDefaultVpcConfigBasic = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_default_vpc" "foo" { - tags { - Name = "Default VPC" - } -} -` diff --git a/builtin/providers/aws/resource_aws_devicefarm_project.go b/builtin/providers/aws/resource_aws_devicefarm_project.go deleted file mode 100644 index e7e377eaf..000000000 --- a/builtin/providers/aws/resource_aws_devicefarm_project.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/devicefarm" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDevicefarmProject() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDevicefarmProjectCreate, - Read: resourceAwsDevicefarmProjectRead, - Update: resourceAwsDevicefarmProjectUpdate, - Delete: resourceAwsDevicefarmProjectDelete, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAwsDevicefarmProjectCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).devicefarmconn - region := meta.(*AWSClient).region - - // We need to ensure that DeviceFarm is only being run against us-west-2 - // As this is the only place that AWS currently supports it - if region != "us-west-2" { - return fmt.Errorf("DeviceFarm can only be used with us-west-2. 
You are trying to use it on %s", region) - } - - input := &devicefarm.CreateProjectInput{ - Name: aws.String(d.Get("name").(string)), - } - - log.Printf("[DEBUG] Creating DeviceFarm Project: %s", d.Get("name").(string)) - out, err := conn.CreateProject(input) - if err != nil { - return fmt.Errorf("Error creating DeviceFarm Project: %s", err) - } - - log.Printf("[DEBUG] Successsfully Created DeviceFarm Project: %s", *out.Project.Arn) - d.SetId(*out.Project.Arn) - - return resourceAwsDevicefarmProjectRead(d, meta) -} - -func resourceAwsDevicefarmProjectRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).devicefarmconn - - input := &devicefarm.GetProjectInput{ - Arn: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Reading DeviceFarm Project: %s", d.Id()) - out, err := conn.GetProject(input) - if err != nil { - return fmt.Errorf("Error reading DeviceFarm Project: %s", err) - } - - d.Set("name", out.Project.Name) - d.Set("arn", out.Project.Arn) - - return nil -} - -func resourceAwsDevicefarmProjectUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).devicefarmconn - - if d.HasChange("name") { - input := &devicefarm.UpdateProjectInput{ - Arn: aws.String(d.Id()), - Name: aws.String(d.Get("name").(string)), - } - - log.Printf("[DEBUG] Updating DeviceFarm Project: %s", d.Id()) - _, err := conn.UpdateProject(input) - if err != nil { - return fmt.Errorf("Error Updating DeviceFarm Project: %s", err) - } - - } - - return resourceAwsDevicefarmProjectRead(d, meta) -} - -func resourceAwsDevicefarmProjectDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).devicefarmconn - - input := &devicefarm.DeleteProjectInput{ - Arn: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting DeviceFarm Project: %s", d.Id()) - _, err := conn.DeleteProject(input) - if err != nil { - return fmt.Errorf("Error deleting DeviceFarm Project: %s", err) - } - - return nil -} diff --git 
a/builtin/providers/aws/resource_aws_devicefarm_project_test.go b/builtin/providers/aws/resource_aws_devicefarm_project_test.go deleted file mode 100644 index dc3a92c04..000000000 --- a/builtin/providers/aws/resource_aws_devicefarm_project_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/devicefarm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDeviceFarmProject_basic(t *testing.T) { - var afterCreate, afterUpdate devicefarm.Project - beforeInt := acctest.RandInt() - afterInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDeviceFarmProjectDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDeviceFarmProjectConfig(beforeInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDeviceFarmProjectExists( - "aws_devicefarm_project.foo", &afterCreate), - resource.TestCheckResourceAttr( - "aws_devicefarm_project.foo", "name", fmt.Sprintf("tf-testproject-%d", beforeInt)), - ), - }, - - { - Config: testAccDeviceFarmProjectConfig(afterInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDeviceFarmProjectExists( - "aws_devicefarm_project.foo", &afterUpdate), - resource.TestCheckResourceAttr( - "aws_devicefarm_project.foo", "name", fmt.Sprintf("tf-testproject-%d", afterInt)), - testAccCheckDeviceFarmProjectNotRecreated( - t, &afterCreate, &afterUpdate), - ), - }, - }, - }) -} - -func testAccCheckDeviceFarmProjectNotRecreated(t *testing.T, - before, after *devicefarm.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.Arn != *after.Arn { - t.Fatalf("Expected DeviceFarm Project ARNs to be the same. 
But they were: %v, %v", *before.Arn, *after.Arn) - } - return nil - } -} - -func testAccCheckDeviceFarmProjectExists(n string, v *devicefarm.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).devicefarmconn - resp, err := conn.GetProject( - &devicefarm.GetProjectInput{Arn: aws.String(rs.Primary.ID)}) - if err != nil { - return err - } - if resp.Project == nil { - return fmt.Errorf("DeviceFarmProject not found") - } - - *v = *resp.Project - - return nil - } -} - -func testAccCheckDeviceFarmProjectDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).devicefarmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_devicefarm_project" { - continue - } - - // Try to find the resource - resp, err := conn.GetProject( - &devicefarm.GetProjectInput{Arn: aws.String(rs.Primary.ID)}) - if err == nil { - if resp.Project != nil { - return fmt.Errorf("still exist.") - } - - return nil - } - - if dferr, ok := err.(awserr.Error); ok && dferr.Code() == "DeviceFarmProjectNotFoundFault" { - return nil - } - } - - return nil -} - -func testAccDeviceFarmProjectConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_devicefarm_project" "foo" { - name = "tf-testproject-%d" -}`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go deleted file mode 100644 index a9bd952dd..000000000 --- a/builtin/providers/aws/resource_aws_directory_service_directory.go +++ /dev/null @@ -1,490 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform/helper/resource" -) - -var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){ - "SimpleAD": createSimpleDirectoryService, - "MicrosoftAD": createActiveDirectoryService, - "ADConnector": createDirectoryConnector, -} - -func resourceAwsDirectoryServiceDirectory() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDirectoryServiceDirectoryCreate, - Read: resourceAwsDirectoryServiceDirectoryRead, - Update: resourceAwsDirectoryServiceDirectoryUpdate, - Delete: resourceAwsDirectoryServiceDirectoryDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Sensitive: true, - }, - "size": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "alias": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "short_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "vpc_settings": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnet_ids": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - "connect_settings": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "customer_username": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - "customer_dns_ips": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "subnet_ids": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - "enable_sso": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "access_url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "dns_ip_addresses": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Computed: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "SimpleAD", - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - validTypes := []string{"SimpleAD", "MicrosoftAD"} - value := v.(string) - for validType, _ := range directoryCreationFuncs { - if validType == value { - return - } - } - es = append(es, fmt.Errorf("%q must be one of %q", k, validTypes)) - return - }, - }, - }, - } -} - -func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) { - if v, ok := d.GetOk("vpc_settings"); !ok { - return nil, fmt.Errorf("vpc_settings is required for type = SimpleAD or MicrosoftAD") - } else { - settings := v.([]interface{}) - - if len(settings) > 1 { - return nil, fmt.Errorf("Only a single vpc_settings block is expected") - } else if len(settings) == 1 { - s := settings[0].(map[string]interface{}) - var subnetIds []*string - for _, id := range s["subnet_ids"].(*schema.Set).List() { - subnetIds = append(subnetIds, aws.String(id.(string))) - } - - vpcSettings = &directoryservice.DirectoryVpcSettings{ - 
SubnetIds: subnetIds, - VpcId: aws.String(s["vpc_id"].(string)), - } - } - } - - return vpcSettings, nil -} - -func buildConnectSettings(d *schema.ResourceData) (connectSettings *directoryservice.DirectoryConnectSettings, err error) { - if v, ok := d.GetOk("connect_settings"); !ok { - return nil, fmt.Errorf("connect_settings is required for type = ADConnector") - } else { - settings := v.([]interface{}) - - if len(settings) > 1 { - return nil, fmt.Errorf("Only a single connect_settings block is expected") - } else if len(settings) == 1 { - s := settings[0].(map[string]interface{}) - - var subnetIds []*string - for _, id := range s["subnet_ids"].(*schema.Set).List() { - subnetIds = append(subnetIds, aws.String(id.(string))) - } - - var customerDnsIps []*string - for _, id := range s["customer_dns_ips"].(*schema.Set).List() { - customerDnsIps = append(customerDnsIps, aws.String(id.(string))) - } - - connectSettings = &directoryservice.DirectoryConnectSettings{ - CustomerDnsIps: customerDnsIps, - CustomerUserName: aws.String(s["customer_username"].(string)), - SubnetIds: subnetIds, - VpcId: aws.String(s["vpc_id"].(string)), - } - } - } - - return connectSettings, nil -} - -func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) { - if _, ok := d.GetOk("size"); !ok { - return "", fmt.Errorf("size is required for type = ADConnector") - } - - input := directoryservice.ConnectDirectoryInput{ - Name: aws.String(d.Get("name").(string)), - Password: aws.String(d.Get("password").(string)), - Size: aws.String(d.Get("size").(string)), - } - - if v, ok := d.GetOk("description"); ok { - input.Description = aws.String(v.(string)) - } - if v, ok := d.GetOk("short_name"); ok { - input.ShortName = aws.String(v.(string)) - } - - input.ConnectSettings, err = buildConnectSettings(d) - if err != nil { - return "", err - } - - log.Printf("[DEBUG] Creating Directory Connector: %s", input) - out, err := 
dsconn.ConnectDirectory(&input) - if err != nil { - return "", err - } - log.Printf("[DEBUG] Directory Connector created: %s", out) - - return *out.DirectoryId, nil -} - -func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) { - if _, ok := d.GetOk("size"); !ok { - return "", fmt.Errorf("size is required for type = SimpleAD") - } - - input := directoryservice.CreateDirectoryInput{ - Name: aws.String(d.Get("name").(string)), - Password: aws.String(d.Get("password").(string)), - Size: aws.String(d.Get("size").(string)), - } - - if v, ok := d.GetOk("description"); ok { - input.Description = aws.String(v.(string)) - } - if v, ok := d.GetOk("short_name"); ok { - input.ShortName = aws.String(v.(string)) - } - - input.VpcSettings, err = buildVpcSettings(d) - if err != nil { - return "", err - } - - log.Printf("[DEBUG] Creating Simple Directory Service: %s", input) - out, err := dsconn.CreateDirectory(&input) - if err != nil { - return "", err - } - log.Printf("[DEBUG] Simple Directory Service created: %s", out) - - return *out.DirectoryId, nil -} - -func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) { - input := directoryservice.CreateMicrosoftADInput{ - Name: aws.String(d.Get("name").(string)), - Password: aws.String(d.Get("password").(string)), - } - - if v, ok := d.GetOk("description"); ok { - input.Description = aws.String(v.(string)) - } - if v, ok := d.GetOk("short_name"); ok { - input.ShortName = aws.String(v.(string)) - } - - input.VpcSettings, err = buildVpcSettings(d) - if err != nil { - return "", err - } - - log.Printf("[DEBUG] Creating Microsoft AD Directory Service: %s", input) - out, err := dsconn.CreateMicrosoftAD(&input) - if err != nil { - return "", err - } - log.Printf("[DEBUG] Microsoft AD Directory Service created: %s", out) - - return *out.DirectoryId, nil -} - -func 
resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error { - dsconn := meta.(*AWSClient).dsconn - - creationFunc, ok := directoryCreationFuncs[d.Get("type").(string)] - if !ok { - // Shouldn't happen as this is validated above - return fmt.Errorf("Unsupported directory type: %s", d.Get("type")) - } - - directoryId, err := creationFunc(dsconn, d) - if err != nil { - return err - } - - d.SetId(directoryId) - - // Wait for creation - log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Requested", "Creating", "Created"}, - Target: []string{"Active"}, - Refresh: func() (interface{}, string, error) { - resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - log.Printf("Error during creation of DS: %q", err.Error()) - return nil, "", err - } - - ds := resp.DirectoryDescriptions[0] - log.Printf("[DEBUG] Creation of DS %q is in following stage: %q.", - d.Id(), *ds.Stage) - return ds, *ds.Stage, nil - }, - Timeout: 60 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for Directory Service (%s) to become available: %s", - d.Id(), err) - } - - if v, ok := d.GetOk("alias"); ok { - d.SetPartial("alias") - - input := directoryservice.CreateAliasInput{ - DirectoryId: aws.String(d.Id()), - Alias: aws.String(v.(string)), - } - - log.Printf("[DEBUG] Assigning alias %q to DS directory %q", - v.(string), d.Id()) - out, err := dsconn.CreateAlias(&input) - if err != nil { - return err - } - log.Printf("[DEBUG] Alias %q assigned to DS directory %q", - *out.Alias, *out.DirectoryId) - } - - return resourceAwsDirectoryServiceDirectoryUpdate(d, meta) -} - -func resourceAwsDirectoryServiceDirectoryUpdate(d *schema.ResourceData, meta interface{}) error { - dsconn := meta.(*AWSClient).dsconn - - if d.HasChange("enable_sso") 
{ - d.SetPartial("enable_sso") - var err error - - if v, ok := d.GetOk("enable_sso"); ok && v.(bool) { - log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id()) - _, err = dsconn.EnableSso(&directoryservice.EnableSsoInput{ - DirectoryId: aws.String(d.Id()), - }) - } else { - log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id()) - _, err = dsconn.DisableSso(&directoryservice.DisableSsoInput{ - DirectoryId: aws.String(d.Id()), - }) - } - - if err != nil { - return err - } - } - - return resourceAwsDirectoryServiceDirectoryRead(d, meta) -} - -func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta interface{}) error { - dsconn := meta.(*AWSClient).dsconn - - input := directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(d.Id())}, - } - out, err := dsconn.DescribeDirectories(&input) - if err != nil { - return err - - } - - if len(out.DirectoryDescriptions) == 0 { - log.Printf("[WARN] Directory %s not found", d.Id()) - d.SetId("") - return nil - } - - dir := out.DirectoryDescriptions[0] - log.Printf("[DEBUG] Received DS directory: %s", dir) - - d.Set("access_url", *dir.AccessUrl) - d.Set("alias", *dir.Alias) - if dir.Description != nil { - d.Set("description", *dir.Description) - } - - if *dir.Type == "ADConnector" { - d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.ConnectSettings.ConnectIps))) - } else { - d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs))) - } - d.Set("name", *dir.Name) - if dir.ShortName != nil { - d.Set("short_name", *dir.ShortName) - } - if dir.Size != nil { - d.Set("size", *dir.Size) - } - d.Set("type", *dir.Type) - d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings)) - d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings)) - d.Set("enable_sso", *dir.SsoEnabled) - - return nil -} - -func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta 
interface{}) error { - dsconn := meta.(*AWSClient).dsconn - - input := directoryservice.DeleteDirectoryInput{ - DirectoryId: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Delete Directory input: %s", input) - _, err := dsconn.DeleteDirectory(&input) - if err != nil { - return err - } - - // Wait for deletion - log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Deleting"}, - Target: []string{"Deleted"}, - Refresh: func() (interface{}, string, error) { - resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" { - return 42, "Deleted", nil - } - return nil, "error", err - } - - if len(resp.DirectoryDescriptions) == 0 { - return 42, "Deleted", nil - } - - ds := resp.DirectoryDescriptions[0] - log.Printf("[DEBUG] Deletion of DS %q is in following stage: %q.", - d.Id(), *ds.Stage) - return ds, *ds.Stage, nil - }, - Timeout: 60 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for Directory Service (%s) to be deleted: %q", - d.Id(), err.Error()) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_directory_service_directory_test.go b/builtin/providers/aws/resource_aws_directory_service_directory_test.go deleted file mode 100644 index 56505d05d..000000000 --- a/builtin/providers/aws/resource_aws_directory_service_directory_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/directoryservice" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDirectoryServiceDirectory_basic(t 
*testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - ), - }, - }, - }) -} - -func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig_microsoft, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - ), - }, - }, - }) -} - -func TestAccAWSDirectoryServiceDirectory_connector(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig_connector, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.connector"), - ), - }, - }, - }) -} - -func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig_withAlias, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", - 
fmt.Sprintf("tf-d-%d", randomInteger)), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false), - ), - }, - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig_withSso, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", - fmt.Sprintf("tf-d-%d", randomInteger)), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", true), - ), - }, - resource.TestStep{ - Config: testAccDirectoryServiceDirectoryConfig_withSso_modified, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", - fmt.Sprintf("tf-d-%d", randomInteger)), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false), - ), - }, - }, - }) -} - -func testAccCheckDirectoryServiceDirectoryDestroy(s *terraform.State) error { - dsconn := testAccProvider.Meta().(*AWSClient).dsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_directory_service_directory" { - continue - } - - input := directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(rs.Primary.ID)}, - } - out, err := dsconn.DescribeDirectories(&input) - if err != nil { - // EntityDoesNotExistException means it's gone, this is good - if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" { - return nil - } - return err - } - - if out != nil && len(out.DirectoryDescriptions) > 0 { - return fmt.Errorf("Expected AWS Directory Service Directory to be gone, but was still found") - } - - return nil - } - - return fmt.Errorf("Default error in Service Directory Test") -} - -func testAccCheckServiceDirectoryExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - dsconn := testAccProvider.Meta().(*AWSClient).dsconn - out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(out.DirectoryDescriptions) < 1 { - return fmt.Errorf("No DS directory found") - } - - if *out.DirectoryDescriptions[0].DirectoryId != rs.Primary.ID { - return fmt.Errorf("DS directory ID mismatch - existing: %q, state: %q", - *out.DirectoryDescriptions[0].DirectoryId, rs.Primary.ID) - } - - return nil - } -} - -func testAccCheckServiceDirectoryAlias(name, alias string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - dsconn := testAccProvider.Meta().(*AWSClient).dsconn - out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if *out.DirectoryDescriptions[0].Alias != alias { - return fmt.Errorf("DS directory Alias mismatch - actual: %q, expected: %q", - *out.DirectoryDescriptions[0].Alias, alias) - } - - return nil - } -} - -func testAccCheckServiceDirectorySso(name string, ssoEnabled bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - dsconn := testAccProvider.Meta().(*AWSClient).dsconn - out, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ - DirectoryIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err 
- } - - if *out.DirectoryDescriptions[0].SsoEnabled != ssoEnabled { - return fmt.Errorf("DS directory SSO mismatch - actual: %t, expected: %t", - *out.DirectoryDescriptions[0].SsoEnabled, ssoEnabled) - } - - return nil - } -} - -const testAccDirectoryServiceDirectoryConfig = ` -resource "aws_directory_service_directory" "bar" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -` - -const testAccDirectoryServiceDirectoryConfig_connector = ` -resource "aws_directory_service_directory" "bar" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_directory_service_directory" "connector" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - type = "ADConnector" - - connect_settings { - customer_dns_ips = ["${aws_directory_service_directory.bar.dns_ip_addresses}"] - customer_username = "Administrator" - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig_connector" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - 
availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -` - -const testAccDirectoryServiceDirectoryConfig_microsoft = ` -resource "aws_directory_service_directory" "bar" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - type = "MicrosoftAD" - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig_microsoft" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -` - -var randomInteger = acctest.RandInt() -var testAccDirectoryServiceDirectoryConfig_withAlias = fmt.Sprintf(` -resource "aws_directory_service_directory" "bar_a" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - alias = "tf-d-%d" - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig_withAlias" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -`, randomInteger) - -var testAccDirectoryServiceDirectoryConfig_withSso = fmt.Sprintf(` -resource "aws_directory_service_directory" "bar_a" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - alias = "tf-d-%d" - enable_sso = true - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" 
{ - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig_withSso" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -`, randomInteger) - -var testAccDirectoryServiceDirectoryConfig_withSso_modified = fmt.Sprintf(` -resource "aws_directory_service_directory" "bar_a" { - name = "corp.notexample.com" - password = "SuperSecretPassw0rd" - size = "Small" - alias = "tf-d-%d" - enable_sso = false - - vpc_settings { - vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - } -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccDirectoryServiceDirectoryConfig_withSso_modified" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.main.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -`, randomInteger) diff --git a/builtin/providers/aws/resource_aws_dms_certificate.go b/builtin/providers/aws/resource_aws_dms_certificate.go deleted file mode 100644 index 8fd3f9f88..000000000 --- a/builtin/providers/aws/resource_aws_dms_certificate.go +++ /dev/null @@ -1,138 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDmsCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDmsCertificateCreate, - Read: resourceAwsDmsCertificateRead, - Delete: resourceAwsDmsCertificateDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: 
map[string]*schema.Schema{ - "certificate_arn": { - Type: schema.TypeString, - Computed: true, - }, - "certificate_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDmsCertificateId, - }, - "certificate_pem": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - "certificate_wallet": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - }, - } -} - -func resourceAwsDmsCertificateCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.ImportCertificateInput{ - CertificateIdentifier: aws.String(d.Get("certificate_id").(string)), - } - - pem, pemSet := d.GetOk("certificate_pem") - wallet, walletSet := d.GetOk("certificate_wallet") - - if !pemSet && !walletSet { - return fmt.Errorf("Must set either certificate_pem and certificate_wallet.") - } - if pemSet && walletSet { - return fmt.Errorf("Cannot set both certificate_pem and certificate_wallet.") - } - - if pemSet { - request.CertificatePem = aws.String(pem.(string)) - } - if walletSet { - request.CertificateWallet = []byte(wallet.(string)) - } - - log.Println("[DEBUG] DMS import certificate:", request) - - _, err := conn.ImportCertificate(request) - if err != nil { - return err - } - - d.SetId(d.Get("certificate_id").(string)) - return resourceAwsDmsCertificateRead(d, meta) -} - -func resourceAwsDmsCertificateRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - response, err := conn.DescribeCertificates(&dms.DescribeCertificatesInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("certificate-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
- }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - d.SetId("") - return nil - } - return err - } - - return resourceAwsDmsCertificateSetState(d, response.Certificates[0]) -} - -func resourceAwsDmsCertificateDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.DeleteCertificateInput{ - CertificateArn: aws.String(d.Get("certificate_arn").(string)), - } - - log.Printf("[DEBUG] DMS delete certificate: %#v", request) - - _, err := conn.DeleteCertificate(request) - if err != nil { - return err - } - - return nil -} - -func resourceAwsDmsCertificateSetState(d *schema.ResourceData, cert *dms.Certificate) error { - d.SetId(*cert.CertificateIdentifier) - - d.Set("certificate_id", cert.CertificateIdentifier) - d.Set("certificate_arn", cert.CertificateArn) - - if cert.CertificatePem != nil && *cert.CertificatePem != "" { - d.Set("certificate_pem", cert.CertificatePem) - } - if cert.CertificateWallet != nil && len(cert.CertificateWallet) == 0 { - d.Set("certificate_wallet", cert.CertificateWallet) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_dms_certificate_test.go b/builtin/providers/aws/resource_aws_dms_certificate_test.go deleted file mode 100644 index 52cfa1c0c..000000000 --- a/builtin/providers/aws/resource_aws_dms_certificate_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsDmsCertificateBasic(t *testing.T) { - resourceName := "aws_dms_certificate.dms_certificate" - randId := acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: dmsCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: dmsCertificateConfig(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsCertificateExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "certificate_arn"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func dmsCertificateDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dms_certificate" { - continue - } - - err := checkDmsCertificateExists(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Found a certificate that was not destroyed: %s", rs.Primary.ID) - } - } - - return nil -} - -func checkDmsCertificateExists(n string) resource.TestCheckFunc { - providers := []*schema.Provider{testAccProvider} - return checkDmsCertificateExistsWithProviders(n, &providers) -} - -func checkDmsCertificateExistsWithProviders(n string, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - for _, provider := range *providers { - // Ignore if Meta is empty, this can happen for validation providers - if provider.Meta() == nil { - continue - } - - conn := provider.Meta().(*AWSClient).dmsconn - _, err := conn.DescribeCertificates(&dms.DescribeCertificatesInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("certificate-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return fmt.Errorf("DMS certificate error: %v", err) - } - return nil - } - - return fmt.Errorf("DMS certificate not found") - } -} - -func dmsCertificateConfig(randId string) string { - return fmt.Sprintf(` -resource "aws_dms_certificate" "dms_certificate" { - certificate_id = 
"tf-test-dms-certificate-%[1]s" - certificate_pem = "-----BEGIN CERTIFICATE-----\nMIID2jCCAsKgAwIBAgIJAJ58TJVjU7G1MA0GCSqGSIb3DQEBBQUAMFExCzAJBgNV\nBAYTAlVTMREwDwYDVQQIEwhDb2xvcmFkbzEPMA0GA1UEBxMGRGVudmVyMRAwDgYD\nVQQKEwdDaGFydGVyMQwwCgYDVQQLEwNDU0UwHhcNMTcwMTMwMTkyMDA4WhcNMjYx\nMjA5MTkyMDA4WjBRMQswCQYDVQQGEwJVUzERMA8GA1UECBMIQ29sb3JhZG8xDzAN\nBgNVBAcTBkRlbnZlcjEQMA4GA1UEChMHQ2hhcnRlcjEMMAoGA1UECxMDQ1NFMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6dq6VLIImlAaTrckb5w3X6J\nWP7EGz2ChGAXlkEYto6dPCba0v5+f+8UlMOpeB25XGoai7gdItqNWVFpYsgmndx3\nvTad3ukO1zeElKtw5oHPH2plOaiv/gVJaDa9NTeINj0EtGZs74fCOclAzGFX5vBc\nb08ESWBceRgGjGv3nlij4JzHfqTkCKQz6P6pBivQBfk62rcOkkH5rKoaGltRHROS\nMbkwOhu2hN0KmSYTXRvts0LXnZU4N0l2ms39gmr7UNNNlKYINL2JoTs9dNBc7APD\ndZvlEHd+/FjcLCI8hC3t4g4AbfW0okIBCNG0+oVjqGb2DeONSJKsThahXt89MQID\nAQABo4G0MIGxMB0GA1UdDgQWBBQKq8JxjY1GmeZXJjfOMfW0kBIzPDCBgQYDVR0j\nBHoweIAUCqvCcY2NRpnmVyY3zjH1tJASMzyhVaRTMFExCzAJBgNVBAYTAlVTMREw\nDwYDVQQIEwhDb2xvcmFkbzEPMA0GA1UEBxMGRGVudmVyMRAwDgYDVQQKEwdDaGFy\ndGVyMQwwCgYDVQQLEwNDU0WCCQCefEyVY1OxtTAMBgNVHRMEBTADAQH/MA0GCSqG\nSIb3DQEBBQUAA4IBAQAWifoMk5kbv+yuWXvFwHiB4dWUUmMlUlPU/E300yVTRl58\np6DfOgJs7MMftd1KeWqTO+uW134QlTt7+jwI8Jq0uyKCu/O2kJhVtH/Ryog14tGl\n+wLcuIPLbwJI9CwZX4WMBrq4DnYss+6F47i8NCc+Z3MAiG4vtq9ytBmaod0dj2bI\ng4/Lac0e00dql9RnqENh1+dF0V+QgTJCoPkMqDNAlSB8vOodBW81UAb2z12t+IFi\n3X9J3WtCK2+T5brXL6itzewWJ2ALvX3QpmZx7fMHJ3tE+SjjyivE1BbOlzYHx83t\nTeYnm7pS9un7A/UzTDHbs7hPUezLek+H3xTPAnnq\n-----END CERTIFICATE-----\n" -} -`, randId) -} diff --git a/builtin/providers/aws/resource_aws_dms_endpoint.go b/builtin/providers/aws/resource_aws_dms_endpoint.go deleted file mode 100644 index 586ed9f7c..000000000 --- a/builtin/providers/aws/resource_aws_dms_endpoint.go +++ /dev/null @@ -1,307 +0,0 @@ -package aws - -import ( - "log" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/hashicorp/terraform/helper/validation" -) - -func resourceAwsDmsEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDmsEndpointCreate, - Read: resourceAwsDmsEndpointRead, - Update: resourceAwsDmsEndpointUpdate, - Delete: resourceAwsDmsEndpointDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "certificate_arn": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateArn, - }, - "database_name": { - Type: schema.TypeString, - Optional: true, - }, - "endpoint_arn": { - Type: schema.TypeString, - Computed: true, - }, - "endpoint_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDmsEndpointId, - }, - "endpoint_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "source", - "target", - }, false), - }, - "engine_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "mysql", - "oracle", - "postgres", - "mariadb", - "aurora", - "redshift", - "sybase", - "sqlserver", - }, false), - }, - "extra_connection_attributes": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "kms_key_arn": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - "password": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - "port": { - Type: schema.TypeInt, - Required: true, - }, - "server_name": { - Type: schema.TypeString, - Required: true, - }, - "ssl_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "none", - "require", - "verify-ca", - "verify-full", - }, false), - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, - "username": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func 
resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.CreateEndpointInput{ - EndpointIdentifier: aws.String(d.Get("endpoint_id").(string)), - EndpointType: aws.String(d.Get("endpoint_type").(string)), - EngineName: aws.String(d.Get("engine_name").(string)), - Password: aws.String(d.Get("password").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - ServerName: aws.String(d.Get("server_name").(string)), - Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), - Username: aws.String(d.Get("username").(string)), - } - - if v, ok := d.GetOk("database_name"); ok { - request.DatabaseName = aws.String(v.(string)) - } - if v, ok := d.GetOk("certificate_arn"); ok { - request.CertificateArn = aws.String(v.(string)) - } - if v, ok := d.GetOk("extra_connection_attributes"); ok { - request.ExtraConnectionAttributes = aws.String(v.(string)) - } - if v, ok := d.GetOk("kms_key_arn"); ok { - request.KmsKeyId = aws.String(v.(string)) - } - if v, ok := d.GetOk("ssl_mode"); ok { - request.SslMode = aws.String(v.(string)) - } - - log.Println("[DEBUG] DMS create endpoint:", request) - - _, err := conn.CreateEndpoint(request) - if err != nil { - return err - } - - d.SetId(d.Get("endpoint_id").(string)) - return resourceAwsDmsEndpointRead(d, meta) -} - -func resourceAwsDmsEndpointRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - response, err := conn.DescribeEndpoints(&dms.DescribeEndpointsInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("endpoint-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
- }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - log.Printf("[DEBUG] DMS Replication Endpoint %q Not Found", d.Id()) - d.SetId("") - return nil - } - return err - } - - err = resourceAwsDmsEndpointSetState(d, response.Endpoints[0]) - if err != nil { - return err - } - - tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ - ResourceArn: aws.String(d.Get("endpoint_arn").(string)), - }) - if err != nil { - return err - } - d.Set("tags", dmsTagsToMap(tagsResp.TagList)) - - return nil -} - -func resourceAwsDmsEndpointUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.ModifyEndpointInput{ - EndpointArn: aws.String(d.Get("endpoint_arn").(string)), - } - hasChanges := false - - if d.HasChange("certificate_arn") { - request.CertificateArn = aws.String(d.Get("certificate_arn").(string)) - hasChanges = true - } - - if d.HasChange("database_name") { - request.DatabaseName = aws.String(d.Get("database_name").(string)) - hasChanges = true - } - - if d.HasChange("endpoint_type") { - request.EndpointType = aws.String(d.Get("endpoint_type").(string)) - hasChanges = true - } - - if d.HasChange("engine_name") { - request.EngineName = aws.String(d.Get("engine_name").(string)) - hasChanges = true - } - - if d.HasChange("extra_connection_attributes") { - request.ExtraConnectionAttributes = aws.String(d.Get("extra_connection_attributes").(string)) - hasChanges = true - } - - if d.HasChange("password") { - request.Password = aws.String(d.Get("password").(string)) - hasChanges = true - } - - if d.HasChange("port") { - request.Port = aws.Int64(int64(d.Get("port").(int))) - hasChanges = true - } - - if d.HasChange("server_name") { - request.ServerName = aws.String(d.Get("server_name").(string)) - hasChanges = true - } - - if d.HasChange("ssl_mode") { - request.SslMode = aws.String(d.Get("ssl_mode").(string)) - hasChanges = true - } - - 
if d.HasChange("username") { - request.Username = aws.String(d.Get("username").(string)) - hasChanges = true - } - - if d.HasChange("tags") { - err := dmsSetTags(d.Get("endpoint_arn").(string), d, meta) - if err != nil { - return err - } - } - - if hasChanges { - log.Println("[DEBUG] DMS update endpoint:", request) - - _, err := conn.ModifyEndpoint(request) - if err != nil { - return err - } - - return resourceAwsDmsEndpointRead(d, meta) - } - - return nil -} - -func resourceAwsDmsEndpointDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.DeleteEndpointInput{ - EndpointArn: aws.String(d.Get("endpoint_arn").(string)), - } - - log.Printf("[DEBUG] DMS delete endpoint: %#v", request) - - _, err := conn.DeleteEndpoint(request) - if err != nil { - return err - } - - return nil -} - -func resourceAwsDmsEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error { - d.SetId(*endpoint.EndpointIdentifier) - - d.Set("certificate_arn", endpoint.CertificateArn) - d.Set("database_name", endpoint.DatabaseName) - d.Set("endpoint_arn", endpoint.EndpointArn) - d.Set("endpoint_id", endpoint.EndpointIdentifier) - // For some reason the AWS API only accepts lowercase type but returns it as uppercase - d.Set("endpoint_type", strings.ToLower(*endpoint.EndpointType)) - d.Set("engine_name", endpoint.EngineName) - d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes) - d.Set("kms_key_arn", endpoint.KmsKeyId) - d.Set("port", endpoint.Port) - d.Set("server_name", endpoint.ServerName) - d.Set("ssl_mode", endpoint.SslMode) - d.Set("username", endpoint.Username) - - return nil -} diff --git a/builtin/providers/aws/resource_aws_dms_endpoint_test.go b/builtin/providers/aws/resource_aws_dms_endpoint_test.go deleted file mode 100644 index 59c3d87c7..000000000 --- a/builtin/providers/aws/resource_aws_dms_endpoint_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - 
"github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsDmsEndpointBasic(t *testing.T) { - resourceName := "aws_dms_endpoint.dms_endpoint" - randId := acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: dmsEndpointDestroy, - Steps: []resource.TestStep{ - { - Config: dmsEndpointConfig(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsEndpointExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "endpoint_arn"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"password"}, - }, - { - Config: dmsEndpointConfigUpdate(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsEndpointExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "database_name", "tf-test-dms-db-updated"), - resource.TestCheckResourceAttr(resourceName, "extra_connection_attributes", "extra"), - resource.TestCheckResourceAttr(resourceName, "password", "tftestupdate"), - resource.TestCheckResourceAttr(resourceName, "port", "3303"), - resource.TestCheckResourceAttr(resourceName, "ssl_mode", "none"), - resource.TestCheckResourceAttr(resourceName, "server_name", "tftestupdate"), - resource.TestCheckResourceAttr(resourceName, "username", "tftestupdate"), - ), - }, - }, - }) -} - -func dmsEndpointDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dms_endpoint" { - continue - } - - err := checkDmsEndpointExists(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Found an endpoint that was not destroyed: %s", rs.Primary.ID) - } - } - - return nil -} - -func checkDmsEndpointExists(n string) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).dmsconn - resp, err := conn.DescribeEndpoints(&dms.DescribeEndpointsInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("endpoint-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return fmt.Errorf("DMS endpoint error: %v", err) - } - - if resp.Endpoints == nil { - return fmt.Errorf("DMS endpoint not found") - } - - return nil - } -} - -func dmsEndpointConfig(randId string) string { - return fmt.Sprintf(` -resource "aws_dms_endpoint" "dms_endpoint" { - database_name = "tf-test-dms-db" - endpoint_id = "tf-test-dms-endpoint-%[1]s" - endpoint_type = "source" - engine_name = "aurora" - extra_connection_attributes = "" - password = "tftest" - port = 3306 - server_name = "tftest" - ssl_mode = "none" - tags { - Name = "tf-test-dms-endpoint-%[1]s" - Update = "to-update" - Remove = "to-remove" - } - username = "tftest" -} -`, randId) -} - -func dmsEndpointConfigUpdate(randId string) string { - return fmt.Sprintf(` -resource "aws_dms_endpoint" "dms_endpoint" { - database_name = "tf-test-dms-db-updated" - endpoint_id = "tf-test-dms-endpoint-%[1]s" - endpoint_type = "source" - engine_name = "aurora" - extra_connection_attributes = "extra" - password = "tftestupdate" - port = 3303 - server_name = "tftestupdate" - ssl_mode = "none" - tags { - Name = "tf-test-dms-endpoint-%[1]s" - Update = "updated" - Add = "added" - } - username = "tftestupdate" -} -`, randId) -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_instance.go b/builtin/providers/aws/resource_aws_dms_replication_instance.go deleted file mode 100644 index f0b0a3aed..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_instance.go +++ /dev/null @@ -1,433 +0,0 @@ -package aws - -import ( - "fmt" - "log" - 
"time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDmsReplicationInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDmsReplicationInstanceCreate, - Read: resourceAwsDmsReplicationInstanceRead, - Update: resourceAwsDmsReplicationInstanceUpdate, - Delete: resourceAwsDmsReplicationInstanceDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "allocated_storage": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ValidateFunc: validateIntegerInRange(5, 6144), - }, - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - }, - "auto_minor_version_upgrade": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "engine_version": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "kms_key_arn": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - "multi_az": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - }, - "preferred_maintenance_window": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateOnceAWeekWindowFormat, - }, - "publicly_accessible": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - }, - "replication_instance_arn": { - Type: schema.TypeString, - Computed: true, - }, - "replication_instance_class": { - Type: schema.TypeString, 
- Required: true, - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | - // dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge - }, - "replication_instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDmsReplicationInstanceId, - }, - "replication_instance_private_ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - "replication_instance_public_ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - "replication_subnet_group_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, - "vpc_security_group_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Computed: true, - Optional: true, - }, - }, - } -} - -func resourceAwsDmsReplicationInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.CreateReplicationInstanceInput{ - AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - ReplicationInstanceClass: aws.String(d.Get("replication_instance_class").(string)), - ReplicationInstanceIdentifier: aws.String(d.Get("replication_instance_id").(string)), - Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), - } - - // WARNING: GetOk returns the zero value for the type if the key is omitted in config. This means for optional - // keys that the zero value is valid we cannot know if the zero value was in the config and cannot allow the API - // to set the default value. 
See GitHub Issue #5694 https://github.com/hashicorp/terraform/issues/5694 - - if v, ok := d.GetOk("allocated_storage"); ok { - request.AllocatedStorage = aws.Int64(int64(v.(int))) - } - if v, ok := d.GetOk("engine_version"); ok { - request.EngineVersion = aws.String(v.(string)) - } - if v, ok := d.GetOk("kms_key_arn"); ok { - request.KmsKeyId = aws.String(v.(string)) - } - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - request.PreferredMaintenanceWindow = aws.String(v.(string)) - } - if v, ok := d.GetOk("replication_subnet_group_id"); ok { - request.ReplicationSubnetGroupIdentifier = aws.String(v.(string)) - } - if v, ok := d.GetOk("vpc_security_group_ids"); ok { - request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) - } - - az, azSet := d.GetOk("availability_zone") - if azSet { - request.AvailabilityZone = aws.String(az.(string)) - } - - if multiAz, ok := d.GetOk("multi_az"); ok { - request.MultiAZ = aws.Bool(multiAz.(bool)) - - if multiAz.(bool) && azSet { - return fmt.Errorf("Cannot set availability_zone if multi_az is set to true") - } - } - - log.Println("[DEBUG] DMS create replication instance:", request) - - _, err := conn.CreateReplicationInstance(request) - if err != nil { - return err - } - - d.SetId(d.Get("replication_instance_id").(string)) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"available"}, - Refresh: resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDmsReplicationInstanceRead(d, meta) -} - -func resourceAwsDmsReplicationInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - response, err := 
conn.DescribeReplicationInstances(&dms.DescribeReplicationInstancesInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-instance-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. - }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - log.Printf("[DEBUG] DMS Replication Instance %q Not Found", d.Id()) - d.SetId("") - return nil - } - return err - } - - err = resourceAwsDmsReplicationInstanceSetState(d, response.ReplicationInstances[0]) - if err != nil { - return err - } - - tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ - ResourceArn: aws.String(d.Get("replication_instance_arn").(string)), - }) - if err != nil { - return err - } - d.Set("tags", dmsTagsToMap(tagsResp.TagList)) - - return nil -} - -func resourceAwsDmsReplicationInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - request := &dms.ModifyReplicationInstanceInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), - } - hasChanges := false - - if d.HasChange("auto_minor_version_upgrade") { - request.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) - hasChanges = true - } - - if d.HasChange("allocated_storage") { - if v, ok := d.GetOk("allocated_storage"); ok { - request.AllocatedStorage = aws.Int64(int64(v.(int))) - hasChanges = true - } - } - - if d.HasChange("engine_version") { - if v, ok := d.GetOk("engine_version"); ok { - request.ReplicationInstanceClass = aws.String(v.(string)) - hasChanges = true - } - } - - if d.HasChange("multi_az") { - if v, ok := d.GetOk("multi_az"); ok { - request.MultiAZ = aws.Bool(v.(bool)) - hasChanges = true - } - } - - if d.HasChange("preferred_maintenance_window") { - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - request.PreferredMaintenanceWindow = aws.String(v.(string)) - 
hasChanges = true - } - } - - if d.HasChange("replication_instance_class") { - if v, ok := d.GetOk("replication_instance_class"); ok { - request.ReplicationInstanceClass = aws.String(v.(string)) - hasChanges = true - } - } - - if d.HasChange("vpc_security_group_ids") { - if v, ok := d.GetOk("vpc_security_group_ids"); ok { - request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) - hasChanges = true - } - } - - if d.HasChange("tags") { - err := dmsSetTags(d.Get("replication_instance_arn").(string), d, meta) - if err != nil { - return err - } - } - - if hasChanges { - conn := meta.(*AWSClient).dmsconn - - _, err := conn.ModifyReplicationInstance(request) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"modifying"}, - Target: []string{"available"}, - Refresh: resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDmsReplicationInstanceRead(d, meta) - } - - return nil -} - -func resourceAwsDmsReplicationInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.DeleteReplicationInstanceInput{ - ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), - } - - log.Printf("[DEBUG] DMS delete replication instance: %#v", request) - - _, err := conn.DeleteReplicationInstance(request) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err 
= stateConf.WaitForState() - if err != nil { - return err - } - - return nil -} - -func resourceAwsDmsReplicationInstanceSetState(d *schema.ResourceData, instance *dms.ReplicationInstance) error { - d.SetId(*instance.ReplicationInstanceIdentifier) - - d.Set("replication_instance_id", instance.ReplicationInstanceIdentifier) - d.Set("allocated_storage", instance.AllocatedStorage) - d.Set("auto_minor_version_upgrade", instance.AutoMinorVersionUpgrade) - d.Set("availability_zone", instance.AvailabilityZone) - d.Set("engine_version", instance.EngineVersion) - d.Set("kms_key_arn", instance.KmsKeyId) - d.Set("multi_az", instance.MultiAZ) - d.Set("preferred_maintenance_window", instance.PreferredMaintenanceWindow) - d.Set("publicly_accessible", instance.PubliclyAccessible) - d.Set("replication_instance_arn", instance.ReplicationInstanceArn) - d.Set("replication_instance_class", instance.ReplicationInstanceClass) - d.Set("replication_subnet_group_id", instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) - - vpc_security_group_ids := []string{} - for _, sg := range instance.VpcSecurityGroups { - vpc_security_group_ids = append(vpc_security_group_ids, aws.StringValue(sg.VpcSecurityGroupId)) - } - - d.Set("vpc_security_group_ids", vpc_security_group_ids) - - private_ip_addresses := []string{} - for _, ip := range instance.ReplicationInstancePrivateIpAddresses { - private_ip_addresses = append(private_ip_addresses, aws.StringValue(ip)) - } - - d.Set("replication_instance_private_ips", private_ip_addresses) - - public_ip_addresses := []string{} - for _, ip := range instance.ReplicationInstancePublicIpAddresses { - public_ip_addresses = append(public_ip_addresses, aws.StringValue(ip)) - } - - d.Set("replication_instance_public_ips", public_ip_addresses) - - return nil -} - -func resourceAwsDmsReplicationInstanceStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := 
meta.(*AWSClient).dmsconn - - v, err := conn.DescribeReplicationInstances(&dms.DescribeReplicationInstancesInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-instance-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. - }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - return nil, "", nil - } - log.Printf("Error on retrieving DMS Replication Instance when waiting: %s", err) - return nil, "", err - } - - if v == nil { - return nil, "", nil - } - - if v.ReplicationInstances == nil { - return nil, "", fmt.Errorf("Error on retrieving DMS Replication Instance when waiting for State") - } - - return v, *v.ReplicationInstances[0].ReplicationInstanceStatus, nil - } -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_instance_test.go b/builtin/providers/aws/resource_aws_dms_replication_instance_test.go deleted file mode 100644 index 1b8ddfd3e..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_instance_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsDmsReplicationInstanceBasic(t *testing.T) { - resourceName := "aws_dms_replication_instance.dms_replication_instance" - randId := acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: dmsReplicationInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: dmsReplicationInstanceConfig(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationInstanceExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "replication_instance_arn"), - ), - }, - { - 
ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: dmsReplicationInstanceConfigUpdate(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationInstanceExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "apply_immediately"), - resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), - resource.TestCheckResourceAttr(resourceName, "preferred_maintenance_window", "mon:00:30-mon:02:30"), - ), - }, - }, - }) -} - -func checkDmsReplicationInstanceExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - conn := testAccProvider.Meta().(*AWSClient).dmsconn - resp, err := conn.DescribeReplicationInstances(&dms.DescribeReplicationInstancesInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-instance-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return fmt.Errorf("DMS replication instance error: %v", err) - } - if resp.ReplicationInstances == nil { - return fmt.Errorf("DMS replication instance not found") - } - - return nil - } -} - -func dmsReplicationInstanceDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dms_replication_instance" { - continue - } - - err := checkDmsReplicationInstanceExists(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Found replication instance that was not destroyed: %s", rs.Primary.ID) - } - } - - return nil -} - -func dmsReplicationInstanceConfig(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = 
"${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_replication_subnet_group" "dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_2.id}"] -} - -resource "aws_dms_replication_instance" "dms_replication_instance" { - allocated_storage = 5 - auto_minor_version_upgrade = true - replication_instance_class = "dms.t2.micro" - replication_instance_id = "tf-test-dms-replication-instance-%[1]s" - preferred_maintenance_window = "sun:00:30-sun:02:30" - publicly_accessible = false - replication_subnet_group_id = "${aws_dms_replication_subnet_group.dms_replication_subnet_group.replication_subnet_group_id}" - tags { - Name = "tf-test-dms-replication-instance-%[1]s" - Update = "to-update" - Remove = "to-remove" - } - - timeouts { - create = "40m" - } -} -`, randId) -} - -func dmsReplicationInstanceConfigUpdate(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_replication_subnet_group" 
"dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_2.id}"] -} - -resource "aws_dms_replication_instance" "dms_replication_instance" { - allocated_storage = 5 - apply_immediately = true - auto_minor_version_upgrade = false - replication_instance_class = "dms.t2.micro" - replication_instance_id = "tf-test-dms-replication-instance-%[1]s" - preferred_maintenance_window = "mon:00:30-mon:02:30" - publicly_accessible = false - replication_subnet_group_id = "${aws_dms_replication_subnet_group.dms_replication_subnet_group.replication_subnet_group_id}" - tags { - Name = "tf-test-dms-replication-instance-%[1]s" - Update = "updated" - Add = "added" - } -} -`, randId) -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_subnet_group.go b/builtin/providers/aws/resource_aws_dms_replication_subnet_group.go deleted file mode 100644 index b28165308..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_subnet_group.go +++ /dev/null @@ -1,179 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsDmsReplicationSubnetGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDmsReplicationSubnetGroupCreate, - Read: resourceAwsDmsReplicationSubnetGroupRead, - Update: resourceAwsDmsReplicationSubnetGroupUpdate, - Delete: resourceAwsDmsReplicationSubnetGroupDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "replication_subnet_group_arn": { - Type: schema.TypeString, - Computed: true, - }, - "replication_subnet_group_description": { - Type: schema.TypeString, - Required: true, - }, - 
"replication_subnet_group_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDmsReplicationSubnetGroupId, - }, - "subnet_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Required: true, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsDmsReplicationSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.CreateReplicationSubnetGroupInput{ - ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - ReplicationSubnetGroupDescription: aws.String(d.Get("replication_subnet_group_description").(string)), - SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), - Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), - } - - log.Println("[DEBUG] DMS create replication subnet group:", request) - - _, err := conn.CreateReplicationSubnetGroup(request) - if err != nil { - return err - } - - d.SetId(d.Get("replication_subnet_group_id").(string)) - return resourceAwsDmsReplicationSubnetGroupRead(d, meta) -} - -func resourceAwsDmsReplicationSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - response, err := conn.DescribeReplicationSubnetGroups(&dms.DescribeReplicationSubnetGroupsInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-subnet-group-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. - }, - }, - }) - if err != nil { - return err - } - if len(response.ReplicationSubnetGroups) == 0 { - d.SetId("") - return nil - } - - // The AWS API for DMS subnet groups does not return the ARN which is required to - // retrieve tags. This ARN can be built. 
- d.Set("replication_subnet_group_arn", fmt.Sprintf("arn:aws:dms:%s:%s:subgrp:%s", - meta.(*AWSClient).region, meta.(*AWSClient).accountid, d.Id())) - - err = resourceAwsDmsReplicationSubnetGroupSetState(d, response.ReplicationSubnetGroups[0]) - if err != nil { - return err - } - - tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ - ResourceArn: aws.String(d.Get("replication_subnet_group_arn").(string)), - }) - if err != nil { - return err - } - d.Set("tags", dmsTagsToMap(tagsResp.TagList)) - - return nil -} - -func resourceAwsDmsReplicationSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - // Updates to subnet groups are only valid when sending SubnetIds even if there are no - // changes to SubnetIds. - request := &dms.ModifyReplicationSubnetGroupInput{ - ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), - } - - if d.HasChange("replication_subnet_group_description") { - request.ReplicationSubnetGroupDescription = aws.String(d.Get("replication_subnet_group_description").(string)) - } - - if d.HasChange("tags") { - err := dmsSetTags(d.Get("replication_subnet_group_arn").(string), d, meta) - if err != nil { - return err - } - } - - log.Println("[DEBUG] DMS update replication subnet group:", request) - - _, err := conn.ModifyReplicationSubnetGroup(request) - if err != nil { - return err - } - - return resourceAwsDmsReplicationSubnetGroupRead(d, meta) -} - -func resourceAwsDmsReplicationSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.DeleteReplicationSubnetGroupInput{ - ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - } - - log.Printf("[DEBUG] DMS delete replication subnet group: %#v", request) - - _, err := conn.DeleteReplicationSubnetGroup(request) - if 
err != nil { - return err - } - - return nil -} - -func resourceAwsDmsReplicationSubnetGroupSetState(d *schema.ResourceData, group *dms.ReplicationSubnetGroup) error { - d.SetId(*group.ReplicationSubnetGroupIdentifier) - - subnet_ids := []string{} - for _, subnet := range group.Subnets { - subnet_ids = append(subnet_ids, aws.StringValue(subnet.SubnetIdentifier)) - } - - d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) - d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) - d.Set("subnet_ids", subnet_ids) - d.Set("vpc_id", group.VpcId) - - return nil -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_subnet_group_test.go b/builtin/providers/aws/resource_aws_dms_replication_subnet_group_test.go deleted file mode 100644 index 608382ccc..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_subnet_group_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsDmsReplicationSubnetGroupBasic(t *testing.T) { - resourceName := "aws_dms_replication_subnet_group.dms_replication_subnet_group" - randId := acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: dmsReplicationSubnetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: dmsReplicationSubnetGroupConfig(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationSubnetGroupExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: 
dmsReplicationSubnetGroupConfigUpdate(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationSubnetGroupExists(resourceName), - ), - }, - }, - }) -} - -func checkDmsReplicationSubnetGroupExists(n string) resource.TestCheckFunc { - providers := []*schema.Provider{testAccProvider} - return checkDmsReplicationSubnetGroupExistsWithProviders(n, &providers) -} - -func checkDmsReplicationSubnetGroupExistsWithProviders(n string, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - for _, provider := range *providers { - // Ignore if Meta is empty, this can happen for validation providers - if provider.Meta() == nil { - continue - } - - conn := provider.Meta().(*AWSClient).dmsconn - _, err := conn.DescribeReplicationSubnetGroups(&dms.DescribeReplicationSubnetGroupsInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-subnet-group-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return fmt.Errorf("DMS replication subnet group error: %v", err) - } - return nil - } - - return fmt.Errorf("DMS replication subnet group not found") - } -} - -func dmsReplicationSubnetGroupDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dms_replication_subnet_group" { - continue - } - - err := checkDmsReplicationSubnetGroupExists(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Found replication subnet group that was not destroyed: %s", rs.Primary.ID) - } - } - - return nil -} - -func dmsReplicationSubnetGroupConfig(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = 
"us-west-2a" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_3" { - cidr_block = "10.1.3.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_replication_subnet_group" "dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_2.id}"] - tags { - Name = "tf-test-dms-replication-subnet-group-%[1]s" - Update = "to-update" - Remove = "to-remove" - } -} -`, randId) -} - -func dmsReplicationSubnetGroupConfigUpdate(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_3" { - cidr_block = "10.1.3.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_replication_subnet_group" 
"dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_3.id}"] - tags { - Name = "tf-test-dms-replication-subnet-group-%[1]s" - Update = "updated" - Add = "added" - } -} -`, randId) -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_task.go b/builtin/providers/aws/resource_aws_dms_replication_task.go deleted file mode 100644 index ab10eedbc..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_task.go +++ /dev/null @@ -1,331 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" -) - -func resourceAwsDmsReplicationTask() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDmsReplicationTaskCreate, - Read: resourceAwsDmsReplicationTaskRead, - Update: resourceAwsDmsReplicationTaskUpdate, - Delete: resourceAwsDmsReplicationTaskDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "cdc_start_time": { - Type: schema.TypeString, - Optional: true, - // Requires a Unix timestamp in seconds. 
Example 1484346880 - }, - "migration_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "full-load", - "cdc", - "full-load-and-cdc", - }, false), - }, - "replication_instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - "replication_task_arn": { - Type: schema.TypeString, - Computed: true, - }, - "replication_task_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDmsReplicationTaskId, - }, - "replication_task_settings": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentJsonDiffs, - }, - "source_endpoint_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - "table_mappings": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentJsonDiffs, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, - "target_endpoint_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - }, - } -} - -func resourceAwsDmsReplicationTaskCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.CreateReplicationTaskInput{ - MigrationType: aws.String(d.Get("migration_type").(string)), - ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), - ReplicationTaskIdentifier: aws.String(d.Get("replication_task_id").(string)), - SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), - TableMappings: aws.String(d.Get("table_mappings").(string)), - Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), - TargetEndpointArn: aws.String(d.Get("target_endpoint_arn").(string)), - } - - if v, ok := d.GetOk("cdc_start_time"); ok { - seconds, err := strconv.ParseInt(v.(string), 10, 64) - if err != nil { - 
return fmt.Errorf("[ERROR] DMS create replication task. Invalid CDC Unix timestamp: %s", err) - } - request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) - } - - if v, ok := d.GetOk("replication_task_settings"); ok { - request.ReplicationTaskSettings = aws.String(v.(string)) - } - - log.Println("[DEBUG] DMS create replication task:", request) - - _, err := conn.CreateReplicationTask(request) - if err != nil { - return err - } - - taskId := d.Get("replication_task_id").(string) - d.SetId(taskId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"ready"}, - Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDmsReplicationTaskRead(d, meta) -} - -func resourceAwsDmsReplicationTaskRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - response, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-task-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
- }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - log.Printf("[DEBUG] DMS Replication Task %q Not Found", d.Id()) - d.SetId("") - return nil - } - return err - } - - err = resourceAwsDmsReplicationTaskSetState(d, response.ReplicationTasks[0]) - if err != nil { - return err - } - - tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ - ResourceArn: aws.String(d.Get("replication_task_arn").(string)), - }) - if err != nil { - return err - } - d.Set("tags", dmsTagsToMap(tagsResp.TagList)) - - return nil -} - -func resourceAwsDmsReplicationTaskUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.ModifyReplicationTaskInput{ - ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), - } - hasChanges := false - - if d.HasChange("cdc_start_time") { - seconds, err := strconv.ParseInt(d.Get("cdc_start_time").(string), 10, 64) - if err != nil { - return fmt.Errorf("[ERROR] DMS update replication task. 
Invalid CRC Unix timestamp: %s", err) - } - request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) - hasChanges = true - } - - if d.HasChange("migration_type") { - request.MigrationType = aws.String(d.Get("migration_type").(string)) - hasChanges = true - } - - if d.HasChange("replication_task_settings") { - request.ReplicationTaskSettings = aws.String(d.Get("replication_task_settings").(string)) - hasChanges = true - } - - if d.HasChange("table_mappings") { - request.TableMappings = aws.String(d.Get("table_mappings").(string)) - hasChanges = true - } - - if d.HasChange("tags") { - err := dmsSetTags(d.Get("replication_task_arn").(string), d, meta) - if err != nil { - return err - } - } - - if hasChanges { - log.Println("[DEBUG] DMS update replication task:", request) - - _, err := conn.ModifyReplicationTask(request) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"modifying"}, - Target: []string{"ready", "stopped", "failed"}, - Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsDmsReplicationTaskRead(d, meta) - } - - return nil -} - -func resourceAwsDmsReplicationTaskDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - request := &dms.DeleteReplicationTaskInput{ - ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), - } - - log.Printf("[DEBUG] DMS delete replication task: %#v", request) - - _, err := conn.DeleteReplicationTask(request) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - log.Printf("[DEBUG] DMS Replication Task %q Not Found", d.Id()) - d.SetId("") - return nil - } - return err - } - - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return nil -} - -func resourceAwsDmsReplicationTaskSetState(d *schema.ResourceData, task *dms.ReplicationTask) error { - d.SetId(*task.ReplicationTaskIdentifier) - - d.Set("migration_type", task.MigrationType) - d.Set("replication_instance_arn", task.ReplicationInstanceArn) - d.Set("replication_task_arn", task.ReplicationTaskArn) - d.Set("replication_task_id", task.ReplicationTaskIdentifier) - d.Set("replication_task_settings", task.ReplicationTaskSettings) - d.Set("source_endpoint_arn", task.SourceEndpointArn) - d.Set("table_mappings", task.TableMappings) - d.Set("target_endpoint_arn", task.TargetEndpointArn) - - return nil -} - -func resourceAwsDmsReplicationTaskStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).dmsconn - - v, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-task-id"), - Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
- }, - }, - }) - if err != nil { - if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { - return nil, "", nil - } - log.Printf("Error on retrieving DMS Replication Task when waiting: %s", err) - return nil, "", err - } - - if v == nil { - return nil, "", nil - } - - if v.ReplicationTasks != nil { - log.Printf("[DEBUG] DMS Replication Task status for instance %s: %s", d.Id(), *v.ReplicationTasks[0].Status) - } - - return v, *v.ReplicationTasks[0].Status, nil - } -} diff --git a/builtin/providers/aws/resource_aws_dms_replication_task_test.go b/builtin/providers/aws/resource_aws_dms_replication_task_test.go deleted file mode 100644 index 9105a3109..000000000 --- a/builtin/providers/aws/resource_aws_dms_replication_task_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsDmsReplicationTaskBasic(t *testing.T) { - resourceName := "aws_dms_replication_task.dms_replication_task" - randId := acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: dmsReplicationTaskDestroy, - Steps: []resource.TestStep{ - { - Config: dmsReplicationTaskConfig(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationTaskExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "replication_task_arn"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: dmsReplicationTaskConfigUpdate(randId), - Check: resource.ComposeTestCheckFunc( - checkDmsReplicationTaskExists(resourceName), - ), - }, - }, - }) -} - -func checkDmsReplicationTaskExists(n string) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).dmsconn - resp, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-task-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return err - } - - if resp.ReplicationTasks == nil { - return fmt.Errorf("DMS replication task error: %v", err) - } - return nil - } -} - -func dmsReplicationTaskDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dms_replication_task" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).dmsconn - resp, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("replication-task-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - }) - - if err != nil { - return nil - } - - if resp != nil && len(resp.ReplicationTasks) > 0 { - return fmt.Errorf("DMS replication task still exists: %v", err) - } - } - - return nil -} - -func dmsReplicationTaskConfig(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_endpoint" "dms_endpoint_source" { - database_name = "tf-test-dms-db" - 
endpoint_id = "tf-test-dms-endpoint-source-%[1]s" - endpoint_type = "source" - engine_name = "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.us-west-2.rds.amazonaws.com" - port = 3306 - username = "tftest" - password = "tftest" -} - -resource "aws_dms_endpoint" "dms_endpoint_target" { - database_name = "tf-test-dms-db" - endpoint_id = "tf-test-dms-endpoint-target-%[1]s" - endpoint_type = "target" - engine_name = "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.us-west-2.rds.amazonaws.com" - port = 3306 - username = "tftest" - password = "tftest" -} - -resource "aws_dms_replication_subnet_group" "dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_2.id}"] -} - -resource "aws_dms_replication_instance" "dms_replication_instance" { - allocated_storage = 5 - auto_minor_version_upgrade = true - replication_instance_class = "dms.t2.micro" - replication_instance_id = "tf-test-dms-replication-instance-%[1]s" - preferred_maintenance_window = "sun:00:30-sun:02:30" - publicly_accessible = false - replication_subnet_group_id = "${aws_dms_replication_subnet_group.dms_replication_subnet_group.replication_subnet_group_id}" -} - -resource "aws_dms_replication_task" "dms_replication_task" { - migration_type = "full-load" - replication_instance_arn = "${aws_dms_replication_instance.dms_replication_instance.replication_instance_arn}" - replication_task_id = "tf-test-dms-replication-task-%[1]s" - replication_task_settings = 
"{\"TargetMetadata\":{\"TargetSchema\":\"\",\"SupportLobs\":true,\"FullLobMode\":false,\"LobChunkSize\":0,\"LimitedSizeLobMode\":true,\"LobMaxSize\":32,\"LoadMaxFileSize\":0,\"ParallelLoadThreads\":0,\"BatchApplyEnabled\":false},\"FullLoadSettings\":{\"FullLoadEnabled\":true,\"ApplyChangesEnabled\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"CreatePkAfterFullLoad\":false,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"ResumeEnabled\":false,\"ResumeMinTableSize\":100000,\"ResumeOnlyClusteredPKTables\":true,\"MaxFullLoadSubTasks\":8,\"TransactionConsistencyTimeout\":600,\"CommitRate\":10000},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}],\"CloudWatchLogGroup\":null,\"CloudWatchLogStream\":null},\"ControlTablesSettings\":{\"historyTimeslotInMinutes\":5,\"ControlSchema\":\"\",\"HistoryTimeslotInMinutes\":5,\"HistoryTableEnabled\":false,\"SuspendedTablesTableEnabled\":false,\"StatusTableEnabled\":false},\"StreamBufferSettings\":{\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8,\"CtrlStreamBufferSizeInMB\":5},\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true,\"HandleSourceTableAltered\":true},\"ErrorBehavior\":{\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorEscalationCount\":0,\"TableErrorPolicy\":\"SUSPEND_TABLE\",\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorEscalationCount\":0,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"ApplyErrorDelete
Policy\":\"IGNORE_RECORD\",\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorEscalationCount\":0,\"FullLoadIgnoreConflicts\":true},\"ChangeProcessingTuning\":{\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMin\":1,\"BatchApplyTimeoutMax\":30,\"BatchApplyMemoryLimit\":500,\"BatchSplitSize\":0,\"MinTransactionSize\":1000,\"CommitTimeout\":1,\"MemoryLimitTotal\":1024,\"MemoryKeepTime\":60,\"StatementCacheSize\":50}}" - source_endpoint_arn = "${aws_dms_endpoint.dms_endpoint_source.endpoint_arn}" - table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" - tags { - Name = "tf-test-dms-replication-task-%[1]s" - Update = "to-update" - Remove = "to-remove" - } - target_endpoint_arn = "${aws_dms_endpoint.dms_endpoint_target.endpoint_arn}" -} -`, randId) -} - -func dmsReplicationTaskConfigUpdate(randId string) string { - return fmt.Sprintf(` -resource "aws_vpc" "dms_vpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-test-dms-vpc-%[1]s" - } -} - -resource "aws_subnet" "dms_subnet_1" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_subnet" "dms_subnet_2" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.dms_vpc.id}" - tags { - Name = "tf-test-dms-subnet-%[1]s" - } - depends_on = ["aws_vpc.dms_vpc"] -} - -resource "aws_dms_endpoint" "dms_endpoint_source" { - database_name = "tf-test-dms-db" - endpoint_id = "tf-test-dms-endpoint-source-%[1]s" - endpoint_type = "source" - engine_name = "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.us-west-2.rds.amazonaws.com" - port = 3306 - username = "tftest" - password = "tftest" -} - -resource 
"aws_dms_endpoint" "dms_endpoint_target" { - database_name = "tf-test-dms-db" - endpoint_id = "tf-test-dms-endpoint-target-%[1]s" - endpoint_type = "target" - engine_name = "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.us-west-2.rds.amazonaws.com" - port = 3306 - username = "tftest" - password = "tftest" -} - -resource "aws_dms_replication_subnet_group" "dms_replication_subnet_group" { - replication_subnet_group_id = "tf-test-dms-replication-subnet-group-%[1]s" - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = ["${aws_subnet.dms_subnet_1.id}", "${aws_subnet.dms_subnet_2.id}"] -} - -resource "aws_dms_replication_instance" "dms_replication_instance" { - allocated_storage = 5 - auto_minor_version_upgrade = true - replication_instance_class = "dms.t2.micro" - replication_instance_id = "tf-test-dms-replication-instance-%[1]s" - preferred_maintenance_window = "sun:00:30-sun:02:30" - publicly_accessible = false - replication_subnet_group_id = "${aws_dms_replication_subnet_group.dms_replication_subnet_group.replication_subnet_group_id}" -} - -resource "aws_dms_replication_task" "dms_replication_task" { - migration_type = "full-load" - replication_instance_arn = "${aws_dms_replication_instance.dms_replication_instance.replication_instance_arn}" - replication_task_id = "tf-test-dms-replication-task-%[1]s" - replication_task_settings = 
"{\"TargetMetadata\":{\"TargetSchema\":\"\",\"SupportLobs\":true,\"FullLobMode\":false,\"LobChunkSize\":0,\"LimitedSizeLobMode\":true,\"LobMaxSize\":32,\"LoadMaxFileSize\":0,\"ParallelLoadThreads\":0,\"BatchApplyEnabled\":false},\"FullLoadSettings\":{\"FullLoadEnabled\":true,\"ApplyChangesEnabled\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"CreatePkAfterFullLoad\":false,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"ResumeEnabled\":false,\"ResumeMinTableSize\":100000,\"ResumeOnlyClusteredPKTables\":true,\"MaxFullLoadSubTasks\":7,\"TransactionConsistencyTimeout\":600,\"CommitRate\":10000},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}],\"CloudWatchLogGroup\":null,\"CloudWatchLogStream\":null},\"ControlTablesSettings\":{\"historyTimeslotInMinutes\":5,\"ControlSchema\":\"\",\"HistoryTimeslotInMinutes\":5,\"HistoryTableEnabled\":false,\"SuspendedTablesTableEnabled\":false,\"StatusTableEnabled\":false},\"StreamBufferSettings\":{\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8,\"CtrlStreamBufferSizeInMB\":5},\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true,\"HandleSourceTableAltered\":true},\"ErrorBehavior\":{\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorEscalationCount\":0,\"TableErrorPolicy\":\"SUSPEND_TABLE\",\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorEscalationCount\":0,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"ApplyErrorDelete
Policy\":\"IGNORE_RECORD\",\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorEscalationCount\":0,\"FullLoadIgnoreConflicts\":true},\"ChangeProcessingTuning\":{\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMin\":1,\"BatchApplyTimeoutMax\":30,\"BatchApplyMemoryLimit\":500,\"BatchSplitSize\":0,\"MinTransactionSize\":1000,\"CommitTimeout\":1,\"MemoryLimitTotal\":1024,\"MemoryKeepTime\":60,\"StatementCacheSize\":50}}" - source_endpoint_arn = "${aws_dms_endpoint.dms_endpoint_source.endpoint_arn}" - table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" - tags { - Name = "tf-test-dms-replication-task-%[1]s" - Update = "updated" - Add = "added" - } - target_endpoint_arn = "${aws_dms_endpoint.dms_endpoint_target.endpoint_arn}" -} -`, randId) -} diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go deleted file mode 100644 index 2644f164d..000000000 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ /dev/null @@ -1,1087 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/hashicorp/terraform/helper/hashcode" -) - -// Number of times to retry if a throttling-related exception occurs -const DYNAMODB_MAX_THROTTLE_RETRIES = 5 - -// How long to sleep when a throttle-event happens -const DYNAMODB_THROTTLE_SLEEP = 5 * time.Second - -// How long to sleep if a limit-exceeded event happens -const DYNAMODB_LIMIT_EXCEEDED_SLEEP = 10 * time.Second - -// A number of these are 
marked as computed because if you don't -// provide a value, DynamoDB will provide you with defaults (which are the -// default values specified below) -func resourceAwsDynamoDbTable() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsDynamoDbTableCreate, - Read: resourceAwsDynamoDbTableRead, - Update: resourceAwsDynamoDbTableUpdate, - Delete: resourceAwsDynamoDbTableDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsDynamoDbTableMigrateState, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "hash_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "range_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "write_capacity": { - Type: schema.TypeInt, - Required: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Required: true, - }, - "attribute": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - return hashcode.String(buf.String()) - }, - }, - "ttl": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attribute_name": { - Type: schema.TypeString, - Required: true, - }, - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - }, - "local_secondary_index": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - 
"range_key": { - Type: schema.TypeString, - Required: true, - }, - "projection_type": { - Type: schema.TypeString, - Required: true, - }, - "non_key_attributes": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - return hashcode.String(buf.String()) - }, - }, - "global_secondary_index": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "write_capacity": { - Type: schema.TypeInt, - Required: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Required: true, - }, - "hash_key": { - Type: schema.TypeString, - Required: true, - }, - "range_key": { - Type: schema.TypeString, - Optional: true, - }, - "projection_type": { - Type: schema.TypeString, - Required: true, - }, - "non_key_attributes": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "stream_enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "stream_view_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: func(v interface{}) string { - value := v.(string) - return strings.ToUpper(value) - }, - ValidateFunc: validateStreamViewType, - }, - "stream_arn": { - Type: schema.TypeString, - Computed: true, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - name := d.Get("name").(string) - - log.Printf("[DEBUG] DynamoDB table create: %s", name) - - throughput := &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), - WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), - } - 
- hash_key_name := d.Get("hash_key").(string) - keyschema := []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String(hash_key_name), - KeyType: aws.String("HASH"), - }, - } - - if range_key, ok := d.GetOk("range_key"); ok { - range_schema_element := &dynamodb.KeySchemaElement{ - AttributeName: aws.String(range_key.(string)), - KeyType: aws.String("RANGE"), - } - keyschema = append(keyschema, range_schema_element) - } - - req := &dynamodb.CreateTableInput{ - TableName: aws.String(name), - ProvisionedThroughput: throughput, - KeySchema: keyschema, - } - - if attributedata, ok := d.GetOk("attribute"); ok { - attributes := []*dynamodb.AttributeDefinition{} - attributeSet := attributedata.(*schema.Set) - for _, attribute := range attributeSet.List() { - attr := attribute.(map[string]interface{}) - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: aws.String(attr["name"].(string)), - AttributeType: aws.String(attr["type"].(string)), - }) - } - - req.AttributeDefinitions = attributes - } - - if lsidata, ok := d.GetOk("local_secondary_index"); ok { - log.Printf("[DEBUG] Adding LSI data to the table") - - lsiSet := lsidata.(*schema.Set) - localSecondaryIndexes := []*dynamodb.LocalSecondaryIndex{} - for _, lsiObject := range lsiSet.List() { - lsi := lsiObject.(map[string]interface{}) - - projection := &dynamodb.Projection{ - ProjectionType: aws.String(lsi["projection_type"].(string)), - } - - if lsi["projection_type"] == "INCLUDE" { - non_key_attributes := []*string{} - for _, attr := range lsi["non_key_attributes"].([]interface{}) { - non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) - } - projection.NonKeyAttributes = non_key_attributes - } - - localSecondaryIndexes = append(localSecondaryIndexes, &dynamodb.LocalSecondaryIndex{ - IndexName: aws.String(lsi["name"].(string)), - KeySchema: []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String(hash_key_name), - KeyType: aws.String("HASH"), - }, - { - 
AttributeName: aws.String(lsi["range_key"].(string)), - KeyType: aws.String("RANGE"), - }, - }, - Projection: projection, - }) - } - - req.LocalSecondaryIndexes = localSecondaryIndexes - - log.Printf("[DEBUG] Added %d LSI definitions", len(localSecondaryIndexes)) - } - - if gsidata, ok := d.GetOk("global_secondary_index"); ok { - globalSecondaryIndexes := []*dynamodb.GlobalSecondaryIndex{} - - gsiSet := gsidata.(*schema.Set) - for _, gsiObject := range gsiSet.List() { - gsi := gsiObject.(map[string]interface{}) - gsiObject := createGSIFromData(&gsi) - globalSecondaryIndexes = append(globalSecondaryIndexes, &gsiObject) - } - req.GlobalSecondaryIndexes = globalSecondaryIndexes - } - - if _, ok := d.GetOk("stream_enabled"); ok { - - req.StreamSpecification = &dynamodb.StreamSpecification{ - StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), - StreamViewType: aws.String(d.Get("stream_view_type").(string)), - } - - log.Printf("[DEBUG] Adding StreamSpecifications to the table") - } - - _, timeToLiveOk := d.GetOk("ttl") - _, tagsOk := d.GetOk("tags") - - attemptCount := 1 - for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES { - output, err := dynamodbconn.CreateTable(req) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - switch code := awsErr.Code(); code { - case "ThrottlingException": - log.Printf("[DEBUG] Attempt %d/%d: Sleeping for a bit to throttle back create request", attemptCount, DYNAMODB_MAX_THROTTLE_RETRIES) - time.Sleep(DYNAMODB_THROTTLE_SLEEP) - attemptCount += 1 - case "LimitExceededException": - // If we're at resource capacity, error out without retry - if strings.Contains(awsErr.Message(), "Subscriber limit exceeded:") { - return fmt.Errorf("AWS Error creating DynamoDB table: %s", err) - } - log.Printf("[DEBUG] Limit on concurrent table creations hit, sleeping for a bit") - time.Sleep(DYNAMODB_LIMIT_EXCEEDED_SLEEP) - attemptCount += 1 - default: - // Some other non-retryable exception occurred - return fmt.Errorf("AWS Error creating 
DynamoDB table: %s", err) - } - } else { - // Non-AWS exception occurred, give up - return fmt.Errorf("Error creating DynamoDB table: %s", err) - } - } else { - // No error, set ID and return - d.SetId(*output.TableDescription.TableName) - tableArn := *output.TableDescription.TableArn - if err := d.Set("arn", tableArn); err != nil { - return err - } - - // Wait, till table is active before imitating any TimeToLive changes - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - log.Printf("[DEBUG] Error waiting for table to be active: %s", err) - return err - } - - log.Printf("[DEBUG] Setting DynamoDB TimeToLive on arn: %s", tableArn) - if timeToLiveOk { - if err := updateTimeToLive(d, meta); err != nil { - log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) - return err - } - } - - if tagsOk { - log.Printf("[DEBUG] Setting DynamoDB Tags on arn: %s", tableArn) - if err := createTableTags(d, meta); err != nil { - return err - } - } - - return resourceAwsDynamoDbTableRead(d, meta) - } - } - - // Too many throttling events occurred, give up - return fmt.Errorf("Unable to create DynamoDB table '%s' after %d attempts", name, attemptCount) -} - -func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) error { - - log.Printf("[DEBUG] Updating DynamoDB table %s", d.Id()) - dynamodbconn := meta.(*AWSClient).dynamodbconn - - // Ensure table is active before trying to update - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - - if d.HasChange("read_capacity") || d.HasChange("write_capacity") { - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), - } - - throughput := &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), - WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), - } - req.ProvisionedThroughput = throughput - - _, err := dynamodbconn.UpdateTable(req) 
- - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - } - - if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") { - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), - } - - req.StreamSpecification = &dynamodb.StreamSpecification{ - StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), - StreamViewType: aws.String(d.Get("stream_view_type").(string)), - } - - _, err := dynamodbconn.UpdateTable(req) - - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - } - - if d.HasChange("global_secondary_index") { - log.Printf("[DEBUG] Changed GSI data") - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), - } - - o, n := d.GetChange("global_secondary_index") - - oldSet := o.(*schema.Set) - newSet := n.(*schema.Set) - - // Track old names so we can know which ones we need to just update based on - // capacity changes, terraform appears to only diff on the set hash, not the - // contents so we need to make sure we don't delete any indexes that we - // just want to update the capacity for - oldGsiNameSet := make(map[string]bool) - newGsiNameSet := make(map[string]bool) - - for _, gsidata := range oldSet.List() { - gsiName := gsidata.(map[string]interface{})["name"].(string) - oldGsiNameSet[gsiName] = true - } - - for _, gsidata := range newSet.List() { - gsiName := gsidata.(map[string]interface{})["name"].(string) - newGsiNameSet[gsiName] = true - } - - // First determine what's new - for _, newgsidata := range newSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - newGsiName := newgsidata.(map[string]interface{})["name"].(string) - if _, exists := oldGsiNameSet[newGsiName]; !exists { - attributes := []*dynamodb.AttributeDefinition{} - gsidata := 
newgsidata.(map[string]interface{}) - gsi := createGSIFromData(&gsidata) - log.Printf("[DEBUG] Adding GSI %s", *gsi.IndexName) - update := &dynamodb.GlobalSecondaryIndexUpdate{ - Create: &dynamodb.CreateGlobalSecondaryIndexAction{ - IndexName: gsi.IndexName, - KeySchema: gsi.KeySchema, - ProvisionedThroughput: gsi.ProvisionedThroughput, - Projection: gsi.Projection, - }, - } - updates = append(updates, update) - - // Hash key is required, range key isn't - hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName) - if err != nil { - return err - } - - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: gsi.KeySchema[0].AttributeName, - AttributeType: aws.String(hashkey_type), - }) - - // If there's a range key, there will be 2 elements in KeySchema - if len(gsi.KeySchema) == 2 { - rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName) - if err != nil { - return err - } - - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: gsi.KeySchema[1].AttributeName, - AttributeType: aws.String(rangekey_type), - }) - } - - req.AttributeDefinitions = attributes - req.GlobalSecondaryIndexUpdates = updates - _, err = dynamodbconn.UpdateTable(req) - - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - - if err := waitForGSIToBeActive(d.Id(), *gsi.IndexName, meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB GSIT to be active: {{err}}", err) - } - - } - } - - for _, oldgsidata := range oldSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - oldGsiName := oldgsidata.(map[string]interface{})["name"].(string) - if _, exists := newGsiNameSet[oldGsiName]; !exists { - gsidata := oldgsidata.(map[string]interface{}) - log.Printf("[DEBUG] Deleting GSI %s", gsidata["name"].(string)) - update := &dynamodb.GlobalSecondaryIndexUpdate{ - 
Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ - IndexName: aws.String(gsidata["name"].(string)), - }, - } - updates = append(updates, update) - - req.GlobalSecondaryIndexUpdates = updates - _, err := dynamodbconn.UpdateTable(req) - - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - } - } - } - - // Update any out-of-date read / write capacity - if gsiObjects, ok := d.GetOk("global_secondary_index"); ok { - gsiSet := gsiObjects.(*schema.Set) - if len(gsiSet.List()) > 0 { - log.Printf("Updating capacity as needed!") - - // We can only change throughput, but we need to make sure it's actually changed - tableDescription, err := dynamodbconn.DescribeTable(&dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - }) - - if err != nil { - return err - } - - table := tableDescription.Table - - for _, updatedgsidata := range gsiSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - gsidata := updatedgsidata.(map[string]interface{}) - gsiName := gsidata["name"].(string) - gsiWriteCapacity := gsidata["write_capacity"].(int) - gsiReadCapacity := gsidata["read_capacity"].(int) - - log.Printf("[DEBUG] Updating GSI %s", gsiName) - gsi, err := getGlobalSecondaryIndex(gsiName, table.GlobalSecondaryIndexes) - - if err != nil { - return err - } - - capacityUpdated := false - - if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits || - int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits { - capacityUpdated = true - } - - if capacityUpdated { - update := &dynamodb.GlobalSecondaryIndexUpdate{ - Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ - IndexName: aws.String(gsidata["name"].(string)), - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)), - ReadCapacityUnits: aws.Int64(int64(gsiReadCapacity)), - }, - }, - } - 
updates = append(updates, update) - - } - - if len(updates) > 0 { - - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), - } - - req.GlobalSecondaryIndexUpdates = updates - - log.Printf("[DEBUG] Updating GSI read / write capacity on %s", d.Id()) - _, err := dynamodbconn.UpdateTable(req) - - if err != nil { - log.Printf("[DEBUG] Error updating table: %s", err) - return err - } - - if err := waitForGSIToBeActive(d.Id(), gsiName, meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB GSI to be active: {{err}}", err) - } - } - } - } - - } - - if d.HasChange("ttl") { - if err := updateTimeToLive(d, meta); err != nil { - log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) - return err - } - } - - // Update tags - if err := setTagsDynamoDb(dynamodbconn, d); err != nil { - return err - } - - return resourceAwsDynamoDbTableRead(d, meta) -} - -func updateTimeToLive(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - if ttl, ok := d.GetOk("ttl"); ok { - - timeToLiveSet := ttl.(*schema.Set) - - spec := &dynamodb.TimeToLiveSpecification{} - - timeToLive := timeToLiveSet.List()[0].(map[string]interface{}) - spec.AttributeName = aws.String(timeToLive["attribute_name"].(string)) - spec.Enabled = aws.Bool(timeToLive["enabled"].(bool)) - - req := &dynamodb.UpdateTimeToLiveInput{ - TableName: aws.String(d.Id()), - TimeToLiveSpecification: spec, - } - - _, err := dynamodbconn.UpdateTimeToLive(req) - - if err != nil { - // If ttl was not set within the .tf file before and has now been added we still run this command to update - // But there has been no change so lets continue - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" && awsErr.Message() == "TimeToLive is already disabled" { - return nil - } - log.Printf("[DEBUG] Error updating TimeToLive on table: %s", err) - return err - } - - log.Printf("[DEBUG] Updated TimeToLive on table") - - if err := 
waitForTimeToLiveUpdateToBeCompleted(d.Id(), timeToLive["enabled"].(bool), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB TimeToLive to be updated: {{err}}", err) - } - } - - return nil -} - -func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id()) - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - } - - result, err := dynamodbconn.DescribeTable(req) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { - log.Printf("[WARN] Dynamodb Table (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - return err - } - - table := result.Table - - d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits) - d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits) - - attributes := []interface{}{} - for _, attrdef := range table.AttributeDefinitions { - attribute := map[string]string{ - "name": *attrdef.AttributeName, - "type": *attrdef.AttributeType, - } - attributes = append(attributes, attribute) - log.Printf("[DEBUG] Added Attribute: %s", attribute["name"]) - } - - d.Set("attribute", attributes) - d.Set("name", table.TableName) - - for _, attribute := range table.KeySchema { - if *attribute.KeyType == "HASH" { - d.Set("hash_key", attribute.AttributeName) - } - - if *attribute.KeyType == "RANGE" { - d.Set("range_key", attribute.AttributeName) - } - } - - lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes)) - for _, lsiObject := range table.LocalSecondaryIndexes { - lsi := map[string]interface{}{ - "name": *lsiObject.IndexName, - "projection_type": *lsiObject.Projection.ProjectionType, - } - - for _, attribute := range lsiObject.KeySchema { - - if *attribute.KeyType == "RANGE" { - lsi["range_key"] = *attribute.AttributeName - } - } - nkaList := 
make([]string, len(lsiObject.Projection.NonKeyAttributes)) - for _, nka := range lsiObject.Projection.NonKeyAttributes { - nkaList = append(nkaList, *nka) - } - lsi["non_key_attributes"] = nkaList - - lsiList = append(lsiList, lsi) - } - - err = d.Set("local_secondary_index", lsiList) - if err != nil { - return err - } - - gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes)) - for _, gsiObject := range table.GlobalSecondaryIndexes { - gsi := map[string]interface{}{ - "write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits, - "read_capacity": *gsiObject.ProvisionedThroughput.ReadCapacityUnits, - "name": *gsiObject.IndexName, - } - - for _, attribute := range gsiObject.KeySchema { - if *attribute.KeyType == "HASH" { - gsi["hash_key"] = *attribute.AttributeName - } - - if *attribute.KeyType == "RANGE" { - gsi["range_key"] = *attribute.AttributeName - } - } - - gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) - - nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) - for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { - nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) - } - gsi["non_key_attributes"] = nonKeyAttrs - - gsiList = append(gsiList, gsi) - log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) - } - - if table.StreamSpecification != nil { - d.Set("stream_view_type", table.StreamSpecification.StreamViewType) - d.Set("stream_enabled", table.StreamSpecification.StreamEnabled) - d.Set("stream_arn", table.LatestStreamArn) - } - - err = d.Set("global_secondary_index", gsiList) - if err != nil { - return err - } - - d.Set("arn", table.TableArn) - - timeToLiveReq := &dynamodb.DescribeTimeToLiveInput{ - TableName: aws.String(d.Id()), - } - timeToLiveOutput, err := dynamodbconn.DescribeTimeToLive(timeToLiveReq) - if err != nil { - return err - } - timeToLive := []interface{}{} - attribute := map[string]*string{ - 
"name": timeToLiveOutput.TimeToLiveDescription.AttributeName, - "type": timeToLiveOutput.TimeToLiveDescription.TimeToLiveStatus, - } - timeToLive = append(timeToLive, attribute) - d.Set("timeToLive", timeToLive) - - log.Printf("[DEBUG] Loaded TimeToLive data for DynamoDB table '%s'", d.Id()) - - tags, err := readTableTags(d, meta) - if err != nil { - return err - } - if len(tags) != 0 { - d.Set("tags", tags) - } - - return nil -} - -func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - - log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id()) - - _, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{ - TableName: aws.String(d.Id()), - }) - if err != nil { - return err - } - - params := &dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - } - - err = resource.Retry(10*time.Minute, func() *resource.RetryError { - t, err := dynamodbconn.DescribeTable(params) - if err != nil { - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { - return nil - } - // Didn't recognize the error, so shouldn't retry. 
- return resource.NonRetryableError(err) - } - - if t != nil { - if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" { - log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id()) - return resource.RetryableError(fmt.Errorf("still deleting")) - } - } - - // we should be not found or deleting, so error here - return resource.NonRetryableError(err) - }) - - // check error from retry - if err != nil { - return err - } - - return nil -} - -func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryIndex { - - projection := &dynamodb.Projection{ - ProjectionType: aws.String((*data)["projection_type"].(string)), - } - - if (*data)["projection_type"] == "INCLUDE" { - non_key_attributes := []*string{} - for _, attr := range (*data)["non_key_attributes"].([]interface{}) { - non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) - } - projection.NonKeyAttributes = non_key_attributes - } - - writeCapacity := (*data)["write_capacity"].(int) - readCapacity := (*data)["read_capacity"].(int) - - key_schema := []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String((*data)["hash_key"].(string)), - KeyType: aws.String("HASH"), - }, - } - - range_key_name := (*data)["range_key"] - if range_key_name != "" { - range_key_element := &dynamodb.KeySchemaElement{ - AttributeName: aws.String(range_key_name.(string)), - KeyType: aws.String("RANGE"), - } - - key_schema = append(key_schema, range_key_element) - } - - return dynamodb.GlobalSecondaryIndex{ - IndexName: aws.String((*data)["name"].(string)), - KeySchema: key_schema, - Projection: projection, - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - WriteCapacityUnits: aws.Int64(int64(writeCapacity)), - ReadCapacityUnits: aws.Int64(int64(readCapacity)), - }, - } -} - -func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) { - for _, gsi := 
range indexList { - if *gsi.IndexName == indexName { - return gsi, nil - } - } - - return &dynamodb.GlobalSecondaryIndexDescription{}, fmt.Errorf("Can't find a GSI by that name...") -} - -func getAttributeType(d *schema.ResourceData, attributeName string) (string, error) { - if attributedata, ok := d.GetOk("attribute"); ok { - attributeSet := attributedata.(*schema.Set) - for _, attribute := range attributeSet.List() { - attr := attribute.(map[string]interface{}) - if attr["name"] == attributeName { - return attr["type"].(string), nil - } - } - } - - return "", fmt.Errorf("Unable to find an attribute named %s", attributeName) -} - -func waitForGSIToBeActive(tableName string, gsiName string, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(tableName), - } - - activeIndex := false - - for activeIndex == false { - - result, err := dynamodbconn.DescribeTable(req) - - if err != nil { - return err - } - - table := result.Table - var targetGSI *dynamodb.GlobalSecondaryIndexDescription = nil - - for _, gsi := range table.GlobalSecondaryIndexes { - if *gsi.IndexName == gsiName { - targetGSI = gsi - } - } - - if targetGSI != nil { - activeIndex = *targetGSI.IndexStatus == "ACTIVE" - - if !activeIndex { - log.Printf("[DEBUG] Sleeping for 5 seconds for %s GSI to become active", gsiName) - time.Sleep(5 * time.Second) - } - } else { - log.Printf("[DEBUG] GSI %s did not exist, giving up", gsiName) - break - } - } - - return nil - -} - -func waitForTableToBeActive(tableName string, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(tableName), - } - - activeState := false - - for activeState == false { - result, err := dynamodbconn.DescribeTable(req) - - if err != nil { - return err - } - - activeState = *result.Table.TableStatus == "ACTIVE" - - // Wait for a few seconds - if !activeState { - 
log.Printf("[DEBUG] Sleeping for 5 seconds for table to become active") - time.Sleep(5 * time.Second) - } - } - - return nil - -} - -func waitForTimeToLiveUpdateToBeCompleted(tableName string, enabled bool, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTimeToLiveInput{ - TableName: aws.String(tableName), - } - - stateMatched := false - for stateMatched == false { - result, err := dynamodbconn.DescribeTimeToLive(req) - - if err != nil { - return err - } - - if enabled { - stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled - } else { - stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusDisabled - } - - // Wait for a few seconds, this may take a long time... - if !stateMatched { - log.Printf("[DEBUG] Sleeping for 5 seconds before checking TimeToLive state again") - time.Sleep(5 * time.Second) - } - } - - log.Printf("[DEBUG] TimeToLive update complete") - - return nil - -} - -func createTableTags(d *schema.ResourceData, meta interface{}) error { - // DynamoDB Table has to be in the ACTIVE state in order to tag the resource - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return err - } - tags := d.Get("tags").(map[string]interface{}) - arn := d.Get("arn").(string) - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: tagsFromMapDynamoDb(tags), - } - _, err := dynamodbconn.TagResource(req) - if err != nil { - return fmt.Errorf("Error tagging dynamodb resource: %s", err) - } - return nil -} - -func readTableTags(d *schema.ResourceData, meta interface{}) (map[string]string, error) { - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return nil, err - } - arn := d.Get("arn").(string) - //result := make(map[string]string) - - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.ListTagsOfResourceInput{ - ResourceArn: 
aws.String(arn), - } - - output, err := dynamodbconn.ListTagsOfResource(req) - if err != nil { - return nil, fmt.Errorf("Error reading tags from dynamodb resource: %s", err) - } - result := tagsToMapDynamoDb(output.Tags) - // TODO Read NextToken if avail - return result, nil -} diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_migrate.go b/builtin/providers/aws/resource_aws_dynamodb_table_migrate.go deleted file mode 100644 index 59865effc..000000000 --- a/builtin/providers/aws/resource_aws_dynamodb_table_migrate.go +++ /dev/null @@ -1,70 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "strings" -) - -func resourceAwsDynamoDbTableMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS DynamoDB Table State v0; migrating to v1") - return migrateDynamoDBStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateDynamoDBStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] DynamoDB Table Attributes before Migration: %#v", is.Attributes) - - prefix := "global_secondary_index" - entity := resourceAwsDynamoDbTable() - - // Read old keys - reader := &schema.MapFieldReader{ - Schema: entity.Schema, - Map: schema.BasicMapReader(is.Attributes), - } - result, err := reader.ReadField([]string{prefix}) - if err != nil { - return nil, err - } - - oldKeys, ok := result.Value.(*schema.Set) - if !ok { - return nil, fmt.Errorf("Got unexpected value from state: %#v", result.Value) - } - - // Delete old keys - for k := range is.Attributes { - if strings.HasPrefix(k, fmt.Sprintf("%s.", prefix)) { - delete(is.Attributes, k) - } - } - - // Write new keys - writer := 
schema.MapFieldWriter{ - Schema: entity.Schema, - } - if err := writer.WriteField([]string{prefix}, oldKeys); err != nil { - return is, err - } - for k, v := range writer.Map() { - is.Attributes[k] = v - } - - log.Printf("[DEBUG] DynamoDB Table Attributes after State Migration: %#v", is.Attributes) - - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go deleted file mode 100644 index 59cebc4a1..000000000 --- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go +++ /dev/null @@ -1,797 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDynamoDbTable_basic(t *testing.T) { - var conf dynamodb.DescribeTableOutput - - rName := acctest.RandomWithPrefix("TerraformTestTable-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigInitialState(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), - testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"), - ), - }, - { - Config: testAccAWSDynamoDbConfigAddSecondaryGSI(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynamoDbTableWasUpdated("aws_dynamodb_table.basic-dynamodb-table"), - ), - }, - }, - }) -} - -func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) { - var conf dynamodb.DescribeTableOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigStreamSpecification(), - Check: resource.ComposeTestCheckFunc( - testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), - testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"), - resource.TestCheckResourceAttr( - "aws_dynamodb_table.basic-dynamodb-table", "stream_enabled", "true"), - resource.TestCheckResourceAttr( - "aws_dynamodb_table.basic-dynamodb-table", "stream_view_type", "KEYS_ONLY"), - ), - }, - }, - }) -} - -func TestAccAWSDynamoDbTable_tags(t *testing.T) { - var conf dynamodb.DescribeTableOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigTags(), - Check: resource.ComposeTestCheckFunc( - testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), - testAccCheckInitialAWSDynamoDbTableConf("aws_dynamodb_table.basic-dynamodb-table"), - resource.TestCheckResourceAttr( - "aws_dynamodb_table.basic-dynamodb-table", "tags.%", "3"), - ), - }, - }, - }) -} - -// https://github.com/hashicorp/terraform/issues/13243 -func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) { - var conf dynamodb.DescribeTableOutput - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigGsiUpdate(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), - ), - }, - { - Config: testAccAWSDynamoDbConfigGsiUpdated(name), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), - ), - }, - }, - }) -} - -func TestAccAWSDynamoDbTable_ttl(t *testing.T) { - var conf dynamodb.DescribeTableOutput - - rName := acctest.RandomWithPrefix("TerraformTestTable-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDynamoDbConfigInitialState(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf), - ), - }, - { - Config: testAccAWSDynamoDbConfigAddTimeToLive(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynamoDbTableTimeToLiveWasUpdated("aws_dynamodb_table.basic-dynamodb-table"), - ), - }, - }, - }) -} -func testAccCheckDynamoDbTableTimeToLiveWasUpdated(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - log.Printf("[DEBUG] Trying to create initial table state!") - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DynamoDB table name specified!") - } - - conn := testAccProvider.Meta().(*AWSClient).dynamodbconn - - params := &dynamodb.DescribeTimeToLiveInput{ - TableName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeTimeToLive(params) - - if err != nil { - return fmt.Errorf("[ERROR] Problem describing time to live for table '%s': %s", rs.Primary.ID, err) - } - - ttlDescription := resp.TimeToLiveDescription - - log.Printf("[DEBUG] Checking on table %s", rs.Primary.ID) - - if *ttlDescription.TimeToLiveStatus != dynamodb.TimeToLiveStatusEnabled { - return fmt.Errorf("TimeToLiveStatus %s, not ENABLED!", *ttlDescription.TimeToLiveStatus) - } - - if *ttlDescription.AttributeName != "TestTTL" { - return fmt.Errorf("AttributeName was %s, not TestTTL!", *ttlDescription.AttributeName) 
- } - - return nil - } -} - -func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "KEYS-ONLY", - ErrCount: 1, - }, - { - Value: "RANDOM-STRING", - ErrCount: 1, - }, - { - Value: "KEYS_ONLY", - ErrCount: 0, - }, - { - Value: "NEW_AND_OLD_IMAGES", - ErrCount: 0, - }, - { - Value: "NEW_IMAGE", - ErrCount: 0, - }, - { - Value: "OLD_IMAGE", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateStreamViewType(tc.Value, "aws_dynamodb_table_stream_view_type") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DynamoDB stream_view_type to trigger a validation error") - } - } -} - -func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).dynamodbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dynamodb_table" { - continue - } - - log.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID) - // Check if queue exists by checking for its attributes - params := &dynamodb.DescribeTableInput{ - TableName: aws.String(rs.Primary.ID), - } - - _, err := conn.DescribeTable(params) - if err == nil { - return fmt.Errorf("DynamoDB table %s still exists. 
Failing!", rs.Primary.ID) - } - - // Verify the error is what we want - if dbErr, ok := err.(awserr.Error); ok && dbErr.Code() == "ResourceNotFoundException" { - return nil - } - - return err - } - - return nil -} - -func testAccCheckInitialAWSDynamoDbTableExists(n string, table *dynamodb.DescribeTableOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - log.Printf("[DEBUG] Trying to create initial table state!") - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DynamoDB table name specified!") - } - - conn := testAccProvider.Meta().(*AWSClient).dynamodbconn - - params := &dynamodb.DescribeTableInput{ - TableName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeTable(params) - - if err != nil { - return fmt.Errorf("[ERROR] Problem describing table '%s': %s", rs.Primary.ID, err) - } - - *table = *resp - - return nil - } -} - -func testAccCheckInitialAWSDynamoDbTableConf(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - log.Printf("[DEBUG] Trying to create initial table state!") - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DynamoDB table name specified!") - } - - conn := testAccProvider.Meta().(*AWSClient).dynamodbconn - - params := &dynamodb.DescribeTableInput{ - TableName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeTable(params) - - if err != nil { - return fmt.Errorf("[ERROR] Problem describing table '%s': %s", rs.Primary.ID, err) - } - - table := resp.Table - - log.Printf("[DEBUG] Checking on table %s", rs.Primary.ID) - - if *table.ProvisionedThroughput.WriteCapacityUnits != 20 { - return fmt.Errorf("Provisioned write capacity was %d, not 20!", table.ProvisionedThroughput.WriteCapacityUnits) - } - - if *table.ProvisionedThroughput.ReadCapacityUnits != 10 { - return 
fmt.Errorf("Provisioned read capacity was %d, not 10!", table.ProvisionedThroughput.ReadCapacityUnits) - } - - attrCount := len(table.AttributeDefinitions) - gsiCount := len(table.GlobalSecondaryIndexes) - lsiCount := len(table.LocalSecondaryIndexes) - - if attrCount != 4 { - return fmt.Errorf("There were %d attributes, not 4 like there should have been!", attrCount) - } - - if gsiCount != 1 { - return fmt.Errorf("There were %d GSIs, not 1 like there should have been!", gsiCount) - } - - if lsiCount != 1 { - return fmt.Errorf("There were %d LSIs, not 1 like there should have been!", lsiCount) - } - - attrmap := dynamoDbAttributesToMap(&table.AttributeDefinitions) - if attrmap["TestTableHashKey"] != "S" { - return fmt.Errorf("Test table hash key was of type %s instead of S!", attrmap["TestTableHashKey"]) - } - if attrmap["TestTableRangeKey"] != "S" { - return fmt.Errorf("Test table range key was of type %s instead of S!", attrmap["TestTableRangeKey"]) - } - if attrmap["TestLSIRangeKey"] != "N" { - return fmt.Errorf("Test table LSI range key was of type %s instead of N!", attrmap["TestLSIRangeKey"]) - } - if attrmap["TestGSIRangeKey"] != "S" { - return fmt.Errorf("Test table GSI range key was of type %s instead of S!", attrmap["TestGSIRangeKey"]) - } - - return nil - } -} - -func testAccCheckDynamoDbTableWasUpdated(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DynamoDB table name specified!") - } - - conn := testAccProvider.Meta().(*AWSClient).dynamodbconn - - params := &dynamodb.DescribeTableInput{ - TableName: aws.String(rs.Primary.ID), - } - resp, err := conn.DescribeTable(params) - table := resp.Table - - if err != nil { - return err - } - - attrCount := len(table.AttributeDefinitions) - gsiCount := len(table.GlobalSecondaryIndexes) - lsiCount := len(table.LocalSecondaryIndexes) - - if 
attrCount != 4 { - return fmt.Errorf("There were %d attributes, not 4 like there should have been!", attrCount) - } - - if gsiCount != 1 { - return fmt.Errorf("There were %d GSIs, not 1 like there should have been!", gsiCount) - } - - if lsiCount != 1 { - return fmt.Errorf("There were %d LSIs, not 1 like there should have been!", lsiCount) - } - - if dynamoDbGetGSIIndex(&table.GlobalSecondaryIndexes, "ReplacementTestTableGSI") == -1 { - return fmt.Errorf("Could not find GSI named 'ReplacementTestTableGSI' in the table!") - } - - if dynamoDbGetGSIIndex(&table.GlobalSecondaryIndexes, "InitialTestTableGSI") != -1 { - return fmt.Errorf("Should have removed 'InitialTestTableGSI' but it still exists!") - } - - attrmap := dynamoDbAttributesToMap(&table.AttributeDefinitions) - if attrmap["TestTableHashKey"] != "S" { - return fmt.Errorf("Test table hash key was of type %s instead of S!", attrmap["TestTableHashKey"]) - } - if attrmap["TestTableRangeKey"] != "S" { - return fmt.Errorf("Test table range key was of type %s instead of S!", attrmap["TestTableRangeKey"]) - } - if attrmap["TestLSIRangeKey"] != "N" { - return fmt.Errorf("Test table LSI range key was of type %s instead of N!", attrmap["TestLSIRangeKey"]) - } - if attrmap["ReplacementGSIRangeKey"] != "N" { - return fmt.Errorf("Test table replacement GSI range key was of type %s instead of N!", attrmap["ReplacementGSIRangeKey"]) - } - - return nil - } -} - -func dynamoDbGetGSIIndex(gsiList *[]*dynamodb.GlobalSecondaryIndexDescription, target string) int { - for idx, gsiObject := range *gsiList { - if *gsiObject.IndexName == target { - return idx - } - } - - return -1 -} - -func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[string]string { - attrmap := make(map[string]string) - - for _, attrdef := range *attributes { - attrmap[*attrdef.AttributeName] = *attrdef.AttributeType - } - - return attrmap -} - -func testAccAWSDynamoDbConfigInitialState(rName string) string { - return fmt.Sprintf(` 
-resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "%s" - read_capacity = 10 - write_capacity = 20 - hash_key = "TestTableHashKey" - range_key = "TestTableRangeKey" - - attribute { - name = "TestTableHashKey" - type = "S" - } - - attribute { - name = "TestTableRangeKey" - type = "S" - } - - attribute { - name = "TestLSIRangeKey" - type = "N" - } - - attribute { - name = "TestGSIRangeKey" - type = "S" - } - - local_secondary_index { - name = "TestTableLSI" - range_key = "TestLSIRangeKey" - projection_type = "ALL" - } - - global_secondary_index { - name = "InitialTestTableGSI" - hash_key = "TestTableHashKey" - range_key = "TestGSIRangeKey" - write_capacity = 10 - read_capacity = 10 - projection_type = "KEYS_ONLY" - } -} -`, rName) -} - -func testAccAWSDynamoDbConfigAddSecondaryGSI(rName string) string { - return fmt.Sprintf(` -resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "%s" - read_capacity = 20 - write_capacity = 20 - hash_key = "TestTableHashKey" - range_key = "TestTableRangeKey" - - attribute { - name = "TestTableHashKey" - type = "S" - } - - attribute { - name = "TestTableRangeKey" - type = "S" - } - - attribute { - name = "TestLSIRangeKey" - type = "N" - } - - attribute { - name = "ReplacementGSIRangeKey" - type = "N" - } - - local_secondary_index { - name = "TestTableLSI" - range_key = "TestLSIRangeKey" - projection_type = "ALL" - } - - global_secondary_index { - name = "ReplacementTestTableGSI" - hash_key = "TestTableHashKey" - range_key = "ReplacementGSIRangeKey" - write_capacity = 5 - read_capacity = 5 - projection_type = "INCLUDE" - non_key_attributes = ["TestNonKeyAttribute"] - } -}`, rName) -} - -func testAccAWSDynamoDbConfigStreamSpecification() string { - return fmt.Sprintf(` -resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "TerraformTestStreamTable-%d" - read_capacity = 10 - write_capacity = 20 - hash_key = "TestTableHashKey" - range_key = "TestTableRangeKey" - - attribute { - name = "TestTableHashKey" 
- type = "S" - } - - attribute { - name = "TestTableRangeKey" - type = "S" - } - - attribute { - name = "TestLSIRangeKey" - type = "N" - } - - attribute { - name = "TestGSIRangeKey" - type = "S" - } - - local_secondary_index { - name = "TestTableLSI" - range_key = "TestLSIRangeKey" - projection_type = "ALL" - } - - global_secondary_index { - name = "InitialTestTableGSI" - hash_key = "TestTableHashKey" - range_key = "TestGSIRangeKey" - write_capacity = 10 - read_capacity = 10 - projection_type = "KEYS_ONLY" - } - stream_enabled = true - stream_view_type = "KEYS_ONLY" -} -`, acctest.RandInt()) -} - -func testAccAWSDynamoDbConfigTags() string { - return fmt.Sprintf(` -resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "TerraformTestTable-%d" - read_capacity = 10 - write_capacity = 20 - hash_key = "TestTableHashKey" - range_key = "TestTableRangeKey" - - attribute { - name = "TestTableHashKey" - type = "S" - } - - attribute { - name = "TestTableRangeKey" - type = "S" - } - - attribute { - name = "TestLSIRangeKey" - type = "N" - } - - attribute { - name = "TestGSIRangeKey" - type = "S" - } - - local_secondary_index { - name = "TestTableLSI" - range_key = "TestLSIRangeKey" - projection_type = "ALL" - } - - global_secondary_index { - name = "InitialTestTableGSI" - hash_key = "TestTableHashKey" - range_key = "TestGSIRangeKey" - write_capacity = 10 - read_capacity = 10 - projection_type = "KEYS_ONLY" - } - - tags { - Name = "terraform-test-table-%d" - AccTest = "yes" - Testing = "absolutely" - } -} -`, acctest.RandInt(), acctest.RandInt()) -} - -func testAccAWSDynamoDbConfigGsiUpdate(name string) string { - return fmt.Sprintf(` -variable "capacity" { - default = 10 -} - -resource "aws_dynamodb_table" "test" { - name = "tf-acc-test-%s" - read_capacity = "${var.capacity}" - write_capacity = "${var.capacity}" - hash_key = "id" - - attribute { - name = "id" - type = "S" - } - - attribute { - name = "att1" - type = "S" - } - - attribute { - name = "att2" - type = "S" 
- } - - attribute { - name = "att3" - type = "S" - } - - global_secondary_index { - name = "att1-index" - hash_key = "att1" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } - - global_secondary_index { - name = "att2-index" - hash_key = "att2" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } - - global_secondary_index { - name = "att3-index" - hash_key = "att3" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } -} -`, name) -} - -func testAccAWSDynamoDbConfigGsiUpdated(name string) string { - return fmt.Sprintf(` -variable "capacity" { - default = 20 -} - -resource "aws_dynamodb_table" "test" { - name = "tf-acc-test-%s" - read_capacity = "${var.capacity}" - write_capacity = "${var.capacity}" - hash_key = "id" - - attribute { - name = "id" - type = "S" - } - - attribute { - name = "att1" - type = "S" - } - - attribute { - name = "att2" - type = "S" - } - - attribute { - name = "att3" - type = "S" - } - - global_secondary_index { - name = "att1-index" - hash_key = "att1" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } - - global_secondary_index { - name = "att2-index" - hash_key = "att2" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } - - global_secondary_index { - name = "att3-index" - hash_key = "att3" - write_capacity = "${var.capacity}" - read_capacity = "${var.capacity}" - projection_type = "ALL" - } -} -`, name) -} - -func testAccAWSDynamoDbConfigAddTimeToLive(rName string) string { - return fmt.Sprintf(` -resource "aws_dynamodb_table" "basic-dynamodb-table" { - name = "%s" - read_capacity = 10 - write_capacity = 20 - hash_key = "TestTableHashKey" - range_key = "TestTableRangeKey" - - attribute { - name = "TestTableHashKey" - type = "S" - } - - attribute { - name = "TestTableRangeKey" - type = 
"S" - } - - attribute { - name = "TestLSIRangeKey" - type = "N" - } - - attribute { - name = "TestGSIRangeKey" - type = "S" - } - - local_secondary_index { - name = "TestTableLSI" - range_key = "TestLSIRangeKey" - projection_type = "ALL" - } - - ttl { - attribute_name = "TestTTL" - enabled = true - } - - global_secondary_index { - name = "InitialTestTableGSI" - hash_key = "TestTableHashKey" - range_key = "TestGSIRangeKey" - write_capacity = 10 - read_capacity = 10 - projection_type = "KEYS_ONLY" - } -} -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_ebs_snapshot.go b/builtin/providers/aws/resource_aws_ebs_snapshot.go deleted file mode 100644 index e06a7290d..000000000 --- a/builtin/providers/aws/resource_aws_ebs_snapshot.go +++ /dev/null @@ -1,145 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEbsSnapshot() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEbsSnapshotCreate, - Read: resourceAwsEbsSnapshotRead, - Delete: resourceAwsEbsSnapshotDelete, - - Schema: map[string]*schema.Schema{ - "volume_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, - "owner_alias": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - "volume_size": { - Type: schema.TypeInt, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_encryption_key_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsEbsSnapshotCreate(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).ec2conn - - request := &ec2.CreateSnapshotInput{ - VolumeId: aws.String(d.Get("volume_id").(string)), - } - if v, ok := d.GetOk("description"); ok { - request.Description = aws.String(v.(string)) - } - - res, err := conn.CreateSnapshot(request) - if err != nil { - return err - } - - d.SetId(*res.SnapshotId) - - err = resourceAwsEbsSnapshotWaitForAvailable(d.Id(), conn) - if err != nil { - return err - } - - return resourceAwsEbsSnapshotRead(d, meta) -} - -func resourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(d.Id())}, - } - res, err := conn.DescribeSnapshots(req) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSnapshotID.NotFound" { - log.Printf("Snapshot %q Not found - removing from state", d.Id()) - d.SetId("") - return nil - } - - snapshot := res.Snapshots[0] - - d.Set("description", snapshot.Description) - d.Set("owner_id", snapshot.OwnerId) - d.Set("encrypted", snapshot.Encrypted) - d.Set("owner_alias", snapshot.OwnerAlias) - d.Set("volume_id", snapshot.VolumeId) - d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId) - d.Set("kms_keey_id", snapshot.KmsKeyId) - d.Set("volume_size", snapshot.VolumeSize) - - return nil -} - -func resourceAwsEbsSnapshotDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - request := &ec2.DeleteSnapshotInput{ - SnapshotId: aws.String(d.Id()), - } - _, err := conn.DeleteSnapshot(request) - if err == nil { - return nil - } - - ebsErr, ok := err.(awserr.Error) - if ebsErr.Code() == "SnapshotInUse" { - return resource.RetryableError(fmt.Errorf("EBS SnapshotInUse - trying again while it detaches")) - } - - if !ok { - return resource.NonRetryableError(err) - } - - return resource.NonRetryableError(err) - }) -} - -func 
resourceAwsEbsSnapshotWaitForAvailable(id string, conn *ec2.EC2) error { - log.Printf("Waiting for Snapshot %s to become available...", id) - - req := &ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(id)}, - } - err := conn.WaitUntilSnapshotCompleted(req) - return err -} diff --git a/builtin/providers/aws/resource_aws_ebs_snapshot_test.go b/builtin/providers/aws/resource_aws_ebs_snapshot_test.go deleted file mode 100644 index 8e2c7a2ba..000000000 --- a/builtin/providers/aws/resource_aws_ebs_snapshot_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEBSSnapshot_basic(t *testing.T) { - var v ec2.Snapshot - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsSnapshotConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSnapshotExists("aws_ebs_snapshot.test", &v), - ), - }, - }, - }) -} - -func TestAccAWSEBSSnapshot_withDescription(t *testing.T) { - var v ec2.Snapshot - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsSnapshotConfigWithDescription, - Check: resource.ComposeTestCheckFunc( - testAccCheckSnapshotExists("aws_ebs_snapshot.test", &v), - resource.TestCheckResourceAttr("aws_ebs_snapshot.test", "description", "EBS Snapshot Acceptance Test"), - ), - }, - }, - }) -} - -func testAccCheckSnapshotExists(n string, v *ec2.Snapshot) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := 
testAccProvider.Meta().(*AWSClient).ec2conn - - request := &ec2.DescribeSnapshotsInput{ - SnapshotIds: []*string{aws.String(rs.Primary.ID)}, - } - - response, err := conn.DescribeSnapshots(request) - if err == nil { - if response.Snapshots != nil && len(response.Snapshots) > 0 { - *v = *response.Snapshots[0] - return nil - } - } - return fmt.Errorf("Error finding EC2 Snapshot %s", rs.Primary.ID) - } -} - -const testAccAwsEbsSnapshotConfig = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - size = 1 -} - -resource "aws_ebs_snapshot" "test" { - volume_id = "${aws_ebs_volume.test.id}" -} -` - -const testAccAwsEbsSnapshotConfigWithDescription = ` -resource "aws_ebs_volume" "description_test" { - availability_zone = "us-west-2a" - size = 1 -} - -resource "aws_ebs_snapshot" "test" { - volume_id = "${aws_ebs_volume.description_test.id}" - description = "EBS Snapshot Acceptance Test" -} -` diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go deleted file mode 100644 index 1beda135e..000000000 --- a/builtin/providers/aws/resource_aws_ebs_volume.go +++ /dev/null @@ -1,305 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEbsVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEbsVolumeCreate, - Read: resourceAwsEbsVolumeRead, - Update: resourceAWSEbsVolumeUpdate, - Delete: resourceAwsEbsVolumeDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "availability_zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "encrypted": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - 
ForceNew: true, - }, - "iops": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "snapshot_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - request := &ec2.CreateVolumeInput{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - } - if value, ok := d.GetOk("encrypted"); ok { - request.Encrypted = aws.Bool(value.(bool)) - } - if value, ok := d.GetOk("kms_key_id"); ok { - request.KmsKeyId = aws.String(value.(string)) - } - if value, ok := d.GetOk("size"); ok { - request.Size = aws.Int64(int64(value.(int))) - } - if value, ok := d.GetOk("snapshot_id"); ok { - request.SnapshotId = aws.String(value.(string)) - } - - // IOPs are only valid, and required for, storage type io1. The current minimu - // is 100. Instead of a hard validation we we only apply the IOPs to the - // request if the type is io1, and log a warning otherwise. This allows users - // to "disable" iops. 
See https://github.com/hashicorp/terraform/pull/4146 - var t string - if value, ok := d.GetOk("type"); ok { - t = value.(string) - request.VolumeType = aws.String(t) - } - - iops := d.Get("iops").(int) - if t != "io1" && iops > 0 { - log.Printf("[WARN] IOPs is only valid for storate type io1 for EBS Volumes") - } else if t == "io1" { - // We add the iops value without validating it's size, to allow AWS to - // enforce a size requirement (currently 100) - request.Iops = aws.Int64(int64(iops)) - } - - log.Printf( - "[DEBUG] EBS Volume create opts: %s", request) - result, err := conn.CreateVolume(request) - if err != nil { - return fmt.Errorf("Error creating EC2 volume: %s", err) - } - - log.Println("[DEBUG] Waiting for Volume to become available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"available"}, - Refresh: volumeStateRefreshFunc(conn, *result.VolumeId), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Volume (%s) to become available: %s", - *result.VolumeId, err) - } - - d.SetId(*result.VolumeId) - - if _, ok := d.GetOk("tags"); ok { - if err := setTags(conn, d); err != nil { - return errwrap.Wrapf("Error setting tags for EBS Volume: {{err}}", err) - } - } - - return readVolume(d, result) -} - -func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - if _, ok := d.GetOk("tags"); ok { - if err := setTags(conn, d); err != nil { - return errwrap.Wrapf("Error updating tags for EBS Volume: {{err}}", err) - } - } - - requestUpdate := false - params := &ec2.ModifyVolumeInput{ - VolumeId: aws.String(d.Id()), - } - - if d.HasChange("size") { - requestUpdate = true - params.Size = aws.Int64(int64(d.Get("size").(int))) - } - - if d.HasChange("type") { - requestUpdate = true - params.VolumeType = 
aws.String(d.Get("type").(string)) - } - - if d.HasChange("iops") { - requestUpdate = true - params.Iops = aws.Int64(int64(d.Get("iops").(int))) - } - - if requestUpdate { - result, err := conn.ModifyVolume(params) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "modifying"}, - Target: []string{"available", "in-use"}, - Refresh: volumeStateRefreshFunc(conn, *result.VolumeModification.VolumeId), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Volume (%s) to become available: %s", - *result.VolumeModification.VolumeId, err) - } - } - - return resourceAwsEbsVolumeRead(d, meta) -} - -// volumeStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a the state of a Volume. Returns successfully when volume is available -func volumeStateRefreshFunc(conn *ec2.EC2, volumeID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeVolumes(&ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(volumeID)}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - // Set this to nil as if we didn't find anything. 
- log.Printf("Error on Volume State Refresh: message: \"%s\", code:\"%s\"", ec2err.Message(), ec2err.Code()) - resp = nil - return nil, "", err - } else { - log.Printf("Error on Volume State Refresh: %s", err) - return nil, "", err - } - } - - v := resp.Volumes[0] - return v, *v.State, nil - } -} - -func resourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(d.Id())}, - } - - response, err := conn.DescribeVolumes(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVolume.NotFound" { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading EC2 volume %s: %s", d.Id(), err) - } - - return readVolume(d, response.Volumes[0]) -} - -func resourceAwsEbsVolumeDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - request := &ec2.DeleteVolumeInput{ - VolumeId: aws.String(d.Id()), - } - _, err := conn.DeleteVolume(request) - if err == nil { - return nil - } - - ebsErr, ok := err.(awserr.Error) - if ebsErr.Code() == "VolumeInUse" { - return resource.RetryableError(fmt.Errorf("EBS VolumeInUse - trying again while it detaches")) - } - - if !ok { - return resource.NonRetryableError(err) - } - - return resource.NonRetryableError(err) - }) - -} - -func readVolume(d *schema.ResourceData, volume *ec2.Volume) error { - d.SetId(*volume.VolumeId) - - d.Set("availability_zone", *volume.AvailabilityZone) - if volume.Encrypted != nil { - d.Set("encrypted", *volume.Encrypted) - } - if volume.KmsKeyId != nil { - d.Set("kms_key_id", *volume.KmsKeyId) - } - if volume.Size != nil { - d.Set("size", *volume.Size) - } - if volume.SnapshotId != nil { - d.Set("snapshot_id", *volume.SnapshotId) - } - if volume.VolumeType != nil { - d.Set("type", *volume.VolumeType) - } - - if volume.VolumeType != nil && 
*volume.VolumeType == "io1" { - // Only set the iops attribute if the volume type is io1. Setting otherwise - // can trigger a refresh/plan loop based on the computed value that is given - // from AWS, and prevent us from specifying 0 as a valid iops. - // See https://github.com/hashicorp/terraform/pull/4146 - if volume.Iops != nil { - d.Set("iops", *volume.Iops) - } - } - - if volume.Tags != nil { - d.Set("tags", tagsToMap(volume.Tags)) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_ebs_volume_test.go b/builtin/providers/aws/resource_aws_ebs_volume_test.go deleted file mode 100644 index 1c62247ed..000000000 --- a/builtin/providers/aws/resource_aws_ebs_volume_test.go +++ /dev/null @@ -1,442 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEBSVolume_basic(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_updateAttachedEbsVolume(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsAttachedVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "10"), - ), - }, - { - Config: testAccAwsEbsAttachedVolumeConfigUpdateSize, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "20"), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_updateSize(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "1"), - ), - }, - { - Config: testAccAwsEbsVolumeConfigUpdateSize, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "10"), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_updateType(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "type", "gp2"), - ), - }, - { - Config: testAccAwsEbsVolumeConfigUpdateType, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "type", "sc1"), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_updateIops(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfigWithIops, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "iops", "100"), - ), - }, - { - Config: testAccAwsEbsVolumeConfigWithIopsUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "iops", "200"), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_kmsKey(t *testing.T) { - var v ec2.Volume - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAwsEbsVolumeConfigWithKmsKey, ri) - keyRegex := regexp.MustCompile("^arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.test", &v), - resource.TestCheckResourceAttr("aws_ebs_volume.test", "encrypted", "true"), - resource.TestMatchResourceAttr("aws_ebs_volume.test", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_NoIops(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfigWithNoIops, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.iops_test", &v), - ), - }, - }, - }) -} - -func TestAccAWSEBSVolume_withTags(t *testing.T) { - var v ec2.Volume - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_ebs_volume.tags_test", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsEbsVolumeConfigWithTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckVolumeExists("aws_ebs_volume.tags_test", &v), - ), - }, - }, - }) -} - -func testAccCheckVolumeExists(n string, v 
*ec2.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(rs.Primary.ID)}, - } - - response, err := conn.DescribeVolumes(request) - if err == nil { - if response.Volumes != nil && len(response.Volumes) > 0 { - *v = *response.Volumes[0] - return nil - } - } - return fmt.Errorf("Error finding EC2 volume %s", rs.Primary.ID) - } -} - -const testAccAwsEbsVolumeConfig = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - type = "gp2" - size = 1 - tags { - Name = "tf-acc-test-ebs-volume-test" - } -} -` - -const testAccAwsEbsAttachedVolumeConfig = ` -data "aws_ami" "debian_jessie_latest" { - most_recent = true - - filter { - name = "name" - values = ["debian-jessie-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = ["x86_64"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } - - owners = ["379101102735"] # Debian -} - -resource "aws_instance" "test" { - ami = "${data.aws_ami.debian_jessie_latest.id}" - associate_public_ip_address = true - count = 1 - instance_type = "t2.medium" - - root_block_device { - volume_size = "10" - volume_type = "standard" - delete_on_termination = true - } - - tags { - Name = "test-terraform" - } -} - -resource "aws_ebs_volume" "test" { - depends_on = ["aws_instance.test"] - availability_zone = "${aws_instance.test.availability_zone}" - type = "gp2" - size = "10" -} - -resource "aws_volume_attachment" "test" { - depends_on = ["aws_ebs_volume.test"] - device_name = "/dev/xvdg" - volume_id = "${aws_ebs_volume.test.id}" - instance_id = "${aws_instance.test.id}" -} -` - -const testAccAwsEbsAttachedVolumeConfigUpdateSize = ` 
-data "aws_ami" "debian_jessie_latest" { - most_recent = true - - filter { - name = "name" - values = ["debian-jessie-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = ["x86_64"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } - - owners = ["379101102735"] # Debian -} - -resource "aws_instance" "test" { - ami = "${data.aws_ami.debian_jessie_latest.id}" - associate_public_ip_address = true - count = 1 - instance_type = "t2.medium" - - root_block_device { - volume_size = "10" - volume_type = "standard" - delete_on_termination = true - } - - tags { - Name = "test-terraform" - } -} - -resource "aws_ebs_volume" "test" { - depends_on = ["aws_instance.test"] - availability_zone = "${aws_instance.test.availability_zone}" - type = "gp2" - size = "20" -} - -resource "aws_volume_attachment" "test" { - depends_on = ["aws_ebs_volume.test"] - device_name = "/dev/xvdg" - volume_id = "${aws_ebs_volume.test.id}" - instance_id = "${aws_instance.test.id}" -} -` - -const testAccAwsEbsVolumeConfigUpdateSize = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - type = "gp2" - size = 10 - tags { - Name = "tf-acc-test-ebs-volume-test" - } -} -` - -const testAccAwsEbsVolumeConfigUpdateType = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - type = "sc1" - size = 500 - tags { - Name = "tf-acc-test-ebs-volume-test" - } -} -` - -const testAccAwsEbsVolumeConfigWithIops = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - type = "io1" - size = 4 - iops = 100 - tags { - Name = "tf-acc-test-ebs-volume-test" - } -} -` - -const testAccAwsEbsVolumeConfigWithIopsUpdated = ` -resource "aws_ebs_volume" "test" { - availability_zone = "us-west-2a" - type = "io1" - size = 4 - iops = 200 - tags { - Name = "tf-acc-test-ebs-volume-test" - } -} -` - -const testAccAwsEbsVolumeConfigWithKmsKey = ` -resource "aws_kms_key" "foo" { - description = 
"Terraform acc test %d" - policy = < 0 { - log.Printf("[DEBUG] Adding ECS load balancers: %s", loadBalancers) - input.LoadBalancers = loadBalancers - } - if v, ok := d.GetOk("iam_role"); ok { - input.Role = aws.String(v.(string)) - } - - strategies := d.Get("placement_strategy").(*schema.Set).List() - if len(strategies) > 0 { - var ps []*ecs.PlacementStrategy - for _, raw := range strategies { - p := raw.(map[string]interface{}) - t := p["type"].(string) - f := p["field"].(string) - if err := validateAwsEcsPlacementStrategy(t, f); err != nil { - return err - } - ps = append(ps, &ecs.PlacementStrategy{ - Type: aws.String(p["type"].(string)), - Field: aws.String(p["field"].(string)), - }) - } - input.PlacementStrategy = ps - } - - constraints := d.Get("placement_constraints").(*schema.Set).List() - if len(constraints) > 0 { - var pc []*ecs.PlacementConstraint - for _, raw := range constraints { - p := raw.(map[string]interface{}) - t := p["type"].(string) - e := p["expression"].(string) - if err := validateAwsEcsPlacementConstraint(t, e); err != nil { - return err - } - constraint := &ecs.PlacementConstraint{ - Type: aws.String(t), - } - if e != "" { - constraint.Expression = aws.String(e) - } - - pc = append(pc, constraint) - } - input.PlacementConstraints = pc - } - - log.Printf("[DEBUG] Creating ECS service: %s", input) - - // Retry due to AWS IAM & ECS eventual consistency - var out *ecs.CreateServiceOutput - var err error - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - out, err = conn.CreateService(&input) - - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if awsErr.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Trying to create ECS service again: %q", - awsErr.Message()) - return resource.RetryableError(err) - } - if awsErr.Code() == "ClusterNotFoundException" { - log.Printf("[DEBUG] Trying to create ECS service again: %q", - awsErr.Message()) - return 
resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - - return nil - }) - if err != nil { - return fmt.Errorf("%s %q", err, d.Get("name").(string)) - } - - service := *out.Service - - log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn) - d.SetId(*service.ServiceArn) - - return resourceAwsEcsServiceUpdate(d, meta) -} - -func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - log.Printf("[DEBUG] Reading ECS service %s", d.Id()) - input := ecs.DescribeServicesInput{ - Services: []*string{aws.String(d.Id())}, - Cluster: aws.String(d.Get("cluster").(string)), - } - - out, err := conn.DescribeServices(&input) - if err != nil { - return err - } - - if len(out.Services) < 1 { - log.Printf("[DEBUG] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id()) - d.SetId("") - return nil - } - - service := out.Services[0] - - // Status==INACTIVE means deleted service - if *service.Status == "INACTIVE" { - log.Printf("[DEBUG] Removing ECS service %q because it's INACTIVE", *service.ServiceArn) - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Received ECS service %s", service) - - d.SetId(*service.ServiceArn) - d.Set("name", service.ServiceName) - - // Save task definition in the same format - if strings.HasPrefix(d.Get("task_definition").(string), "arn:"+meta.(*AWSClient).partition+":ecs:") { - d.Set("task_definition", service.TaskDefinition) - } else { - taskDefinition := buildFamilyAndRevisionFromARN(*service.TaskDefinition) - d.Set("task_definition", taskDefinition) - } - - d.Set("desired_count", service.DesiredCount) - - // Save cluster in the same format - if strings.HasPrefix(d.Get("cluster").(string), "arn:"+meta.(*AWSClient).partition+":ecs:") { - d.Set("cluster", service.ClusterArn) - } else { - clusterARN := getNameFromARN(*service.ClusterArn) - d.Set("cluster", clusterARN) - } - - // Save IAM role in the same format - if 
service.RoleArn != nil { - if strings.HasPrefix(d.Get("iam_role").(string), "arn:"+meta.(*AWSClient).partition+":iam:") { - d.Set("iam_role", service.RoleArn) - } else { - roleARN := getNameFromARN(*service.RoleArn) - d.Set("iam_role", roleARN) - } - } - - if service.DeploymentConfiguration != nil { - d.Set("deployment_maximum_percent", service.DeploymentConfiguration.MaximumPercent) - d.Set("deployment_minimum_healthy_percent", service.DeploymentConfiguration.MinimumHealthyPercent) - } - - if service.LoadBalancers != nil { - d.Set("load_balancers", flattenEcsLoadBalancers(service.LoadBalancers)) - } - - if err := d.Set("placement_strategy", flattenPlacementStrategy(service.PlacementStrategy)); err != nil { - log.Printf("[ERR] Error setting placement_strategy for (%s): %s", d.Id(), err) - } - if err := d.Set("placement_constraints", flattenServicePlacementConstraints(service.PlacementConstraints)); err != nil { - log.Printf("[ERR] Error setting placement_constraints for (%s): %s", d.Id(), err) - } - - return nil -} - -func flattenServicePlacementConstraints(pcs []*ecs.PlacementConstraint) []map[string]interface{} { - if len(pcs) == 0 { - return nil - } - results := make([]map[string]interface{}, 0) - for _, pc := range pcs { - c := make(map[string]interface{}) - c["type"] = *pc.Type - if pc.Expression != nil { - c["expression"] = *pc.Expression - } - - results = append(results, c) - } - return results -} - -func flattenPlacementStrategy(pss []*ecs.PlacementStrategy) []map[string]interface{} { - if len(pss) == 0 { - return nil - } - results := make([]map[string]interface{}, 0) - for _, ps := range pss { - c := make(map[string]interface{}) - c["type"] = *ps.Type - c["field"] = *ps.Field - - // for some fields the API requires lowercase for creation but will return uppercase on query - if *ps.Field == "MEMORY" || *ps.Field == "CPU" { - c["field"] = strings.ToLower(*ps.Field) - } - - results = append(results, c) - } - return results -} - -func 
resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - log.Printf("[DEBUG] Updating ECS service %s", d.Id()) - input := ecs.UpdateServiceInput{ - Service: aws.String(d.Id()), - Cluster: aws.String(d.Get("cluster").(string)), - } - - if d.HasChange("desired_count") { - _, n := d.GetChange("desired_count") - input.DesiredCount = aws.Int64(int64(n.(int))) - } - if d.HasChange("task_definition") { - _, n := d.GetChange("task_definition") - input.TaskDefinition = aws.String(n.(string)) - } - - if d.HasChange("deployment_maximum_percent") || d.HasChange("deployment_minimum_healthy_percent") { - input.DeploymentConfiguration = &ecs.DeploymentConfiguration{ - MaximumPercent: aws.Int64(int64(d.Get("deployment_maximum_percent").(int))), - MinimumHealthyPercent: aws.Int64(int64(d.Get("deployment_minimum_healthy_percent").(int))), - } - } - - // Retry due to IAM & ECS eventual consistency - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - out, err := conn.UpdateService(&input) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Trying to update ECS service again: %#v", err) - return resource.RetryableError(err) - } - if ok && awsErr.Code() == "ServiceNotFoundException" { - log.Printf("[DEBUG] Trying to update ECS service again: %#v", err) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - - log.Printf("[DEBUG] Updated ECS service %s", out.Service) - return nil - }) - if err != nil { - return err - } - - return resourceAwsEcsServiceRead(d, meta) -} - -func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - // Check if it's not already gone - resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{ - Services: []*string{aws.String(d.Id())}, - Cluster: aws.String(d.Get("cluster").(string)), - }) - if err != 
nil { - return err - } - - if len(resp.Services) == 0 { - log.Printf("[DEBUG] ECS Service %q is already gone", d.Id()) - return nil - } - - log.Printf("[DEBUG] ECS service %s is currently %s", d.Id(), *resp.Services[0].Status) - - if *resp.Services[0].Status == "INACTIVE" { - return nil - } - - // Drain the ECS service - if *resp.Services[0].Status != "DRAINING" { - log.Printf("[DEBUG] Draining ECS service %s", d.Id()) - _, err = conn.UpdateService(&ecs.UpdateServiceInput{ - Service: aws.String(d.Id()), - Cluster: aws.String(d.Get("cluster").(string)), - DesiredCount: aws.Int64(int64(0)), - }) - if err != nil { - return err - } - } - - // Wait until the ECS service is drained - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - input := ecs.DeleteServiceInput{ - Service: aws.String(d.Id()), - Cluster: aws.String(d.Get("cluster").(string)), - } - - log.Printf("[DEBUG] Trying to delete ECS service %s", input) - _, err := conn.DeleteService(&input) - if err == nil { - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if ec2err.Code() == "InvalidParameterException" { - // Prevent "The service cannot be stopped while deployments are active." 
- log.Printf("[DEBUG] Trying to delete ECS service again: %q", - ec2err.Message()) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - - }) - if err != nil { - return err - } - - // Wait until it's deleted - wait := resource.StateChangeConf{ - Pending: []string{"ACTIVE", "DRAINING"}, - Target: []string{"INACTIVE"}, - Timeout: 10 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if ECS service %s is INACTIVE", d.Id()) - resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{ - Services: []*string{aws.String(d.Id())}, - Cluster: aws.String(d.Get("cluster").(string)), - }) - if err != nil { - return resp, "FAILED", err - } - - log.Printf("[DEBUG] ECS service (%s) is currently %q", d.Id(), *resp.Services[0].Status) - return resp, *resp.Services[0].Status, nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - log.Printf("[DEBUG] ECS service %s deleted.", d.Id()) - return nil -} - -func resourceAwsEcsLoadBalancerHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", m["elb_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["container_name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["container_port"].(int))) - - if s := m["target_group_arn"].(string); s != "" { - buf.WriteString(fmt.Sprintf("%s-", s)) - } - - return hashcode.String(buf.String()) -} - -func buildFamilyAndRevisionFromARN(arn string) string { - return strings.Split(arn, "/")[1] -} - -// Expects the following ARNs: -// arn:aws:iam::0123456789:role/EcsService -// arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster -func getNameFromARN(arn string) string { - return strings.Split(arn, "/")[1] -} - -func parseTaskDefinition(taskDefinition string) (string, string, error) { - matches := taskDefinitionRE.FindAllStringSubmatch(taskDefinition, 2) - - if len(matches) == 0 || len(matches[0]) 
!= 3 { - return "", "", fmt.Errorf( - "Invalid task definition format, family:rev or ARN expected (%#v)", - taskDefinition) - } - - return matches[0][1], matches[0][2], nil -} diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go deleted file mode 100644 index c3a603547..000000000 --- a/builtin/providers/aws/resource_aws_ecs_service_test.go +++ /dev/null @@ -1,1025 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestParseTaskDefinition(t *testing.T) { - cases := map[string]map[string]interface{}{ - "invalid": { - "family": "", - "revision": "", - "isValid": false, - }, - "invalidWithColon:": { - "family": "", - "revision": "", - "isValid": false, - }, - "1234": { - "family": "", - "revision": "", - "isValid": false, - }, - "invalid:aaa": { - "family": "", - "revision": "", - "isValid": false, - }, - "invalid=family:1": { - "family": "", - "revision": "", - "isValid": false, - }, - "invalid:name:1": { - "family": "", - "revision": "", - "isValid": false, - }, - "valid:1": { - "family": "valid", - "revision": "1", - "isValid": true, - }, - "abc12-def:54": { - "family": "abc12-def", - "revision": "54", - "isValid": true, - }, - "lorem_ip-sum:123": { - "family": "lorem_ip-sum", - "revision": "123", - "isValid": true, - }, - "lorem-ipsum:1": { - "family": "lorem-ipsum", - "revision": "1", - "isValid": true, - }, - } - - for input, expectedOutput := range cases { - family, revision, err := parseTaskDefinition(input) - isValid := expectedOutput["isValid"].(bool) - if !isValid && err == nil { - t.Fatalf("Task definition %s should fail", input) - } - - expectedFamily := expectedOutput["family"].(string) - if family != expectedFamily { - t.Fatalf("Unexpected 
family (%#v) for task definition %s\n%#v", family, input, err) - } - expectedRevision := expectedOutput["revision"].(string) - if revision != expectedRevision { - t.Fatalf("Unexpected revision (%#v) for task definition %s\n%#v", revision, input, err) - } - } -} - -func TestAccAWSEcsServiceWithARN(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsService(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - ), - }, - - { - Config: testAccAWSEcsServiceModified(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - ), - }, - }, - }) -} - -func TestAccAWSEcsServiceWithFamilyAndRevision(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-test") - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithFamilyAndRevision(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), - ), - }, - - { - Config: testAccAWSEcsServiceWithFamilyAndRevisionModified(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/2427 -func TestAccAWSEcsServiceWithRenamedCluster(t *testing.T) { - originalRegexp := regexp.MustCompile( - "^arn:aws:ecs:[^:]+:[0-9]+:cluster/terraformecstest3$") - modifiedRegexp := regexp.MustCompile( - "^arn:aws:ecs:[^:]+:[0-9]+:cluster/terraformecstest3modified$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithRenamedCluster, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), - resource.TestMatchResourceAttr( - "aws_ecs_service.ghost", "cluster", originalRegexp), - ), - }, - - { - Config: testAccAWSEcsServiceWithRenamedClusterModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), - resource.TestMatchResourceAttr( - "aws_ecs_service.ghost", "cluster", modifiedRegexp), - ), - }, - }, - }) -} - -func TestAccAWSEcsService_withIamRole(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsService_withIamRole, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.ghost"), - ), - }, - }, - }) -} - -func TestAccAWSEcsService_withDeploymentValues(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithDeploymentValues(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - resource.TestCheckResourceAttr( - "aws_ecs_service.mongo", "deployment_maximum_percent", "200"), - resource.TestCheckResourceAttr( - "aws_ecs_service.mongo", "deployment_minimum_healthy_percent", "100"), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/3444 -func TestAccAWSEcsService_withLbChanges(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccAWSEcsService_withLbChanges, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"), - ), - }, - { - Config: testAccAWSEcsService_withLbChanges_modified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/3361 -func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { - clusterName := regexp.MustCompile("^terraformecstestcluster$") - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithEcsClusterName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), - resource.TestMatchResourceAttr( - "aws_ecs_service.jenkins", "cluster", clusterName), - ), - }, - }, - }) -} - -func TestAccAWSEcsService_withAlb(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithAlb(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.with_alb"), - ), - }, - }, - }) -} - -func TestAccAWSEcsServiceWithPlacementStrategy(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsService(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - 
resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_strategy.#", "0"), - ), - }, - { - Config: testAccAWSEcsServiceWithPlacementStrategy(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_strategy.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSEcsServiceWithPlacementConstraints(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithPlacementConstraint(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_constraints.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSEcsServiceWithPlacementConstraints_emptyExpression(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsServiceWithPlacementConstraintEmptyExpression(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsServiceExists("aws_ecs_service.mongo"), - resource.TestCheckResourceAttr("aws_ecs_service.mongo", "placement_constraints.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ecsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ecs_service" { - continue - } - - out, err := conn.DescribeServices(&ecs.DescribeServicesInput{ - Services: []*string{aws.String(rs.Primary.ID)}, - Cluster: aws.String(rs.Primary.Attributes["cluster"]), - }) - - if err == nil { - if len(out.Services) > 0 { - var 
activeServices []*ecs.Service - for _, svc := range out.Services { - if *svc.Status != "INACTIVE" { - activeServices = append(activeServices, svc) - } - } - if len(activeServices) == 0 { - return nil - } - - return fmt.Errorf("ECS service still exists:\n%#v", activeServices) - } - return nil - } - - return err - } - - return nil -} - -func testAccCheckAWSEcsServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - } -} - -func testAccAWSEcsService(rInt int) string { - return fmt.Sprintf(` -resource "aws_ecs_cluster" "default" { - name = "terraformecstest%d" -} - -resource "aws_ecs_task_definition" "mongo" { - family = "mongodb" - container_definitions = < 0 { - var pc []*ecs.TaskDefinitionPlacementConstraint - for _, raw := range constraints { - p := raw.(map[string]interface{}) - t := p["type"].(string) - e := p["expression"].(string) - if err := validateAwsEcsPlacementConstraint(t, e); err != nil { - return err - } - pc = append(pc, &ecs.TaskDefinitionPlacementConstraint{ - Type: aws.String(t), - Expression: aws.String(e), - }) - } - input.PlacementConstraints = pc - } - - log.Printf("[DEBUG] Registering ECS task definition: %s", input) - out, err := conn.RegisterTaskDefinition(&input) - if err != nil { - return err - } - - taskDefinition := *out.TaskDefinition - - log.Printf("[DEBUG] ECS task definition registered: %q (rev. 
%d)", - *taskDefinition.TaskDefinitionArn, *taskDefinition.Revision) - - d.SetId(*taskDefinition.Family) - d.Set("arn", taskDefinition.TaskDefinitionArn) - - return resourceAwsEcsTaskDefinitionRead(d, meta) -} - -func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - log.Printf("[DEBUG] Reading task definition %s", d.Id()) - out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ - TaskDefinition: aws.String(d.Get("arn").(string)), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] Received task definition %s", out) - - taskDefinition := out.TaskDefinition - - d.SetId(*taskDefinition.Family) - d.Set("arn", taskDefinition.TaskDefinitionArn) - d.Set("family", taskDefinition.Family) - d.Set("revision", taskDefinition.Revision) - d.Set("container_definitions", taskDefinition.ContainerDefinitions) - d.Set("task_role_arn", taskDefinition.TaskRoleArn) - d.Set("network_mode", taskDefinition.NetworkMode) - d.Set("volumes", flattenEcsVolumes(taskDefinition.Volumes)) - if err := d.Set("placement_constraints", flattenPlacementConstraints(taskDefinition.PlacementConstraints)); err != nil { - log.Printf("[ERR] Error setting placement_constraints for (%s): %s", d.Id(), err) - } - - return nil -} - -func flattenPlacementConstraints(pcs []*ecs.TaskDefinitionPlacementConstraint) []map[string]interface{} { - if len(pcs) == 0 { - return nil - } - results := make([]map[string]interface{}, 0) - for _, pc := range pcs { - c := make(map[string]interface{}) - c["type"] = *pc.Type - c["expression"] = *pc.Expression - results = append(results, c) - } - return results -} - -func resourceAwsEcsTaskDefinitionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ecsconn - - _, err := conn.DeregisterTaskDefinition(&ecs.DeregisterTaskDefinitionInput{ - TaskDefinition: aws.String(d.Get("arn").(string)), - }) - if err != nil { - return err - } - - log.Printf("[DEBUG] 
Task definition %q deregistered.", d.Get("arn").(string)) - - return nil -} - -func resourceAwsEcsTaskDefinitionVolumeHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["host_path"].(string))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go b/builtin/providers/aws/resource_aws_ecs_task_definition_test.go deleted file mode 100644 index afbf2955f..000000000 --- a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go +++ /dev/null @@ -1,716 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEcsTaskDefinition_basic(t *testing.T) { - var def ecs.TaskDefinition - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinition, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def), - ), - }, - { - Config: testAccAWSEcsTaskDefinitionModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/2370 -func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) { - var def ecs.TaskDefinition - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSEcsTaskDefinitionWithScratchVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def), - ), - }, - }, - }) -} - -// Regression for https://github.com/hashicorp/terraform/issues/2694 -func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) { - var def ecs.TaskDefinition - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinitionWithEcsService, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def), - testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"), - ), - }, - { - Config: testAccAWSEcsTaskDefinitionWithEcsServiceModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def), - testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"), - ), - }, - }, - }) -} - -func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) { - var def ecs.TaskDefinition - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinitionWithTaskRoleArn(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def), - ), - }, - }, - }) -} - -func TestAccAWSEcsTaskDefinition_withNetworkMode(t *testing.T) { - var def ecs.TaskDefinition - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinitionWithNetworkMode(rInt), 
- Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep", &def), - resource.TestCheckResourceAttr( - "aws_ecs_task_definition.sleep", "network_mode", "bridge"), - ), - }, - }, - }) -} - -func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) { - var def ecs.TaskDefinition - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinition_constraint, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &def), - resource.TestCheckResourceAttr("aws_ecs_task_definition.jenkins", "placement_constraints.#", "1"), - testAccCheckAWSTaskDefinitionConstraintsAttrs(&def), - ), - }, - }, - }) -} - -func TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource(t *testing.T) { - var before ecs.TaskDefinition - var after ecs.TaskDefinition - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEcsTaskDefinition, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &before), - ), - }, - { - Config: testAccAWSEcsTaskDefinitionUpdatedVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &after), - testAccCheckEcsTaskDefinitionRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func testAccCheckEcsTaskDefinitionRecreated(t *testing.T, - before, after *ecs.TaskDefinition) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.Revision == *after.Revision { - t.Fatalf("Expected change of TaskDefinition Revisions, but both were %v", before.Revision) - } - return nil - } 
-} - -func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(def.PlacementConstraints) != 1 { - return fmt.Errorf("Expected (1) placement_constraints, got (%d)", len(def.PlacementConstraints)) - } - return nil - } -} -func TestValidateAwsEcsTaskDefinitionNetworkMode(t *testing.T) { - validNames := []string{ - "bridge", - "host", - "none", - } - for _, v := range validNames { - _, errors := validateAwsEcsTaskDefinitionNetworkMode(v, "network_mode") - if len(errors) != 0 { - t.Fatalf("%q should be a valid AWS ECS Task Definition Network Mode: %q", v, errors) - } - } - - invalidNames := []string{ - "bridged", - "-docker", - } - for _, v := range invalidNames { - _, errors := validateAwsEcsTaskDefinitionNetworkMode(v, "network_mode") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid AWS ECS Task Definition Network Mode", v) - } - } -} - -func TestValidateAwsEcsTaskDefinitionContainerDefinitions(t *testing.T) { - validDefinitions := []string{ - testValidateAwsEcsTaskDefinitionValidContainerDefinitions, - } - for _, v := range validDefinitions { - _, errors := validateAwsEcsTaskDefinitionContainerDefinitions(v, "container_definitions") - if len(errors) != 0 { - t.Fatalf("%q should be a valid AWS ECS Task Definition Container Definitions: %q", v, errors) - } - } - - invalidDefinitions := []string{ - testValidateAwsEcsTaskDefinitionInvalidCommandContainerDefinitions, - } - for _, v := range invalidDefinitions { - _, errors := validateAwsEcsTaskDefinitionContainerDefinitions(v, "container_definitions") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid AWS ECS Task Definition Container Definitions", v) - } - } -} - -func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ecsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ecs_task_definition" { - continue - } - - input := 
ecs.DescribeTaskDefinitionInput{ - TaskDefinition: aws.String(rs.Primary.Attributes["arn"]), - } - - out, err := conn.DescribeTaskDefinition(&input) - - if err != nil { - return err - } - - if out.TaskDefinition != nil && *out.TaskDefinition.Status != "INACTIVE" { - return fmt.Errorf("ECS task definition still exists:\n%#v", *out.TaskDefinition) - } - } - - return nil -} - -func testAccCheckAWSEcsTaskDefinitionExists(name string, def *ecs.TaskDefinition) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*AWSClient).ecsconn - - out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ - TaskDefinition: aws.String(rs.Primary.Attributes["arn"]), - }) - if err != nil { - return err - } - - *def = *out.TaskDefinition - - return nil - } -} - -var testAccAWSEcsTaskDefinition_constraint = ` -resource "aws_ecs_task_definition" "jenkins" { - family = "terraform-acc-test" - container_definitions = < 64 { - errors = append(errors, fmt.Errorf( - "%q cannot take the Creation Token over the limit of 64 characters: %q", k, value)) - } - return -} - -func validatePerformanceModeType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != efs.PerformanceModeGeneralPurpose && value != efs.PerformanceModeMaxIo { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Performance Mode %q. 
Valid modes are either %q or %q.", - k, value, efs.PerformanceModeGeneralPurpose, efs.PerformanceModeMaxIo)) - } - return -} - -func hasEmptyFileSystems(fs *efs.DescribeFileSystemsOutput) bool { - if fs != nil && len(fs.FileSystems) > 0 { - return false - } - return true -} diff --git a/builtin/providers/aws/resource_aws_efs_file_system_test.go b/builtin/providers/aws/resource_aws_efs_file_system_test.go deleted file mode 100644 index 93119bb79..000000000 --- a/builtin/providers/aws/resource_aws_efs_file_system_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/efs" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAWSEFSFileSystem_validateReferenceName(t *testing.T) { - var value string - var errors []error - - value = acctest.RandString(128) - _, errors = validateReferenceName(value, "reference_name") - if len(errors) == 0 { - t.Fatalf("Expected to trigger a validation error") - } - - value = acctest.RandString(32) - _, errors = validateReferenceName(value, "reference_name") - if len(errors) != 0 { - t.Fatalf("Expected not to trigger a validation error") - } -} - -func TestResourceAWSEFSFileSystem_validatePerformanceModeType(t *testing.T) { - _, errors := validatePerformanceModeType("incorrect", "performance_mode") - if len(errors) == 0 { - t.Fatalf("Expected to trigger a validation error") - } - - var testCases = []struct { - Value string - ErrCount int - }{ - { - Value: "generalPurpose", - ErrCount: 0, - }, - { - Value: "maxIO", - ErrCount: 0, - }, - } - - for _, tc := range testCases { - _, errors := validatePerformanceModeType(tc.Value, "performance_mode") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected not to trigger a validation error") - } - } -} - -func 
TestResourceAWSEFSFileSystem_hasEmptyFileSystems(t *testing.T) { - fs := &efs.DescribeFileSystemsOutput{ - FileSystems: []*efs.FileSystemDescription{}, - } - - var actual bool - - actual = hasEmptyFileSystems(fs) - if !actual { - t.Fatalf("Expected return value to be true, got %t", actual) - } - - // Add an empty file system. - fs.FileSystems = append(fs.FileSystems, &efs.FileSystemDescription{}) - - actual = hasEmptyFileSystems(fs) - if actual { - t.Fatalf("Expected return value to be false, got %t", actual) - } - -} - -func TestAccAWSEFSFileSystem_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEfsFileSystemDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEFSFileSystemConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_efs_file_system.foo", - "performance_mode", - "generalPurpose"), - testAccCheckEfsFileSystem( - "aws_efs_file_system.foo", - ), - testAccCheckEfsFileSystemPerformanceMode( - "aws_efs_file_system.foo", - "generalPurpose", - ), - ), - }, - { - Config: testAccAWSEFSFileSystemConfigWithTags(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckEfsFileSystem( - "aws_efs_file_system.foo-with-tags", - ), - testAccCheckEfsFileSystemPerformanceMode( - "aws_efs_file_system.foo-with-tags", - "generalPurpose", - ), - testAccCheckEfsFileSystemTags( - "aws_efs_file_system.foo-with-tags", - map[string]string{ - "Name": fmt.Sprintf("foo-efs-%d", rInt), - "Another": "tag", - }, - ), - ), - }, - { - Config: testAccAWSEFSFileSystemConfigWithPerformanceMode, - Check: resource.ComposeTestCheckFunc( - testAccCheckEfsFileSystem( - "aws_efs_file_system.foo-with-performance-mode", - ), - testAccCheckEfsCreationToken( - "aws_efs_file_system.foo-with-performance-mode", - "supercalifragilisticexpialidocious", - ), - testAccCheckEfsFileSystemPerformanceMode( - 
"aws_efs_file_system.foo-with-performance-mode", - "maxIO", - ), - ), - }, - }, - }) -} - -func TestAccAWSEFSFileSystem_pagedTags(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEfsFileSystemDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEFSFileSystemConfigPagedTags(rInt), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_efs_file_system.foo", - "tags.%", - "11"), - //testAccCheckEfsFileSystem( - // "aws_efs_file_system.foo", - //), - //testAccCheckEfsFileSystemPerformanceMode( - // "aws_efs_file_system.foo", - // "generalPurpose", - //), - ), - }, - }, - }) -} - -func testAccCheckEfsFileSystemDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).efsconn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_efs_file_system" { - continue - } - - resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(rs.Primary.ID), - }) - if err != nil { - if efsErr, ok := err.(awserr.Error); ok && efsErr.Code() == "FileSystemNotFound" { - // gone - return nil - } - return fmt.Errorf("Error describing EFS in tests: %s", err) - } - if len(resp.FileSystems) > 0 { - return fmt.Errorf("EFS file system %q still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckEfsFileSystem(resourceID string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - _, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - return nil - } -} - -func 
testAccCheckEfsCreationToken(resourceID string, expectedToken string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(rs.Primary.ID), - }) - - fs := resp.FileSystems[0] - if *fs.CreationToken != expectedToken { - return fmt.Errorf("Creation Token mismatch.\nExpected: %s\nGiven: %v", - expectedToken, *fs.CreationToken) - } - - if err != nil { - return err - } - - return nil - } -} - -func testAccCheckEfsFileSystemTags(resourceID string, expectedTags map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - resp, err := conn.DescribeTags(&efs.DescribeTagsInput{ - FileSystemId: aws.String(rs.Primary.ID), - }) - - if !reflect.DeepEqual(expectedTags, tagsToMapEFS(resp.Tags)) { - return fmt.Errorf("Tags mismatch.\nExpected: %#v\nGiven: %#v", - expectedTags, resp.Tags) - } - - if err != nil { - return err - } - - return nil - } -} - -func testAccCheckEfsFileSystemPerformanceMode(resourceID string, expectedMode string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(rs.Primary.ID), - }) - - fs := 
resp.FileSystems[0] - if *fs.PerformanceMode != expectedMode { - return fmt.Errorf("Performance Mode mismatch.\nExpected: %s\nGiven: %v", - expectedMode, *fs.PerformanceMode) - } - - if err != nil { - return err - } - - return nil - } -} - -const testAccAWSEFSFileSystemConfig = ` -resource "aws_efs_file_system" "foo" { - creation_token = "radeksimko" -} -` - -func testAccAWSEFSFileSystemConfigPagedTags(rInt int) string { - return fmt.Sprintf(` - resource "aws_efs_file_system" "foo" { - tags { - Name = "foo-efs-%d" - Another = "tag" - Test = "yes" - User = "root" - Page = "1" - Environment = "prod" - CostCenter = "terraform" - AcceptanceTest = "PagedTags" - CreationToken = "radek" - PerfMode = "max" - Region = "us-west-2" - } - } - `, rInt) -} - -func testAccAWSEFSFileSystemConfigWithTags(rInt int) string { - return fmt.Sprintf(` - resource "aws_efs_file_system" "foo-with-tags" { - tags { - Name = "foo-efs-%d" - Another = "tag" - } - } - `, rInt) -} - -const testAccAWSEFSFileSystemConfigWithPerformanceMode = ` -resource "aws_efs_file_system" "foo-with-performance-mode" { - creation_token = "supercalifragilisticexpialidocious" - performance_mode = "maxIO" -} -` diff --git a/builtin/providers/aws/resource_aws_efs_mount_target.go b/builtin/providers/aws/resource_aws_efs_mount_target.go deleted file mode 100644 index 501447808..000000000 --- a/builtin/providers/aws/resource_aws_efs_mount_target.go +++ /dev/null @@ -1,298 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEfsMountTarget() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEfsMountTargetCreate, - Read: resourceAwsEfsMountTargetRead, - Update: resourceAwsEfsMountTargetUpdate, - Delete: 
resourceAwsEfsMountTargetDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "file_system_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Computed: true, - Optional: true, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_interface_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "dns_name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsEfsMountTargetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).efsconn - - fsId := d.Get("file_system_id").(string) - subnetId := d.Get("subnet_id").(string) - - // CreateMountTarget would return the same Mount Target ID - // to parallel requests if they both include the same AZ - // and we would end up managing the same MT as 2 resources. - // So we make it fail by calling 1 request per AZ at a time. 
- az, err := getAzFromSubnetId(subnetId, meta.(*AWSClient).ec2conn) - if err != nil { - return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", subnetId, err) - } - mtKey := "efs-mt-" + fsId + "-" + az - awsMutexKV.Lock(mtKey) - defer awsMutexKV.Unlock(mtKey) - - input := efs.CreateMountTargetInput{ - FileSystemId: aws.String(fsId), - SubnetId: aws.String(subnetId), - } - - if v, ok := d.GetOk("ip_address"); ok { - input.IpAddress = aws.String(v.(string)) - } - if v, ok := d.GetOk("security_groups"); ok { - input.SecurityGroups = expandStringList(v.(*schema.Set).List()) - } - - log.Printf("[DEBUG] Creating EFS mount target: %#v", input) - - mt, err := conn.CreateMountTarget(&input) - if err != nil { - return err - } - - d.SetId(*mt.MountTargetId) - log.Printf("[INFO] EFS mount target ID: %s", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"available"}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(d.Id()), - }) - if err != nil { - return nil, "error", err - } - - if hasEmptyMountTargets(resp) { - return nil, "error", fmt.Errorf("EFS mount target %q could not be found.", d.Id()) - } - - mt := resp.MountTargets[0] - - log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState) - return mt, *mt.LifeCycleState, nil - }, - Timeout: 10 * time.Minute, - Delay: 2 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for EFS mount target (%s) to create: %s", d.Id(), err) - } - - log.Printf("[DEBUG] EFS mount target created: %s", *mt.MountTargetId) - - return resourceAwsEfsMountTargetRead(d, meta) -} - -func resourceAwsEfsMountTargetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).efsconn - - if d.HasChange("security_groups") { - input := 
efs.ModifyMountTargetSecurityGroupsInput{ - MountTargetId: aws.String(d.Id()), - SecurityGroups: expandStringList(d.Get("security_groups").(*schema.Set).List()), - } - _, err := conn.ModifyMountTargetSecurityGroups(&input) - if err != nil { - return err - } - } - - return resourceAwsEfsMountTargetRead(d, meta) -} - -func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).efsconn - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(d.Id()), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "MountTargetNotFound" { - // The EFS mount target could not be found, - // which would indicate that it might be - // already deleted. - log.Printf("[WARN] EFS mount target %q could not be found.", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("Error reading EFS mount target %s: %s", d.Id(), err) - } - - if hasEmptyMountTargets(resp) { - return fmt.Errorf("EFS mount target %q could not be found.", d.Id()) - } - - mt := resp.MountTargets[0] - - log.Printf("[DEBUG] Found EFS mount target: %#v", mt) - - d.SetId(*mt.MountTargetId) - d.Set("file_system_id", mt.FileSystemId) - d.Set("ip_address", mt.IpAddress) - d.Set("subnet_id", mt.SubnetId) - d.Set("network_interface_id", mt.NetworkInterfaceId) - - sgResp, err := conn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{ - MountTargetId: aws.String(d.Id()), - }) - if err != nil { - return err - } - - err = d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups))) - if err != nil { - return err - } - - // DNS name per http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html - _, err = getAzFromSubnetId(*mt.SubnetId, meta.(*AWSClient).ec2conn) - if err != nil { - return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", *mt.SubnetId, err) - } - - region := 
meta.(*AWSClient).region - err = d.Set("dns_name", resourceAwsEfsMountTargetDnsName(*mt.FileSystemId, region)) - if err != nil { - return err - } - - return nil -} - -func getAzFromSubnetId(subnetId string, conn *ec2.EC2) (string, error) { - input := ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(subnetId)}, - } - out, err := conn.DescribeSubnets(&input) - if err != nil { - return "", err - } - - if l := len(out.Subnets); l != 1 { - return "", fmt.Errorf("Expected exactly 1 subnet returned for %q, got: %d", subnetId, l) - } - - return *out.Subnets[0].AvailabilityZone, nil -} - -func resourceAwsEfsMountTargetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).efsconn - - log.Printf("[DEBUG] Deleting EFS mount target %q", d.Id()) - _, err := conn.DeleteMountTarget(&efs.DeleteMountTargetInput{ - MountTargetId: aws.String(d.Id()), - }) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "deleting", "deleted"}, - Target: []string{}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(d.Id()), - }) - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return nil, "error", err - } - - if awsErr.Code() == "MountTargetNotFound" { - return nil, "", nil - } - - return nil, "error", awsErr - } - - if hasEmptyMountTargets(resp) { - return nil, "", nil - } - - mt := resp.MountTargets[0] - - log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState) - return mt, *mt.LifeCycleState, nil - }, - Timeout: 10 * time.Minute, - Delay: 2 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for EFS mount target (%q) to delete: %s", - d.Id(), err.Error()) - } - - log.Printf("[DEBUG] EFS mount target %q deleted.", d.Id()) - - return nil -} - -func 
resourceAwsEfsMountTargetDnsName(fileSystemId, region string) string { - return fmt.Sprintf("%s.efs.%s.amazonaws.com", fileSystemId, region) -} - -func hasEmptyMountTargets(mto *efs.DescribeMountTargetsOutput) bool { - if mto != nil && len(mto.MountTargets) > 0 { - return false - } - return true -} diff --git a/builtin/providers/aws/resource_aws_efs_mount_target_test.go b/builtin/providers/aws/resource_aws_efs_mount_target_test.go deleted file mode 100644 index 75349cc2e..000000000 --- a/builtin/providers/aws/resource_aws_efs_mount_target_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/efs" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEFSMountTarget_basic(t *testing.T) { - var mount efs.MountTargetDescription - ct := fmt.Sprintf("createtoken-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEfsMountTargetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEFSMountTargetConfig(ct), - Check: resource.ComposeTestCheckFunc( - testAccCheckEfsMountTarget( - "aws_efs_mount_target.alpha", - &mount, - ), - resource.TestMatchResourceAttr( - "aws_efs_mount_target.alpha", - "dns_name", - regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"), - ), - ), - }, - resource.TestStep{ - Config: testAccAWSEFSMountTargetConfigModified(ct), - Check: resource.ComposeTestCheckFunc( - testAccCheckEfsMountTarget( - "aws_efs_mount_target.alpha", - &mount, - ), - resource.TestMatchResourceAttr( - "aws_efs_mount_target.alpha", - "dns_name", - regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"), - ), - testAccCheckEfsMountTarget( - 
"aws_efs_mount_target.beta", - &mount, - ), - resource.TestMatchResourceAttr( - "aws_efs_mount_target.beta", - "dns_name", - regexp.MustCompile("^[^.]+.efs.us-west-2.amazonaws.com$"), - ), - ), - }, - }, - }) -} - -func TestAccAWSEFSMountTarget_disappears(t *testing.T) { - var mount efs.MountTargetDescription - - ct := fmt.Sprintf("createtoken-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEFSMountTargetConfig(ct), - Check: resource.ComposeTestCheckFunc( - testAccCheckEfsMountTarget( - "aws_efs_mount_target.alpha", - &mount, - ), - testAccAWSEFSMountTargetDisappears(&mount), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestResourceAWSEFSMountTarget_mountTargetDnsName(t *testing.T) { - actual := resourceAwsEfsMountTargetDnsName("fs-123456ab", "non-existent-1") - - expected := "fs-123456ab.efs.non-existent-1.amazonaws.com" - if actual != expected { - t.Fatalf("Expected EFS mount target DNS name to be %s, got %s", - expected, actual) - } -} - -func TestResourceAWSEFSMountTarget_hasEmptyMountTargets(t *testing.T) { - mto := &efs.DescribeMountTargetsOutput{ - MountTargets: []*efs.MountTargetDescription{}, - } - - var actual bool - - actual = hasEmptyMountTargets(mto) - if !actual { - t.Fatalf("Expected return value to be true, got %t", actual) - } - - // Add an empty mount target. 
- mto.MountTargets = append(mto.MountTargets, &efs.MountTargetDescription{}) - - actual = hasEmptyMountTargets(mto) - if actual { - t.Fatalf("Expected return value to be false, got %t", actual) - } - -} - -func testAccCheckEfsMountTargetDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).efsconn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_efs_mount_target" { - continue - } - - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(rs.Primary.ID), - }) - if err != nil { - if efsErr, ok := err.(awserr.Error); ok && efsErr.Code() == "MountTargetNotFound" { - // gone - return nil - } - return fmt.Errorf("Error describing EFS Mount in tests: %s", err) - } - if len(resp.MountTargets) > 0 { - return fmt.Errorf("EFS Mount target %q still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckEfsMountTarget(resourceID string, mount *efs.MountTargetDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - fs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - mt, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(fs.Primary.ID), - }) - if err != nil { - return err - } - - if *mt.MountTargets[0].MountTargetId != fs.Primary.ID { - return fmt.Errorf("Mount target ID mismatch: %q != %q", - *mt.MountTargets[0].MountTargetId, fs.Primary.ID) - } - - *mount = *mt.MountTargets[0] - - return nil - } -} - -func testAccAWSEFSMountTargetDisappears(mount *efs.MountTargetDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).efsconn - - _, err := 
conn.DeleteMountTarget(&efs.DeleteMountTargetInput{ - MountTargetId: mount.MountTargetId, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "MountTargetNotFound" { - return nil - } - return err - } - - return resource.Retry(3*time.Minute, func() *resource.RetryError { - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - MountTargetId: mount.MountTargetId, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "MountTargetNotFound" { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("Error reading EFS mount target: %s", err)) - } - if resp.MountTargets == nil || len(resp.MountTargets) < 1 { - return nil - } - if *resp.MountTargets[0].LifeCycleState == "deleted" { - return nil - } - return resource.RetryableError(fmt.Errorf( - "Waiting for EFS mount target: %s", *mount.MountTargetId)) - }) - } - -} - -func testAccAWSEFSMountTargetConfig(ct string) string { - return fmt.Sprintf(` -resource "aws_efs_file_system" "foo" { - creation_token = "%s" -} - -resource "aws_efs_mount_target" "alpha" { - file_system_id = "${aws_efs_file_system.foo.id}" - subnet_id = "${aws_subnet.alpha.id}" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSEFSMountTargetConfig" - } -} - -resource "aws_subnet" "alpha" { - vpc_id = "${aws_vpc.foo.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} -`, ct) -} - -func testAccAWSEFSMountTargetConfigModified(ct string) string { - return fmt.Sprintf(` -resource "aws_efs_file_system" "foo" { - creation_token = "%s" -} - -resource "aws_efs_mount_target" "alpha" { - file_system_id = "${aws_efs_file_system.foo.id}" - subnet_id = "${aws_subnet.alpha.id}" -} - -resource "aws_efs_mount_target" "beta" { - file_system_id = "${aws_efs_file_system.foo.id}" - subnet_id = "${aws_subnet.beta.id}" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = 
"testAccAWSEFSMountTargetConfigModified" - } -} - -resource "aws_subnet" "alpha" { - vpc_id = "${aws_vpc.foo.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.1.0/24" -} - -resource "aws_subnet" "beta" { - vpc_id = "${aws_vpc.foo.id}" - availability_zone = "us-west-2b" - cidr_block = "10.0.2.0/24" -} -`, ct) -} diff --git a/builtin/providers/aws/resource_aws_egress_only_internet_gateway.go b/builtin/providers/aws/resource_aws_egress_only_internet_gateway.go deleted file mode 100644 index 0d5256a15..000000000 --- a/builtin/providers/aws/resource_aws_egress_only_internet_gateway.go +++ /dev/null @@ -1,129 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEgressOnlyInternetGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEgressOnlyInternetGatewayCreate, - Read: resourceAwsEgressOnlyInternetGatewayRead, - Delete: resourceAwsEgressOnlyInternetGatewayDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsEgressOnlyInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.CreateEgressOnlyInternetGateway(&ec2.CreateEgressOnlyInternetGatewayInput{ - VpcId: aws.String(d.Get("vpc_id").(string)), - }) - if err != nil { - return fmt.Errorf("Error creating egress internet gateway: %s", err) - } - - d.SetId(*resp.EgressOnlyInternetGateway.EgressOnlyInternetGatewayId) - - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - igRaw, _, err := EIGWStateRefreshFunc(conn, d.Id())() - if igRaw != nil { - return nil - } - if err == nil { - return resource.RetryableError(err) - } else 
{ - return resource.NonRetryableError(err) - } - }) - - if err != nil { - return errwrap.Wrapf("{{err}}", err) - } - - return resourceAwsEgressOnlyInternetGatewayRead(d, meta) -} - -func EIGWStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{ - EgressOnlyInternetGatewayIds: []*string{aws.String(id)}, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidEgressInternetGatewayID.NotFound" { - resp = nil - } else { - log.Printf("[ERROR] Error on EIGWStateRefreshFunc: %s", err) - return nil, "", err - } - } - if len(resp.EgressOnlyInternetGateways) < 1 { - resp = nil - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - ig := resp.EgressOnlyInternetGateways[0] - return ig, "available", nil - } -} - -func resourceAwsEgressOnlyInternetGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{ - EgressOnlyInternetGatewayIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - return fmt.Errorf("Error describing egress internet gateway: %s", err) - } - - found := false - for _, igw := range resp.EgressOnlyInternetGateways { - if *igw.EgressOnlyInternetGatewayId == d.Id() { - found = true - } - } - - if !found { - log.Printf("[Error] Cannot find Egress Only Internet Gateway: %q", d.Id()) - d.SetId("") - return nil - } - - return nil -} - -func resourceAwsEgressOnlyInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteEgressOnlyInternetGateway(&ec2.DeleteEgressOnlyInternetGatewayInput{ - EgressOnlyInternetGatewayId: aws.String(d.Id()), - }) - 
if err != nil { - return fmt.Errorf("Error deleting egress internet gateway: %s", err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_egress_only_internet_gateway_test.go b/builtin/providers/aws/resource_aws_egress_only_internet_gateway_test.go deleted file mode 100644 index d9f57009c..000000000 --- a/builtin/providers/aws/resource_aws_egress_only_internet_gateway_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEgressOnlyInternetGateway_basic(t *testing.T) { - var igw ec2.EgressOnlyInternetGateway - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEgressOnlyInternetGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEgressOnlyInternetGatewayConfig_basic, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSEgressOnlyInternetGatewayExists("aws_egress_only_internet_gateway.foo", &igw), - ), - }, - }, - }) -} - -func testAccCheckAWSEgressOnlyInternetGatewayDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_egress_only_internet_gateway" { - continue - } - - describe, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{ - EgressOnlyInternetGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.EgressOnlyInternetGateways) != 0 && - *describe.EgressOnlyInternetGateways[0].EgressOnlyInternetGatewayId == rs.Primary.ID { - return fmt.Errorf("Egress Only Internet Gateway %q still exists", rs.Primary.ID) - } - } - - return nil - } - - return nil -} - -func testAccCheckAWSEgressOnlyInternetGatewayExists(n string, igw 
*ec2.EgressOnlyInternetGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Egress Only IGW ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{ - EgressOnlyInternetGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.EgressOnlyInternetGateways) == 0 { - return fmt.Errorf("Egress Only IGW not found") - } - - *igw = *resp.EgressOnlyInternetGateways[0] - - return nil - } -} - -const testAccAWSEgressOnlyInternetGatewayConfig_basic = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - tags { - Name = "testAccAWSEgressOnlyInternetGatewayConfig_basic" - } -} - -resource "aws_egress_only_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go deleted file mode 100644 index 1cd136782..000000000 --- a/builtin/providers/aws/resource_aws_eip.go +++ /dev/null @@ -1,326 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "net" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEip() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEipCreate, - Read: resourceAwsEipRead, - Update: resourceAwsEipUpdate, - Delete: resourceAwsEipDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "vpc": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - - 
"instance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "allocation_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "association_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "domain": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "public_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "private_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "associate_with_private_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { - ec2conn := meta.(*AWSClient).ec2conn - - // By default, we're not in a VPC - domainOpt := "" - if v := d.Get("vpc"); v != nil && v.(bool) { - domainOpt = "vpc" - } - - allocOpts := &ec2.AllocateAddressInput{ - Domain: aws.String(domainOpt), - } - - log.Printf("[DEBUG] EIP create configuration: %#v", allocOpts) - allocResp, err := ec2conn.AllocateAddress(allocOpts) - if err != nil { - return fmt.Errorf("Error creating EIP: %s", err) - } - - // The domain tells us if we're in a VPC or not - d.Set("domain", allocResp.Domain) - - // Assign the eips (unique) allocation id for use later - // the EIP api has a conditional unique ID (really), so - // if we're in a VPC we need to save the ID as such, otherwise - // it defaults to using the public IP - log.Printf("[DEBUG] EIP Allocate: %#v", allocResp) - if d.Get("domain").(string) == "vpc" { - d.SetId(*allocResp.AllocationId) - } else { - d.SetId(*allocResp.PublicIp) - } - - log.Printf("[INFO] EIP ID: %s (domain: %v)", d.Id(), *allocResp.Domain) - return resourceAwsEipUpdate(d, meta) -} - -func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { - ec2conn := meta.(*AWSClient).ec2conn - - domain := 
resourceAwsEipDomain(d) - id := d.Id() - - req := &ec2.DescribeAddressesInput{} - - if domain == "vpc" { - req.AllocationIds = []*string{aws.String(id)} - } else { - req.PublicIps = []*string{aws.String(id)} - } - - log.Printf( - "[DEBUG] EIP describe configuration: %s (domain: %s)", - req, domain) - - describeAddresses, err := ec2conn.DescribeAddresses(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "InvalidAllocationID.NotFound" || ec2err.Code() == "InvalidAddress.NotFound") { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving EIP: %s", err) - } - - // Verify AWS returned our EIP - if len(describeAddresses.Addresses) != 1 || - domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id || - *describeAddresses.Addresses[0].PublicIp != id { - if err != nil { - return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses) - } - } - - address := describeAddresses.Addresses[0] - - d.Set("association_id", address.AssociationId) - if address.InstanceId != nil { - d.Set("instance", address.InstanceId) - } else { - d.Set("instance", "") - } - if address.NetworkInterfaceId != nil { - d.Set("network_interface", address.NetworkInterfaceId) - } else { - d.Set("network_interface", "") - } - d.Set("private_ip", address.PrivateIpAddress) - d.Set("public_ip", address.PublicIp) - - // On import (domain never set, which it must've been if we created), - // set the 'vpc' attribute depending on if we're in a VPC. 
- if address.Domain != nil { - d.Set("vpc", *address.Domain == "vpc") - } - - d.Set("domain", address.Domain) - - // Force ID to be an Allocation ID if we're on a VPC - // This allows users to import the EIP based on the IP if they are in a VPC - if *address.Domain == "vpc" && net.ParseIP(id) != nil { - log.Printf("[DEBUG] Re-assigning EIP ID (%s) to it's Allocation ID (%s)", d.Id(), *address.AllocationId) - d.SetId(*address.AllocationId) - } - - return nil -} - -func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { - ec2conn := meta.(*AWSClient).ec2conn - - domain := resourceAwsEipDomain(d) - - // Associate to instance or interface if specified - v_instance, ok_instance := d.GetOk("instance") - v_interface, ok_interface := d.GetOk("network_interface") - - if ok_instance || ok_interface { - instanceId := v_instance.(string) - networkInterfaceId := v_interface.(string) - - assocOpts := &ec2.AssociateAddressInput{ - InstanceId: aws.String(instanceId), - PublicIp: aws.String(d.Id()), - } - - // more unique ID conditionals - if domain == "vpc" { - var privateIpAddress *string - if v := d.Get("associate_with_private_ip").(string); v != "" { - privateIpAddress = aws.String(v) - } - assocOpts = &ec2.AssociateAddressInput{ - NetworkInterfaceId: aws.String(networkInterfaceId), - InstanceId: aws.String(instanceId), - AllocationId: aws.String(d.Id()), - PrivateIpAddress: privateIpAddress, - } - } - - log.Printf("[DEBUG] EIP associate configuration: %s (domain: %s)", assocOpts, domain) - - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := ec2conn.AssociateAddress(assocOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidAllocationID.NotFound" { - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - // Prevent saving instance if association failed - // e.g. 
missing internet gateway in VPC - d.Set("instance", "") - d.Set("network_interface", "") - return fmt.Errorf("Failure associating EIP: %s", err) - } - } - - return resourceAwsEipRead(d, meta) -} - -func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { - ec2conn := meta.(*AWSClient).ec2conn - - if err := resourceAwsEipRead(d, meta); err != nil { - return err - } - if d.Id() == "" { - // This might happen from the read - return nil - } - - // If we are attached to an instance or interface, detach first. - if d.Get("instance").(string) != "" || d.Get("association_id").(string) != "" { - log.Printf("[DEBUG] Disassociating EIP: %s", d.Id()) - var err error - switch resourceAwsEipDomain(d) { - case "vpc": - _, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{ - AssociationId: aws.String(d.Get("association_id").(string)), - }) - case "standard": - _, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{ - PublicIp: aws.String(d.Get("public_ip").(string)), - }) - } - - if err != nil { - // First check if the association ID is not found. If this - // is the case, then it was already disassociated somehow, - // and that is okay. The most commmon reason for this is that - // the instance or ENI it was attached it was destroyed. 
- if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAssociationID.NotFound" { - err = nil - } - } - - if err != nil { - return err - } - } - - domain := resourceAwsEipDomain(d) - return resource.Retry(3*time.Minute, func() *resource.RetryError { - var err error - switch domain { - case "vpc": - log.Printf( - "[DEBUG] EIP release (destroy) address allocation: %v", - d.Id()) - _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ - AllocationId: aws.String(d.Id()), - }) - case "standard": - log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id()) - _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ - PublicIp: aws.String(d.Id()), - }) - } - - if err == nil { - return nil - } - if _, ok := err.(awserr.Error); !ok { - return resource.NonRetryableError(err) - } - - return resource.RetryableError(err) - }) -} - -func resourceAwsEipDomain(d *schema.ResourceData) string { - if v, ok := d.GetOk("domain"); ok { - return v.(string) - } else if strings.Contains(d.Id(), "eipalloc") { - // We have to do this for backwards compatibility since TF 0.1 - // didn't have the "domain" computed attribute. 
- return "vpc" - } - - return "standard" -} diff --git a/builtin/providers/aws/resource_aws_eip_association.go b/builtin/providers/aws/resource_aws_eip_association.go deleted file mode 100644 index b3db8655d..000000000 --- a/builtin/providers/aws/resource_aws_eip_association.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsEipAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEipAssociationCreate, - Read: resourceAwsEipAssociationRead, - Delete: resourceAwsEipAssociationDelete, - - Schema: map[string]*schema.Schema{ - "allocation_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "allow_reassociation": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "network_interface_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "private_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "public_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsEipAssociationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - request := &ec2.AssociateAddressInput{} - - if v, ok := d.GetOk("allocation_id"); ok { - request.AllocationId = aws.String(v.(string)) - } - if v, ok := d.GetOk("allow_reassociation"); ok { - request.AllowReassociation = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("instance_id"); ok { - request.InstanceId = aws.String(v.(string)) - } - if v, ok := 
d.GetOk("network_interface_id"); ok { - request.NetworkInterfaceId = aws.String(v.(string)) - } - if v, ok := d.GetOk("private_ip_address"); ok { - request.PrivateIpAddress = aws.String(v.(string)) - } - if v, ok := d.GetOk("public_ip"); ok { - request.PublicIp = aws.String(v.(string)) - } - - log.Printf("[DEBUG] EIP association configuration: %#v", request) - - resp, err := conn.AssociateAddress(request) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error attaching EIP, message: \"%s\", code: \"%s\"", - awsErr.Message(), awsErr.Code()) - } - return err - } - - d.SetId(*resp.AssociationId) - - return resourceAwsEipAssociationRead(d, meta) -} - -func resourceAwsEipAssociationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - request := &ec2.DescribeAddressesInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("association-id"), - Values: []*string{aws.String(d.Id())}, - }, - }, - } - - response, err := conn.DescribeAddresses(request) - if err != nil { - return fmt.Errorf("Error reading EC2 Elastic IP %s: %#v", d.Get("allocation_id").(string), err) - } - - if response.Addresses == nil || len(response.Addresses) == 0 { - log.Printf("[INFO] EIP Association ID Not Found. 
Refreshing from state") - d.SetId("") - return nil - } - - return readAwsEipAssociation(d, response.Addresses[0]) -} - -func resourceAwsEipAssociationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - opts := &ec2.DisassociateAddressInput{ - AssociationId: aws.String(d.Id()), - } - - _, err := conn.DisassociateAddress(opts) - if err != nil { - return fmt.Errorf("Error deleting Elastic IP association: %s", err) - } - - return nil -} - -func readAwsEipAssociation(d *schema.ResourceData, address *ec2.Address) error { - if err := d.Set("allocation_id", address.AllocationId); err != nil { - return err - } - if err := d.Set("instance_id", address.InstanceId); err != nil { - return err - } - if err := d.Set("network_interface_id", address.NetworkInterfaceId); err != nil { - return err - } - if err := d.Set("private_ip_address", address.PrivateIpAddress); err != nil { - return err - } - if err := d.Set("public_ip", address.PublicIp); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_eip_association_test.go b/builtin/providers/aws/resource_aws_eip_association_test.go deleted file mode 100644 index c9f56f215..000000000 --- a/builtin/providers/aws/resource_aws_eip_association_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEIPAssociation_basic(t *testing.T) { - var a ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPAssociationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEIPAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists( - "aws_eip.bar.0", &a), - testAccCheckAWSEIPAssociationExists( - 
"aws_eip_association.by_allocation_id", &a), - testAccCheckAWSEIPExists( - "aws_eip.bar.1", &a), - testAccCheckAWSEIPAssociationExists( - "aws_eip_association.by_public_ip", &a), - testAccCheckAWSEIPExists( - "aws_eip.bar.2", &a), - testAccCheckAWSEIPAssociationExists( - "aws_eip_association.to_eni", &a), - ), - }, - }, - }) -} - -func TestAccAWSEIPAssociation_disappears(t *testing.T) { - var a ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPAssociationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEIPAssociationConfigDisappears, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists( - "aws_eip.bar", &a), - testAccCheckAWSEIPAssociationExists( - "aws_eip_association.by_allocation_id", &a), - testAccCheckEIPAssociationDisappears(&a), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckEIPAssociationDisappears(address *ec2.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - opts := &ec2.DisassociateAddressInput{ - AssociationId: address.AssociationId, - } - if _, err := conn.DisassociateAddress(opts); err != nil { - return err - } - return nil - } -} - -func testAccCheckAWSEIPAssociationExists(name string, res *ec2.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP Association ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - request := &ec2.DescribeAddressesInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("association-id"), - Values: []*string{res.AssociationId}, - }, - }, - } - describe, err := conn.DescribeAddresses(request) - if err != nil { - return err - } - - if len(describe.Addresses) != 1 || - 
*describe.Addresses[0].AssociationId != *res.AssociationId { - return fmt.Errorf("EIP Association not found") - } - - return nil - } -} - -func testAccCheckAWSEIPAssociationDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_eip_association" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP Association ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - request := &ec2.DescribeAddressesInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("association-id"), - Values: []*string{aws.String(rs.Primary.ID)}, - }, - }, - } - describe, err := conn.DescribeAddresses(request) - if err != nil { - return err - } - - if len(describe.Addresses) > 0 { - return fmt.Errorf("EIP Association still exists") - } - } - return nil -} - -const testAccAWSEIPAssociationConfig = ` -resource "aws_vpc" "main" { - cidr_block = "192.168.0.0/24" - tags { - Name = "testAccAWSEIPAssociationConfig" - } -} -resource "aws_subnet" "sub" { - vpc_id = "${aws_vpc.main.id}" - cidr_block = "192.168.0.0/25" - availability_zone = "us-west-2a" -} -resource "aws_internet_gateway" "igw" { - vpc_id = "${aws_vpc.main.id}" -} -resource "aws_instance" "foo" { - count = 2 - ami = "ami-21f78e11" - availability_zone = "us-west-2a" - instance_type = "t1.micro" - subnet_id = "${aws_subnet.sub.id}" - private_ip = "192.168.0.${count.index+10}" -} -resource "aws_eip" "bar" { - count = 3 - vpc = true -} -resource "aws_eip_association" "by_allocation_id" { - allocation_id = "${aws_eip.bar.0.id}" - instance_id = "${aws_instance.foo.0.id}" - depends_on = ["aws_instance.foo"] -} -resource "aws_eip_association" "by_public_ip" { - public_ip = "${aws_eip.bar.1.public_ip}" - instance_id = "${aws_instance.foo.1.id}" - depends_on = ["aws_instance.foo"] -} -resource "aws_eip_association" "to_eni" { - allocation_id = "${aws_eip.bar.2.id}" - network_interface_id = "${aws_network_interface.baz.id}" -} -resource 
"aws_network_interface" "baz" { - subnet_id = "${aws_subnet.sub.id}" - private_ips = ["192.168.0.50"] - depends_on = ["aws_instance.foo"] - attachment { - instance = "${aws_instance.foo.0.id}" - device_index = 1 - } -} -` - -const testAccAWSEIPAssociationConfigDisappears = ` -resource "aws_vpc" "main" { - cidr_block = "192.168.0.0/24" - tags { - Name = "testAccAWSEIPAssociationConfigDisappears" - } -} -resource "aws_subnet" "sub" { - vpc_id = "${aws_vpc.main.id}" - cidr_block = "192.168.0.0/25" - availability_zone = "us-west-2a" -} -resource "aws_internet_gateway" "igw" { - vpc_id = "${aws_vpc.main.id}" -} -resource "aws_instance" "foo" { - ami = "ami-21f78e11" - availability_zone = "us-west-2a" - instance_type = "t1.micro" - subnet_id = "${aws_subnet.sub.id}" -} -resource "aws_eip" "bar" { - vpc = true -} -resource "aws_eip_association" "by_allocation_id" { - allocation_id = "${aws_eip.bar.id}" - instance_id = "${aws_instance.foo.id}" -}` diff --git a/builtin/providers/aws/resource_aws_eip_test.go b/builtin/providers/aws/resource_aws_eip_test.go deleted file mode 100644 index c5512c522..000000000 --- a/builtin/providers/aws/resource_aws_eip_test.go +++ /dev/null @@ -1,546 +0,0 @@ -package aws - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEIP_importEc2Classic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - resourceName := "aws_eip.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEIPInstanceEc2Classic, - }, - { - ResourceName: resourceName, - ImportState: 
true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSEIP_importVpc(t *testing.T) { - resourceName := "aws_eip.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEIPNetworkInterfaceConfig, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSEIP_basic(t *testing.T) { - var conf ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_eip.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEIPConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &conf), - testAccCheckAWSEIPAttributes(&conf), - ), - }, - }, - }) -} - -func TestAccAWSEIP_instance(t *testing.T) { - var conf ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_eip.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEIPInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &conf), - testAccCheckAWSEIPAttributes(&conf), - ), - }, - - resource.TestStep{ - Config: testAccAWSEIPInstanceConfig2, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &conf), - testAccCheckAWSEIPAttributes(&conf), - ), - }, - }, - }) -} - -func TestAccAWSEIP_network_interface(t *testing.T) { - var conf ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_eip.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccAWSEIPNetworkInterfaceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &conf), - testAccCheckAWSEIPAttributes(&conf), - testAccCheckAWSEIPAssociated(&conf), - ), - }, - }, - }) -} - -func TestAccAWSEIP_twoEIPsOneNetworkInterface(t *testing.T) { - var one, two ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_eip.one", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEIPMultiNetworkInterfaceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.one", &one), - testAccCheckAWSEIPAttributes(&one), - testAccCheckAWSEIPAssociated(&one), - testAccCheckAWSEIPExists("aws_eip.two", &two), - testAccCheckAWSEIPAttributes(&two), - testAccCheckAWSEIPAssociated(&two), - ), - }, - }, - }) -} - -// This test is an expansion of TestAccAWSEIP_instance, by testing the -// associated Private EIPs of two instances -func TestAccAWSEIP_associated_user_private_ip(t *testing.T) { - var one ec2.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_eip.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSEIPInstanceConfig_associated, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &one), - testAccCheckAWSEIPAttributes(&one), - testAccCheckAWSEIPAssociated(&one), - ), - }, - - resource.TestStep{ - Config: testAccAWSEIPInstanceConfig_associated_switch, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEIPExists("aws_eip.bar", &one), - testAccCheckAWSEIPAttributes(&one), - testAccCheckAWSEIPAssociated(&one), - ), - }, - }, - }) -} - -func testAccCheckAWSEIPDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := 
range s.RootModule().Resources { - if rs.Type != "aws_eip" { - continue - } - - if strings.Contains(rs.Primary.ID, "eipalloc") { - req := &ec2.DescribeAddressesInput{ - AllocationIds: []*string{aws.String(rs.Primary.ID)}, - } - describe, err := conn.DescribeAddresses(req) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" || ae.Code() == "InvalidAddress.NotFound" { - continue - } - return err - } - - if len(describe.Addresses) > 0 { - return fmt.Errorf("still exists") - } - } else { - req := &ec2.DescribeAddressesInput{ - PublicIps: []*string{aws.String(rs.Primary.ID)}, - } - describe, err := conn.DescribeAddresses(req) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" || ae.Code() == "InvalidAddress.NotFound" { - continue - } - return err - } - - if len(describe.Addresses) > 0 { - return fmt.Errorf("still exists") - } - } - } - - return nil -} - -func testAccCheckAWSEIPAttributes(conf *ec2.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.PublicIp == "" { - return fmt.Errorf("empty public_ip") - } - - return nil - } -} - -func testAccCheckAWSEIPAssociated(conf *ec2.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - if conf.AssociationId == nil || *conf.AssociationId == "" { - return fmt.Errorf("empty association_id") - } - - return nil - } -} - -func testAccCheckAWSEIPExists(n string, res *ec2.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EIP ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - if strings.Contains(rs.Primary.ID, "eipalloc") { - req := &ec2.DescribeAddressesInput{ - AllocationIds: 
[]*string{aws.String(rs.Primary.ID)}, - } - describe, err := conn.DescribeAddresses(req) - if err != nil { - return err - } - - if len(describe.Addresses) != 1 || - *describe.Addresses[0].AllocationId != rs.Primary.ID { - return fmt.Errorf("EIP not found") - } - *res = *describe.Addresses[0] - - } else { - req := &ec2.DescribeAddressesInput{ - PublicIps: []*string{aws.String(rs.Primary.ID)}, - } - describe, err := conn.DescribeAddresses(req) - if err != nil { - return err - } - - if len(describe.Addresses) != 1 || - *describe.Addresses[0].PublicIp != rs.Primary.ID { - return fmt.Errorf("EIP not found") - } - *res = *describe.Addresses[0] - } - - return nil - } -} - -const testAccAWSEIPConfig = ` -resource "aws_eip" "bar" { -} -` - -const testAccAWSEIPInstanceEc2Classic = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_instance" "foo" { - ami = "ami-5469ae3c" - instance_type = "m1.small" -} - -resource "aws_eip" "bar" { - instance = "${aws_instance.foo.id}" -} -` - -const testAccAWSEIPInstanceConfig = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" -} - -resource "aws_eip" "bar" { - instance = "${aws_instance.foo.id}" -} -` - -const testAccAWSEIPInstanceConfig2 = ` -resource "aws_instance" "bar" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" -} - -resource "aws_eip" "bar" { - instance = "${aws_instance.bar.id}" -} -` - -const testAccAWSEIPInstanceConfig_associated = ` -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "default" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.default.id}" - - tags { - Name = "main" - } -} - -resource "aws_subnet" "tf_test_subnet" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.0.0/24" - map_public_ip_on_launch = true - - depends_on = ["aws_internet_gateway.gw"] - - tags { - Name = "tf_test_subnet" - } -} - -resource "aws_instance" "foo" { - # us-west-2 - 
ami = "ami-5189a661" - instance_type = "t2.micro" - - private_ip = "10.0.0.12" - subnet_id = "${aws_subnet.tf_test_subnet.id}" - - tags { - Name = "foo instance" - } -} - -resource "aws_instance" "bar" { - # us-west-2 - - ami = "ami-5189a661" - - instance_type = "t2.micro" - - private_ip = "10.0.0.19" - subnet_id = "${aws_subnet.tf_test_subnet.id}" - - tags { - Name = "bar instance" - } -} - -resource "aws_eip" "bar" { - vpc = true - - instance = "${aws_instance.bar.id}" - associate_with_private_ip = "10.0.0.19" -} -` -const testAccAWSEIPInstanceConfig_associated_switch = ` -resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "default" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.default.id}" - - tags { - Name = "main" - } -} - -resource "aws_subnet" "tf_test_subnet" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.0.0/24" - map_public_ip_on_launch = true - - depends_on = ["aws_internet_gateway.gw"] - - tags { - Name = "tf_test_subnet" - } -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-5189a661" - instance_type = "t2.micro" - - private_ip = "10.0.0.12" - subnet_id = "${aws_subnet.tf_test_subnet.id}" - - tags { - Name = "foo instance" - } -} - -resource "aws_instance" "bar" { - # us-west-2 - - ami = "ami-5189a661" - - instance_type = "t2.micro" - - private_ip = "10.0.0.19" - subnet_id = "${aws_subnet.tf_test_subnet.id}" - - tags { - Name = "bar instance" - } -} - -resource "aws_eip" "bar" { - vpc = true - - instance = "${aws_instance.foo.id}" - associate_with_private_ip = "10.0.0.12" -} -` - -const testAccAWSEIPInstanceConfig_associated_update = ` -resource "aws_instance" "bar" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" -} - -resource "aws_eip" "bar" { - instance = "${aws_instance.bar.id}" -} -` - -const testAccAWSEIPNetworkInterfaceConfig = ` -resource "aws_vpc" "bar" { - cidr_block = "10.0.0.0/24" - tags { - Name = 
"testAccAWSEIPNetworkInterfaceConfig" - } -} -resource "aws_internet_gateway" "bar" { - vpc_id = "${aws_vpc.bar.id}" -} -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.bar.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.0.0/24" -} -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.bar.id}" - private_ips = ["10.0.0.10"] - security_groups = [ "${aws_vpc.bar.default_security_group_id}" ] -} -resource "aws_eip" "bar" { - vpc = "true" - network_interface = "${aws_network_interface.bar.id}" -} -` - -const testAccAWSEIPMultiNetworkInterfaceConfig = ` -resource "aws_vpc" "bar" { - cidr_block = "10.0.0.0/24" - tags { - Name = "testAccAWSEIPMultiNetworkInterfaceConfig" - } -} - -resource "aws_internet_gateway" "bar" { - vpc_id = "${aws_vpc.bar.id}" -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.bar.id}" - availability_zone = "us-west-2a" - cidr_block = "10.0.0.0/24" -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.bar.id}" - private_ips = ["10.0.0.10", "10.0.0.11"] - security_groups = ["${aws_vpc.bar.default_security_group_id}"] -} - -resource "aws_eip" "one" { - vpc = "true" - network_interface = "${aws_network_interface.bar.id}" - associate_with_private_ip = "10.0.0.10" -} - -resource "aws_eip" "two" { - vpc = "true" - network_interface = "${aws_network_interface.bar.id}" - associate_with_private_ip = "10.0.0.11" -} -` diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go deleted file mode 100644 index 212332526..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go +++ /dev/null @@ -1,152 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/resource" 
-) - -func resourceAwsElasticBeanstalkApplication() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticBeanstalkApplicationCreate, - Read: resourceAwsElasticBeanstalkApplicationRead, - Update: resourceAwsElasticBeanstalkApplicationUpdate, - Delete: resourceAwsElasticBeanstalkApplicationDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - }, - } -} - -func resourceAwsElasticBeanstalkApplicationCreate(d *schema.ResourceData, meta interface{}) error { - beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn - - // Get the name and description - name := d.Get("name").(string) - description := d.Get("description").(string) - - log.Printf("[DEBUG] Elastic Beanstalk application create: %s, description: %s", name, description) - - req := &elasticbeanstalk.CreateApplicationInput{ - ApplicationName: aws.String(name), - Description: aws.String(description), - } - - _, err := beanstalkConn.CreateApplication(req) - if err != nil { - return err - } - - d.SetId(name) - - return resourceAwsElasticBeanstalkApplicationRead(d, meta) -} - -func resourceAwsElasticBeanstalkApplicationUpdate(d *schema.ResourceData, meta interface{}) error { - beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn - - if d.HasChange("description") { - if err := resourceAwsElasticBeanstalkApplicationDescriptionUpdate(beanstalkConn, d); err != nil { - return err - } - } - - return resourceAwsElasticBeanstalkApplicationRead(d, meta) -} - -func resourceAwsElasticBeanstalkApplicationDescriptionUpdate(beanstalkConn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error { - name := d.Get("name").(string) - description := d.Get("description").(string) - - log.Printf("[DEBUG] Elastic Beanstalk application: 
%s, update description: %s", name, description) - - _, err := beanstalkConn.UpdateApplication(&elasticbeanstalk.UpdateApplicationInput{ - ApplicationName: aws.String(name), - Description: aws.String(description), - }) - - return err -} - -func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta interface{}) error { - a, err := getBeanstalkApplication(d, meta) - if err != nil { - return err - } - if a == nil { - return err - } - - d.Set("name", a.ApplicationName) - d.Set("description", a.Description) - return nil -} - -func resourceAwsElasticBeanstalkApplicationDelete(d *schema.ResourceData, meta interface{}) error { - beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn - - a, err := getBeanstalkApplication(d, meta) - if err != nil { - return err - } - _, err = beanstalkConn.DeleteApplication(&elasticbeanstalk.DeleteApplicationInput{ - ApplicationName: aws.String(d.Id()), - }) - - return resource.Retry(10*time.Second, func() *resource.RetryError { - if a, _ = getBeanstalkApplication(d, meta); a != nil { - return resource.RetryableError( - fmt.Errorf("Beanstalk Application still exists")) - } - return nil - }) -} - -func getBeanstalkApplication( - d *schema.ResourceData, - meta interface{}) (*elasticbeanstalk.ApplicationDescription, error) { - conn := meta.(*AWSClient).elasticbeanstalkconn - - resp, err := conn.DescribeApplications(&elasticbeanstalk.DescribeApplicationsInput{ - ApplicationNames: []*string{aws.String(d.Id())}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() != "InvalidBeanstalkAppID.NotFound" { - log.Printf("[Err] Error reading Elastic Beanstalk Application (%s): Application not found", d.Id()) - d.SetId("") - return nil, nil - } - return nil, err - } - - switch { - case len(resp.Applications) > 1: - return nil, fmt.Errorf("Error %d Applications matched, expected 1", len(resp.Applications)) - case len(resp.Applications) == 0: - d.SetId("") - return nil, nil - default: - return 
resp.Applications[0], nil - } -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_application_test.go deleted file mode 100644 index edcf4c174..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSBeanstalkApp_basic(t *testing.T) { - var app elasticbeanstalk.ApplicationDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkAppDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBeanstalkAppConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkAppExists("aws_elastic_beanstalk_application.tftest", &app), - ), - }, - }, - }) -} - -func testAccCheckBeanstalkAppDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elastic_beanstalk_application" { - continue - } - - // Try to find the application - DescribeBeanstalkAppOpts := &elasticbeanstalk.DescribeApplicationsInput{ - ApplicationNames: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeApplications(DescribeBeanstalkAppOpts) - if err == nil { - if len(resp.Applications) > 0 { - return fmt.Errorf("Elastic Beanstalk Application still exists.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidBeanstalkAppID.NotFound" { - return err - } - } - - return nil -} - -func 
testAccCheckBeanstalkAppExists(n string, app *elasticbeanstalk.ApplicationDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk app ID is not set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - DescribeBeanstalkAppOpts := &elasticbeanstalk.DescribeApplicationsInput{ - ApplicationNames: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeApplications(DescribeBeanstalkAppOpts) - if err != nil { - return err - } - if len(resp.Applications) == 0 { - return fmt.Errorf("Elastic Beanstalk Application not found.") - } - - *app = *resp.Applications[0] - - return nil - } -} - -const testAccBeanstalkAppConfig = ` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name" - description = "tf-test-desc" -} -` diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version.go deleted file mode 100644 index 9125225a3..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version.go +++ /dev/null @@ -1,202 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "time" -) - -func resourceAwsElasticBeanstalkApplicationVersion() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticBeanstalkApplicationVersionCreate, - Read: resourceAwsElasticBeanstalkApplicationVersionRead, - Update: resourceAwsElasticBeanstalkApplicationVersionUpdate, - Delete: resourceAwsElasticBeanstalkApplicationVersionDelete, - - Schema: map[string]*schema.Schema{ - "application": &schema.Schema{ - Type: schema.TypeString, - Required: true, 
- ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "force_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - } -} - -func resourceAwsElasticBeanstalkApplicationVersionCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - application := d.Get("application").(string) - description := d.Get("description").(string) - bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - name := d.Get("name").(string) - - s3Location := elasticbeanstalk.S3Location{ - S3Bucket: aws.String(bucket), - S3Key: aws.String(key), - } - - createOpts := elasticbeanstalk.CreateApplicationVersionInput{ - ApplicationName: aws.String(application), - Description: aws.String(description), - SourceBundle: &s3Location, - VersionLabel: aws.String(name), - } - - log.Printf("[DEBUG] Elastic Beanstalk Application Version create opts: %s", createOpts) - _, err := conn.CreateApplicationVersion(&createOpts) - if err != nil { - return err - } - - d.SetId(name) - log.Printf("[INFO] Elastic Beanstalk Application Version Label: %s", name) - - return resourceAwsElasticBeanstalkApplicationVersionRead(d, meta) -} - -func resourceAwsElasticBeanstalkApplicationVersionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - resp, err := conn.DescribeApplicationVersions(&elasticbeanstalk.DescribeApplicationVersionsInput{ - VersionLabels: []*string{aws.String(d.Id())}, - }) - - if err != nil { - return err - } - - if len(resp.ApplicationVersions) == 0 { - log.Printf("[DEBUG] Elastic Beanstalk application version read: 
application version not found") - - d.SetId("") - - return nil - } else if len(resp.ApplicationVersions) != 1 { - return fmt.Errorf("Error reading application version properties: found %d application versions, expected 1", len(resp.ApplicationVersions)) - } - - if err := d.Set("description", resp.ApplicationVersions[0].Description); err != nil { - return err - } - - return nil -} - -func resourceAwsElasticBeanstalkApplicationVersionUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - if d.HasChange("description") { - if err := resourceAwsElasticBeanstalkApplicationVersionDescriptionUpdate(conn, d); err != nil { - return err - } - } - - return resourceAwsElasticBeanstalkApplicationVersionRead(d, meta) - -} - -func resourceAwsElasticBeanstalkApplicationVersionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - application := d.Get("application").(string) - name := d.Id() - - if d.Get("force_delete").(bool) == false { - environments, err := versionUsedBy(application, name, conn) - if err != nil { - return err - } - - if len(environments) > 1 { - return fmt.Errorf("Unable to delete Application Version, it is currently in use by the following environments: %s.", environments) - } - } - _, err := conn.DeleteApplicationVersion(&elasticbeanstalk.DeleteApplicationVersionInput{ - ApplicationName: aws.String(application), - VersionLabel: aws.String(name), - DeleteSourceBundle: aws.Bool(false), - }) - - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - // application version is pending delete, or no longer exists. 
- if awserr.Code() == "InvalidParameterValue" { - d.SetId("") - return nil - } - } - return err - } - - d.SetId("") - return nil -} - -func resourceAwsElasticBeanstalkApplicationVersionDescriptionUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error { - application := d.Get("application").(string) - description := d.Get("description").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Elastic Beanstalk application version: %s, update description: %s", name, description) - - _, err := conn.UpdateApplicationVersion(&elasticbeanstalk.UpdateApplicationVersionInput{ - ApplicationName: aws.String(application), - Description: aws.String(description), - VersionLabel: aws.String(name), - }) - - return err -} - -func versionUsedBy(applicationName, versionLabel string, conn *elasticbeanstalk.ElasticBeanstalk) ([]string, error) { - now := time.Now() - resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ - ApplicationName: aws.String(applicationName), - VersionLabel: aws.String(versionLabel), - IncludeDeleted: aws.Bool(true), - IncludedDeletedBackTo: aws.Time(now.Add(-1 * time.Minute)), - }) - - if err != nil { - return nil, err - } - - var environmentIDs []string - for _, environment := range resp.Environments { - environmentIDs = append(environmentIDs, *environment.EnvironmentId) - } - - return environmentIDs, nil -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version_test.go deleted file mode 100644 index 9735f545e..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_application_version_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSBeanstalkAppVersion_basic(t *testing.T) { - - var appVersion elasticbeanstalk.ApplicationVersionDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckApplicationVersionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBeanstalkApplicationVersionConfig(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationVersionExists("aws_elastic_beanstalk_application_version.default", &appVersion), - ), - }, - }, - }) -} - -func testAccCheckApplicationVersionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elastic_beanstalk_application_version" { - continue - } - - describeApplicationVersionOpts := &elasticbeanstalk.DescribeApplicationVersionsInput{ - VersionLabels: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeApplicationVersions(describeApplicationVersionOpts) - if err == nil { - if len(resp.ApplicationVersions) > 0 { - return fmt.Errorf("Elastic Beanstalk Application Verson still exists.") - } - - return nil - } - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidParameterValue" { - return err - } - } - - return nil -} - -func testAccCheckApplicationVersionExists(n string, app *elasticbeanstalk.ApplicationVersionDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk Application Version is not set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - describeApplicationVersionOpts := 
&elasticbeanstalk.DescribeApplicationVersionsInput{ - VersionLabels: []*string{aws.String(rs.Primary.ID)}, - } - - log.Printf("[DEBUG] Elastic Beanstalk Application Version TEST describe opts: %s", describeApplicationVersionOpts) - - resp, err := conn.DescribeApplicationVersions(describeApplicationVersionOpts) - if err != nil { - return err - } - if len(resp.ApplicationVersions) == 0 { - return fmt.Errorf("Elastic Beanstalk Application Version not found.") - } - - *app = *resp.ApplicationVersions[0] - - return nil - } -} - -func testAccBeanstalkApplicationVersionConfig(randInt int) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "default" { - bucket = "tftest.applicationversion.bucket-%d" -} - -resource "aws_s3_bucket_object" "default" { - bucket = "${aws_s3_bucket.default.id}" - key = "beanstalk/python-v1.zip" - source = "test-fixtures/python-v1.zip" -} - -resource "aws_elastic_beanstalk_application" "default" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_application_version" "default" { - application = "${aws_elastic_beanstalk_application.default.name}" - name = "tf-test-version-label" - bucket = "${aws_s3_bucket.default.id}" - key = "${aws_s3_bucket_object.default.id}" - } - `, randInt, randInt) -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template.go deleted file mode 100644 index 346fcd5ff..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template.go +++ /dev/null @@ -1,240 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" -) - -func resourceAwsElasticBeanstalkConfigurationTemplate() *schema.Resource { - return &schema.Resource{ - Create: 
resourceAwsElasticBeanstalkConfigurationTemplateCreate, - Read: resourceAwsElasticBeanstalkConfigurationTemplateRead, - Update: resourceAwsElasticBeanstalkConfigurationTemplateUpdate, - Delete: resourceAwsElasticBeanstalkConfigurationTemplateDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "application": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "setting": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: resourceAwsElasticBeanstalkOptionSetting(), - Set: optionSettingValueHash, - }, - "solution_stack_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsElasticBeanstalkConfigurationTemplateCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - // Get the relevant properties - name := d.Get("name").(string) - appName := d.Get("application").(string) - - optionSettings := gatherOptionSettings(d) - - opts := elasticbeanstalk.CreateConfigurationTemplateInput{ - ApplicationName: aws.String(appName), - TemplateName: aws.String(name), - OptionSettings: optionSettings, - } - - if attr, ok := d.GetOk("description"); ok { - opts.Description = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("environment_id"); ok { - opts.EnvironmentId = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("solution_stack_name"); ok { - opts.SolutionStackName = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] Elastic Beanstalk configuration template create opts: %s", opts) - if _, err := conn.CreateConfigurationTemplate(&opts); err != nil { - return fmt.Errorf("Error creating Elastic Beanstalk 
configuration template: %s", err) - } - - d.SetId(name) - - return resourceAwsElasticBeanstalkConfigurationTemplateRead(d, meta) -} - -func resourceAwsElasticBeanstalkConfigurationTemplateRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - log.Printf("[DEBUG] Elastic Beanstalk configuration template read: %s", d.Get("name").(string)) - - resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ - TemplateName: aws.String(d.Id()), - ApplicationName: aws.String(d.Get("application").(string)), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "No Configuration Template named") { - log.Printf("[WARN] No Configuration Template named (%s) found", d.Id()) - d.SetId("") - return nil - } - } - return err - } - - if len(resp.ConfigurationSettings) != 1 { - log.Printf("[DEBUG] Elastic Beanstalk unexpected describe configuration template response: %+v", resp) - return fmt.Errorf("Error reading application properties: found %d applications, expected 1", len(resp.ConfigurationSettings)) - } - - d.Set("description", resp.ConfigurationSettings[0].Description) - return nil -} - -func resourceAwsElasticBeanstalkConfigurationTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - log.Printf("[DEBUG] Elastic Beanstalk configuration template update: %s", d.Get("name").(string)) - - if d.HasChange("description") { - if err := resourceAwsElasticBeanstalkConfigurationTemplateDescriptionUpdate(conn, d); err != nil { - return err - } - } - - if d.HasChange("setting") { - if err := resourceAwsElasticBeanstalkConfigurationTemplateOptionSettingsUpdate(conn, d); err != nil { - return err - } - } - - return resourceAwsElasticBeanstalkConfigurationTemplateRead(d, meta) -} - -func 
resourceAwsElasticBeanstalkConfigurationTemplateDescriptionUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error { - _, err := conn.UpdateConfigurationTemplate(&elasticbeanstalk.UpdateConfigurationTemplateInput{ - ApplicationName: aws.String(d.Get("application").(string)), - TemplateName: aws.String(d.Get("name").(string)), - Description: aws.String(d.Get("description").(string)), - }) - - return err -} - -func resourceAwsElasticBeanstalkConfigurationTemplateOptionSettingsUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error { - if d.HasChange("setting") { - _, err := conn.ValidateConfigurationSettings(&elasticbeanstalk.ValidateConfigurationSettingsInput{ - ApplicationName: aws.String(d.Get("application").(string)), - TemplateName: aws.String(d.Get("name").(string)), - OptionSettings: gatherOptionSettings(d), - }) - if err != nil { - return err - } - - o, n := d.GetChange("setting") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - rm := extractOptionSettings(os.Difference(ns)) - add := extractOptionSettings(ns.Difference(os)) - - // Additions and removals of options are done in a single API call, so we - // can't do our normal "remove these" and then later "add these", re-adding - // any updated settings. - // Because of this, we need to remove any settings in the "removable" - // settings that are also found in the "add" settings, otherwise they - // conflict. 
Here we loop through all the initial removables from the set - // difference, and we build up a slice of settings not found in the "add" - // set - var remove []*elasticbeanstalk.ConfigurationOptionSetting - for _, r := range rm { - for _, a := range add { - if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName { - continue - } - remove = append(remove, r) - } - } - - req := &elasticbeanstalk.UpdateConfigurationTemplateInput{ - ApplicationName: aws.String(d.Get("application").(string)), - TemplateName: aws.String(d.Get("name").(string)), - OptionSettings: add, - } - - for _, elem := range remove { - req.OptionsToRemove = append(req.OptionsToRemove, &elasticbeanstalk.OptionSpecification{ - Namespace: elem.Namespace, - OptionName: elem.OptionName, - }) - } - - log.Printf("[DEBUG] Update Configuration Template request: %s", req) - if _, err := conn.UpdateConfigurationTemplate(req); err != nil { - return err - } - } - - return nil -} - -func resourceAwsElasticBeanstalkConfigurationTemplateDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - application := d.Get("application").(string) - - _, err := conn.DeleteConfigurationTemplate(&elasticbeanstalk.DeleteConfigurationTemplateInput{ - ApplicationName: aws.String(application), - TemplateName: aws.String(d.Id()), - }) - - return err -} - -func gatherOptionSettings(d *schema.ResourceData) []*elasticbeanstalk.ConfigurationOptionSetting { - optionSettingsSet, ok := d.Get("setting").(*schema.Set) - if !ok || optionSettingsSet == nil { - optionSettingsSet = new(schema.Set) - } - - return extractOptionSettings(optionSettingsSet) -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template_test.go deleted file mode 100644 index edb9c55f9..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_configuration_template_test.go +++ 
/dev/null @@ -1,225 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSBeanstalkConfigurationTemplate_basic(t *testing.T) { - var config elasticbeanstalk.ConfigurationSettingsDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkConfigurationTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBeanstalkConfigurationTemplateConfig(acctest.RandString(5)), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkConfigurationTemplateExists("aws_elastic_beanstalk_configuration_template.tf_template", &config), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkConfigurationTemplate_VPC(t *testing.T) { - var config elasticbeanstalk.ConfigurationSettingsDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkConfigurationTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBeanstalkConfigurationTemplateConfig_VPC(acctest.RandString(5)), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkConfigurationTemplateExists("aws_elastic_beanstalk_configuration_template.tf_template", &config), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkConfigurationTemplate_Setting(t *testing.T) { - var config elasticbeanstalk.ConfigurationSettingsDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkConfigurationTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccBeanstalkConfigurationTemplateConfig_Setting(acctest.RandString(5)), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkConfigurationTemplateExists("aws_elastic_beanstalk_configuration_template.tf_template", &config), - resource.TestCheckResourceAttr( - "aws_elastic_beanstalk_configuration_template.tf_template", "setting.#", "1"), - resource.TestCheckResourceAttr( - "aws_elastic_beanstalk_configuration_template.tf_template", "setting.4112217815.value", "m1.small"), - ), - }, - }, - }) -} - -func testAccCheckBeanstalkConfigurationTemplateDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elastic_beanstalk_configuration_template" { - continue - } - - // Try to find the Configuration Template - opts := elasticbeanstalk.DescribeConfigurationSettingsInput{ - TemplateName: aws.String(rs.Primary.ID), - ApplicationName: aws.String(rs.Primary.Attributes["application"]), - } - resp, err := conn.DescribeConfigurationSettings(&opts) - if err == nil { - if len(resp.ConfigurationSettings) > 0 { - return fmt.Errorf("Elastic Beanstalk Application still exists.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - - switch { - case ec2err.Code() == "InvalidBeanstalkConfigurationTemplateID.NotFound": - return nil - // This error can be returned when the beanstalk application no longer exists. 
- case ec2err.Code() == "InvalidParameterValue": - return nil - default: - return err - } - } - - return nil -} - -func testAccCheckBeanstalkConfigurationTemplateExists(n string, config *elasticbeanstalk.ConfigurationSettingsDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk config ID is not set") - } - - opts := elasticbeanstalk.DescribeConfigurationSettingsInput{ - TemplateName: aws.String(rs.Primary.ID), - ApplicationName: aws.String(rs.Primary.Attributes["application"]), - } - resp, err := conn.DescribeConfigurationSettings(&opts) - if err != nil { - return err - } - if len(resp.ConfigurationSettings) == 0 { - return fmt.Errorf("Elastic Beanstalk Configurations not found.") - } - - *config = *resp.ConfigurationSettings[0] - - return nil - } -} - -func testAccBeanstalkConfigurationTemplateConfig(r string) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-%s" - description = "tf-test-desc-%s" -} - -resource "aws_elastic_beanstalk_configuration_template" "tf_template" { - name = "tf-test-template-config" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" -}`, r, r) -} - -func testAccBeanstalkConfigurationTemplateConfig_VPC(name string) string { - return fmt.Sprintf(` -resource "aws_vpc" "tf_b_test" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "beanstalk_crash" - } -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.tf_b_test.id}" - cidr_block = "10.0.0.0/24" - - tags { - Name = "subnet-count-test" - } -} - -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-%s" - description = "tf-test-desc" -} - -resource 
"aws_elastic_beanstalk_configuration_template" "tf_template" { - name = "tf-test-%s" - application = "${aws_elastic_beanstalk_application.tftest.name}" - - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:ec2:vpc" - name = "VPCId" - value = "${aws_vpc.tf_b_test.id}" - } - - setting { - namespace = "aws:ec2:vpc" - name = "Subnets" - value = "${aws_subnet.main.id}" - } -} -`, name, name) -} - -func testAccBeanstalkConfigurationTemplateConfig_Setting(name string) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-%s" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_configuration_template" "tf_template" { - name = "tf-test-%s" - application = "${aws_elastic_beanstalk_application.tftest.name}" - - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:autoscaling:launchconfiguration" - name = "InstanceType" - value = "m1.small" - } - -} -`, name, name) -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go deleted file mode 100644 index fa1e2562b..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ /dev/null @@ -1,913 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "sort" - "strings" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" -) - -func resourceAwsElasticBeanstalkOptionSetting() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "namespace": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "resource": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticBeanstalkEnvironmentCreate, - Read: resourceAwsElasticBeanstalkEnvironmentRead, - Update: resourceAwsElasticBeanstalkEnvironmentUpdate, - Delete: resourceAwsElasticBeanstalkEnvironmentDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsElasticBeanstalkEnvironmentMigrateState, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "application": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "version_label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "cname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "cname_prefix": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "tier": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "WebServer", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - switch value { - case - "Worker", - "WebServer": - return - } - errors = append(errors, fmt.Errorf("%s is not a valid tier. 
Valid options are WebServer or Worker", value)) - return - }, - ForceNew: true, - }, - "setting": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: resourceAwsElasticBeanstalkOptionSetting(), - Set: optionSettingValueHash, - }, - "all_settings": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: resourceAwsElasticBeanstalkOptionSetting(), - Set: optionSettingValueHash, - }, - "solution_stack_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"template_name"}, - }, - "template_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "wait_for_ready_timeout": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "20m", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - duration, err := time.ParseDuration(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as a duration: %s", k, err)) - } - if duration < 0 { - errors = append(errors, fmt.Errorf( - "%q must be greater than zero", k)) - } - return - }, - }, - "poll_interval": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - duration, err := time.ParseDuration(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as a duration: %s", k, err)) - } - if duration < 10*time.Second || duration > 60*time.Second { - errors = append(errors, fmt.Errorf( - "%q must be between 10s and 180s", k)) - } - return - }, - }, - "autoscaling_groups": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "instances": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "launch_configurations": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: 
&schema.Schema{Type: schema.TypeString}, - }, - "load_balancers": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "queues": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "triggers": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - // Get values from config - name := d.Get("name").(string) - cnamePrefix := d.Get("cname_prefix").(string) - tier := d.Get("tier").(string) - app := d.Get("application").(string) - desc := d.Get("description").(string) - version := d.Get("version_label").(string) - settings := d.Get("setting").(*schema.Set) - solutionStack := d.Get("solution_stack_name").(string) - templateName := d.Get("template_name").(string) - - // TODO set tags - // Note: at time of writing, you cannot view or edit Tags after creation - // d.Set("tags", tagsToMap(instance.Tags)) - createOpts := elasticbeanstalk.CreateEnvironmentInput{ - EnvironmentName: aws.String(name), - ApplicationName: aws.String(app), - OptionSettings: extractOptionSettings(settings), - Tags: tagsFromMapBeanstalk(d.Get("tags").(map[string]interface{})), - } - - if desc != "" { - createOpts.Description = aws.String(desc) - } - - if cnamePrefix != "" { - if tier != "WebServer" { - return fmt.Errorf("Cannot set cname_prefix for tier: %s.", tier) - } - createOpts.CNAMEPrefix = aws.String(cnamePrefix) - } - - if tier != "" { - var tierType string - - switch tier { - case "WebServer": - tierType = "Standard" - case "Worker": - tierType = "SQS/HTTP" - } - environmentTier := elasticbeanstalk.EnvironmentTier{ - Name: aws.String(tier), - Type: aws.String(tierType), - } - createOpts.Tier = &environmentTier - } - - if 
solutionStack != "" { - createOpts.SolutionStackName = aws.String(solutionStack) - } - - if templateName != "" { - createOpts.TemplateName = aws.String(templateName) - } - - if version != "" { - createOpts.VersionLabel = aws.String(version) - } - - // Get the current time to filter getBeanstalkEnvironmentErrors messages - t := time.Now() - log.Printf("[DEBUG] Elastic Beanstalk Environment create opts: %s", createOpts) - resp, err := conn.CreateEnvironment(&createOpts) - if err != nil { - return err - } - - // Assign the application name as the resource ID - d.SetId(*resp.EnvironmentId) - - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } - - pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) - if err != nil { - pollInterval = 0 - log.Printf("[WARN] Error parsing poll_interval, using default backoff") - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"Launching", "Updating"}, - Target: []string{"Ready"}, - Refresh: environmentStateRefreshFunc(conn, d.Id(), t), - Timeout: waitForReadyTimeOut, - Delay: 10 * time.Second, - PollInterval: pollInterval, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", - d.Id(), err) - } - - envErrors, err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) - if err != nil { - return err - } - if envErrors != nil { - return envErrors - } - - return resourceAwsElasticBeanstalkEnvironmentRead(d, meta) -} - -func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - envId := d.Id() - - var hasChange bool - - updateOpts := elasticbeanstalk.UpdateEnvironmentInput{ - EnvironmentId: aws.String(envId), - } - - if d.HasChange("description") { - hasChange = true - updateOpts.Description = 
aws.String(d.Get("description").(string)) - } - - if d.HasChange("solution_stack_name") { - hasChange = true - if v, ok := d.GetOk("solution_stack_name"); ok { - updateOpts.SolutionStackName = aws.String(v.(string)) - } - } - - if d.HasChange("setting") { - hasChange = true - o, n := d.GetChange("setting") - if o == nil { - o = &schema.Set{F: optionSettingValueHash} - } - if n == nil { - n = &schema.Set{F: optionSettingValueHash} - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - rm := extractOptionSettings(os.Difference(ns)) - add := extractOptionSettings(ns.Difference(os)) - - // Additions and removals of options are done in a single API call, so we - // can't do our normal "remove these" and then later "add these", re-adding - // any updated settings. - // Because of this, we need to exclude any settings in the "removable" - // settings that are also found in the "add" settings, otherwise they - // conflict. Here we loop through all the initial removables from the set - // difference, and create a new slice `remove` that contains those settings - // found in `rm` but not in `add` - var remove []*elasticbeanstalk.ConfigurationOptionSetting - if len(add) > 0 { - for _, r := range rm { - var update = false - for _, a := range add { - // ResourceNames are optional. Some defaults come with it, some do - // not. 
We need to guard against nil/empty in state as well as - // nil/empty from the API - if a.ResourceName != nil { - if r.ResourceName == nil { - continue - } - if *r.ResourceName != *a.ResourceName { - continue - } - } - if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName { - log.Printf("[DEBUG] Updating Beanstalk setting (%s::%s) \"%s\" => \"%s\"", *a.Namespace, *a.OptionName, *r.Value, *a.Value) - update = true - break - } - } - // Only remove options that are not updates - if !update { - remove = append(remove, r) - } - } - } else { - remove = rm - } - - for _, elem := range remove { - updateOpts.OptionsToRemove = append(updateOpts.OptionsToRemove, &elasticbeanstalk.OptionSpecification{ - Namespace: elem.Namespace, - OptionName: elem.OptionName, - }) - } - - updateOpts.OptionSettings = add - } - - if d.HasChange("template_name") { - hasChange = true - if v, ok := d.GetOk("template_name"); ok { - updateOpts.TemplateName = aws.String(v.(string)) - } - } - - if d.HasChange("version_label") { - hasChange = true - updateOpts.VersionLabel = aws.String(d.Get("version_label").(string)) - } - - if hasChange { - // Get the current time to filter getBeanstalkEnvironmentErrors messages - t := time.Now() - log.Printf("[DEBUG] Elastic Beanstalk Environment update opts: %s", updateOpts) - _, err := conn.UpdateEnvironment(&updateOpts) - if err != nil { - return err - } - - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } - pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) - if err != nil { - pollInterval = 0 - log.Printf("[WARN] Error parsing poll_interval, using default backoff") - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"Launching", "Updating"}, - Target: []string{"Ready"}, - Refresh: environmentStateRefreshFunc(conn, d.Id(), t), - Timeout: waitForReadyTimeOut, - Delay: 10 * time.Second, - PollInterval: pollInterval, - MinTimeout: 3 * 
time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", - d.Id(), err) - } - - envErrors, err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) - if err != nil { - return err - } - if envErrors != nil { - return envErrors - } - } - - return resourceAwsElasticBeanstalkEnvironmentRead(d, meta) -} - -func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - envId := d.Id() - - log.Printf("[DEBUG] Elastic Beanstalk environment read %s: id %s", d.Get("name").(string), d.Id()) - - resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ - EnvironmentIds: []*string{aws.String(envId)}, - }) - - if err != nil { - return err - } - - if len(resp.Environments) == 0 { - log.Printf("[DEBUG] Elastic Beanstalk environment properties: could not find environment %s", d.Id()) - - d.SetId("") - return nil - } else if len(resp.Environments) != 1 { - return fmt.Errorf("Error reading application properties: found %d environments, expected 1", len(resp.Environments)) - } - - env := resp.Environments[0] - - if *env.Status == "Terminated" { - log.Printf("[DEBUG] Elastic Beanstalk environment %s was terminated", d.Id()) - - d.SetId("") - return nil - } - - resources, err := conn.DescribeEnvironmentResources(&elasticbeanstalk.DescribeEnvironmentResourcesInput{ - EnvironmentId: aws.String(envId), - }) - - if err != nil { - return err - } - - if err := d.Set("name", env.EnvironmentName); err != nil { - return err - } - - if err := d.Set("application", env.ApplicationName); err != nil { - return err - } - - if err := d.Set("description", env.Description); err != nil { - return err - } - - if err := d.Set("cname", env.CNAME); err != nil { - return err - } - - if err := d.Set("version_label", env.VersionLabel); err != nil { - return err - } - - if err := 
d.Set("tier", *env.Tier.Name); err != nil { - return err - } - - if env.CNAME != nil { - beanstalkCnamePrefixRegexp := regexp.MustCompile(`(^[^.]+)(.\w{2}-\w{4,9}-\d)?.elasticbeanstalk.com$`) - var cnamePrefix string - cnamePrefixMatch := beanstalkCnamePrefixRegexp.FindStringSubmatch(*env.CNAME) - - if cnamePrefixMatch == nil { - cnamePrefix = "" - } else { - cnamePrefix = cnamePrefixMatch[1] - } - - if err := d.Set("cname_prefix", cnamePrefix); err != nil { - return err - } - } else { - if err := d.Set("cname_prefix", ""); err != nil { - return err - } - } - - if err := d.Set("solution_stack_name", env.SolutionStackName); err != nil { - return err - } - - if err := d.Set("autoscaling_groups", flattenBeanstalkAsg(resources.EnvironmentResources.AutoScalingGroups)); err != nil { - return err - } - - if err := d.Set("instances", flattenBeanstalkInstances(resources.EnvironmentResources.Instances)); err != nil { - return err - } - if err := d.Set("launch_configurations", flattenBeanstalkLc(resources.EnvironmentResources.LaunchConfigurations)); err != nil { - return err - } - if err := d.Set("load_balancers", flattenBeanstalkElb(resources.EnvironmentResources.LoadBalancers)); err != nil { - return err - } - if err := d.Set("queues", flattenBeanstalkSqs(resources.EnvironmentResources.Queues)); err != nil { - return err - } - if err := d.Set("triggers", flattenBeanstalkTrigger(resources.EnvironmentResources.Triggers)); err != nil { - return err - } - - return resourceAwsElasticBeanstalkEnvironmentSettingsRead(d, meta) -} - -func fetchAwsElasticBeanstalkEnvironmentSettings(d *schema.ResourceData, meta interface{}) (*schema.Set, error) { - conn := meta.(*AWSClient).elasticbeanstalkconn - - app := d.Get("application").(string) - name := d.Get("name").(string) - - resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ - ApplicationName: aws.String(app), - EnvironmentName: aws.String(name), - }) - - if err != nil { - return nil, 
err - } - - if len(resp.ConfigurationSettings) != 1 { - return nil, fmt.Errorf("Error reading environment settings: received %d settings groups, expected 1", len(resp.ConfigurationSettings)) - } - - settings := &schema.Set{F: optionSettingValueHash} - for _, optionSetting := range resp.ConfigurationSettings[0].OptionSettings { - m := map[string]interface{}{} - - if optionSetting.Namespace != nil { - m["namespace"] = *optionSetting.Namespace - } else { - return nil, fmt.Errorf("Error reading environment settings: option setting with no namespace: %v", optionSetting) - } - - if optionSetting.OptionName != nil { - m["name"] = *optionSetting.OptionName - } else { - return nil, fmt.Errorf("Error reading environment settings: option setting with no name: %v", optionSetting) - } - - if *optionSetting.Namespace == "aws:autoscaling:scheduledaction" && optionSetting.ResourceName != nil { - m["resource"] = *optionSetting.ResourceName - } - - if optionSetting.Value != nil { - switch *optionSetting.OptionName { - case "SecurityGroups": - m["value"] = dropGeneratedSecurityGroup(*optionSetting.Value, meta) - case "Subnets", "ELBSubnets": - m["value"] = sortValues(*optionSetting.Value) - default: - m["value"] = *optionSetting.Value - } - } - - settings.Add(m) - } - - return settings, nil -} - -func resourceAwsElasticBeanstalkEnvironmentSettingsRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Elastic Beanstalk environment settings read %s: id %s", d.Get("name").(string), d.Id()) - - allSettings, err := fetchAwsElasticBeanstalkEnvironmentSettings(d, meta) - if err != nil { - return err - } - - settings := d.Get("setting").(*schema.Set) - - log.Printf("[DEBUG] Elastic Beanstalk allSettings: %s", allSettings.GoString()) - log.Printf("[DEBUG] Elastic Beanstalk settings: %s", settings.GoString()) - - // perform the set operation with only name/namespace as keys, excluding value - // this is so we override things in the settings resource data key with updated 
values - // from the api. we skip values we didn't know about before because there are so many - // defaults set by the eb api that we would delete many useful defaults. - // - // there is likely a better way to do this - allSettingsKeySet := schema.NewSet(optionSettingKeyHash, allSettings.List()) - settingsKeySet := schema.NewSet(optionSettingKeyHash, settings.List()) - updatedSettingsKeySet := allSettingsKeySet.Intersection(settingsKeySet) - - log.Printf("[DEBUG] Elastic Beanstalk updatedSettingsKeySet: %s", updatedSettingsKeySet.GoString()) - - updatedSettings := schema.NewSet(optionSettingValueHash, updatedSettingsKeySet.List()) - - log.Printf("[DEBUG] Elastic Beanstalk updatedSettings: %s", updatedSettings.GoString()) - - if err := d.Set("all_settings", allSettings.List()); err != nil { - return err - } - - if err := d.Set("setting", updatedSettings.List()); err != nil { - return err - } - - return nil -} - -func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticbeanstalkconn - - opts := elasticbeanstalk.TerminateEnvironmentInput{ - EnvironmentId: aws.String(d.Id()), - TerminateResources: aws.Bool(true), - } - - // Get the current time to filter getBeanstalkEnvironmentErrors messages - t := time.Now() - log.Printf("[DEBUG] Elastic Beanstalk Environment terminate opts: %s", opts) - _, err := conn.TerminateEnvironment(&opts) - - if err != nil { - return err - } - - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } - pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) - if err != nil { - pollInterval = 0 - log.Printf("[WARN] Error parsing poll_interval, using default backoff") - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"Terminating"}, - Target: []string{"Terminated"}, - Refresh: environmentStateRefreshFunc(conn, d.Id(), t), - Timeout: waitForReadyTimeOut, - Delay: 10 
* time.Second, - PollInterval: pollInterval, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Elastic Beanstalk Environment (%s) to become terminated: %s", - d.Id(), err) - } - - envErrors, err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) - if err != nil { - return err - } - if envErrors != nil { - return envErrors - } - - return nil -} - -// environmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// the creation of the Beanstalk Environment -func environmentStateRefreshFunc(conn *elasticbeanstalk.ElasticBeanstalk, environmentId string, t time.Time) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ - EnvironmentIds: []*string{aws.String(environmentId)}, - }) - if err != nil { - log.Printf("[Err] Error waiting for Elastic Beanstalk Environment state: %s", err) - return -1, "failed", fmt.Errorf("[Err] Error waiting for Elastic Beanstalk Environment state: %s", err) - } - - if resp == nil || len(resp.Environments) == 0 { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - var env *elasticbeanstalk.EnvironmentDescription - for _, e := range resp.Environments { - if environmentId == *e.EnvironmentId { - env = e - } - } - - if env == nil { - return -1, "failed", fmt.Errorf("[Err] Error finding Elastic Beanstalk Environment, environment not found") - } - - envErrors, err := getBeanstalkEnvironmentErrors(conn, environmentId, t) - if err != nil { - return -1, "failed", err - } - if envErrors != nil { - return -1, "failed", envErrors - } - - return env, *env.Status, nil - } -} - -// we use the following two functions to allow us to split out defaults -// as they become overridden from within the template -func optionSettingValueHash(v interface{}) int { - rd := v.(map[string]interface{}) - namespace := rd["namespace"].(string) - optionName := rd["name"].(string) - var resourceName string - if v, ok := rd["resource"].(string); ok { - resourceName = v - } - value, _ := rd["value"].(string) - hk := fmt.Sprintf("%s:%s%s=%s", namespace, optionName, resourceName, sortValues(value)) - log.Printf("[DEBUG] Elastic Beanstalk optionSettingValueHash(%#v): %s: hk=%s,hc=%d", v, optionName, hk, hashcode.String(hk)) - return hashcode.String(hk) -} - -func optionSettingKeyHash(v interface{}) int { - rd := v.(map[string]interface{}) - namespace := rd["namespace"].(string) - optionName := rd["name"].(string) - var resourceName string - if v, ok := rd["resource"].(string); ok { - resourceName = v - } - hk := fmt.Sprintf("%s:%s%s", namespace, optionName, resourceName) - log.Printf("[DEBUG] Elastic Beanstalk optionSettingKeyHash(%#v): %s: hk=%s,hc=%d", v, optionName, hk, hashcode.String(hk)) - return hashcode.String(hk) -} - -func sortValues(v string) string { - values := strings.Split(v, ",") - sort.Strings(values) - return strings.Join(values, ",") -} - -func extractOptionSettings(s *schema.Set) []*elasticbeanstalk.ConfigurationOptionSetting { - settings := []*elasticbeanstalk.ConfigurationOptionSetting{} - - if s != nil { - for 
_, setting := range s.List() { - optionSetting := elasticbeanstalk.ConfigurationOptionSetting{ - Namespace: aws.String(setting.(map[string]interface{})["namespace"].(string)), - OptionName: aws.String(setting.(map[string]interface{})["name"].(string)), - Value: aws.String(setting.(map[string]interface{})["value"].(string)), - } - if *optionSetting.Namespace == "aws:autoscaling:scheduledaction" { - if v, ok := setting.(map[string]interface{})["resource"].(string); ok && v != "" { - optionSetting.ResourceName = aws.String(v) - } - } - settings = append(settings, &optionSetting) - } - } - - return settings -} - -func dropGeneratedSecurityGroup(settingValue string, meta interface{}) string { - conn := meta.(*AWSClient).ec2conn - - groups := strings.Split(settingValue, ",") - - // Check to see if groups are ec2-classic or vpc security groups - ec2Classic := true - beanstalkSGRegexp := "sg-[0-9a-fA-F]{8}" - for _, g := range groups { - if ok, _ := regexp.MatchString(beanstalkSGRegexp, g); ok { - ec2Classic = false - break - } - } - - var resp *ec2.DescribeSecurityGroupsOutput - var err error - - if ec2Classic { - resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ - GroupNames: aws.StringSlice(groups), - }) - } else { - resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ - GroupIds: aws.StringSlice(groups), - }) - } - - if err != nil { - log.Printf("[DEBUG] Elastic Beanstalk error describing SecurityGroups: %v", err) - return settingValue - } - - log.Printf("[DEBUG] Elastic Beanstalk using ec2-classic security-groups: %t", ec2Classic) - var legitGroups []string - for _, group := range resp.SecurityGroups { - log.Printf("[DEBUG] Elastic Beanstalk SecurityGroup: %v", *group.GroupName) - if !strings.HasPrefix(*group.GroupName, "awseb") { - if ec2Classic { - legitGroups = append(legitGroups, *group.GroupName) - } else { - legitGroups = append(legitGroups, *group.GroupId) - } - } - } - - sort.Strings(legitGroups) - - return 
strings.Join(legitGroups, ",") -} - -type beanstalkEnvironmentError struct { - eventDate *time.Time - environmentID string - message *string -} - -func (e beanstalkEnvironmentError) Error() string { - return e.eventDate.String() + " (" + e.environmentID + ") : " + *e.message -} - -type beanstalkEnvironmentErrors []*beanstalkEnvironmentError - -func (e beanstalkEnvironmentErrors) Len() int { return len(e) } -func (e beanstalkEnvironmentErrors) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e beanstalkEnvironmentErrors) Less(i, j int) bool { return e[i].eventDate.Before(*e[j].eventDate) } - -func getBeanstalkEnvironmentErrors(conn *elasticbeanstalk.ElasticBeanstalk, environmentId string, t time.Time) (*multierror.Error, error) { - environmentErrors, err := conn.DescribeEvents(&elasticbeanstalk.DescribeEventsInput{ - EnvironmentId: aws.String(environmentId), - Severity: aws.String("ERROR"), - StartTime: aws.Time(t), - }) - - if err != nil { - return nil, fmt.Errorf("[Err] Unable to get Elastic Beanstalk Evironment events: %s", err) - } - - var events beanstalkEnvironmentErrors - for _, event := range environmentErrors.Events { - e := &beanstalkEnvironmentError{ - eventDate: event.EventDate, - environmentID: environmentId, - message: event.Message, - } - events = append(events, e) - } - sort.Sort(beanstalkEnvironmentErrors(events)) - - var result *multierror.Error - for _, event := range events { - result = multierror.Append(result, event) - } - - return result, nil -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate.go deleted file mode 100644 index 31cd5c777..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate.go +++ /dev/null @@ -1,35 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsElasticBeanstalkEnvironmentMigrateState( - v int, is 
*terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Elastic Beanstalk Environment State v0; migrating to v1") - return migrateBeanstalkEnvironmentStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateBeanstalkEnvironmentStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() || is.Attributes == nil { - log.Println("[DEBUG] Empty Elastic Beanstalk Environment State; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - if is.Attributes["tier"] == "" { - is.Attributes["tier"] = "WebServer" - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate_test.go deleted file mode 100644 index 6b7603894..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSElasticBeanstalkEnvironmentMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0_1_web": { - StateVersion: 0, - Attributes: map[string]string{ - "tier": "", - }, - Expected: map[string]string{ - "tier": "WebServer", - }, - }, - "v0_1_web_explicit": { - StateVersion: 0, - Attributes: map[string]string{ - "tier": "WebServer", - }, - Expected: map[string]string{ - "tier": "WebServer", - }, - }, - "v0_1_worker": { - StateVersion: 0, - Attributes: map[string]string{ - "tier": "Worker", - }, - Expected: map[string]string{ - "tier": "Worker", - }, - }, - } - - for tn, tc := range cases { - is := 
&terraform.InstanceState{ - ID: "e-abcde12345", - Attributes: tc.Attributes, - } - is, err := resourceAwsElasticBeanstalkEnvironmentMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - } -} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go deleted file mode 100644 index 27633af50..000000000 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go +++ /dev/null @@ -1,994 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "reflect" - "regexp" - "sort" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSBeanstalkEnv_basic(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnvConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_tier(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - beanstalkQueuesNameRegexp := regexp.MustCompile("https://sqs.+?awseb[^,]+") - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkWorkerEnvConfig(rInt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckBeanstalkEnvTier("aws_elastic_beanstalk_environment.tfenvtest", &app), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "queues.0", beanstalkQueuesNameRegexp), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_outputs(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - rInt := acctest.RandInt() - beanstalkAsgNameRegexp := regexp.MustCompile("awseb.+?AutoScalingGroup[^,]+") - beanstalkElbNameRegexp := regexp.MustCompile("awseb.+?EBLoa[^,]+") - beanstalkInstancesNameRegexp := regexp.MustCompile("i-([0-9a-fA-F]{8}|[0-9a-fA-F]{17})") - beanstalkLcNameRegexp := regexp.MustCompile("awseb.+?AutoScalingLaunch[^,]+") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnvConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "autoscaling_groups.0", beanstalkAsgNameRegexp), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "load_balancers.0", beanstalkElbNameRegexp), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "instances.0", beanstalkInstancesNameRegexp), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "launch_configurations.0", beanstalkLcNameRegexp), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_cname_prefix(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - cnamePrefix := acctest.RandString(8) - rInt := acctest.RandInt() - beanstalkCnameRegexp := regexp.MustCompile("^" + cnamePrefix + ".+?elasticbeanstalk.com$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnvCnamePrefixConfig(cnamePrefix, rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - resource.TestMatchResourceAttr( - "aws_elastic_beanstalk_environment.tfenvtest", "cname", beanstalkCnameRegexp), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_config(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkConfigTemplate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tftest", &app), - testAccCheckBeanstalkEnvConfigValue("aws_elastic_beanstalk_environment.tftest", "1"), - ), - }, - - { - Config: testAccBeanstalkConfigTemplateUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tftest", &app), - testAccCheckBeanstalkEnvConfigValue("aws_elastic_beanstalk_environment.tftest", "2"), - ), - }, - - { - Config: testAccBeanstalkConfigTemplateUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tftest", &app), - testAccCheckBeanstalkEnvConfigValue("aws_elastic_beanstalk_environment.tftest", "3"), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_resource(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkResourceOptionSetting(rInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_vpc(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnv_VPC(acctest.RandString(5), rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.default", &app), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_template_change(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBeanstalkEnv_TemplateChange_stack(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), - ), - }, - { - Config: testAccBeanstalkEnv_TemplateChange_temp(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), - ), - }, - { - Config: testAccBeanstalkEnv_TemplateChange_stack(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_basic_settings_update(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccBeanstalkEnvConfig_empty_settings(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - testAccVerifyBeanstalkConfig(&app, []string{}), - ), - }, - { - Config: testAccBeanstalkEnvConfig_settings(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - testAccVerifyBeanstalkConfig(&app, []string{"ENV_STATIC", "ENV_UPDATE"}), - ), - }, - { - Config: testAccBeanstalkEnvConfig_settings_update(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - testAccVerifyBeanstalkConfig(&app, []string{"ENV_STATIC", "ENV_UPDATE"}), - ), - }, - { - Config: testAccBeanstalkEnvConfig_empty_settings(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), - testAccVerifyBeanstalkConfig(&app, []string{}), - ), - }, - }, - }) -} - -func TestAccAWSBeanstalkEnv_version_label(t *testing.T) { - var app elasticbeanstalk.EnvironmentDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBeanstalkEnvDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBeanstalkEnvApplicationVersionConfig(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkApplicationVersionDeployed("aws_elastic_beanstalk_environment.default", &app), - ), - }, - resource.TestStep{ - Config: testAccBeanstalkEnvApplicationVersionConfigUpdate(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckBeanstalkApplicationVersionDeployed("aws_elastic_beanstalk_environment.default", &app), - ), - }, - }, - }) -} - -func testAccVerifyBeanstalkConfig(env *elasticbeanstalk.EnvironmentDescription, expected []string) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - if env == nil { - return fmt.Errorf("Nil environment in testAccVerifyBeanstalkConfig") - } - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ - ApplicationName: env.ApplicationName, - EnvironmentName: env.EnvironmentName, - }) - - if err != nil { - return fmt.Errorf("Error describing config settings in testAccVerifyBeanstalkConfig: %s", err) - } - - // should only be 1 environment - if len(resp.ConfigurationSettings) != 1 { - return fmt.Errorf("Expected only 1 set of Configuration Settings in testAccVerifyBeanstalkConfig, got (%d)", len(resp.ConfigurationSettings)) - } - - cs := resp.ConfigurationSettings[0] - - var foundEnvs []string - testStrings := []string{"ENV_STATIC", "ENV_UPDATE"} - for _, os := range cs.OptionSettings { - for _, k := range testStrings { - if *os.OptionName == k { - foundEnvs = append(foundEnvs, k) - } - } - } - - // if expected is zero, then we should not have found any of the predefined - // env vars - if len(expected) == 0 { - if len(foundEnvs) > 0 { - return fmt.Errorf("Found configs we should not have: %#v", foundEnvs) - } - return nil - } - - sort.Strings(testStrings) - sort.Strings(expected) - if !reflect.DeepEqual(testStrings, expected) { - return fmt.Errorf("Error matching strings, expected:\n\n%#v\n\ngot:\n\n%#v\n", testStrings, foundEnvs) - } - - return nil - } -} - -func testAccCheckBeanstalkEnvDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elastic_beanstalk_environment" { - continue - } - - // Try to find the environment - describeBeanstalkEnvOpts := &elasticbeanstalk.DescribeEnvironmentsInput{ - EnvironmentIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeEnvironments(describeBeanstalkEnvOpts) - if err == nil { - switch { - 
case len(resp.Environments) > 1: - return fmt.Errorf("Error %d environments match, expected 1", len(resp.Environments)) - case len(resp.Environments) == 1: - if *resp.Environments[0].Status == "Terminated" { - return nil - } - return fmt.Errorf("Elastic Beanstalk ENV still exists.") - default: - return nil - } - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidBeanstalkEnvID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckBeanstalkEnvExists(n string, app *elasticbeanstalk.EnvironmentDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk ENV is not set") - } - - env, err := describeBeanstalkEnv(testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn, aws.String(rs.Primary.ID)) - if err != nil { - return err - } - - *app = *env - - return nil - } -} - -func testAccCheckBeanstalkEnvTier(n string, app *elasticbeanstalk.EnvironmentDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk ENV is not set") - } - - env, err := describeBeanstalkEnv(testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn, aws.String(rs.Primary.ID)) - if err != nil { - return err - } - if *env.Tier.Name != "Worker" { - return fmt.Errorf("Beanstalk Environment tier is %s, expected Worker", *env.Tier.Name) - } - - *app = *env - - return nil - } -} - -func testAccCheckBeanstalkEnvConfigValue(n string, expectedValue string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn - - rs, ok := s.RootModule().Resources[n] - if !ok { - return 
fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk ENV is not set") - } - - resp, err := conn.DescribeConfigurationOptions(&elasticbeanstalk.DescribeConfigurationOptionsInput{ - ApplicationName: aws.String(rs.Primary.Attributes["application"]), - EnvironmentName: aws.String(rs.Primary.Attributes["name"]), - Options: []*elasticbeanstalk.OptionSpecification{ - { - Namespace: aws.String("aws:elasticbeanstalk:application:environment"), - OptionName: aws.String("TEMPLATE"), - }, - }, - }) - if err != nil { - return err - } - - if len(resp.Options) != 1 { - return fmt.Errorf("Found %d options, expected 1.", len(resp.Options)) - } - - log.Printf("[DEBUG] %d Elastic Beanstalk Option values returned.", len(resp.Options[0].ValueOptions)) - - for _, value := range resp.Options[0].ValueOptions { - if *value != expectedValue { - return fmt.Errorf("Option setting value: %s. Expected %s", *value, expectedValue) - } - } - - return nil - } -} - -func testAccCheckBeanstalkApplicationVersionDeployed(n string, app *elasticbeanstalk.EnvironmentDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Elastic Beanstalk ENV is not set") - } - - env, err := describeBeanstalkEnv(testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn, aws.String(rs.Primary.ID)) - if err != nil { - return err - } - - if *env.VersionLabel != rs.Primary.Attributes["version_label"] { - return fmt.Errorf("Elastic Beanstalk version deployed %s. 
Expected %s", *env.VersionLabel, rs.Primary.Attributes["version_label"]) - } - - *app = *env - - return nil - } -} - -func describeBeanstalkEnv(conn *elasticbeanstalk.ElasticBeanstalk, - envID *string) (*elasticbeanstalk.EnvironmentDescription, error) { - describeBeanstalkEnvOpts := &elasticbeanstalk.DescribeEnvironmentsInput{ - EnvironmentIds: []*string{envID}, - } - - log.Printf("[DEBUG] Elastic Beanstalk Environment TEST describe opts: %s", describeBeanstalkEnvOpts) - - resp, err := conn.DescribeEnvironments(describeBeanstalkEnvOpts) - if err != nil { - return &elasticbeanstalk.EnvironmentDescription{}, err - } - if len(resp.Environments) == 0 { - return &elasticbeanstalk.EnvironmentDescription{}, fmt.Errorf("Elastic Beanstalk ENV not found.") - } - if len(resp.Environments) > 1 { - return &elasticbeanstalk.EnvironmentDescription{}, fmt.Errorf("Found %d environments, expected 1.", len(resp.Environments)) - } - return resp.Environments[0], nil -} - -func testAccBeanstalkEnvConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" - } - - resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - depends_on = ["aws_elastic_beanstalk_application.tftest"] - } - `, rInt, rInt) -} - -func testAccBeanstalkEnvConfig_empty_settings(r int) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - wait_for_ready_timeout = "15m" -}`, r, r) -} - -func testAccBeanstalkEnvConfig_settings(r int) string { - return 
fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - wait_for_ready_timeout = "15m" - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_STATIC" - value = "true" - } - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_UPDATE" - value = "true" - } - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_REMOVE" - value = "true" - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MinSize" - value = 2 - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MaxSize" - value = 3 - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "StartTime" - value = "2016-07-28T04:07:02Z" - } -}`, r, r) -} - -func testAccBeanstalkEnvConfig_settings_update(r int) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - wait_for_ready_timeout = "15m" - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_STATIC" - value = "true" - } - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_UPDATE" - value = "false" - } - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "ENV_ADD" - value = "true" - } - - setting { - namespace 
= "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MinSize" - value = 2 - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MaxSize" - value = 3 - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "StartTime" - value = "2016-07-28T04:07:02Z" - } -}`, r, r) -} - -func testAccBeanstalkWorkerEnvConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_iam_instance_profile" "tftest" { - name = "tftest_profile-%d" - roles = ["${aws_iam_role.tftest.name}"] - } - - resource "aws_iam_role" "tftest" { - name = "tftest_role" - path = "/" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":\"sts:AssumeRole\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Effect\":\"Allow\",\"Sid\":\"\"}]}" - } - - resource "aws_iam_role_policy" "tftest" { - name = "tftest_policy" - role = "${aws_iam_role.tftest.id}" - policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"QueueAccess\",\"Action\":[\"sqs:ChangeMessageVisibility\",\"sqs:DeleteMessage\",\"sqs:ReceiveMessage\"],\"Effect\":\"Allow\",\"Resource\":\"*\"}]}" - } - - resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" - } - - resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - tier = "Worker" - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:autoscaling:launchconfiguration" - name = "IamInstanceProfile" - value = "${aws_iam_instance_profile.tftest.name}" - } - }`, rInt, rInt, rInt) -} - -func testAccBeanstalkEnvCnamePrefixConfig(randString string, rInt int) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" "tftest" { -name = "tf-test-name-%d" -description = "tf-test-desc" -} - -resource 
"aws_elastic_beanstalk_environment" "tfenvtest" { -name = "tf-test-name-%d" -application = "${aws_elastic_beanstalk_application.tftest.name}" -cname_prefix = "%s" -solution_stack_name = "64bit Amazon Linux running Python" -} -`, rInt, rInt, randString) -} - -func testAccBeanstalkConfigTemplate(rInt int) string { - return fmt.Sprintf(` - resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" - } - - resource "aws_elastic_beanstalk_environment" "tftest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - template_name = "${aws_elastic_beanstalk_configuration_template.tftest.name}" - } - - resource "aws_elastic_beanstalk_configuration_template" "tftest" { - name = "tf-test-original" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "TEMPLATE" - value = "1" - } - } - `, rInt, rInt) -} - -func testAccBeanstalkConfigTemplateUpdate(rInt int) string { - return fmt.Sprintf(` - resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" - } - - resource "aws_elastic_beanstalk_environment" "tftest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - template_name = "${aws_elastic_beanstalk_configuration_template.tftest.name}" - } - - resource "aws_elastic_beanstalk_configuration_template" "tftest" { - name = "tf-test-updated" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:elasticbeanstalk:application:environment" - name = "TEMPLATE" - value = "2" - } - } - `, rInt, rInt) -} - -func testAccBeanstalkResourceOptionSetting(rInt int) string { - return fmt.Sprintf(` -resource "aws_elastic_beanstalk_application" 
"tftest" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MinSize" - value = "2" - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "MaxSize" - value = "6" - } - - setting { - namespace = "aws:autoscaling:scheduledaction" - resource = "ScheduledAction01" - name = "Recurrence" - value = "0 8 * * *" - } -}`, rInt, rInt) -} - -func testAccBeanstalkEnv_VPC(name string, rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "tf_b_test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccBeanstalkEnv_VPC" - } -} - -resource "aws_internet_gateway" "tf_b_test" { - vpc_id = "${aws_vpc.tf_b_test.id}" -} - -resource "aws_route" "r" { - route_table_id = "${aws_vpc.tf_b_test.main_route_table_id}" - destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.tf_b_test.id}" -} - -resource "aws_subnet" "main" { - vpc_id = "${aws_vpc.tf_b_test.id}" - cidr_block = "10.0.0.0/24" -} - -resource "aws_security_group" "default" { - name = "tf-b-test-%s" - vpc_id = "${aws_vpc.tf_b_test.id}" -} - -resource "aws_elastic_beanstalk_application" "default" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "default" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.default.name}" - solution_stack_name = "64bit Amazon Linux running Python" - - setting { - namespace = "aws:ec2:vpc" - name = "VPCId" - value = "${aws_vpc.tf_b_test.id}" - } - - setting { - namespace = "aws:ec2:vpc" - name = "Subnets" - value = "${aws_subnet.main.id}" - } - - setting { - namespace = "aws:ec2:vpc" - name = 
"AssociatePublicIpAddress" - value = "true" - } - - setting { - namespace = "aws:autoscaling:launchconfiguration" - name = "SecurityGroups" - value = "${aws_security_group.default.id}" - } -} -`, name, rInt, rInt) -} - -func testAccBeanstalkEnv_TemplateChange_stack(r int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_elastic_beanstalk_application" "app" { - name = "beanstalk-app-%d" - description = "" -} - -resource "aws_elastic_beanstalk_environment" "environment" { - name = "beanstalk-env-%d" - application = "${aws_elastic_beanstalk_application.app.name}" - - # Go 1.4 - - solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.0 running Go 1.4" -} - -resource "aws_elastic_beanstalk_configuration_template" "template" { - name = "beanstalk-config-%d" - application = "${aws_elastic_beanstalk_application.app.name}" - - # Go 1.5 - solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.3 running Go 1.5" -} -`, r, r, r) -} - -func testAccBeanstalkEnv_TemplateChange_temp(r int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_elastic_beanstalk_application" "app" { - name = "beanstalk-app-%d" - description = "" -} - -resource "aws_elastic_beanstalk_environment" "environment" { - name = "beanstalk-env-%d" - application = "${aws_elastic_beanstalk_application.app.name}" - - # Go 1.4 - - template_name = "${aws_elastic_beanstalk_configuration_template.template.name}" -} - -resource "aws_elastic_beanstalk_configuration_template" "template" { - name = "beanstalk-config-%d" - application = "${aws_elastic_beanstalk_application.app.name}" - - # Go 1.5 - solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.3 running Go 1.5" -} -`, r, r, r) -} - -func testAccBeanstalkEnvApplicationVersionConfig(randInt int) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "default" { - bucket = "tftest.applicationversion.buckets-%d" -} - -resource "aws_s3_bucket_object" "default" { - bucket = 
"${aws_s3_bucket.default.id}" - key = "python-v1.zip" - source = "test-fixtures/python-v1.zip" -} - -resource "aws_elastic_beanstalk_application" "default" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_application_version" "default" { - application = "${aws_elastic_beanstalk_application.default.name}" - name = "tf-test-version-label" - bucket = "${aws_s3_bucket.default.id}" - key = "${aws_s3_bucket_object.default.id}" -} - -resource "aws_elastic_beanstalk_environment" "default" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.default.name}" - version_label = "${aws_elastic_beanstalk_application_version.default.name}" - solution_stack_name = "64bit Amazon Linux running Python" -} -`, randInt, randInt, randInt) -} - -func testAccBeanstalkEnvApplicationVersionConfigUpdate(randInt int) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "default" { - bucket = "tftest.applicationversion.buckets-%d" -} - -resource "aws_s3_bucket_object" "default" { - bucket = "${aws_s3_bucket.default.id}" - key = "python-v2.zip" - source = "test-fixtures/python-v1.zip" -} - -resource "aws_elastic_beanstalk_application" "default" { - name = "tf-test-name-%d" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_application_version" "default" { - application = "${aws_elastic_beanstalk_application.default.name}" - name = "tf-test-version-label-v2" - bucket = "${aws_s3_bucket.default.id}" - key = "${aws_s3_bucket_object.default.id}" -} - -resource "aws_elastic_beanstalk_environment" "default" { - name = "tf-test-name-%d" - application = "${aws_elastic_beanstalk_application.default.name}" - version_label = "${aws_elastic_beanstalk_application_version.default.name}" - solution_stack_name = "64bit Amazon Linux running Python" -} -`, randInt, randInt, randInt) -} diff --git a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go 
b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go deleted file mode 100644 index 6a0d88352..000000000 --- a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go +++ /dev/null @@ -1,481 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticTranscoderPipeline() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticTranscoderPipelineCreate, - Read: resourceAwsElasticTranscoderPipelineRead, - Update: resourceAwsElasticTranscoderPipelineUpdate, - Delete: resourceAwsElasticTranscoderPipelineDelete, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "aws_kms_key_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateArn, - }, - - // ContentConfig also requires ThumbnailConfig - "content_config": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - // elastictranscoder.PipelineOutputConfig - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Optional: true, - // AWS may insert the bucket name here taken from output_bucket - Computed: true, - }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "content_config_permissions": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "grantee": { - Type: schema.TypeString, - Optional: true, - }, - "grantee_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "input_bucket": { - Type: schema.TypeString, - Required: true, - }, - - "name": { - 
Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) - } - if len(value) > 40 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 40 characters", k)) - } - return - }, - }, - - "notifications": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "completed": { - Type: schema.TypeString, - Optional: true, - }, - "error": { - Type: schema.TypeString, - Optional: true, - }, - "progressing": { - Type: schema.TypeString, - Optional: true, - }, - "warning": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - // The output_bucket must be set, or both of content_config.bucket - // and thumbnail_config.bucket. - // This is set as Computed, because the API may or may not return - // this as set based on the other 2 configurations. 
- "output_bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "role": { - Type: schema.TypeString, - Required: true, - }, - - "thumbnail_config": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - // elastictranscoder.PipelineOutputConfig - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Optional: true, - // AWS may insert the bucket name here taken from output_bucket - Computed: true, - }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "thumbnail_config_permissions": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "grantee": { - Type: schema.TypeString, - Optional: true, - }, - "grantee_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsElasticTranscoderPipelineCreate(d *schema.ResourceData, meta interface{}) error { - elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn - - req := &elastictranscoder.CreatePipelineInput{ - AwsKmsKeyArn: getStringPtr(d, "aws_kms_key_arn"), - ContentConfig: expandETPiplineOutputConfig(d, "content_config"), - InputBucket: aws.String(d.Get("input_bucket").(string)), - Notifications: expandETNotifications(d), - OutputBucket: getStringPtr(d, "output_bucket"), - Role: getStringPtr(d, "role"), - ThumbnailConfig: expandETPiplineOutputConfig(d, "thumbnail_config"), - } - - if name, ok := d.GetOk("name"); ok { - req.Name = aws.String(name.(string)) - } else { - name := resource.PrefixedUniqueId("tf-et-") - d.Set("name", name) - req.Name = aws.String(name) - } - - if (req.OutputBucket == nil && (req.ContentConfig == nil || req.ContentConfig.Bucket == nil)) || - (req.OutputBucket != nil && req.ContentConfig != nil && req.ContentConfig.Bucket != nil) { - 
return fmt.Errorf("[ERROR] you must specify only one of output_bucket or content_config.bucket") - } - - log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req) - resp, err := elastictranscoderconn.CreatePipeline(req) - if err != nil { - return fmt.Errorf("Error creating Elastic Transcoder Pipeline: %s", err) - } - - d.SetId(*resp.Pipeline.Id) - - for _, w := range resp.Warnings { - log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) - } - - return resourceAwsElasticTranscoderPipelineRead(d, meta) -} - -func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notifications { - set, ok := d.GetOk("notifications") - if !ok { - return nil - } - - s := set.(*schema.Set).List() - if s == nil || len(s) == 0 { - return nil - } - - if s[0] == nil { - log.Printf("[ERR] First element of Notifications set is nil") - return nil - } - - rN := s[0].(map[string]interface{}) - - return &elastictranscoder.Notifications{ - Completed: aws.String(rN["completed"].(string)), - Error: aws.String(rN["error"].(string)), - Progressing: aws.String(rN["progressing"].(string)), - Warning: aws.String(rN["warning"].(string)), - } -} - -func flattenETNotifications(n *elastictranscoder.Notifications) []map[string]interface{} { - if n == nil { - return nil - } - - allEmpty := func(s ...*string) bool { - for _, s := range s { - if s != nil && *s != "" { - return false - } - } - return true - } - - // the API always returns a Notifications value, even when all fields are nil - if allEmpty(n.Completed, n.Error, n.Progressing, n.Warning) { - return nil - } - - m := setMap(make(map[string]interface{})) - - m.SetString("completed", n.Completed) - m.SetString("error", n.Error) - m.SetString("progressing", n.Progressing) - m.SetString("warning", n.Warning) - return m.MapList() -} - -func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictranscoder.PipelineOutputConfig { - set, ok := d.GetOk(key) - if !ok { - return nil - } - - s := 
set.(*schema.Set) - if s == nil || s.Len() == 0 { - return nil - } - - cc := s.List()[0].(map[string]interface{}) - - cfg := &elastictranscoder.PipelineOutputConfig{ - Bucket: getStringPtr(cc, "bucket"), - StorageClass: getStringPtr(cc, "storage_class"), - } - - switch key { - case "content_config": - cfg.Permissions = expandETPermList(d.Get("content_config_permissions").(*schema.Set)) - case "thumbnail_config": - cfg.Permissions = expandETPermList(d.Get("thumbnail_config_permissions").(*schema.Set)) - } - - return cfg -} - -func flattenETPipelineOutputConfig(cfg *elastictranscoder.PipelineOutputConfig) []map[string]interface{} { - m := setMap(make(map[string]interface{})) - - m.SetString("bucket", cfg.Bucket) - m.SetString("storage_class", cfg.StorageClass) - - return m.MapList() -} - -func expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { - var perms []*elastictranscoder.Permission - - for _, p := range permissions.List() { - perm := &elastictranscoder.Permission{ - Access: getStringPtrList(p.(map[string]interface{}), "access"), - Grantee: getStringPtr(p, "grantee"), - GranteeType: getStringPtr(p, "grantee_type"), - } - perms = append(perms, perm) - } - return perms -} - -func flattenETPermList(perms []*elastictranscoder.Permission) []map[string]interface{} { - var set []map[string]interface{} - - for _, p := range perms { - m := setMap(make(map[string]interface{})) - m.Set("access", flattenStringList(p.Access)) - m.SetString("grantee", p.Grantee) - m.SetString("grantee_type", p.GranteeType) - - set = append(set, m) - } - return set -} - -func resourceAwsElasticTranscoderPipelineUpdate(d *schema.ResourceData, meta interface{}) error { - elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn - - req := &elastictranscoder.UpdatePipelineInput{ - Id: aws.String(d.Id()), - } - - if d.HasChange("aws_kms_key_arn") { - req.AwsKmsKeyArn = getStringPtr(d, "aws_kms_key_arn") - } - - if d.HasChange("content_config") { - req.ContentConfig = 
expandETPiplineOutputConfig(d, "content_config") - } - - if d.HasChange("input_bucket") { - req.InputBucket = getStringPtr(d, "input_bucket") - } - - if d.HasChange("name") { - req.Name = getStringPtr(d, "name") - } - - if d.HasChange("notifications") { - req.Notifications = expandETNotifications(d) - } - - if d.HasChange("role") { - req.Role = getStringPtr(d, "role") - } - - if d.HasChange("thumbnail_config") { - req.ThumbnailConfig = expandETPiplineOutputConfig(d, "thumbnail_config") - } - - log.Printf("[DEBUG] Updating Elastic Transcoder Pipeline: %#v", req) - output, err := elastictranscoderconn.UpdatePipeline(req) - if err != nil { - return fmt.Errorf("Error updating Elastic Transcoder pipeline: %s", err) - } - - for _, w := range output.Warnings { - log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) - } - - return resourceAwsElasticTranscoderPipelineRead(d, meta) -} - -func resourceAwsElasticTranscoderPipelineRead(d *schema.ResourceData, meta interface{}) error { - elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn - - resp, err := elastictranscoderconn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ - Id: aws.String(d.Id()), - }) - - if err != nil { - if err, ok := err.(awserr.Error); ok && err.Code() == "ResourceNotFoundException" { - d.SetId("") - return nil - } - return err - } - - log.Printf("[DEBUG] Elastic Transcoder Pipeline Read response: %#v", resp) - - pipeline := resp.Pipeline - - d.Set("arn", *pipeline.Arn) - - if arn := pipeline.AwsKmsKeyArn; arn != nil { - d.Set("aws_kms_key_arn", *arn) - } - - if pipeline.ContentConfig != nil { - err := d.Set("content_config", flattenETPipelineOutputConfig(pipeline.ContentConfig)) - if err != nil { - return fmt.Errorf("error setting content_config: %s", err) - } - - if pipeline.ContentConfig.Permissions != nil { - err := d.Set("content_config_permissions", flattenETPermList(pipeline.ContentConfig.Permissions)) - if err != nil { - return fmt.Errorf("error setting 
content_config_permissions: %s", err) - } - } - } - - d.Set("input_bucket", *pipeline.InputBucket) - d.Set("name", *pipeline.Name) - - notifications := flattenETNotifications(pipeline.Notifications) - if notifications != nil { - if err := d.Set("notifications", notifications); err != nil { - return fmt.Errorf("error setting notifications: %s", err) - } - } - - d.Set("role", *pipeline.Role) - - if pipeline.ThumbnailConfig != nil { - err := d.Set("thumbnail_config", flattenETPipelineOutputConfig(pipeline.ThumbnailConfig)) - if err != nil { - return fmt.Errorf("error setting thumbnail_config: %s", err) - } - - if pipeline.ThumbnailConfig.Permissions != nil { - err := d.Set("thumbnail_config_permissions", flattenETPermList(pipeline.ThumbnailConfig.Permissions)) - if err != nil { - return fmt.Errorf("error setting thumbnail_config_permissions: %s", err) - } - } - } - - if pipeline.OutputBucket != nil { - d.Set("output_bucket", *pipeline.OutputBucket) - } - - return nil -} - -func resourceAwsElasticTranscoderPipelineDelete(d *schema.ResourceData, meta interface{}) error { - elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn - - log.Printf("[DEBUG] Elastic Transcoder Delete Pipeline: %s", d.Id()) - _, err := elastictranscoderconn.DeletePipeline(&elastictranscoder.DeletePipelineInput{ - Id: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("error deleting Elastic Transcoder Pipeline: %s", err) - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go deleted file mode 100644 index e9b8f05d2..000000000 --- a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ /dev/null @@ -1,603 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "sort" - "testing" - - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticTranscoderPipeline_basic(t *testing.T) { - pipeline := &elastictranscoder.Pipeline{} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elastictranscoder_pipeline.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, - Steps: []resource.TestStep{ - { - Config: awsElasticTranscoderPipelineConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), - ), - }, - }, - }) -} - -func TestAccAWSElasticTranscoderPipeline_kmsKey(t *testing.T) { - pipeline := &elastictranscoder.Pipeline{} - ri := acctest.RandInt() - config := fmt.Sprintf(awsElasticTranscoderPipelineConfigKmsKey, ri, ri, ri) - keyRegex := regexp.MustCompile("^arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elastictranscoder_pipeline.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), - resource.TestMatchResourceAttr("aws_elastictranscoder_pipeline.bar", "aws_kms_key_arn", keyRegex), - ), - }, - }, - }) -} - -func TestAccAWSElasticTranscoderPipeline_notifications(t *testing.T) { - pipeline := elastictranscoder.Pipeline{} - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elastictranscoder_pipeline.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, - Steps: 
[]resource.TestStep{ - { - Config: awsElasticTranscoderNotifications(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", &pipeline), - testAccCheckAWSElasticTranscoderPipeline_notifications(&pipeline, []string{"warning", "completed"}), - ), - }, - - // update and check that we have 1 less notification - resource.TestStep{ - Config: awsElasticTranscoderNotifications_update(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", &pipeline), - testAccCheckAWSElasticTranscoderPipeline_notifications(&pipeline, []string{"completed"}), - ), - }, - }, - }) -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckAWSElasticTranscoderPipeline_notifications( - p *elastictranscoder.Pipeline, notifications []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - var notes []string - if p.Notifications.Completed != nil && *p.Notifications.Completed != "" { - notes = append(notes, "completed") - } - if p.Notifications.Error != nil && *p.Notifications.Error != "" { - notes = append(notes, "error") - } - if p.Notifications.Progressing != nil && *p.Notifications.Progressing != "" { - notes = append(notes, "progressing") - } - if p.Notifications.Warning != nil && *p.Notifications.Warning != "" { - notes = append(notes, "warning") - } - - if len(notes) != len(notifications) { - return fmt.Errorf("ETC notifications didn't match:\n\texpected: %#v\n\tgot: %#v\n\n", notifications, notes) - } - - sort.Strings(notes) - sort.Strings(notifications) - - if !reflect.DeepEqual(notes, notifications) { - return fmt.Errorf("ETC notifications were not equal:\n\texpected: %#v\n\tgot: %#v\n\n", notifications, notes) - } - - return nil - } -} - -func TestAccAWSElasticTranscoderPipeline_withContentConfig(t *testing.T) { - pipeline := &elastictranscoder.Pipeline{} - - rInt := 
acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elastictranscoder_pipeline.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, - Steps: []resource.TestStep{ - { - Config: awsElasticTranscoderPipelineWithContentConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), - ), - }, - { - Config: awsElasticTranscoderPipelineWithContentConfigUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), - ), - }, - }, - }) -} - -func TestAccAWSElasticTranscoderPipeline_withPermissions(t *testing.T) { - pipeline := &elastictranscoder.Pipeline{} - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elastictranscoder_pipeline.baz", - Providers: testAccProviders, - CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, - Steps: []resource.TestStep{ - { - Config: awsElasticTranscoderPipelineWithPerms(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.baz", pipeline), - ), - }, - }, - }) -} - -func testAccCheckAWSElasticTranscoderPipelineExists(n string, res *elastictranscoder.Pipeline) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Pipeline ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn - - out, err := conn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ - Id: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - *res = *out.Pipeline - - return nil - } -} - -func 
testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elastictranscoder_pipline" { - continue - } - - out, err := conn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ - Id: aws.String(rs.Primary.ID), - }) - - if err == nil { - if out.Pipeline != nil && *out.Pipeline.Id == rs.Primary.ID { - return fmt.Errorf("Elastic Transcoder Pipeline still exists") - } - } - - awsErr, ok := err.(awserr.Error) - if !ok { - return err - } - - if awsErr.Code() != "ResourceNotFoundException" { - return fmt.Errorf("unexpected error: %s", awsErr) - } - - } - return nil -} - -const awsElasticTranscoderPipelineConfigBasic = ` -resource "aws_elastictranscoder_pipeline" "bar" { - input_bucket = "${aws_s3_bucket.test_bucket.bucket}" - output_bucket = "${aws_s3_bucket.test_bucket.bucket}" - name = "aws_elastictranscoder_pipeline_tf_test_" - role = "${aws_iam_role.test_role.arn}" -} - -resource "aws_iam_role" "test_role" { - name = "aws_elastictranscoder_pipeline_tf_test_role_" - - assume_role_policy = < 35 { - es = append(es, fmt.Errorf( - "snapshot retention limit cannot be more than 35 days")) - } - return - }, - }, - - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "tags": tagsSchema(), - } -} - -func resourceAwsElasticacheCluster() *schema.Resource { - resourceSchema := resourceAwsElastiCacheCommonSchema() - - resourceSchema["cluster_id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(val interface{}) string { - // Elasticache normalizes cluster ids to lowercase, - // so we have to do this too or else we can end up - // with non-converging diffs. 
- return strings.ToLower(val.(string)) - }, - ValidateFunc: validateElastiCacheClusterId, - } - - resourceSchema["num_cache_nodes"] = &schema.Schema{ - Type: schema.TypeInt, - Required: true, - } - - resourceSchema["az_mode"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - } - - resourceSchema["availability_zone"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - } - - resourceSchema["configuration_endpoint"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - resourceSchema["cluster_address"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - resourceSchema["replication_group_id"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - resourceSchema["cache_nodes"] = &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "address": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - "availability_zone": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - } - - return &schema.Resource{ - Create: resourceAwsElasticacheClusterCreate, - Read: resourceAwsElasticacheClusterRead, - Update: resourceAwsElasticacheClusterUpdate, - Delete: resourceAwsElasticacheClusterDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: resourceSchema, - } -} - -func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - clusterId := d.Get("cluster_id").(string) - nodeType := d.Get("node_type").(string) // e.g) cache.m1.small - numNodes := int64(d.Get("num_cache_nodes").(int)) // 2 - engine := d.Get("engine").(string) // memcached - engineVersion := d.Get("engine_version").(string) // 1.4.14 - port := 
int64(d.Get("port").(int)) // e.g) 11211 - subnetGroupName := d.Get("subnet_group_name").(string) - securityNameSet := d.Get("security_group_names").(*schema.Set) - securityIdSet := d.Get("security_group_ids").(*schema.Set) - - securityNames := expandStringList(securityNameSet.List()) - securityIds := expandStringList(securityIdSet.List()) - tags := tagsFromMapEC(d.Get("tags").(map[string]interface{})) - - req := &elasticache.CreateCacheClusterInput{ - CacheClusterId: aws.String(clusterId), - CacheNodeType: aws.String(nodeType), - NumCacheNodes: aws.Int64(numNodes), - Engine: aws.String(engine), - EngineVersion: aws.String(engineVersion), - Port: aws.Int64(port), - CacheSubnetGroupName: aws.String(subnetGroupName), - CacheSecurityGroupNames: securityNames, - SecurityGroupIds: securityIds, - Tags: tags, - } - - // parameter groups are optional and can be defaulted by AWS - if v, ok := d.GetOk("parameter_group_name"); ok { - req.CacheParameterGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("snapshot_window"); ok { - req.SnapshotWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("maintenance_window"); ok { - req.PreferredMaintenanceWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("notification_topic_arn"); ok { - req.NotificationTopicArn = aws.String(v.(string)) - } - - snaps := d.Get("snapshot_arns").(*schema.Set).List() - if len(snaps) > 0 { - s := expandStringList(snaps) - req.SnapshotArns = s - log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) - } - - if v, ok := d.GetOk("snapshot_name"); ok { - req.SnapshotName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("az_mode"); ok { - req.AZMode = aws.String(v.(string)) - } - - if v, ok := d.GetOk("availability_zone"); ok { - req.PreferredAvailabilityZone = aws.String(v.(string)) - } - - preferred_azs := 
d.Get("availability_zones").(*schema.Set).List() - if len(preferred_azs) > 0 { - azs := expandStringList(preferred_azs) - req.PreferredAvailabilityZones = azs - } - - if v, ok := d.GetOk("replication_group_id"); ok { - req.ReplicationGroupId = aws.String(v.(string)) - } - - resp, err := conn.CreateCacheCluster(req) - if err != nil { - return fmt.Errorf("Error creating Elasticache: %s", err) - } - - // Assign the cluster id as the resource ID - // Elasticache always retains the id in lower case, so we have to - // mimic that or else we won't be able to refresh a resource whose - // name contained uppercase characters. - d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) - - pending := []string{"creating", "modifying", "restoring", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr) - } - - return resourceAwsElasticacheClusterRead(d, meta) -} - -func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - req := &elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(d.Id()), - ShowCacheNodeInfo: aws.Bool(true), - } - - res, err := conn.DescribeCacheClusters(req) - if err != nil { - if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" { - log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id()) - d.SetId("") - return nil - } - - return err - } - - if len(res.CacheClusters) == 1 { - c := res.CacheClusters[0] - d.Set("cluster_id", c.CacheClusterId) - d.Set("node_type", c.CacheNodeType) - 
d.Set("num_cache_nodes", c.NumCacheNodes) - d.Set("engine", c.Engine) - d.Set("engine_version", c.EngineVersion) - if c.ConfigurationEndpoint != nil { - d.Set("port", c.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) - d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *c.ConfigurationEndpoint.Address))) - } - - if c.ReplicationGroupId != nil { - d.Set("replication_group_id", c.ReplicationGroupId) - } - - d.Set("subnet_group_name", c.CacheSubnetGroupName) - d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) - d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) - if c.CacheParameterGroup != nil { - d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) - } - d.Set("maintenance_window", c.PreferredMaintenanceWindow) - d.Set("snapshot_window", c.SnapshotWindow) - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) - if c.NotificationConfiguration != nil { - if *c.NotificationConfiguration.TopicStatus == "active" { - d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) - } - } - d.Set("availability_zone", c.PreferredAvailabilityZone) - - if err := setCacheNodeData(d, c); err != nil { - return err - } - // list tags for resource - // set tags - arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId) - } else { - resp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) - } - - var et []*elasticache.Tag - if len(resp.TagList) > 0 { - et = resp.TagList - } - d.Set("tags", tagsToMapEC(et)) - } - } - - return nil -} 
- -func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id()) - } else { - if err := setTagsEC(conn, d, arn); err != nil { - return err - } - } - - req := &elasticache.ModifyCacheClusterInput{ - CacheClusterId: aws.String(d.Id()), - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - } - - requestUpdate := false - if d.HasChange("security_group_ids") { - if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { - req.SecurityGroupIds = expandStringList(attr.List()) - requestUpdate = true - } - } - - if d.HasChange("parameter_group_name") { - req.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("maintenance_window") { - req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) - requestUpdate = true - } - - if d.HasChange("notification_topic_arn") { - v := d.Get("notification_topic_arn").(string) - req.NotificationTopicArn = aws.String(v) - if v == "" { - inactive := "inactive" - req.NotificationTopicStatus = &inactive - } - requestUpdate = true - } - - if d.HasChange("engine_version") { - req.EngineVersion = aws.String(d.Get("engine_version").(string)) - requestUpdate = true - } - - if d.HasChange("snapshot_window") { - req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true - } - - if d.HasChange("node_type") { - req.CacheNodeType = aws.String(d.Get("node_type").(string)) - requestUpdate = true - } - - if d.HasChange("snapshot_retention_limit") { - req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) - requestUpdate = true - } - - if 
d.HasChange("num_cache_nodes") { - oraw, nraw := d.GetChange("num_cache_nodes") - o := oraw.(int) - n := nraw.(int) - if v, ok := d.GetOk("az_mode"); ok && v.(string) == "cross-az" && n == 1 { - return fmt.Errorf("[WARN] Error updateing Elasticache cluster (%s), error: Cross-AZ mode is not supported in a single cache node.", d.Id()) - } - if n < o { - log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n) - nodesToRemove := getCacheNodesToRemove(d, o, o-n) - req.CacheNodeIdsToRemove = nodesToRemove - } - - req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) - requestUpdate = true - - } - - if requestUpdate { - log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req) - _, err := conn.ModifyCacheCluster(req) - if err != nil { - return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err) - } - - log.Printf("[DEBUG] Waiting for update: %s", d.Id()) - pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), - Timeout: 80 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr) - } - } - - return resourceAwsElasticacheClusterRead(d, meta) -} - -func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string { - nodesIdsToRemove := []*string{} - for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- { - s := fmt.Sprintf("%04d", i) - nodesIdsToRemove = append(nodesIdsToRemove, &s) - } - - return nodesIdsToRemove -} - -func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error { - sortedCacheNodes := 
make([]*elasticache.CacheNode, len(c.CacheNodes)) - copy(sortedCacheNodes, c.CacheNodes) - sort.Sort(byCacheNodeId(sortedCacheNodes)) - - cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes)) - - for _, node := range sortedCacheNodes { - if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil { - return fmt.Errorf("Unexpected nil pointer in: %s", node) - } - cacheNodeData = append(cacheNodeData, map[string]interface{}{ - "id": *node.CacheNodeId, - "address": *node.Endpoint.Address, - "port": int(*node.Endpoint.Port), - "availability_zone": *node.CustomerAvailabilityZone, - }) - } - - return d.Set("cache_nodes", cacheNodeData) -} - -type byCacheNodeId []*elasticache.CacheNode - -func (b byCacheNodeId) Len() int { return len(b) } -func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byCacheNodeId) Less(i, j int) bool { - return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil && - *b[i].CacheNodeId < *b[j].CacheNodeId -} - -func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - req := &elasticache.DeleteCacheClusterInput{ - CacheClusterId: aws.String(d.Id()), - } - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteCacheCluster(req) - if err != nil { - awsErr, ok := err.(awserr.Error) - // The cluster may be just snapshotting, so we retry until it's ready for deletion - if ok && awsErr.Code() == "InvalidCacheClusterState" { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - - log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed", "snapshotting"}, - 
Target: []string{}, - Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache (%s) to delete: %s", d.Id(), sterr) - } - - d.SetId("") - - return nil -} - -func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(clusterID), - ShowCacheNodeInfo: aws.Bool(true), - }) - if err != nil { - apierr := err.(awserr.Error) - log.Printf("[DEBUG] message: %v, code: %v", apierr.Message(), apierr.Code()) - if apierr.Message() == fmt.Sprintf("CacheCluster not found: %v", clusterID) { - log.Printf("[DEBUG] Detect deletion") - return nil, "", nil - } - - log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err) - return nil, "", err - } - - if len(resp.CacheClusters) == 0 { - return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID) - } - - var c *elasticache.CacheCluster - for _, cluster := range resp.CacheClusters { - if *cluster.CacheClusterId == clusterID { - log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId) - c = cluster - } - } - - if c == nil { - return nil, "", fmt.Errorf("[WARN] Error: no matching Elastic Cache cluster for id (%s)", clusterID) - } - - log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus) - - // return the current state if it's in the pending array - for _, p := range pending { - log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus) - s := *c.CacheClusterStatus - if p == s { - log.Printf("[DEBUG] Return with 
status: %v", *c.CacheClusterStatus) - return c, p, nil - } - } - - // return given state if it's not in pending - if givenState != "" { - log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus) - // check to make sure we have the node count we're expecting - if int64(len(c.CacheNodes)) != *c.NumCacheNodes { - log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes) - return nil, "creating", nil - } - - log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes)) - // loop the nodes and check their status as well - for _, n := range c.CacheNodes { - log.Printf("[DEBUG] Checking cache node for status: %s", n) - if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" { - log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus) - return nil, "creating", nil - } - log.Printf("[DEBUG] Cache node not in expected state") - } - log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c) - return c, givenState, nil - } - log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus) - return c, *c.CacheClusterStatus, nil - } -} - -func buildECARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:elasticache:%s:%s:cluster:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go deleted file mode 100644 index 4c4b89b15..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ 
/dev/null @@ -1,524 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticacheCluster_basic(t *testing.T) { - var ec elasticache.CacheCluster - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheClusterConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "cache_nodes.0.id", "0001"), - resource.TestCheckResourceAttrSet("aws_elasticache_cluster.bar", "configuration_endpoint"), - resource.TestCheckResourceAttrSet("aws_elasticache_cluster.bar", "cluster_address"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { - var ec elasticache.CacheCluster - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, acctest.RandString(10)) - postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - 
resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheCluster_decreasingCacheNodes(t *testing.T) { - var ec elasticache.CacheCluster - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes, ri, ri, acctest.RandString(10)) - postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes_update, ri, ri, acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "num_cache_nodes", "3"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "num_cache_nodes", "1"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheCluster_vpc(t *testing.T) { - var csg 
elasticache.CacheSubnetGroup - var ec elasticache.CacheCluster - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheClusterInVPCConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - testAccCheckAWSElasticacheClusterAttributes(&ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "availability_zone", "us-west-2a"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheCluster_multiAZInVpc(t *testing.T) { - var csg elasticache.CacheSubnetGroup - var ec elasticache.CacheCluster - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheClusterMultiAZInVPCConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "availability_zone", "Multiple"), - ), - }, - }, - }) -} - -func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - if v.NotificationConfiguration == nil { - return fmt.Errorf("Expected NotificationConfiguration for ElastiCache Cluster (%s)", *v.CacheClusterId) - } - - if strings.ToLower(*v.NotificationConfiguration.TopicStatus) != "active" { - return fmt.Errorf("Expected NotificationConfiguration status to be 'active', got (%s)", *v.NotificationConfiguration.TopicStatus) - } - - return nil - } -} - -func 
testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticache_cluster" { - continue - } - res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(rs.Primary.ID), - }) - if err != nil { - // Verify the error is what we want - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheClusterNotFound" { - continue - } - return err - } - if len(res.CacheClusters) > 0 { - return fmt.Errorf("still exist.") - } - } - return nil -} - -func testAccCheckAWSElasticacheClusterExists(n string, v *elasticache.CacheCluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No cache cluster ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("Elasticache error: %v", err) - } - - for _, c := range resp.CacheClusters { - if *c.CacheClusterId == rs.Primary.ID { - *v = *c - } - } - - return nil - } -} - -func testAccAWSElasticacheClusterConfigBasic(clusterId string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 11211 - parameter_group_name = "default.memcached1.4" -} -`, clusterId) -} - -var testAccAWSElasticacheClusterConfig = fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - 
to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 11211 - parameter_group_name = "default.memcached1.4" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] -} -`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) - -var testAccAWSElasticacheClusterConfig_snapshots = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "redis" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - snapshot_window = "05:00-09:00" - snapshot_retention_limit = 3 -} -` - -var testAccAWSElasticacheClusterConfig_snapshotsUpdated = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - security_group_names = 
["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "redis" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - snapshot_window = "07:00-09:00" - snapshot_retention_limit = 7 - apply_immediately = true -} -` - -var testAccAWSElasticacheClusterConfigDecreasingNodes = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 3 - port = 11211 - parameter_group_name = "default.memcached1.4" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] -} -` - -var testAccAWSElasticacheClusterConfigDecreasingNodes_update = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 1 - port = 11211 - parameter_group_name = "default.memcached1.4" - security_group_names = 
["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true -} -` - -var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = ["${aws_subnet.foo.id}"] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_cluster" "bar" { - // Including uppercase letters in this name to ensure - // that we correctly handle the fact that the API - // normalizes names to lowercase. 
- cluster_id = "tf-%s" - node_type = "cache.m1.small" - num_cache_nodes = 1 - engine = "redis" - engine_version = "2.8.19" - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis2.8" - notification_topic_arn = "${aws_sns_topic.topic_example.arn}" - availability_zone = "us-west-2a" -} - -resource "aws_sns_topic" "topic_example" { - name = "tf-ecache-cluster-test" -} -`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) - -var testAccAWSElasticacheClusterMultiAZInVPCConfig = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "us-west-2b" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-%s" - engine = "memcached" - node_type = "cache.m1.small" - num_cache_nodes = 2 - port = 11211 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.memcached1.4" - az_mode = "cross-az" - availability_zones = [ - "us-west-2a", - "us-west-2b" - ] -} -`, acctest.RandInt(), acctest.RandInt(), 
acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) diff --git a/builtin/providers/aws/resource_aws_elasticache_parameter_group.go b/builtin/providers/aws/resource_aws_elasticache_parameter_group.go deleted file mode 100644 index e73d0be6f..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_parameter_group.go +++ /dev/null @@ -1,215 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" -) - -func resourceAwsElasticacheParameterGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticacheParameterGroupCreate, - Read: resourceAwsElasticacheParameterGroupRead, - Update: resourceAwsElasticacheParameterGroupUpdate, - Delete: resourceAwsElasticacheParameterGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "family": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - "parameter": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceAwsElasticacheParameterHash, - }, - }, - } -} - -func resourceAwsElasticacheParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - createOpts 
:= elasticache.CreateCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(d.Get("name").(string)), - CacheParameterGroupFamily: aws.String(d.Get("family").(string)), - Description: aws.String(d.Get("description").(string)), - } - - log.Printf("[DEBUG] Create Cache Parameter Group: %#v", createOpts) - _, err := conn.CreateCacheParameterGroup(&createOpts) - if err != nil { - return fmt.Errorf("Error creating Cache Parameter Group: %s", err) - } - - d.Partial(true) - d.SetPartial("name") - d.SetPartial("family") - d.SetPartial("description") - d.Partial(false) - - d.SetId(*createOpts.CacheParameterGroupName) - log.Printf("[INFO] Cache Parameter Group ID: %s", d.Id()) - - return resourceAwsElasticacheParameterGroupUpdate(d, meta) -} - -func resourceAwsElasticacheParameterGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - describeOpts := elasticache.DescribeCacheParameterGroupsInput{ - CacheParameterGroupName: aws.String(d.Id()), - } - - describeResp, err := conn.DescribeCacheParameterGroups(&describeOpts) - if err != nil { - return err - } - - if len(describeResp.CacheParameterGroups) != 1 || - *describeResp.CacheParameterGroups[0].CacheParameterGroupName != d.Id() { - return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.CacheParameterGroups) - } - - d.Set("name", describeResp.CacheParameterGroups[0].CacheParameterGroupName) - d.Set("family", describeResp.CacheParameterGroups[0].CacheParameterGroupFamily) - d.Set("description", describeResp.CacheParameterGroups[0].Description) - - // Only include user customized parameters as there's hundreds of system/default ones - describeParametersOpts := elasticache.DescribeCacheParametersInput{ - CacheParameterGroupName: aws.String(d.Id()), - Source: aws.String("user"), - } - - describeParametersResp, err := conn.DescribeCacheParameters(&describeParametersOpts) - if err != nil { - return err - } - - d.Set("parameter", 
flattenElastiCacheParameters(describeParametersResp.Parameters)) - - return nil -} - -func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - d.Partial(true) - - if d.HasChange("parameter") { - o, n := d.GetChange("parameter") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - // Expand the "parameter" set to aws-sdk-go compat []elasticacheconn.Parameter - parameters, err := expandElastiCacheParameters(ns.Difference(os).List()) - if err != nil { - return err - } - - if len(parameters) > 0 { - modifyOpts := elasticache.ModifyCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(d.Get("name").(string)), - ParameterNameValues: parameters, - } - - log.Printf("[DEBUG] Modify Cache Parameter Group: %#v", modifyOpts) - _, err = conn.ModifyCacheParameterGroup(&modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying Cache Parameter Group: %s", err) - } - } - d.SetPartial("parameter") - } - - d.Partial(false) - - return resourceAwsElasticacheParameterGroupRead(d, meta) -} - -func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsElasticacheParameterGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - -func resourceAwsElasticacheParameterGroupDeleteRefreshFunc( - d *schema.ResourceData, - meta interface{}) resource.StateRefreshFunc { - conn := meta.(*AWSClient).elasticacheconn - - return func() (interface{}, string, error) { - - deleteOpts := elasticache.DeleteCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(d.Id()), - } - - if _, err := conn.DeleteCacheParameterGroup(&deleteOpts); err != nil { - 
elasticahceerr, ok := err.(awserr.Error) - if ok && elasticahceerr.Code() == "CacheParameterGroupNotFoundFault" { - d.SetId("") - return d, "error", err - } - return d, "error", err - } - return d, "destroyed", nil - } -} - -func resourceAwsElasticacheParameterHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go b/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go deleted file mode 100644 index 201d6d524..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticacheParameterGroup_basic(t *testing.T) { - var v elasticache.CacheParameterGroup - rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheParameterGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - testAccCheckAWSElasticacheParameterGroupAttributes(&v, rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "family", 
"redis2.8"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.283487565.name", "appendonly"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.283487565.value", "yes"), - ), - }, - resource.TestStep{ - Config: testAccAWSElasticacheParameterGroupAddParametersConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - testAccCheckAWSElasticacheParameterGroupAttributes(&v, rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "family", "redis2.8"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "description", "Test parameter group for terraform"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.283487565.name", "appendonly"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.283487565.value", "yes"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.2196914567.name", "appendfsync"), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "parameter.2196914567.value", "always"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheParameterGroupOnly(t *testing.T) { - var v elasticache.CacheParameterGroup - rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheParameterGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheParameterGroupOnlyConfig(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - testAccCheckAWSElasticacheParameterGroupAttributes(&v, rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "name", rName), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "family", "redis2.8"), - ), - }, - }, - }) -} - -func testAccCheckAWSElasticacheParameterGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticache_parameter_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeCacheParameterGroups( - &elasticache.DescribeCacheParameterGroupsInput{ - CacheParameterGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.CacheParameterGroups) != 0 && - *resp.CacheParameterGroups[0].CacheParameterGroupName == rs.Primary.ID { - return fmt.Errorf("Cache Parameter Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "CacheParameterGroupNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSElasticacheParameterGroupAttributes(v *elasticache.CacheParameterGroup, rName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.CacheParameterGroupName != rName { - return fmt.Errorf("bad name: %#v", v.CacheParameterGroupName) - } - - if *v.CacheParameterGroupFamily != "redis2.8" { - return fmt.Errorf("bad family: %#v", v.CacheParameterGroupFamily) - } - - return nil - } -} - -func testAccCheckAWSElasticacheParameterGroupExists(n string, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Cache Parameter Group ID is set") - } - - 
conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - opts := elasticache.DescribeCacheParameterGroupsInput{ - CacheParameterGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeCacheParameterGroups(&opts) - - if err != nil { - return err - } - - if len(resp.CacheParameterGroups) != 1 || - *resp.CacheParameterGroups[0].CacheParameterGroupName != rs.Primary.ID { - return fmt.Errorf("Cache Parameter Group not found") - } - - *v = *resp.CacheParameterGroups[0] - - return nil - } -} - -func testAccAWSElasticacheParameterGroupConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { - name = "%s" - family = "redis2.8" - parameter { - name = "appendonly" - value = "yes" - } -}`, rName) -} - -func testAccAWSElasticacheParameterGroupAddParametersConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { - name = "%s" - family = "redis2.8" - description = "Test parameter group for terraform" - parameter { - name = "appendonly" - value = "yes" - } - parameter { - name = "appendfsync" - value = "always" - } -}`, rName) -} - -func testAccAWSElasticacheParameterGroupOnlyConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { - name = "%s" - family = "redis2.8" - description = "Test parameter group for terraform" -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_elasticache_replication_group.go b/builtin/providers/aws/resource_aws_elasticache_replication_group.go deleted file mode 100644 index ff739f2ce..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_replication_group.go +++ /dev/null @@ -1,534 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticacheReplicationGroup() *schema.Resource { - - resourceSchema := resourceAwsElastiCacheCommonSchema() - - resourceSchema["replication_group_id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAwsElastiCacheReplicationGroupId, - } - - resourceSchema["automatic_failover_enabled"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - } - - resourceSchema["auto_minor_version_upgrade"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - } - - resourceSchema["replication_group_description"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - - resourceSchema["number_cache_clusters"] = &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - } - - resourceSchema["primary_endpoint_address"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - resourceSchema["configuration_endpoint_address"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - resourceSchema["cluster_mode"] = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "replicas_per_node_group": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "num_node_groups": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - }, - } - - resourceSchema["engine"].Required = false - resourceSchema["engine"].Optional = true - resourceSchema["engine"].Default = "redis" - resourceSchema["engine"].ValidateFunc = validateAwsElastiCacheReplicationGroupEngine - - return &schema.Resource{ - Create: resourceAwsElasticacheReplicationGroupCreate, - Read: resourceAwsElasticacheReplicationGroupRead, - Update: resourceAwsElasticacheReplicationGroupUpdate, - Delete: resourceAwsElasticacheReplicationGroupDelete, - Importer: 
&schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: resourceSchema, - } -} - -func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - tags := tagsFromMapEC(d.Get("tags").(map[string]interface{})) - params := &elasticache.CreateReplicationGroupInput{ - ReplicationGroupId: aws.String(d.Get("replication_group_id").(string)), - ReplicationGroupDescription: aws.String(d.Get("replication_group_description").(string)), - AutomaticFailoverEnabled: aws.Bool(d.Get("automatic_failover_enabled").(bool)), - AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - CacheNodeType: aws.String(d.Get("node_type").(string)), - Engine: aws.String(d.Get("engine").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - Tags: tags, - } - - if v, ok := d.GetOk("engine_version"); ok { - params.EngineVersion = aws.String(v.(string)) - } - - preferred_azs := d.Get("availability_zones").(*schema.Set).List() - if len(preferred_azs) > 0 { - azs := expandStringList(preferred_azs) - params.PreferredCacheClusterAZs = azs - } - - if v, ok := d.GetOk("parameter_group_name"); ok { - params.CacheParameterGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("subnet_group_name"); ok { - params.CacheSubnetGroupName = aws.String(v.(string)) - } - - security_group_names := d.Get("security_group_names").(*schema.Set).List() - if len(security_group_names) > 0 { - params.CacheSecurityGroupNames = expandStringList(security_group_names) - } - - security_group_ids := d.Get("security_group_ids").(*schema.Set).List() - if len(security_group_ids) > 0 { - params.SecurityGroupIds = expandStringList(security_group_ids) - } - - snaps := d.Get("snapshot_arns").(*schema.Set).List() - if len(snaps) > 0 { - params.SnapshotArns = expandStringList(snaps) - } - - if v, ok := d.GetOk("maintenance_window"); ok { - params.PreferredMaintenanceWindow = 
aws.String(v.(string)) - } - - if v, ok := d.GetOk("notification_topic_arn"); ok { - params.NotificationTopicArn = aws.String(v.(string)) - } - - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - params.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("snapshot_window"); ok { - params.SnapshotWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("snapshot_name"); ok { - params.SnapshotName = aws.String(v.(string)) - } - - clusterMode, clusterModeOk := d.GetOk("cluster_mode") - cacheClusters, cacheClustersOk := d.GetOk("number_cache_clusters") - - if !clusterModeOk && !cacheClustersOk || clusterModeOk && cacheClustersOk { - return fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set") - } - - if clusterModeOk { - clusterModeAttributes := clusterMode.(*schema.Set).List() - attributes := clusterModeAttributes[0].(map[string]interface{}) - - if v, ok := attributes["num_node_groups"]; ok { - params.NumNodeGroups = aws.Int64(int64(v.(int))) - } - - if v, ok := attributes["replicas_per_node_group"]; ok { - params.ReplicasPerNodeGroup = aws.Int64(int64(v.(int))) - } - } - - if cacheClustersOk { - params.NumCacheClusters = aws.Int64(int64(cacheClusters.(int))) - } - - resp, err := conn.CreateReplicationGroup(params) - if err != nil { - return fmt.Errorf("Error creating Elasticache Replication Group: %s", err) - } - - d.SetId(*resp.ReplicationGroup.ReplicationGroupId) - - pending := []string{"creating", "modifying", "restoring", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache replication group 
(%s) to be created: %s", d.Id(), sterr) - } - - return resourceAwsElasticacheReplicationGroupRead(d, meta) -} - -func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - req := &elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(d.Id()), - } - - res, err := conn.DescribeReplicationGroups(req) - if err != nil { - if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "ReplicationGroupNotFoundFault" { - log.Printf("[WARN] Elasticache Replication Group (%s) not found", d.Id()) - d.SetId("") - return nil - } - - return err - } - - var rgp *elasticache.ReplicationGroup - for _, r := range res.ReplicationGroups { - if *r.ReplicationGroupId == d.Id() { - rgp = r - } - } - - if rgp == nil { - log.Printf("[WARN] Replication Group (%s) not found", d.Id()) - d.SetId("") - return nil - } - - if *rgp.Status == "deleting" { - log.Printf("[WARN] The Replication Group %q is currently in the `deleting` state", d.Id()) - d.SetId("") - return nil - } - - if rgp.AutomaticFailover != nil { - switch strings.ToLower(*rgp.AutomaticFailover) { - case "disabled", "disabling": - d.Set("automatic_failover_enabled", false) - case "enabled", "enabling": - d.Set("automatic_failover_enabled", true) - default: - log.Printf("Unknown AutomaticFailover state %s", *rgp.AutomaticFailover) - } - } - - d.Set("replication_group_description", rgp.Description) - d.Set("number_cache_clusters", len(rgp.MemberClusters)) - d.Set("replication_group_id", rgp.ReplicationGroupId) - - if rgp.NodeGroups != nil { - if len(rgp.NodeGroups[0].NodeGroupMembers) == 0 { - return nil - } - - cacheCluster := *rgp.NodeGroups[0].NodeGroupMembers[0] - - res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: cacheCluster.CacheClusterId, - ShowCacheNodeInfo: aws.Bool(true), - }) - if err != nil { - return err - } - - if len(res.CacheClusters) == 0 { - return nil - } - - c 
:= res.CacheClusters[0] - d.Set("node_type", c.CacheNodeType) - d.Set("engine", c.Engine) - d.Set("engine_version", c.EngineVersion) - d.Set("subnet_group_name", c.CacheSubnetGroupName) - d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) - d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) - - if c.CacheParameterGroup != nil { - d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) - } - - d.Set("maintenance_window", c.PreferredMaintenanceWindow) - d.Set("snapshot_window", rgp.SnapshotWindow) - d.Set("snapshot_retention_limit", rgp.SnapshotRetentionLimit) - - if rgp.ConfigurationEndpoint != nil { - d.Set("port", rgp.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint_address", rgp.ConfigurationEndpoint.Address) - } else { - d.Set("port", rgp.NodeGroups[0].PrimaryEndpoint.Port) - d.Set("primary_endpoint_address", rgp.NodeGroups[0].PrimaryEndpoint.Address) - } - - d.Set("auto_minor_version_upgrade", c.AutoMinorVersionUpgrade) - } - - return nil -} - -func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - requestUpdate := false - params := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - ReplicationGroupId: aws.String(d.Id()), - } - - if d.HasChange("replication_group_description") { - params.ReplicationGroupDescription = aws.String(d.Get("replication_group_description").(string)) - requestUpdate = true - } - - if d.HasChange("automatic_failover_enabled") { - params.AutomaticFailoverEnabled = aws.Bool(d.Get("automatic_failover_enabled").(bool)) - requestUpdate = true - } - - if d.HasChange("auto_minor_version_upgrade") { - params.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) - requestUpdate = true - } - - if d.HasChange("security_group_ids") { - if attr := 
d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { - params.SecurityGroupIds = expandStringList(attr.List()) - requestUpdate = true - } - } - - if d.HasChange("security_group_names") { - if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { - params.CacheSecurityGroupNames = expandStringList(attr.List()) - requestUpdate = true - } - } - - if d.HasChange("maintenance_window") { - params.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) - requestUpdate = true - } - - if d.HasChange("notification_topic_arn") { - params.NotificationTopicArn = aws.String(d.Get("notification_topic_arn").(string)) - requestUpdate = true - } - - if d.HasChange("parameter_group_name") { - params.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("engine_version") { - params.EngineVersion = aws.String(d.Get("engine_version").(string)) - requestUpdate = true - } - - if d.HasChange("snapshot_retention_limit") { - // This is a real hack to set the Snapshotting Cluster ID to be the first Cluster in the RG - o, _ := d.GetChange("snapshot_retention_limit") - if o.(int) == 0 { - params.SnapshottingClusterId = aws.String(fmt.Sprintf("%s-001", d.Id())) - } - - params.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) - requestUpdate = true - } - - if d.HasChange("snapshot_window") { - params.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true - } - - if d.HasChange("node_type") { - params.CacheNodeType = aws.String(d.Get("node_type").(string)) - requestUpdate = true - } - - if requestUpdate { - _, err := conn.ModifyReplicationGroup(params) - if err != nil { - return fmt.Errorf("Error updating Elasticache replication group: %s", err) - } - - pending := []string{"creating", "modifying", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: 
cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache replication group (%s) to be created: %s", d.Id(), sterr) - } - } - return resourceAwsElasticacheReplicationGroupRead(d, meta) -} - -func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - req := &elasticache.DeleteReplicationGroupInput{ - ReplicationGroupId: aws.String(d.Id()), - } - - _, err := conn.DeleteReplicationGroup(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ReplicationGroupNotFoundFault" { - d.SetId("") - return nil - } - - return fmt.Errorf("Error deleting Elasticache replication group: %s", err) - } - - log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "available", "deleting"}, - Target: []string{}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "", []string{}), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for replication group (%s) to delete: %s", d.Id(), sterr) - } - - return nil -} - -func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupId, givenState string, pending []string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(replicationGroupId), - }) - if err != nil { - if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == 
"ReplicationGroupNotFoundFault" { - log.Printf("[DEBUG] Replication Group Not Found") - return nil, "", nil - } - - log.Printf("[ERROR] cacheClusterReplicationGroupStateRefreshFunc: %s", err) - return nil, "", err - } - - if len(resp.ReplicationGroups) == 0 { - return nil, "", fmt.Errorf("[WARN] Error: no Cache Replication Groups found for id (%s)", replicationGroupId) - } - - var rg *elasticache.ReplicationGroup - for _, replicationGroup := range resp.ReplicationGroups { - if *replicationGroup.ReplicationGroupId == replicationGroupId { - log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", *replicationGroup.ReplicationGroupId) - rg = replicationGroup - } - } - - if rg == nil { - return nil, "", fmt.Errorf("[WARN] Error: no matching ElastiCache Replication Group for id (%s)", replicationGroupId) - } - - log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupId, *rg.Status) - - // return the current state if it's in the pending array - for _, p := range pending { - log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", pending, replicationGroupId, *rg.Status) - s := *rg.Status - if p == s { - log.Printf("[DEBUG] Return with status: %v", *rg.Status) - return s, p, nil - } - } - - return rg, *rg.Status, nil - } -} - -func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws []string, errors []error) { - if strings.ToLower(v.(string)) != "redis" { - errors = append(errors, fmt.Errorf("The only acceptable Engine type when using Replication Groups is Redis")) - } - return -} - -func validateAwsElastiCacheReplicationGroupId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if (len(value) < 1) || (len(value) > 20) { - errors = append(errors, fmt.Errorf( - "%q must contain from 1 to 20 alphanumeric characters or hyphens", k)) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-]+$`).MatchString(value) { - errors = 
append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_elasticache_replication_group_test.go b/builtin/providers/aws/resource_aws_elasticache_replication_group_test.go deleted file mode 100644 index 97c778ccf..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_replication_group_test.go +++ /dev/null @@ -1,972 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { - var rg elasticache.ReplicationGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(acctest.RandString(10)), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"), - ), - }, - }, - }) -} - -func 
TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { - var rg elasticache.ReplicationGroup - rName := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "replication_group_description", "test description"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"), - ), - }, - - { - Config: testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "replication_group_description", "updated description"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "true"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { - var rg elasticache.ReplicationGroup - rName := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "maintenance_window", "tue:06:30-tue:07:30"), - ), - }, - { - Config: testAccAWSElasticacheReplicationGroupConfigUpdatedMaintenanceWindow(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "maintenance_window", "wed:03:00-wed:06:00"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { - var rg elasticache.ReplicationGroup - rName := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "node_type", "cache.m1.small"), - ), - }, - - { - Config: testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "node_type", "cache.m1.medium"), - ), - }, - }, - }) -} - -//This is a test to prove that we panic we get in https://github.com/hashicorp/terraform/issues/9097 -func 
TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { - var rg elasticache.ReplicationGroup - rName := acctest.RandString(10) - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "parameter_group_name", "default.redis3.2"), - ), - }, - - { - Config: testAccAWSElasticacheReplicationGroupConfigUpdatedParameterGroup(rName, rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "parameter_group_name", fmt.Sprintf("allkeys-lru-%d", rInt)), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { - var rg elasticache.ReplicationGroup - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupInVPCConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "1"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { - var rg elasticache.ReplicationGroup - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_window", "02:00-03:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), - resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.bar", "primary_endpoint_address"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { - var rg elasticache.ReplicationGroup - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_window", "02:00-03:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), - resource.TestCheckResourceAttrSet( - 
"aws_elasticache_replication_group.bar", "configuration_endpoint_address"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_nativeRedisCluster(t *testing.T) { - var rg elasticache.ReplicationGroup - rInt := acctest.RandInt() - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rInt, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "4"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "cluster_mode.#", "1"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "cluster_mode.4170186206.num_node_groups", "2"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "cluster_mode.4170186206.replicas_per_node_group", "1"), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "port", "6379"), - resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.bar", "configuration_endpoint_address"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) { - rInt := acctest.RandInt() - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt, rName), - ExpectError: regexp.MustCompile("Either `number_cache_clusters` or `cluster_mode` must be set"), - }, - }, - }) -} - -func 
TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { - var rg elasticache.ReplicationGroup - rName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "0"), - ), - }, - - { - Config: testAccAWSElasticacheReplicationGroupConfigEnableSnapshotting(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), - resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "2"), - ), - }, - }, - }) -} - -func TestResourceAWSElastiCacheReplicationGroupIdValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting", - ErrCount: 0, - }, - { - Value: "t.sting", - ErrCount: 1, - }, - { - Value: "t--sting", - ErrCount: 1, - }, - { - Value: "1testing", - ErrCount: 1, - }, - { - Value: "testing-", - ErrCount: 1, - }, - { - Value: randomString(21), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAwsElastiCacheReplicationGroupId(tc.Value, "aws_elasticache_replication_group_replication_group_id") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the ElastiCache Replication Group Id to trigger a validation error") - } - } -} - -func TestResourceAWSElastiCacheReplicationGroupEngineValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Redis", - ErrCount: 0, - }, - { - Value: "REDIS", - ErrCount: 0, - }, - { - Value: "memcached", - 
ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAwsElastiCacheReplicationGroupEngine(tc.Value, "aws_elasticache_replication_group_engine") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the ElastiCache Replication Group Engine to trigger a validation error") - } - } -} - -func testAccCheckAWSElasticacheReplicationGroupExists(n string, v *elasticache.ReplicationGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No replication group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("Elasticache error: %v", err) - } - - for _, rg := range res.ReplicationGroups { - if *rg.ReplicationGroupId == rs.Primary.ID { - *v = *rg - } - } - - return nil - } -} - -func testAccCheckAWSElasticacheReplicationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticache_replication_group" { - continue - } - res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(rs.Primary.ID), - }) - if err != nil { - // Verify the error is what we want - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ReplicationGroupNotFoundFault" { - continue - } - return err - } - if len(res.ReplicationGroups) > 0 { - return fmt.Errorf("still exist.") - } - } - return nil -} - -func testAccAWSElasticacheReplicationGroupConfig(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = 
"tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true - auto_minor_version_upgrade = false - maintenance_window = "tue:06:30-tue:07:30" - snapshot_window = "01:00-02:00" -}`, rName, rName, rName) -} - -func testAccAWSElasticacheReplicationGroupConfigEnableSnapshotting(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true - auto_minor_version_upgrade = false - maintenance_window = "tue:06:30-tue:07:30" - snapshot_window = "01:00-02:00" - snapshot_retention_limit = 2 -}`, rName, rName, rName) -} - -func 
testAccAWSElasticacheReplicationGroupConfigUpdatedParameterGroup(rName string, rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_parameter_group" "bar" { - name = "allkeys-lru-%d" - family = "redis3.2" - - parameter { - name = "maxmemory-policy" - value = "allkeys-lru" - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - parameter_group_name = "${aws_elasticache_parameter_group.bar.name}" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true -}`, rName, rName, rInt, rName) -} - -func testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "updated description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - 
parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true - auto_minor_version_upgrade = true -}`, rName, rName, rName) -} - -func testAccAWSElasticacheReplicationGroupConfigUpdatedMaintenanceWindow(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "updated description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true - auto_minor_version_upgrade = true - maintenance_window = "wed:03:00-wed:06:00" - snapshot_window = "01:00-02:00" -}`, rName, rName, rName) -} - -func testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%s" - description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - 
replication_group_description = "updated description" - node_type = "cache.m1.medium" - number_cache_clusters = 2 - port = 6379 - parameter_group_name = "default.redis3.2" - security_group_names = ["${aws_elasticache_security_group.bar.name}"] - apply_immediately = true -}`, rName, rName, rName) -} - -var testAccAWSElasticacheReplicationGroupInVPCConfig = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = ["${aws_subnet.foo.id}"] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m1.small" - number_cache_clusters = 1 - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis3.2" - availability_zones = ["us-west-2a"] - auto_minor_version_upgrade = false -} - -`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) - -var testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_subnet" "bar" { - vpc_id = 
"${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "us-west-2b" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m1.small" - number_cache_clusters = 2 - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis3.2" - availability_zones = ["us-west-2a","us-west-2b"] - automatic_failover_enabled = true - snapshot_window = "02:00-03:00" - snapshot_retention_limit = 7 -} -`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) - -var testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "us-west-2b" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource 
"aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.t2.micro" - number_cache_clusters = "2" - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis3.2.cluster.on" - availability_zones = ["us-west-2a","us-west-2b"] - automatic_failover_enabled = true - snapshot_window = "02:00-03:00" - snapshot_retention_limit = 7 - engine_version = "3.2.4" - maintenance_window = "thu:03:00-thu:04:00" -} -`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) - -func testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt int, rName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "us-west-2b" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = 
["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.t2.micro" - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis3.2.cluster.on" - automatic_failover_enabled = true - cluster_mode { - replicas_per_node_group = 1 - num_node_groups = 2 - } - number_cache_clusters = 3 -}`, rInt, rInt, rInt, rInt, rName) -} - -func testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rInt int, rName string) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "us-west-2b" - tags { - Name = "tf-test-%03d" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.t2.micro" - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - parameter_group_name = "default.redis3.2.cluster.on" - automatic_failover_enabled = true - 
cluster_mode { - replicas_per_node_group = 1 - num_node_groups = 2 - } -}`, rInt, rInt, rInt, rInt, rName) -} diff --git a/builtin/providers/aws/resource_aws_elasticache_security_group.go b/builtin/providers/aws/resource_aws_elasticache_security_group.go deleted file mode 100644 index 07676e513..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_security_group.go +++ /dev/null @@ -1,144 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticacheSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticacheSecurityGroupCreate, - Read: resourceAwsElasticacheSecurityGroupRead, - Delete: resourceAwsElasticacheSecurityGroupDelete, - - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "security_group_names": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceAwsElasticacheSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - name := d.Get("name").(string) - desc := d.Get("description").(string) - nameSet := d.Get("security_group_names").(*schema.Set) - - names := make([]string, nameSet.Len()) - for i, name := range nameSet.List() { - names[i] = name.(string) - } - - log.Printf("[DEBUG] Cache security group create: name: %s, description: %s, security_group_names: %v", name, desc, names) - res, err := 
conn.CreateCacheSecurityGroup(&elasticache.CreateCacheSecurityGroupInput{ - Description: aws.String(desc), - CacheSecurityGroupName: aws.String(name), - }) - if err != nil { - return fmt.Errorf("Error creating CacheSecurityGroup: %s", err) - } - - for _, n := range names { - log.Printf("[DEBUG] Authorize cache security group ingress name: %v, ec2 security group name: %v", name, n) - _, err = conn.AuthorizeCacheSecurityGroupIngress(&elasticache.AuthorizeCacheSecurityGroupIngressInput{ - CacheSecurityGroupName: aws.String(name), - EC2SecurityGroupName: aws.String(n), - EC2SecurityGroupOwnerId: aws.String(*res.CacheSecurityGroup.OwnerId), - }) - if err != nil { - log.Printf("[ERROR] Failed to authorize: %v", err) - _, err := conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ - CacheSecurityGroupName: aws.String(d.Id()), - }) - log.Printf("[ERROR] Revert cache security group: %v", err) - } - } - - d.SetId(name) - - return nil -} - -func resourceAwsElasticacheSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - req := &elasticache.DescribeCacheSecurityGroupsInput{ - CacheSecurityGroupName: aws.String(d.Get("name").(string)), - } - - res, err := conn.DescribeCacheSecurityGroups(req) - if err != nil { - return err - } - if len(res.CacheSecurityGroups) == 0 { - return fmt.Errorf("Error missing %v", d.Get("name")) - } - - var group *elasticache.CacheSecurityGroup - for _, g := range res.CacheSecurityGroups { - log.Printf("[DEBUG] CacheSecurityGroupName: %v, id: %v", g.CacheSecurityGroupName, d.Id()) - if *g.CacheSecurityGroupName == d.Id() { - group = g - } - } - if group == nil { - return fmt.Errorf("Error retrieving cache security group: %v", res) - } - - d.Set("name", group.CacheSecurityGroupName) - d.Set("description", group.Description) - - return nil -} - -func resourceAwsElasticacheSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).elasticacheconn - - log.Printf("[DEBUG] Cache security group delete: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ - CacheSecurityGroupName: aws.String(d.Id()), - }) - if err != nil { - apierr, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - log.Printf("[DEBUG] APIError.Code: %v", apierr.Code()) - switch apierr.Code() { - case "InvalidCacheSecurityGroupState": - return resource.RetryableError(err) - case "DependencyViolation": - // If it is a dependency violation, we want to retry - return resource.RetryableError(err) - default: - return resource.NonRetryableError(err) - } - } - return nil - }) -} diff --git a/builtin/providers/aws/resource_aws_elasticache_security_group_test.go b/builtin/providers/aws/resource_aws_elasticache_security_group_test.go deleted file mode 100644 index 2c9c93b86..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_security_group_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticacheSecurityGroup_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSElasticacheSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - resource.TestCheckResourceAttr( - "aws_elasticache_security_group.bar", "description", "Managed by Terraform"), 
- ), - }, - }, - }) -} - -func testAccCheckAWSElasticacheSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticache_security_group" { - continue - } - res, err := conn.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{ - CacheSecurityGroupName: aws.String(rs.Primary.ID), - }) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "CacheSecurityGroupNotFound" { - continue - } - - if len(res.CacheSecurityGroups) > 0 { - return fmt.Errorf("cache security group still exists") - } - return err - } - return nil -} - -func testAccCheckAWSElasticacheSecurityGroupExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No cache security group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - _, err := conn.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{ - CacheSecurityGroupName: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("CacheSecurityGroup error: %v", err) - } - return nil - } -} - -var testAccAWSElasticacheSecurityGroupConfig = fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" - security_group_names = ["${aws_security_group.bar.name}"] -} -`, acctest.RandInt(), acctest.RandInt()) diff --git a/builtin/providers/aws/resource_aws_elasticache_subnet_group.go b/builtin/providers/aws/resource_aws_elasticache_subnet_group.go deleted file mode 100644 index efae2e703..000000000 --- 
a/builtin/providers/aws/resource_aws_elasticache_subnet_group.go +++ /dev/null @@ -1,176 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticacheSubnetGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticacheSubnetGroupCreate, - Read: resourceAwsElasticacheSubnetGroupRead, - Update: resourceAwsElasticacheSubnetGroupUpdate, - Delete: resourceAwsElasticacheSubnetGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(val interface{}) string { - // Elasticache normalizes subnet names to lowercase, - // so we have to do this too or else we can end up - // with non-converging diffs. 
- return strings.ToLower(val.(string)) - }, - }, - "subnet_ids": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceAwsElasticacheSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - // Get the group properties - name := d.Get("name").(string) - desc := d.Get("description").(string) - subnetIdsSet := d.Get("subnet_ids").(*schema.Set) - - log.Printf("[DEBUG] Cache subnet group create: name: %s, description: %s", name, desc) - - subnetIds := expandStringList(subnetIdsSet.List()) - - req := &elasticache.CreateCacheSubnetGroupInput{ - CacheSubnetGroupDescription: aws.String(desc), - CacheSubnetGroupName: aws.String(name), - SubnetIds: subnetIds, - } - - _, err := conn.CreateCacheSubnetGroup(req) - if err != nil { - return fmt.Errorf("Error creating CacheSubnetGroup: %s", err) - } - - // Assign the group name as the resource ID - // Elasticache always retains the name in lower case, so we have to - // mimic that or else we won't be able to refresh a resource whose - // name contained uppercase characters. - d.SetId(strings.ToLower(name)) - - return nil -} - -func resourceAwsElasticacheSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - req := &elasticache.DescribeCacheSubnetGroupsInput{ - CacheSubnetGroupName: aws.String(d.Get("name").(string)), - } - - res, err := conn.DescribeCacheSubnetGroups(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "CacheSubnetGroupNotFoundFault" { - // Update state to indicate the db subnet no longer exists. 
- log.Printf("[WARN] Elasticache Subnet Group (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return err - } - if len(res.CacheSubnetGroups) == 0 { - return fmt.Errorf("Error missing %v", d.Get("name")) - } - - var group *elasticache.CacheSubnetGroup - for _, g := range res.CacheSubnetGroups { - log.Printf("[DEBUG] %v %v", g.CacheSubnetGroupName, d.Id()) - if *g.CacheSubnetGroupName == d.Id() { - group = g - } - } - if group == nil { - return fmt.Errorf("Error retrieving cache subnet group: %v", res) - } - - ids := make([]string, len(group.Subnets)) - for i, s := range group.Subnets { - ids[i] = *s.SubnetIdentifier - } - - d.Set("name", group.CacheSubnetGroupName) - d.Set("description", group.CacheSubnetGroupDescription) - d.Set("subnet_ids", ids) - - return nil -} - -func resourceAwsElasticacheSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - if d.HasChange("subnet_ids") || d.HasChange("description") { - var subnets []*string - if v := d.Get("subnet_ids"); v != nil { - for _, v := range v.(*schema.Set).List() { - subnets = append(subnets, aws.String(v.(string))) - } - } - log.Printf("[DEBUG] Updating ElastiCache Subnet Group") - - _, err := conn.ModifyCacheSubnetGroup(&elasticache.ModifyCacheSubnetGroupInput{ - CacheSubnetGroupName: aws.String(d.Get("name").(string)), - CacheSubnetGroupDescription: aws.String(d.Get("description").(string)), - SubnetIds: subnets, - }) - if err != nil { - return err - } - } - - return resourceAwsElasticacheSubnetGroupRead(d, meta) -} -func resourceAwsElasticacheSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).elasticacheconn - - log.Printf("[DEBUG] Cache subnet group delete: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteCacheSubnetGroup(&elasticache.DeleteCacheSubnetGroupInput{ - CacheSubnetGroupName: aws.String(d.Id()), - }) - if 
err != nil { - apierr, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - log.Printf("[DEBUG] APIError.Code: %v", apierr.Code()) - switch apierr.Code() { - case "DependencyViolation": - // If it is a dependency violation, we want to retry - return resource.RetryableError(err) - default: - return resource.NonRetryableError(err) - } - } - return nil - }) -} diff --git a/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go b/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go deleted file mode 100644 index 06bda6f9b..000000000 --- a/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticacheSubnetGroup_basic(t *testing.T) { - var csg elasticache.CacheSubnetGroup - config := fmt.Sprintf(testAccAWSElasticacheSubnetGroupConfig, acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg), - resource.TestCheckResourceAttr( - "aws_elasticache_subnet_group.bar", "description", "Managed by Terraform"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheSubnetGroup_update(t *testing.T) { - var csg elasticache.CacheSubnetGroup - rn := "aws_elasticache_subnet_group.bar" - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPre, ri) - postConfig := 
fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPost, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists(rn, &csg), - testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, rn, 1), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists(rn, &csg), - testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, rn, 2), - ), - }, - }, - }) -} - -func testAccCheckAWSElasticacheSubnetGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticache_subnet_group" { - continue - } - res, err := conn.DescribeCacheSubnetGroups(&elasticache.DescribeCacheSubnetGroupsInput{ - CacheSubnetGroupName: aws.String(rs.Primary.ID), - }) - if err != nil { - // Verify the error is what we want - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheSubnetGroupNotFoundFault" { - continue - } - return err - } - if len(res.CacheSubnetGroups) > 0 { - return fmt.Errorf("still exist.") - } - } - return nil -} - -func testAccCheckAWSElasticacheSubnetGroupExists(n string, csg *elasticache.CacheSubnetGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No cache subnet group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - resp, err := conn.DescribeCacheSubnetGroups(&elasticache.DescribeCacheSubnetGroupsInput{ - CacheSubnetGroupName: aws.String(rs.Primary.ID), - }) - if err != nil { - return 
fmt.Errorf("CacheSubnetGroup error: %v", err) - } - - for _, c := range resp.CacheSubnetGroups { - if rs.Primary.ID == *c.CacheSubnetGroupName { - *csg = *c - } - } - - if csg == nil { - return fmt.Errorf("cache subnet group not found") - } - return nil - } -} - -func testAccCheckAWSElastiCacheSubnetGroupAttrs(csg *elasticache.CacheSubnetGroup, n string, count int) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if len(csg.Subnets) != count { - return fmt.Errorf("Bad cache subnet count, expected: %d, got: %d", count, len(csg.Subnets)) - } - - if rs.Primary.Attributes["description"] != *csg.CacheSubnetGroupDescription { - return fmt.Errorf("Bad cache subnet description, expected: %s, got: %s", rs.Primary.Attributes["description"], *csg.CacheSubnetGroupDescription) - } - - return nil - } -} - -var testAccAWSElasticacheSubnetGroupConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "us-west-2a" - tags { - Name = "tf-test" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - // Including uppercase letters in this name to ensure - // that we correctly handle the fact that the API - // normalizes names to lowercase. 
- name = "tf-TEST-cache-subnet-%03d" - subnet_ids = ["${aws_subnet.foo.id}"] -} -` -var testAccAWSElasticacheSubnetGroupUpdateConfigPre = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf-elc-sub-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-test" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = ["${aws_subnet.foo.id}"] -} -` - -var testAccAWSElasticacheSubnetGroupUpdateConfigPost = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf-elc-sub-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-test" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.0.2.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-test-foo-update" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr-edited" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}", - ] -} -` diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go deleted file mode 100644 index c931b119e..000000000 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain.go +++ /dev/null @@ -1,467 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticSearchDomain() *schema.Resource { 
- return &schema.Resource{ - Create: resourceAwsElasticSearchDomainCreate, - Read: resourceAwsElasticSearchDomainRead, - Update: resourceAwsElasticSearchDomainUpdate, - Delete: resourceAwsElasticSearchDomainDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsElasticSearchDomainImport, - }, - - Schema: map[string]*schema.Schema{ - "access_policies": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - "advanced_options": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - }, - "domain_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-z][0-9a-z\-]{2,27}$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must start with a lowercase alphabet and be at least 3 and no more than 28 characters long. 
Valid characters are a-z (lowercase letters), 0-9, and - (hyphen).", k)) - } - return - }, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "domain_id": { - Type: schema.TypeString, - Computed: true, - }, - "endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "ebs_options": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ebs_enabled": { - Type: schema.TypeBool, - Required: true, - }, - "iops": { - Type: schema.TypeInt, - Optional: true, - }, - "volume_size": { - Type: schema.TypeInt, - Optional: true, - }, - "volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - "cluster_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dedicated_master_count": { - Type: schema.TypeInt, - Optional: true, - }, - "dedicated_master_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "dedicated_master_type": { - Type: schema.TypeString, - Optional: true, - }, - "instance_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - "instance_type": { - Type: schema.TypeString, - Optional: true, - Default: "m3.medium.elasticsearch", - }, - "zone_awareness_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "snapshot_options": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "automated_snapshot_start_hour": { - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - "elasticsearch_version": { - Type: schema.TypeString, - Optional: true, - Default: "1.5", - ForceNew: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsElasticSearchDomainImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("domain_name", d.Id()) - return []*schema.ResourceData{d}, nil -} - -func 
resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - - input := elasticsearch.CreateElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - ElasticsearchVersion: aws.String(d.Get("elasticsearch_version").(string)), - } - - if v, ok := d.GetOk("access_policies"); ok { - input.AccessPolicies = aws.String(v.(string)) - } - - if v, ok := d.GetOk("advanced_options"); ok { - input.AdvancedOptions = stringMapToPointers(v.(map[string]interface{})) - } - - if v, ok := d.GetOk("ebs_options"); ok { - options := v.([]interface{}) - - if len(options) > 1 { - return fmt.Errorf("Only a single ebs_options block is expected") - } else if len(options) == 1 { - if options[0] == nil { - return fmt.Errorf("At least one field is expected inside ebs_options") - } - - s := options[0].(map[string]interface{}) - input.EBSOptions = expandESEBSOptions(s) - } - } - - if v, ok := d.GetOk("cluster_config"); ok { - config := v.([]interface{}) - - if len(config) > 1 { - return fmt.Errorf("Only a single cluster_config block is expected") - } else if len(config) == 1 { - if config[0] == nil { - return fmt.Errorf("At least one field is expected inside cluster_config") - } - m := config[0].(map[string]interface{}) - input.ElasticsearchClusterConfig = expandESClusterConfig(m) - } - } - - if v, ok := d.GetOk("snapshot_options"); ok { - options := v.([]interface{}) - - if len(options) > 1 { - return fmt.Errorf("Only a single snapshot_options block is expected") - } else if len(options) == 1 { - if options[0] == nil { - return fmt.Errorf("At least one field is expected inside snapshot_options") - } - - o := options[0].(map[string]interface{}) - - snapshotOptions := elasticsearch.SnapshotOptions{ - AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), - } - - input.SnapshotOptions = &snapshotOptions - } - } - - log.Printf("[DEBUG] Creating ElasticSearch domain: %s", input) - 
out, err := conn.CreateElasticsearchDomain(&input) - if err != nil { - return err - } - - d.SetId(*out.DomainStatus.ARN) - - log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id()) - err = resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - return resource.NonRetryableError(err) - } - - if !*out.DomainStatus.Processing && out.DomainStatus.Endpoint != nil { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for the domain to be created", d.Id())) - }) - if err != nil { - return err - } - - tags := tagsFromMapElasticsearchService(d.Get("tags").(map[string]interface{})) - - if err := setTagsElasticsearchService(conn, d, *out.DomainStatus.ARN); err != nil { - return err - } - - d.Set("tags", tagsToMapElasticsearchService(tags)) - d.SetPartial("tags") - d.Partial(false) - - log.Printf("[DEBUG] ElasticSearch domain %q created", d.Id()) - - return resourceAwsElasticSearchDomainRead(d, meta) -} - -func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { - log.Printf("[INFO] ElasticSearch Domain %q not found", d.Get("domain_name").(string)) - d.SetId("") - return nil - } - return err - } - - log.Printf("[DEBUG] Received ElasticSearch domain: %s", out) - - ds := out.DomainStatus - - if ds.AccessPolicies != nil && *ds.AccessPolicies != "" { - policies, err := normalizeJsonString(*ds.AccessPolicies) - if err != nil { - return errwrap.Wrapf("access policies contain an invalid JSON: {{err}}", err) - 
} - d.Set("access_policies", policies) - } - err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)) - if err != nil { - return err - } - d.SetId(*ds.ARN) - d.Set("domain_id", ds.DomainId) - d.Set("domain_name", ds.DomainName) - d.Set("elasticsearch_version", ds.ElasticsearchVersion) - if ds.Endpoint != nil { - d.Set("endpoint", *ds.Endpoint) - } - - err = d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions)) - if err != nil { - return err - } - err = d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)) - if err != nil { - return err - } - if ds.SnapshotOptions != nil { - d.Set("snapshot_options", map[string]interface{}{ - "automated_snapshot_start_hour": *ds.SnapshotOptions.AutomatedSnapshotStartHour, - }) - } - - d.Set("arn", ds.ARN) - - listOut, err := conn.ListTags(&elasticsearch.ListTagsInput{ - ARN: ds.ARN, - }) - - if err != nil { - return err - } - var est []*elasticsearch.Tag - if len(listOut.TagList) > 0 { - est = listOut.TagList - } - - d.Set("tags", tagsToMapElasticsearchService(est)) - - return nil -} - -func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - - d.Partial(true) - - if err := setTagsElasticsearchService(conn, d, d.Id()); err != nil { - return err - } else { - d.SetPartial("tags") - } - - input := elasticsearch.UpdateElasticsearchDomainConfigInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - } - - if d.HasChange("access_policies") { - input.AccessPolicies = aws.String(d.Get("access_policies").(string)) - } - - if d.HasChange("advanced_options") { - input.AdvancedOptions = stringMapToPointers(d.Get("advanced_options").(map[string]interface{})) - } - - if d.HasChange("ebs_options") { - options := d.Get("ebs_options").([]interface{}) - - if len(options) > 1 { - return fmt.Errorf("Only a single ebs_options block is expected") - } else if len(options) == 1 { - s := options[0].(map[string]interface{}) - 
input.EBSOptions = expandESEBSOptions(s) - } - } - - if d.HasChange("cluster_config") { - config := d.Get("cluster_config").([]interface{}) - - if len(config) > 1 { - return fmt.Errorf("Only a single cluster_config block is expected") - } else if len(config) == 1 { - m := config[0].(map[string]interface{}) - input.ElasticsearchClusterConfig = expandESClusterConfig(m) - } - } - - if d.HasChange("snapshot_options") { - options := d.Get("snapshot_options").([]interface{}) - - if len(options) > 1 { - return fmt.Errorf("Only a single snapshot_options block is expected") - } else if len(options) == 1 { - o := options[0].(map[string]interface{}) - - snapshotOptions := elasticsearch.SnapshotOptions{ - AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), - } - - input.SnapshotOptions = &snapshotOptions - } - } - - _, err := conn.UpdateElasticsearchDomainConfig(&input) - if err != nil { - return err - } - - err = resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - return resource.NonRetryableError(err) - } - - if *out.DomainStatus.Processing == false { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())) - }) - if err != nil { - return err - } - - d.Partial(false) - - return resourceAwsElasticSearchDomainRead(d, meta) -} - -func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - - log.Printf("[DEBUG] Deleting ElasticSearch domain: %q", d.Get("domain_name").(string)) - _, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - return err - } - - log.Printf("[DEBUG] Waiting for ElasticSearch domain %q 
to be deleted", d.Get("domain_name").(string)) - err = resource.Retry(90*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - - if err != nil { - awsErr, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - if awsErr.Code() == "ResourceNotFoundException" { - return nil - } - - return resource.NonRetryableError(err) - } - - if !*out.DomainStatus.Processing { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for the domain to be deleted", d.Id())) - }) - - d.SetId("") - - return err -} diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain_policy.go b/builtin/providers/aws/resource_aws_elasticsearch_domain_policy.go deleted file mode 100644 index dfb22c64d..000000000 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain_policy.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElasticSearchDomainPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElasticSearchDomainPolicyUpsert, - Read: resourceAwsElasticSearchDomainPolicyRead, - Update: resourceAwsElasticSearchDomainPolicyUpsert, - Delete: resourceAwsElasticSearchDomainPolicyDelete, - - Schema: map[string]*schema.Schema{ - "domain_name": { - Type: schema.TypeString, - Required: true, - }, - "access_policies": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - }, - } -} - -func resourceAwsElasticSearchDomainPolicyRead(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).esconn - name := d.Get("domain_name").(string) - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(name), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFound" { - log.Printf("[WARN] ElasticSearch Domain %q not found, removing", name) - d.SetId("") - return nil - } - return err - } - - log.Printf("[DEBUG] Received ElasticSearch domain: %s", out) - - ds := out.DomainStatus - d.Set("access_policies", ds.AccessPolicies) - - return nil -} - -func resourceAwsElasticSearchDomainPolicyUpsert(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - domainName := d.Get("domain_name").(string) - _, err := conn.UpdateElasticsearchDomainConfig(&elasticsearch.UpdateElasticsearchDomainConfigInput{ - DomainName: aws.String(domainName), - AccessPolicies: aws.String(d.Get("access_policies").(string)), - }) - if err != nil { - return err - } - - d.SetId("esd-policy-" + domainName) - - err = resource.Retry(50*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - return resource.NonRetryableError(err) - } - - if *out.DomainStatus.Processing == false { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())) - }) - if err != nil { - return err - } - - return resourceAwsElasticSearchDomainPolicyRead(d, meta) -} - -func resourceAwsElasticSearchDomainPolicyDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).esconn - - _, err := conn.UpdateElasticsearchDomainConfig(&elasticsearch.UpdateElasticsearchDomainConfigInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - AccessPolicies: aws.String(""), - }) - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Waiting for ElasticSearch domain policy %q to be deleted", d.Get("domain_name").(string)) - err = resource.Retry(60*time.Minute, func() *resource.RetryError { - out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), - }) - if err != nil { - return resource.NonRetryableError(err) - } - - if *out.DomainStatus.Processing == false { - return nil - } - - return resource.RetryableError( - fmt.Errorf("%q: Timeout while waiting for policy to be deleted", d.Id())) - }) - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain_policy_test.go b/builtin/providers/aws/resource_aws_elasticsearch_domain_policy_test.go deleted file mode 100644 index 5efd3eb99..000000000 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain_policy_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSElasticSearchDomainPolicy_basic(t *testing.T) { - var domain elasticsearch.ElasticsearchDomainStatus - ri := acctest.RandInt() - policy := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "es:*", - "Principal": "*", - "Effect": "Allow", - "Condition": { - "IpAddress": {"aws:SourceIp": "127.0.0.1/32"} - }, - "Resource": "${aws_elasticsearch_domain.example.arn}" - } - ] -}` - expectedPolicyTpl := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "es:*", - "Principal": "*", - "Effect": "Allow", - "Condition": { - "IpAddress": {"aws:SourceIp": "127.0.0.1/32"} - }, - "Resource": "%s" - } - ] -}` - name := fmt.Sprintf("tf-test-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) 
}, - Providers: testAccProviders, - CheckDestroy: testAccCheckESDomainDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccESDomainPolicyConfig(ri, policy), - Check: resource.ComposeTestCheckFunc( - testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), - resource.TestCheckResourceAttr("aws_elasticsearch_domain.example", "elasticsearch_version", "2.3"), - func(s *terraform.State) error { - awsClient := testAccProvider.Meta().(*AWSClient) - expectedArn, err := buildESDomainArn(name, awsClient.partition, awsClient.accountid, awsClient.region) - if err != nil { - return err - } - expectedPolicy := fmt.Sprintf(expectedPolicyTpl, expectedArn) - - return testAccCheckAwsPolicyMatch("aws_elasticsearch_domain_policy.main", "access_policies", expectedPolicy)(s) - }, - ), - }, - }, - }) -} - -func buildESDomainArn(name, partition, accId, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct ES Domain ARN because of missing AWS partition") - } - if accId == "" { - return "", fmt.Errorf("Unable to construct ES Domain ARN because of missing AWS Account ID") - } - // arn:aws:es:us-west-2:187416307283:domain/example-name - return fmt.Sprintf("arn:%s:es:%s:%s:domain/%s", partition, region, accId, name), nil -} - -func testAccESDomainPolicyConfig(randInt int, policy string) string { - return fmt.Sprintf(` -resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-%d" - elasticsearch_version = "2.3" - cluster_config { - instance_type = "t2.micro.elasticsearch" - } - ebs_options { - ebs_enabled = true - volume_size = 10 - } -} - -resource "aws_elasticsearch_domain_policy" "main" { - domain_name = "${aws_elasticsearch_domain.example.domain_name}" - access_policies = < 0 { - *td = *describe - } - return nil - } -} - -func testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ES Domain ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).esconn - opts := &elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(rs.Primary.Attributes["domain_name"]), - } - - resp, err := conn.DescribeElasticsearchDomain(opts) - if err != nil { - return fmt.Errorf("Error describing domain: %s", err.Error()) - } - - *domain = *resp.DomainStatus - - return nil - } -} - -func testAccCheckESDomainDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elasticsearch_domain" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).esconn - opts := &elasticsearch.DescribeElasticsearchDomainInput{ - DomainName: aws.String(rs.Primary.Attributes["domain_name"]), - } - - _, err := conn.DescribeElasticsearchDomain(opts) - // Verify the error is what we want - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { - continue - } - return err - } - } - return nil -} - -func testAccESDomainConfig(randInt int) string { - return fmt.Sprintf(` -resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-%d" - ebs_options { - ebs_enabled = true - volume_size = 10 - } -} -`, randInt) -} - -func testAccESDomainConfig_TagUpdate(randInt int) string { - return fmt.Sprintf(` -resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-%d" - ebs_options { - ebs_enabled = true - volume_size = 10 - } - - tags { - foo = "bar" - new = "type" - } -} -`, randInt) -} - -func testAccESDomainConfig_complex(randInt int) string { - return fmt.Sprintf(` -resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-%d" - - advanced_options { - "indices.fielddata.cache.size" = 80 - } - - ebs_options { - ebs_enabled = false - } - - cluster_config { - instance_count = 2 - 
zone_awareness_enabled = true - instance_type = "r3.large.elasticsearch" - } - - snapshot_options { - automated_snapshot_start_hour = 23 - } - - tags { - bar = "complex" - } -} -`, randInt) -} - -func testAccESDomainConfigV23(randInt int) string { - return fmt.Sprintf(` -resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-%d" - ebs_options { - ebs_enabled = true - volume_size = 10 - } - elasticsearch_version = "2.3" -} -`, randInt) -} diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go deleted file mode 100644 index 3878c9611..000000000 --- a/builtin/providers/aws/resource_aws_elb.go +++ /dev/null @@ -1,976 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElb() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElbCreate, - Read: resourceAwsElbRead, - Update: resourceAwsElbUpdate, - Delete: resourceAwsElbDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateElbName, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateElbNamePrefix, - }, - - "internal": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "cross_zone_load_balancing": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - 
"availability_zones": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, - - "instances": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, - - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, - - "source_security_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "source_security_group_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "subnets": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, - - "idle_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 60, - ValidateFunc: validateIntegerInRange(1, 3600), - }, - - "connection_draining": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "connection_draining_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 300, - }, - - "access_logs": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "interval": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 60, - ValidateFunc: validateAccessLogsInterval, - }, - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "bucket_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - - "listener": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"instance_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(1, 65535), - }, - - "instance_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateListenerProtocol, - }, - - "lb_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(1, 65535), - }, - - "lb_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateListenerProtocol, - }, - - "ssl_certificate_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAwsElbListenerHash, - }, - - "health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "healthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(2, 10), - }, - - "unhealthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(2, 10), - }, - - "target": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateHeathCheckTarget, - }, - - "interval": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(5, 300), - }, - - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateIntegerInRange(2, 60), - }, - }, - }, - }, - - "dns_name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "zone_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - // Expand the "listener" set to aws-sdk-go compat []*elb.Listener - listeners, err := expandListeners(d.Get("listener").(*schema.Set).List()) - if err != nil { - return err - } - - 
var elbName string - if v, ok := d.GetOk("name"); ok { - elbName = v.(string) - } else { - if v, ok := d.GetOk("name_prefix"); ok { - elbName = resource.PrefixedUniqueId(v.(string)) - } else { - elbName = resource.PrefixedUniqueId("tf-lb-") - } - d.Set("name", elbName) - } - - tags := tagsFromMapELB(d.Get("tags").(map[string]interface{})) - // Provision the elb - elbOpts := &elb.CreateLoadBalancerInput{ - LoadBalancerName: aws.String(elbName), - Listeners: listeners, - Tags: tags, - } - - if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) { - elbOpts.Scheme = aws.String("internal") - } - - if v, ok := d.GetOk("availability_zones"); ok { - elbOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("security_groups"); ok { - elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("subnets"); ok { - elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) - } - - log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts) - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := elbconn.CreateLoadBalancer(elbOpts) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Check for IAM SSL Cert error, eventual consistancy issue - if awsErr.Code() == "CertificateNotFound" { - return resource.RetryableError( - fmt.Errorf("[WARN] Error creating ELB Listener with SSL Cert, retrying: %s", err)) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return err - } - - // Assign the elb's unique identifier for use later - d.SetId(elbName) - log.Printf("[INFO] ELB ID: %s", d.Id()) - - // Enable partial mode and record what we set - d.Partial(true) - d.SetPartial("name") - d.SetPartial("internal") - d.SetPartial("availability_zones") - d.SetPartial("listener") - d.SetPartial("security_groups") - d.SetPartial("subnets") - - d.Set("tags", tagsToMapELB(tags)) - - return resourceAwsElbUpdate(d, meta) -} - -func 
resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbName := d.Id() - - // Retrieve the ELB properties for updating the state - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(elbName)}, - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving ELB: %s", err) - } - if len(describeResp.LoadBalancerDescriptions) != 1 { - return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - describeAttrsOpts := &elb.DescribeLoadBalancerAttributesInput{ - LoadBalancerName: aws.String(elbName), - } - describeAttrsResp, err := elbconn.DescribeLoadBalancerAttributes(describeAttrsOpts) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving ELB: %s", err) - } - - lbAttrs := describeAttrsResp.LoadBalancerAttributes - - lb := describeResp.LoadBalancerDescriptions[0] - - d.Set("name", lb.LoadBalancerName) - d.Set("dns_name", lb.DNSName) - d.Set("zone_id", lb.CanonicalHostedZoneNameID) - - var scheme bool - if lb.Scheme != nil { - scheme = *lb.Scheme == "internal" - } - d.Set("internal", scheme) - d.Set("availability_zones", flattenStringList(lb.AvailabilityZones)) - d.Set("instances", flattenInstances(lb.Instances)) - d.Set("listener", flattenListeners(lb.ListenerDescriptions)) - d.Set("security_groups", flattenStringList(lb.SecurityGroups)) - if lb.SourceSecurityGroup != nil { - group := lb.SourceSecurityGroup.GroupName - if lb.SourceSecurityGroup.OwnerAlias != nil && *lb.SourceSecurityGroup.OwnerAlias != "" { - group = aws.String(*lb.SourceSecurityGroup.OwnerAlias + "/" + *lb.SourceSecurityGroup.GroupName) - } - 
d.Set("source_security_group", group) - - // Manually look up the ELB Security Group ID, since it's not provided - var elbVpc string - if lb.VPCId != nil { - elbVpc = *lb.VPCId - sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc) - if err != nil { - return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err) - } else { - d.Set("source_security_group_id", sgId) - } - } - } - d.Set("subnets", flattenStringList(lb.Subnets)) - if lbAttrs.ConnectionSettings != nil { - d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout) - } - d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled) - d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout) - d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled) - if lbAttrs.AccessLog != nil { - // The AWS API does not allow users to remove access_logs, only disable them. - // During creation of the ELB, Terraform sets the access_logs to disabled, - // so there should not be a case where lbAttrs.AccessLog above is nil. - - // Here we do not record the remove value of access_log if: - // - there is no access_log block in the configuration - // - the remote access_logs are disabled - // - // This indicates there is no access_log in the configuration. 
- // - externally added access_logs will be enabled, so we'll detect the drift - // - locally added access_logs will be in the config, so we'll add to the - // API/state - // See https://github.com/hashicorp/terraform/issues/10138 - _, n := d.GetChange("access_logs") - elbal := lbAttrs.AccessLog - nl := n.([]interface{}) - if len(nl) == 0 && !*elbal.Enabled { - elbal = nil - } - if err := d.Set("access_logs", flattenAccessLog(elbal)); err != nil { - return err - } - } - - resp, err := elbconn.DescribeTags(&elb.DescribeTagsInput{ - LoadBalancerNames: []*string{lb.LoadBalancerName}, - }) - - var et []*elb.Tag - if len(resp.TagDescriptions) > 0 { - et = resp.TagDescriptions[0].Tags - } - d.Set("tags", tagsToMapELB(et)) - - // There's only one health check, so save that to state as we - // currently can - if *lb.HealthCheck.Target != "" { - d.Set("health_check", flattenHealthCheck(lb.HealthCheck)) - } - - return nil -} - -func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - d.Partial(true) - - if d.HasChange("listener") { - o, n := d.GetChange("listener") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove, _ := expandListeners(os.Difference(ns).List()) - add, _ := expandListeners(ns.Difference(os).List()) - - if len(remove) > 0 { - ports := make([]*int64, 0, len(remove)) - for _, listener := range remove { - ports = append(ports, listener.LoadBalancerPort) - } - - deleteListenersOpts := &elb.DeleteLoadBalancerListenersInput{ - LoadBalancerName: aws.String(d.Id()), - LoadBalancerPorts: ports, - } - - log.Printf("[DEBUG] ELB Delete Listeners opts: %s", deleteListenersOpts) - _, err := elbconn.DeleteLoadBalancerListeners(deleteListenersOpts) - if err != nil { - return fmt.Errorf("Failure removing outdated ELB listeners: %s", err) - } - } - - if len(add) > 0 { - createListenersOpts := &elb.CreateLoadBalancerListenersInput{ - LoadBalancerName: aws.String(d.Id()), - Listeners: add, - } - - // 
Occasionally AWS will error with a 'duplicate listener', without any - // other listeners on the ELB. Retry here to eliminate that. - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] ELB Create Listeners opts: %s", createListenersOpts) - if _, err := elbconn.CreateLoadBalancerListeners(createListenersOpts); err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DuplicateListener" { - log.Printf("[DEBUG] Duplicate listener found for ELB (%s), retrying", d.Id()) - return resource.RetryableError(awsErr) - } - if awsErr.Code() == "CertificateNotFound" && strings.Contains(awsErr.Message(), "Server Certificate not found for the key: arn") { - log.Printf("[DEBUG] SSL Cert not found for given ARN, retrying") - return resource.RetryableError(awsErr) - } - } - - // Didn't recognize the error, so shouldn't retry. - return resource.NonRetryableError(err) - } - // Successful creation - return nil - }) - if err != nil { - return fmt.Errorf("Failure adding new or updated ELB listeners: %s", err) - } - } - - d.SetPartial("listener") - } - - // If we currently have instances, or did have instances, - // we want to figure out what to add and remove from the load - // balancer - if d.HasChange("instances") { - o, n := d.GetChange("instances") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandInstanceString(os.Difference(ns).List()) - add := expandInstanceString(ns.Difference(os).List()) - - if len(add) > 0 { - registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - Instances: add, - } - - _, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts) - if err != nil { - return fmt.Errorf("Failure registering instances with ELB: %s", err) - } - } - if len(remove) > 0 { - deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - Instances: remove, - } - - _, err := 
elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts) - if err != nil { - return fmt.Errorf("Failure deregistering instances from ELB: %s", err) - } - } - - d.SetPartial("instances") - } - - if d.HasChange("cross_zone_load_balancing") || d.HasChange("idle_timeout") || d.HasChange("access_logs") { - attrs := elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerName: aws.String(d.Get("name").(string)), - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ - Enabled: aws.Bool(d.Get("cross_zone_load_balancing").(bool)), - }, - ConnectionSettings: &elb.ConnectionSettings{ - IdleTimeout: aws.Int64(int64(d.Get("idle_timeout").(int))), - }, - }, - } - - logs := d.Get("access_logs").([]interface{}) - if len(logs) == 1 { - l := logs[0].(map[string]interface{}) - accessLog := &elb.AccessLog{ - Enabled: aws.Bool(l["enabled"].(bool)), - EmitInterval: aws.Int64(int64(l["interval"].(int))), - S3BucketName: aws.String(l["bucket"].(string)), - } - - if l["bucket_prefix"] != "" { - accessLog.S3BucketPrefix = aws.String(l["bucket_prefix"].(string)) - } - - attrs.LoadBalancerAttributes.AccessLog = accessLog - } else if len(logs) == 0 { - // disable access logs - attrs.LoadBalancerAttributes.AccessLog = &elb.AccessLog{ - Enabled: aws.Bool(false), - } - } - - log.Printf("[DEBUG] ELB Modify Load Balancer Attributes Request: %#v", attrs) - _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) - if err != nil { - return fmt.Errorf("Failure configuring ELB attributes: %s", err) - } - - d.SetPartial("cross_zone_load_balancing") - d.SetPartial("idle_timeout") - d.SetPartial("connection_draining_timeout") - } - - // We have to do these changes separately from everything else since - // they have some weird undocumented rules. You can't set the timeout - // without having connection draining to true, so we set that to true, - // set the timeout, then reset it to false if requested. 
- if d.HasChange("connection_draining") || d.HasChange("connection_draining_timeout") { - // We do timeout changes first since they require us to set draining - // to true for a hot second. - if d.HasChange("connection_draining_timeout") { - attrs := elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerName: aws.String(d.Get("name").(string)), - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - ConnectionDraining: &elb.ConnectionDraining{ - Enabled: aws.Bool(true), - Timeout: aws.Int64(int64(d.Get("connection_draining_timeout").(int))), - }, - }, - } - - _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) - if err != nil { - return fmt.Errorf("Failure configuring ELB attributes: %s", err) - } - - d.SetPartial("connection_draining_timeout") - } - - // Then we always set connection draining even if there is no change. - // This lets us reset to "false" if requested even with a timeout - // change. - attrs := elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerName: aws.String(d.Get("name").(string)), - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - ConnectionDraining: &elb.ConnectionDraining{ - Enabled: aws.Bool(d.Get("connection_draining").(bool)), - }, - }, - } - - _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) - if err != nil { - return fmt.Errorf("Failure configuring ELB attributes: %s", err) - } - - d.SetPartial("connection_draining") - } - - if d.HasChange("health_check") { - hc := d.Get("health_check").([]interface{}) - if len(hc) > 0 { - check := hc[0].(map[string]interface{}) - configureHealthCheckOpts := elb.ConfigureHealthCheckInput{ - LoadBalancerName: aws.String(d.Id()), - HealthCheck: &elb.HealthCheck{ - HealthyThreshold: aws.Int64(int64(check["healthy_threshold"].(int))), - UnhealthyThreshold: aws.Int64(int64(check["unhealthy_threshold"].(int))), - Interval: aws.Int64(int64(check["interval"].(int))), - Target: aws.String(check["target"].(string)), - Timeout: aws.Int64(int64(check["timeout"].(int))), - }, - } - _, err := 
elbconn.ConfigureHealthCheck(&configureHealthCheckOpts) - if err != nil { - return fmt.Errorf("Failure configuring health check for ELB: %s", err) - } - d.SetPartial("health_check") - } - } - - if d.HasChange("security_groups") { - groups := d.Get("security_groups").(*schema.Set).List() - - applySecurityGroupsOpts := elb.ApplySecurityGroupsToLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - SecurityGroups: expandStringList(groups), - } - - _, err := elbconn.ApplySecurityGroupsToLoadBalancer(&applySecurityGroupsOpts) - if err != nil { - return fmt.Errorf("Failure applying security groups to ELB: %s", err) - } - - d.SetPartial("security_groups") - } - - if d.HasChange("availability_zones") { - o, n := d.GetChange("availability_zones") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - removed := expandStringList(os.Difference(ns).List()) - added := expandStringList(ns.Difference(os).List()) - - if len(added) > 0 { - enableOpts := &elb.EnableAvailabilityZonesForLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - AvailabilityZones: added, - } - - log.Printf("[DEBUG] ELB enable availability zones opts: %s", enableOpts) - _, err := elbconn.EnableAvailabilityZonesForLoadBalancer(enableOpts) - if err != nil { - return fmt.Errorf("Failure enabling ELB availability zones: %s", err) - } - } - - if len(removed) > 0 { - disableOpts := &elb.DisableAvailabilityZonesForLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - AvailabilityZones: removed, - } - - log.Printf("[DEBUG] ELB disable availability zones opts: %s", disableOpts) - _, err := elbconn.DisableAvailabilityZonesForLoadBalancer(disableOpts) - if err != nil { - return fmt.Errorf("Failure disabling ELB availability zones: %s", err) - } - } - - d.SetPartial("availability_zones") - } - - if d.HasChange("subnets") { - o, n := d.GetChange("subnets") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - removed := expandStringList(os.Difference(ns).List()) - added := 
expandStringList(ns.Difference(os).List()) - - if len(removed) > 0 { - detachOpts := &elb.DetachLoadBalancerFromSubnetsInput{ - LoadBalancerName: aws.String(d.Id()), - Subnets: removed, - } - - log.Printf("[DEBUG] ELB detach subnets opts: %s", detachOpts) - _, err := elbconn.DetachLoadBalancerFromSubnets(detachOpts) - if err != nil { - return fmt.Errorf("Failure removing ELB subnets: %s", err) - } - } - - if len(added) > 0 { - attachOpts := &elb.AttachLoadBalancerToSubnetsInput{ - LoadBalancerName: aws.String(d.Id()), - Subnets: added, - } - - log.Printf("[DEBUG] ELB attach subnets opts: %s", attachOpts) - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := elbconn.AttachLoadBalancerToSubnets(attachOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // eventually consistent issue with removing a subnet in AZ1 and - // immediately adding a new one in the same AZ - if awsErr.Code() == "InvalidConfigurationRequest" && strings.Contains(awsErr.Message(), "cannot be attached to multiple subnets in the same AZ") { - log.Printf("[DEBUG] retrying az association") - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Failure adding ELB subnets: %s", err) - } - } - - d.SetPartial("subnets") - } - - if err := setTagsELB(elbconn, d); err != nil { - return err - } - - d.SetPartial("tags") - d.Partial(false) - - return resourceAwsElbRead(d, meta) -} - -func resourceAwsElbDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - log.Printf("[INFO] Deleting ELB: %s", d.Id()) - - // Destroy the load balancer - deleteElbOpts := elb.DeleteLoadBalancerInput{ - LoadBalancerName: aws.String(d.Id()), - } - if _, err := elbconn.DeleteLoadBalancer(&deleteElbOpts); err != nil { - return fmt.Errorf("Error deleting ELB: %s", err) - } - - return nil -} - -func resourceAwsElbListenerHash(v interface{}) int { - var 
buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", - strings.ToLower(m["instance_protocol"].(string)))) - buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", - strings.ToLower(m["lb_protocol"].(string)))) - - if v, ok := m["ssl_certificate_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func isLoadBalancerNotFound(err error) bool { - elberr, ok := err.(awserr.Error) - return ok && elberr.Code() == "LoadBalancerNotFound" -} - -func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) { - conn := meta.(*AWSClient).ec2conn - var filters []*ec2.Filter - var sgFilterName, sgFilterVPCID *ec2.Filter - sgFilterName = &ec2.Filter{ - Name: aws.String("group-name"), - Values: []*string{aws.String(sg)}, - } - - if vpcId != "" { - sgFilterVPCID = &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(vpcId)}, - } - } - - filters = append(filters, sgFilterName) - - if sgFilterVPCID != nil { - filters = append(filters, sgFilterVPCID) - } - - req := &ec2.DescribeSecurityGroupsInput{ - Filters: filters, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "InvalidSecurityGroupID.NotFound" || - ec2err.Code() == "InvalidGroup.NotFound" { - resp = nil - err = nil - } - } - - if err != nil { - log.Printf("Error on ELB SG look up: %s", err) - return "", err - } - } - - if resp == nil || len(resp.SecurityGroups) == 0 { - return "", fmt.Errorf("No security groups found for name %s and vpc id %s", sg, vpcId) - } - - group := resp.SecurityGroups[0] - return *group.GroupId, nil -} - -func validateAccessLogsInterval(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - // Check if the value is either 5 or 60 (minutes). 
- if value != 5 && value != 60 { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Access Logs interval \"%d\". "+ - "Valid intervals are either 5 or 60 (minutes).", - k, value)) - } - return -} - -func validateHeathCheckTarget(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - // Parse the Health Check target value. - matches := regexp.MustCompile(`\A(\w+):(\d+)(.+)?\z`).FindStringSubmatch(value) - - // Check if the value contains a valid target. - if matches == nil || len(matches) < 1 { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Health Check: %s", - k, value)) - - // Invalid target? Return immediately, - // there is no need to collect other - // errors. - return - } - - // Check if the value contains a valid protocol. - if !isValidProtocol(matches[1]) { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Health Check protocol %q. "+ - "Valid protocols are either %q, %q, %q, or %q.", - k, matches[1], "TCP", "SSL", "HTTP", "HTTPS")) - } - - // Check if the value contains a valid port range. - port, _ := strconv.Atoi(matches[2]) - if port < 1 || port > 65535 { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Health Check target port \"%d\". "+ - "Valid port is in the range from 1 to 65535 inclusive.", - k, port)) - } - - switch strings.ToLower(matches[1]) { - case "tcp", "ssl": - // Check if value is in the form : for TCP and/or SSL. - if matches[3] != "" { - errors = append(errors, fmt.Errorf( - "%q cannot contain a path in the Health Check target: %s", - k, value)) - } - break - case "http", "https": - // Check if value is in the form :/ for HTTP and/or HTTPS. - if matches[3] == "" { - errors = append(errors, fmt.Errorf( - "%q must contain a path in the Health Check target: %s", - k, value)) - } - - // Cannot be longer than 1024 multibyte characters. 
- if len([]rune(matches[3])) > 1024 { - errors = append(errors, fmt.Errorf("%q cannot contain a path longer "+ - "than 1024 characters in the Health Check target: %s", - k, value)) - } - break - } - - return -} - -func validateListenerProtocol(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if !isValidProtocol(value) { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Listener protocol %q. "+ - "Valid protocols are either %q, %q, %q, or %q.", - k, value, "TCP", "SSL", "HTTP", "HTTPS")) - } - return -} - -func isValidProtocol(s string) bool { - if s == "" { - return false - } - s = strings.ToLower(s) - - validProtocols := map[string]bool{ - "http": true, - "https": true, - "ssl": true, - "tcp": true, - } - - if _, ok := validProtocols[s]; !ok { - return false - } - - return true -} diff --git a/builtin/providers/aws/resource_aws_elb_attachment.go b/builtin/providers/aws/resource_aws_elb_attachment.go deleted file mode 100644 index 401544ad7..000000000 --- a/builtin/providers/aws/resource_aws_elb_attachment.go +++ /dev/null @@ -1,121 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsElbAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsElbAttachmentCreate, - Read: resourceAwsElbAttachmentRead, - Delete: resourceAwsElbAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "elb": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "instance": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - }, - } -} - -func resourceAwsElbAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbName := d.Get("elb").(string) - - instance := d.Get("instance").(string) - - 
registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{ - LoadBalancerName: aws.String(elbName), - Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, - } - - log.Printf("[INFO] registering instance %s with ELB %s", instance, elbName) - - _, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts) - if err != nil { - return fmt.Errorf("Failure registering instances with ELB: %s", err) - } - - d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", elbName))) - - return nil -} - -func resourceAwsElbAttachmentRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbName := d.Get("elb").(string) - - // only add the instance that was previously defined for this resource - expected := d.Get("instance").(string) - - // Retrieve the ELB properties to get a list of attachments - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(elbName)}, - } - - resp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - if err != nil { - if isLoadBalancerNotFound(err) { - log.Printf("[ERROR] ELB %s not found", elbName) - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving ELB: %s", err) - } - if len(resp.LoadBalancerDescriptions) != 1 { - log.Printf("[ERROR] Unable to find ELB: %s", resp.LoadBalancerDescriptions) - d.SetId("") - return nil - } - - // only set the instance Id that this resource manages - found := false - for _, i := range resp.LoadBalancerDescriptions[0].Instances { - if expected == *i.InstanceId { - d.Set("instance", expected) - found = true - } - } - - if !found { - log.Printf("[WARN] instance %s not found in elb attachments", expected) - d.SetId("") - } - - return nil -} - -func resourceAwsElbAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbName := d.Get("elb").(string) - - instance := d.Get("instance").(string) - - log.Printf("[INFO] Deleting Attachment %s from: %s", 
instance, elbName) - - deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ - LoadBalancerName: aws.String(elbName), - Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, - } - - _, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts) - if err != nil { - return fmt.Errorf("Failure deregistering instances from ELB: %s", err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_elb_attachment_test.go b/builtin/providers/aws/resource_aws_elb_attachment_test.go deleted file mode 100644 index 154f17c01..000000000 --- a/builtin/providers/aws/resource_aws_elb_attachment_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSELBAttachment_basic(t *testing.T) { - var conf elb.LoadBalancerDescription - - testCheckInstanceAttached := func(count int) resource.TestCheckFunc { - return func(*terraform.State) error { - if len(conf.Instances) != count { - return fmt.Errorf("instance count does not match") - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig1, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(1), - ), - }, - - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig2, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(2), - ), - }, - - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig3, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(2), - 
), - }, - - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig4, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(0), - ), - }, - }, - }) -} - -// remove and instance and check that it's correctly re-attached. -func TestAccAWSELBAttachment_drift(t *testing.T) { - var conf elb.LoadBalancerDescription - - deregInstance := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ - LoadBalancerName: conf.LoadBalancerName, - Instances: conf.Instances, - } - - log.Printf("[DEBUG] deregistering instance %v from ELB", *conf.Instances[0].InstanceId) - - _, err := conn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts) - if err != nil { - t.Fatalf("Failure deregistering instances from ELB: %s", err) - } - - } - - testCheckInstanceAttached := func(count int) resource.TestCheckFunc { - return func(*terraform.State) error { - if len(conf.Instances) != count { - return fmt.Errorf("instance count does not match") - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig1, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(1), - ), - }, - - // remove an instance from the ELB, and make sure it gets re-added - resource.TestStep{ - Config: testAccAWSELBAttachmentConfig1, - PreConfig: deregInstance, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(1), - ), - }, - }, - }) -} - -// add one attachment -const testAccAWSELBAttachmentConfig1 = ` -resource "aws_elb" "bar" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - 
listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_instance" "foo1" { - # us-west-2 - ami = "ami-043a5034" - instance_type = "t1.micro" -} - -resource "aws_elb_attachment" "foo1" { - elb = "${aws_elb.bar.id}" - instance = "${aws_instance.foo1.id}" -} -` - -// add a second attachment -const testAccAWSELBAttachmentConfig2 = ` -resource "aws_elb" "bar" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_instance" "foo1" { - # us-west-2 - ami = "ami-043a5034" - instance_type = "t1.micro" -} - -resource "aws_instance" "foo2" { - # us-west-2 - ami = "ami-043a5034" - instance_type = "t1.micro" -} - -resource "aws_elb_attachment" "foo1" { - elb = "${aws_elb.bar.id}" - instance = "${aws_instance.foo1.id}" -} - -resource "aws_elb_attachment" "foo2" { - elb = "${aws_elb.bar.id}" - instance = "${aws_instance.foo2.id}" -} -` - -// swap attachments between resources -const testAccAWSELBAttachmentConfig3 = ` -resource "aws_elb" "bar" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_instance" "foo1" { - # us-west-2 - ami = "ami-043a5034" - instance_type = "t1.micro" -} - -resource "aws_instance" "foo2" { - # us-west-2 - ami = "ami-043a5034" - instance_type = "t1.micro" -} - -resource "aws_elb_attachment" "foo1" { - elb = "${aws_elb.bar.id}" - instance = "${aws_instance.foo2.id}" -} - -resource "aws_elb_attachment" "foo2" { - elb = "${aws_elb.bar.id}" - instance = "${aws_instance.foo1.id}" -} -` - -// destroy attachments -const testAccAWSELBAttachmentConfig4 = ` -resource "aws_elb" "bar" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - 
lb_port = 80 - lb_protocol = "http" - } -} -` diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go deleted file mode 100644 index 60ac96476..000000000 --- a/builtin/providers/aws/resource_aws_elb_test.go +++ /dev/null @@ -1,1665 +0,0 @@ -package aws - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "sort" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSELB_basic(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.#", "3"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.221770259", "us-west-2b"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.2050015877", "us-west-2c"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "subnets.#", "3"), - // NOTE: Subnet IDs are different across AWS accounts and cannot be checked. 
- resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.206423021.instance_port", "8000"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.206423021.instance_protocol", "http"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.206423021.lb_port", "80"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.206423021.lb_protocol", "http"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "cross_zone_load_balancing", "true"), - ), - }, - }, - }) -} - -func TestAccAWSELB_fullCharacterRange(t *testing.T) { - var conf elb.LoadBalancerDescription - - lbName := fmt.Sprintf("Tf-%d", - rand.New(rand.NewSource(time.Now().UnixNano())).Int()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestCheckResourceAttr( - "aws_elb.foo", "name", lbName), - ), - }, - }, - }) -} - -func TestAccAWSELB_AccessLogs_enabled(t *testing.T) { - var conf elb.LoadBalancerDescription - - rName := fmt.Sprintf("terraform-access-logs-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBAccessLogs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - ), - }, - - { - Config: testAccAWSELBAccessLogsOn(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.#", "1"), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.bucket", rName), - 
resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.interval", "5"), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.enabled", "true"), - ), - }, - - { - Config: testAccAWSELBAccessLogs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSELB_AccessLogs_disabled(t *testing.T) { - var conf elb.LoadBalancerDescription - - rName := fmt.Sprintf("terraform-access-logs-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBAccessLogs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - ), - }, - - { - Config: testAccAWSELBAccessLogsDisabled(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.#", "1"), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.bucket", rName), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.interval", "5"), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.0.enabled", "false"), - ), - }, - - { - Config: testAccAWSELBAccessLogs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestCheckResourceAttr( - "aws_elb.foo", "access_logs.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSELB_namePrefix(t *testing.T) { - var conf elb.LoadBalancerDescription - nameRegex := regexp.MustCompile("^test-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ 
- resource.TestStep{ - Config: testAccAWSELB_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.test", &conf), - resource.TestMatchResourceAttr( - "aws_elb.test", "name", nameRegex), - ), - }, - }, - }) -} - -func TestAccAWSELB_generatedName(t *testing.T) { - var conf elb.LoadBalancerDescription - generatedNameRegexp := regexp.MustCompile("^tf-lb-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBGeneratedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestMatchResourceAttr( - "aws_elb.foo", "name", generatedNameRegexp), - ), - }, - }, - }) -} - -func TestAccAWSELB_generatesNameForZeroValue(t *testing.T) { - var conf elb.LoadBalancerDescription - generatedNameRegexp := regexp.MustCompile("^tf-lb-") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELB_zeroValueName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.foo", &conf), - resource.TestMatchResourceAttr( - "aws_elb.foo", "name", generatedNameRegexp), - ), - }, - }, - }) -} - -func TestAccAWSELB_availabilityZones(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.#", "3"), - 
resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.221770259", "us-west-2b"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.2050015877", "us-west-2c"), - ), - }, - - { - Config: testAccAWSELBConfig_AvailabilityZonesUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.#", "2"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.2487133097", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "availability_zones.221770259", "us-west-2b"), - ), - }, - }, - }) -} - -func TestAccAWSELB_tags(t *testing.T) { - var conf elb.LoadBalancerDescription - var td elb.TagDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributes(&conf), - testAccLoadTags(&conf, &td), - testAccCheckELBTags(&td.Tags, "bar", "baz"), - ), - }, - - { - Config: testAccAWSELBConfig_TagUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributes(&conf), - testAccLoadTags(&conf, &td), - testAccCheckELBTags(&td.Tags, "foo", "bar"), - testAccCheckELBTags(&td.Tags, "new", "type"), - ), - }, - }, - }) -} - -func TestAccAWSELB_iam_server_cert(t *testing.T) { - var conf elb.LoadBalancerDescription - // var td elb.TagDescription - testCheck := func(*terraform.State) error { - if len(conf.ListenerDescriptions) != 1 { - return fmt.Errorf( - "TestAccAWSELB_iam_server_cert expected 1 listener, got %d", - len(conf.ListenerDescriptions)) - } - 
return nil - } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccELBIAMServerCertConfig( - fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSELB_swap_subnets(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.ourapp", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig_subnets, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.ourapp", &conf), - resource.TestCheckResourceAttr( - "aws_elb.ourapp", "subnets.#", "2"), - ), - }, - - { - Config: testAccAWSELBConfig_subnet_swap, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.ourapp", &conf), - resource.TestCheckResourceAttr( - "aws_elb.ourapp", "subnets.#", "2"), - ), - }, - }, - }) -} - -func testAccLoadTags(conf *elb.LoadBalancerDescription, td *elb.TagDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - describe, err := conn.DescribeTags(&elb.DescribeTagsInput{ - LoadBalancerNames: []*string{conf.LoadBalancerName}, - }) - - if err != nil { - return err - } - if len(describe.TagDescriptions) > 0 { - *td = *describe.TagDescriptions[0] - } - return nil - } -} - -func TestAccAWSELB_InstanceAttaching(t *testing.T) { - var conf elb.LoadBalancerDescription - - testCheckInstanceAttached := func(count int) resource.TestCheckFunc { - return func(*terraform.State) error { - if len(conf.Instances) != count { - return fmt.Errorf("instance count does not match") - } - 
return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributes(&conf), - ), - }, - - { - Config: testAccAWSELBConfigNewInstance, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testCheckInstanceAttached(1), - ), - }, - }, - }) -} - -func TestAccAWSELBUpdate_Listener(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.206423021.instance_port", "8000"), - ), - }, - - { - Config: testAccAWSELBConfigListener_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "listener.3931999347.instance_port", "8080"), - ), - }, - }, - }) -} - -func TestAccAWSELB_HealthCheck(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigHealthCheck, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - testAccCheckAWSELBAttributesHealthCheck(&conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", 
"health_check.0.healthy_threshold", "5"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.unhealthy_threshold", "5"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.target", "HTTP:8000/"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.timeout", "30"), - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.interval", "60"), - ), - }, - }, - }) -} - -func TestAccAWSELBUpdate_HealthCheck(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigHealthCheck, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.healthy_threshold", "5"), - ), - }, - { - Config: testAccAWSELBConfigHealthCheck_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "health_check.0.healthy_threshold", "10"), - ), - }, - }, - }) -} - -func TestAccAWSELB_Timeout(t *testing.T) { - var conf elb.LoadBalancerDescription - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigIdleTimeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSELBExists("aws_elb.bar", &conf), - resource.TestCheckResourceAttr( - "aws_elb.bar", "idle_timeout", "200", - ), - ), - }, - }, - }) -} - -func TestAccAWSELBUpdate_Timeout(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigIdleTimeout, - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr( - "aws_elb.bar", "idle_timeout", "200", - ), - ), - }, - { - Config: testAccAWSELBConfigIdleTimeout_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "idle_timeout", "400", - ), - ), - }, - }, - }) -} - -func TestAccAWSELB_ConnectionDraining(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigConnectionDraining, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining", "true", - ), - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining_timeout", "400", - ), - ), - }, - }, - }) -} - -func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfigConnectionDraining, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining", "true", - ), - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining_timeout", "400", - ), - ), - }, - { - Config: testAccAWSELBConfigConnectionDraining_update_timeout, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining", "true", - ), - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining_timeout", "600", - ), - ), - }, - { - Config: testAccAWSELBConfigConnectionDraining_update_disable, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_elb.bar", "connection_draining", "false", - ), - ), - }, - }, - }) -} - -func TestAccAWSELB_SecurityGroups(t *testing.T) { - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_elb.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSELBDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSELBConfig, - Check: resource.ComposeTestCheckFunc( - // ELBs get a default security group - resource.TestCheckResourceAttr( - "aws_elb.bar", "security_groups.#", "1", - ), - ), - }, - { - Config: testAccAWSELBConfigSecurityGroups, - Check: resource.ComposeTestCheckFunc( - // Count should still be one as we swap in a custom security group - resource.TestCheckResourceAttr( - "aws_elb.bar", "security_groups.#", "1", - ), - ), - }, - }, - }) -} - -// Unit test for listeners hash -func TestResourceAwsElbListenerHash(t *testing.T) { - cases := map[string]struct { - Left map[string]interface{} - Right map[string]interface{} - Match bool - }{ - "protocols are case insensitive": { - map[string]interface{}{ - "instance_port": 80, - "instance_protocol": "TCP", - "lb_port": 80, - "lb_protocol": "TCP", - }, - map[string]interface{}{ - "instance_port": 80, - "instance_protocol": "Tcp", - "lb_port": 80, - "lb_protocol": "tcP", - }, - true, - }, - } - - for tn, tc := range cases { - leftHash := resourceAwsElbListenerHash(tc.Left) - rightHash := resourceAwsElbListenerHash(tc.Right) - if leftHash == rightHash != tc.Match { - t.Fatalf("%s: expected match: %t, but did not get it", tn, tc.Match) - } - } -} - -func TestResourceAWSELB_validateElbNameCannotBeginWithHyphen(t *testing.T) { - var elbName = "-Testing123" - _, errors := validateElbName(elbName, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestResourceAWSELB_validateElbNameCanBeAnEmptyString(t *testing.T) { - var elbName = "" - _, errors := validateElbName(elbName, "SampleKey") - - if len(errors) != 0 { - t.Fatalf("Expected the ELB Name to pass validation") - } -} - -func 
TestResourceAWSELB_validateElbNameCannotBeLongerThan32Characters(t *testing.T) { - var elbName = "Testing123dddddddddddddddddddvvvv" - _, errors := validateElbName(elbName, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestResourceAWSELB_validateElbNameCannotHaveSpecialCharacters(t *testing.T) { - var elbName = "Testing123%%" - _, errors := validateElbName(elbName, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestResourceAWSELB_validateElbNameCannotEndWithHyphen(t *testing.T) { - var elbName = "Testing123-" - _, errors := validateElbName(elbName, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestResourceAWSELB_validateAccessLogsInterval(t *testing.T) { - type testCases struct { - Value int - ErrCount int - } - - invalidCases := []testCases{ - { - Value: 0, - ErrCount: 1, - }, - { - Value: 10, - ErrCount: 1, - }, - { - Value: -1, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateAccessLogsInterval(tc.Value, "interval") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - -} - -func TestResourceAWSELB_validateListenerProtocol(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: "", - ErrCount: 1, - }, - { - Value: "incorrect", - ErrCount: 1, - }, - { - Value: "HTTP:", - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateListenerProtocol(tc.Value, "protocol") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: "TCP", - ErrCount: 0, - }, - { - Value: "ssl", - ErrCount: 0, - }, - { - Value: "HTTP", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, 
errors := validateListenerProtocol(tc.Value, "protocol") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func TestResourceAWSELB_validateHealthCheckTarget(t *testing.T) { - type testCase struct { - Value string - ErrCount int - } - - randomRunes := func(n int) string { - rand.Seed(time.Now().UTC().UnixNano()) - - // A complete set of modern Katakana characters. - runes := []rune("アイウエオ" + - "カキクケコガギグゲゴサシスセソザジズゼゾ" + - "タチツテトダヂヅデドナニヌネノハヒフヘホ" + - "バビブベボパピプペポマミムメモヤユヨラリ" + - "ルレロワヰヱヲン") - - s := make([]rune, n) - for i := range s { - s[i] = runes[rand.Intn(len(runes))] - } - return string(s) - } - - validCases := []testCase{ - { - Value: "TCP:1234", - ErrCount: 0, - }, - { - Value: "http:80/test", - ErrCount: 0, - }, - { - Value: fmt.Sprintf("HTTP:8080/%s", randomRunes(5)), - ErrCount: 0, - }, - { - Value: "SSL:8080", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateHeathCheckTarget(tc.Value, "target") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } - - invalidCases := []testCase{ - { - Value: "", - ErrCount: 1, - }, - { - Value: "TCP:", - ErrCount: 1, - }, - { - Value: "TCP:1234/", - ErrCount: 1, - }, - { - Value: "SSL:8080/", - ErrCount: 1, - }, - { - Value: "HTTP:8080", - ErrCount: 1, - }, - { - Value: "incorrect-value", - ErrCount: 1, - }, - { - Value: "TCP:123456", - ErrCount: 1, - }, - { - Value: "incorrect:80/", - ErrCount: 1, - }, - { - Value: fmt.Sprintf("HTTP:8080/%s%s", randomString(512), randomRunes(512)), - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateHeathCheckTarget(tc.Value, "target") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } -} - -func testAccCheckAWSELBDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range 
s.RootModule().Resources { - if rs.Type != "aws_elb" { - continue - } - - describe, err := conn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.LoadBalancerDescriptions) != 0 && - *describe.LoadBalancerDescriptions[0].LoadBalancerName == rs.Primary.ID { - return fmt.Errorf("ELB still exists") - } - } - - // Verify the error - providerErr, ok := err.(awserr.Error) - if !ok { - return err - } - - if providerErr.Code() != "LoadBalancerNotFound" { - return fmt.Errorf("Unexpected error: %s", err) - } - } - - return nil -} - -func testAccCheckAWSELBAttributes(conf *elb.LoadBalancerDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - zones := []string{"us-west-2a", "us-west-2b", "us-west-2c"} - azs := make([]string, 0, len(conf.AvailabilityZones)) - for _, x := range conf.AvailabilityZones { - azs = append(azs, *x) - } - sort.StringSlice(azs).Sort() - if !reflect.DeepEqual(azs, zones) { - return fmt.Errorf("bad availability_zones") - } - - l := elb.Listener{ - InstancePort: aws.Int64(int64(8000)), - InstanceProtocol: aws.String("HTTP"), - LoadBalancerPort: aws.Int64(int64(80)), - Protocol: aws.String("HTTP"), - } - - if !reflect.DeepEqual(conf.ListenerDescriptions[0].Listener, &l) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - conf.ListenerDescriptions[0].Listener, - l) - } - - if *conf.DNSName == "" { - return fmt.Errorf("empty dns_name") - } - - return nil - } -} - -func testAccCheckAWSELBAttributesHealthCheck(conf *elb.LoadBalancerDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - zones := []string{"us-west-2a", "us-west-2b", "us-west-2c"} - azs := make([]string, 0, len(conf.AvailabilityZones)) - for _, x := range conf.AvailabilityZones { - azs = append(azs, *x) - } - sort.StringSlice(azs).Sort() - if !reflect.DeepEqual(azs, zones) { - return fmt.Errorf("bad availability_zones") - } - - 
check := &elb.HealthCheck{ - Timeout: aws.Int64(int64(30)), - UnhealthyThreshold: aws.Int64(int64(5)), - HealthyThreshold: aws.Int64(int64(5)), - Interval: aws.Int64(int64(60)), - Target: aws.String("HTTP:8000/"), - } - - if !reflect.DeepEqual(conf.HealthCheck, check) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - conf.HealthCheck, - check) - } - - if *conf.DNSName == "" { - return fmt.Errorf("empty dns_name") - } - - return nil - } -} - -func testAccCheckAWSELBExists(n string, res *elb.LoadBalancerDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).elbconn - - describe, err := conn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describe.LoadBalancerDescriptions) != 1 || - *describe.LoadBalancerDescriptions[0].LoadBalancerName != rs.Primary.ID { - return fmt.Errorf("ELB not found") - } - - *res = *describe.LoadBalancerDescriptions[0] - - // Confirm source_security_group_id for ELBs in a VPC - // See https://github.com/hashicorp/terraform/pull/3780 - if res.VPCId != nil { - sgid := rs.Primary.Attributes["source_security_group_id"] - if sgid == "" { - return fmt.Errorf("Expected to find source_security_group_id for ELB, but was empty") - } - } - - return nil - } -} - -const testAccAWSELBConfig = ` -resource "aws_elb" "bar" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - // Protocol should be case insensitive - lb_protocol = "HttP" - } - - tags { - bar = "baz" - } - - cross_zone_load_balancing = true -} -` - -const testAccAWSELBFullRangeOfCharacters = ` -resource "aws_elb" "foo" { - name = 
"%s" - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} -` - -const testAccAWSELBAccessLogs = ` -resource "aws_elb" "foo" { - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} -` - -func testAccAWSELBAccessLogsOn(r string) string { - return fmt.Sprintf(` -# an S3 bucket configured for Access logs -# The 797873946194 is the AWS ID for us-west-2, so this test -# must be ran in us-west-2 -resource "aws_s3_bucket" "acceslogs_bucket" { - bucket = "%s" - acl = "private" - force_destroy = true - policy = < 0 { - strs := aws.StringValueSlice(ia.AdditionalMasterSecurityGroups) - attrs["additional_master_security_groups"] = strings.Join(strs, ",") - } - if len(ia.AdditionalSlaveSecurityGroups) > 0 { - strs := aws.StringValueSlice(ia.AdditionalSlaveSecurityGroups) - attrs["additional_slave_security_groups"] = strings.Join(strs, ",") - } - - if ia.ServiceAccessSecurityGroup != nil { - attrs["service_access_security_group"] = *ia.ServiceAccessSecurityGroup - } - - result = append(result, attrs) - - return result -} - -func flattenBootstrapArguments(actions []*emr.Command) []map[string]interface{} { - result := make([]map[string]interface{}, 0) - - for _, b := range actions { - attrs := make(map[string]interface{}) - attrs["name"] = *b.Name - attrs["path"] = *b.ScriptPath - attrs["args"] = flattenStringList(b.Args) - result = append(result, attrs) - } - - return result -} - -func loadGroups(d *schema.ResourceData, meta interface{}) ([]*emr.InstanceGroup, error) { - emrconn := meta.(*AWSClient).emrconn - reqGrps := &emr.ListInstanceGroupsInput{ - ClusterId: aws.String(d.Id()), - } - - respGrps, errGrps := emrconn.ListInstanceGroups(reqGrps) - if errGrps != nil { - return nil, fmt.Errorf("Error reading EMR cluster: %s", errGrps) - } 
- return respGrps.InstanceGroups, nil -} - -func findGroup(grps []*emr.InstanceGroup, typ string) *emr.InstanceGroup { - for _, grp := range grps { - if grp.InstanceGroupType != nil { - if *grp.InstanceGroupType == typ { - return grp - } - } - } - return nil -} - -func expandTags(m map[string]interface{}) []*emr.Tag { - var result []*emr.Tag - for k, v := range m { - result = append(result, &emr.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func tagsToMapEMR(ts []*emr.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - result[*t.Key] = *t.Value - } - - return result -} - -func diffTagsEMR(oldTags, newTags []*emr.Tag) ([]*emr.Tag, []*emr.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*emr.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! 
- remove = append(remove, t) - } - } - - return expandTags(create), remove -} - -func setTagsEMR(conn *emr.EMR, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsEMR(expandTags(o), expandTags(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %s", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.RemoveTags(&emr.RemoveTagsInput{ - ResourceId: aws.String(d.Id()), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s", create) - _, err := conn.AddTags(&emr.AddTagsInput{ - ResourceId: aws.String(d.Id()), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActionConfig { - actionsOut := []*emr.BootstrapActionConfig{} - - for _, raw := range bootstrapActions { - actionAttributes := raw.(map[string]interface{}) - actionName := actionAttributes["name"].(string) - actionPath := actionAttributes["path"].(string) - actionArgs := actionAttributes["args"].([]interface{}) - - action := &emr.BootstrapActionConfig{ - Name: aws.String(actionName), - ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{ - Path: aws.String(actionPath), - Args: expandStringList(actionArgs), - }, - } - actionsOut = append(actionsOut, action) - } - - return actionsOut -} - -func expandConfigures(input string) []*emr.Configuration { - configsOut := []*emr.Configuration{} - if strings.HasPrefix(input, "http") { - if err := readHttpJson(input, &configsOut); err != nil { - log.Printf("[ERR] Error reading HTTP JSON: %s", err) - } - } else if strings.HasSuffix(input, ".json") { - if err := readLocalJson(input, &configsOut); err != nil { - log.Printf("[ERR] Error reading local JSON: %s", 
err) - } - } else { - if err := readBodyJson(input, &configsOut); err != nil { - log.Printf("[ERR] Error reading body JSON: %s", err) - } - } - log.Printf("[DEBUG] Expanded EMR Configurations %s", configsOut) - - return configsOut -} - -func readHttpJson(url string, target interface{}) error { - r, err := http.Get(url) - if err != nil { - return err - } - defer r.Body.Close() - - return json.NewDecoder(r.Body).Decode(target) -} - -func readLocalJson(localFile string, target interface{}) error { - file, e := ioutil.ReadFile(localFile) - if e != nil { - log.Printf("[ERROR] %s", e) - return e - } - - return json.Unmarshal(file, target) -} - -func readBodyJson(body string, target interface{}) error { - log.Printf("[DEBUG] Raw Body %s\n", body) - err := json.Unmarshal([]byte(body), target) - if err != nil { - log.Printf("[ERROR] parsing JSON %s", err) - return err - } - return nil -} - -func resourceAwsEMRClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).emrconn - - log.Printf("[INFO] Reading EMR Cluster Information: %s", d.Id()) - params := &emr.DescribeClusterInput{ - ClusterId: aws.String(d.Id()), - } - - resp, err := conn.DescribeCluster(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "ClusterNotFound" == awsErr.Code() { - return 42, "destroyed", nil - } - } - log.Printf("[WARN] Error on retrieving EMR Cluster (%s) when waiting: %s", d.Id(), err) - return nil, "", err - } - - emrc := resp.Cluster - - if emrc == nil { - return 42, "destroyed", nil - } - - if resp.Cluster.Status != nil { - log.Printf("[DEBUG] EMR Cluster status (%s): %s", d.Id(), *resp.Cluster.Status) - } - - status := emrc.Status - if *status.State == "TERMINATING" { - reason := *status.StateChangeReason - return emrc, *status.State, fmt.Errorf("EMR Cluster is terminating. 
%s: %s", - *reason.Code, *reason.Message) - } - - return emrc, *status.State, nil - } -} diff --git a/builtin/providers/aws/resource_aws_emr_cluster_test.go b/builtin/providers/aws/resource_aws_emr_cluster_test.go deleted file mode 100644 index 9de404d20..000000000 --- a/builtin/providers/aws/resource_aws_emr_cluster_test.go +++ /dev/null @@ -1,2168 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/emr" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEMRCluster_basic(t *testing.T) { - var cluster emr.Cluster - r := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig(r), - Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - }, - }, - }) -} - -func TestAccAWSEMRCluster_security_config(t *testing.T) { - var cluster emr.Cluster - r := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig_SecurityConfiguration(r), - Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - }, - }, - }) -} - -func TestAccAWSEMRCluster_bootstrap_ordering(t *testing.T) { - var cluster emr.Cluster - rName := acctest.RandomWithPrefix("tf-emr-bootstrap") - argsInts := []string{ - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "10", - } - - argsStrings := []string{ - "instance.isMaster=true", - "echo running on master node", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig_bootstrap(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.test", &cluster), - testAccCheck_bootstrap_order(&cluster, argsInts, argsStrings), - ), - }, - }, - }) -} - -func TestAccAWSEMRCluster_terminationProtected(t *testing.T) { - var cluster emr.Cluster - r := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "termination_protection", "false"), - ), - }, - { - Config: testAccAWSEmrClusterConfigTerminationPolicyUpdated(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "termination_protection", "true"), - ), - }, - { - //Need to turn off termination_protection to allow the job to be deleted - Config: testAccAWSEmrClusterConfig(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - ), - }, - }, - }) -} - -func TestAccAWSEMRCluster_visibleToAllUsers(t *testing.T) { - var cluster emr.Cluster - r := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - 
resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "visible_to_all_users", "true"), - ), - }, - { - Config: testAccAWSEmrClusterConfigVisibleToAllUsersUpdated(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "visible_to_all_users", "false"), - ), - }, - }, - }) -} - -func TestAccAWSEMRCluster_tags(t *testing.T) { - var cluster emr.Cluster - r := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEmrDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSEmrClusterConfig(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "4"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.role", "rolename"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.dns_zone", "env_zone"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.env", "env"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.name", "name-env")), - }, - { - Config: testAccAWSEmrClusterConfigUpdatedTags(r), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster), - resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "3"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.dns_zone", "new_zone"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.Env", "production"), - resource.TestCheckResourceAttr( - "aws_emr_cluster.tf-test-cluster", "tags.name", "name-env"), - ), - }, - }, - }) -} - -func testAccCheck_bootstrap_order(cluster *emr.Cluster, 
argsInts, argsStrings []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - emrconn := testAccProvider.Meta().(*AWSClient).emrconn - req := emr.ListBootstrapActionsInput{ - ClusterId: cluster.Id, - } - - resp, err := emrconn.ListBootstrapActions(&req) - if err != nil { - return fmt.Errorf("[ERR] Error listing boostrap actions in test: %s", err) - } - - // make sure we actually checked something - var ran bool - for _, ba := range resp.BootstrapActions { - // assume name matches the config - rArgs := aws.StringValueSlice(ba.Args) - if *ba.Name == "test" { - ran = true - if !reflect.DeepEqual(argsInts, rArgs) { - return fmt.Errorf("Error matching Bootstrap args:\n\texpected: %#v\n\tgot: %#v", argsInts, rArgs) - } - } else if *ba.Name == "runif" { - ran = true - if !reflect.DeepEqual(argsStrings, rArgs) { - return fmt.Errorf("Error matching Bootstrap args:\n\texpected: %#v\n\tgot: %#v", argsStrings, rArgs) - } - } - } - - if !ran { - return fmt.Errorf("Expected to compare bootstrap actions, but no checks were ran") - } - - return nil - } -} - -func testAccCheckAWSEmrDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).emrconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_emr_cluster" { - continue - } - - params := &emr.DescribeClusterInput{ - ClusterId: aws.String(rs.Primary.ID), - } - - describe, err := conn.DescribeCluster(params) - - if err == nil { - if describe.Cluster != nil && - *describe.Cluster.Status.State == "WAITING" { - return fmt.Errorf("EMR Cluster still exists") - } - } - - providerErr, ok := err.(awserr.Error) - if !ok { - return err - } - - log.Printf("[ERROR] %v", providerErr) - } - - return nil -} - -func testAccCheckAWSEmrClusterExists(n string, v *emr.Cluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return 
fmt.Errorf("No cluster id set") - } - conn := testAccProvider.Meta().(*AWSClient).emrconn - describe, err := conn.DescribeCluster(&emr.DescribeClusterInput{ - ClusterId: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("EMR error: %v", err) - } - - if describe.Cluster != nil && - *describe.Cluster.Id != rs.Primary.ID { - return fmt.Errorf("EMR cluser not found") - } - - *v = *describe.Cluster - - if describe.Cluster != nil && - *describe.Cluster.Status.State != "WAITING" { - return fmt.Errorf("EMR cluser is not up yet") - } - - return nil - } -} - -func testAccAWSEmrClusterConfig_bootstrap(r string) string { - return fmt.Sprintf(` -resource "aws_emr_cluster" "test" { - count = 1 - name = "%s" - release_label = "emr-5.0.0" - applications = ["Hadoop", "Hive"] - log_uri = "s3n://terraform/testlog/" - master_instance_type = "m4.large" - core_instance_type = "m1.small" - core_instance_count = 1 - service_role = "${aws_iam_role.iam_emr_default_role.arn}" - - depends_on = ["aws_main_route_table_association.a"] - - ec2_attributes { - subnet_id = "${aws_subnet.main.id}" - - emr_managed_master_security_group = "${aws_security_group.allow_all.id}" - emr_managed_slave_security_group = "${aws_security_group.allow_all.id}" - instance_profile = "${aws_iam_instance_profile.emr_profile.arn}" - } - - bootstrap_action { - path = "s3://elasticmapreduce/bootstrap-actions/run-if" - name = "runif" - args = ["instance.isMaster=true", "echo running on master node"] - } - - bootstrap_action = [ - { - path = "s3://${aws_s3_bucket.tester.bucket}/testscript.sh" - name = "test" - - args = ["1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "10", - ] - }, - ] -} - -resource "aws_iam_instance_profile" "emr_profile" { - name = "%s_profile" - role = "${aws_iam_role.iam_emr_profile_role.name}" -} - -resource "aws_iam_role" "iam_emr_default_role" { - name = "%s_default_role" - - assume_role_policy = < 10280 { - errors = append(errors, fmt.Errorf( - "%q cannot be 
longer than 10280 characters", k)) - } - return - }, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 10000 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 10000 characters, name is limited to 10280", k)) - } - return - }, - }, - - "configuration": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateJsonString, - }, - - "creation_date": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsEmrSecurityConfigurationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).emrconn - - var emrSCName string - if v, ok := d.GetOk("name"); ok { - emrSCName = v.(string) - } else { - if v, ok := d.GetOk("name_prefix"); ok { - emrSCName = resource.PrefixedUniqueId(v.(string)) - } else { - emrSCName = resource.PrefixedUniqueId("tf-emr-sc-") - } - } - - resp, err := conn.CreateSecurityConfiguration(&emr.CreateSecurityConfigurationInput{ - Name: aws.String(emrSCName), - SecurityConfiguration: aws.String(d.Get("configuration").(string)), - }) - - if err != nil { - return err - } - - d.SetId(*resp.Name) - return resourceAwsEmrSecurityConfigurationRead(d, meta) -} - -func resourceAwsEmrSecurityConfigurationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).emrconn - - resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{ - Name: aws.String(d.Id()), - }) - if err != nil { - if isAWSErr(err, "InvalidRequestException", "does not exist") { - log.Printf("[WARN] EMR Security Configuraiton (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return err - } - - d.Set("creation_date", resp.CreationDateTime) - d.Set("name", resp.Name) - d.Set("configuration", resp.SecurityConfiguration) - - return nil -} - -func 
resourceAwsEmrSecurityConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).emrconn - - _, err := conn.DeleteSecurityConfiguration(&emr.DeleteSecurityConfigurationInput{ - Name: aws.String(d.Id()), - }) - if err != nil { - if isAWSErr(err, "InvalidRequestException", "does not exist") { - d.SetId("") - return nil - } - return err - } - d.SetId("") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_emr_security_configuration_test.go b/builtin/providers/aws/resource_aws_emr_security_configuration_test.go deleted file mode 100644 index c17fb806f..000000000 --- a/builtin/providers/aws/resource_aws_emr_security_configuration_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/emr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSEmrSecurityConfiguration_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccEmrSecurityConfigurationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckEmrSecurityConfigurationExists("aws_emr_security_configuration.foo"), - ), - }, - }, - }) -} - -func testAccCheckEmrSecurityConfigurationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).emrconn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_emr_security_configuration" { - continue - } - - // Try to find the Security Configuration - resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{ - Name: aws.String(rs.Primary.ID), - }) - if err == nil { - if resp.Name != nil && *resp.Name == rs.Primary.ID { - // assume this means the resource still exists - return fmt.Errorf("Error: EMR 
Security Configuration still exists: %s", *resp.Name) - } - return nil - } - - // Verify the error is what we want - if err != nil { - if isAWSErr(err, "InvalidRequestException", "does not exist") { - return nil - } - return err - } - } - - return nil -} - -func testAccCheckEmrSecurityConfigurationExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EMR Security Configuration ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).emrconn - resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{ - Name: aws.String(rs.Primary.ID), - }) - if err != nil { - return err - } - - if resp.Name == nil { - return fmt.Errorf("EMR Security Configuration had nil name which shouldn't happen") - } - - if *resp.Name != rs.Primary.ID { - return fmt.Errorf("EMR Security Configuration name mismatch, got (%s), expected (%s)", *resp.Name, rs.Primary.ID) - } - - return nil - } -} - -const testAccEmrSecurityConfigurationConfig = ` -resource "aws_emr_security_configuration" "foo" { - configuration = < 1 { - return fmt.Errorf("Error: multiple Flow Logs created for (%s)", resourceId) - } - - d.SetId(*resp.FlowLogIds[0]) - - return resourceAwsLogFlowRead(d, meta) -} - -func resourceAwsLogFlowRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - opts := &ec2.DescribeFlowLogsInput{ - FlowLogIds: []*string{aws.String(d.Id())}, - } - - resp, err := conn.DescribeFlowLogs(opts) - if err != nil { - log.Printf("[WARN] Error describing Flow Logs for id (%s)", d.Id()) - d.SetId("") - return nil - } - - if len(resp.FlowLogs) == 0 { - log.Printf("[WARN] No Flow Logs found for id (%s)", d.Id()) - d.SetId("") - return nil - } - - fl := resp.FlowLogs[0] - d.Set("traffic_type", fl.TrafficType) - d.Set("log_group_name", fl.LogGroupName) - 
d.Set("iam_role_arn", fl.DeliverLogsPermissionArn) - - var resourceKey string - if strings.HasPrefix(*fl.ResourceId, "vpc-") { - resourceKey = "vpc_id" - } else if strings.HasPrefix(*fl.ResourceId, "subnet-") { - resourceKey = "subnet_id" - } else if strings.HasPrefix(*fl.ResourceId, "eni-") { - resourceKey = "eni_id" - } - if resourceKey != "" { - d.Set(resourceKey, fl.ResourceId) - } - - return nil -} - -func resourceAwsLogFlowDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf( - "[DEBUG] Flow Log Destroy: %s", d.Id()) - _, err := conn.DeleteFlowLogs(&ec2.DeleteFlowLogsInput{ - FlowLogIds: []*string{aws.String(d.Id())}, - }) - - if err != nil { - return fmt.Errorf("[WARN] Error deleting Flow Log with ID (%s), error: %s", d.Id(), err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_flow_log_test.go b/builtin/providers/aws/resource_aws_flow_log_test.go deleted file mode 100644 index 8757caea1..000000000 --- a/builtin/providers/aws/resource_aws_flow_log_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSFlowLog_basic(t *testing.T) { - var flowLog ec2.FlowLog - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_flow_log.test_flow_log", - Providers: testAccProviders, - CheckDestroy: testAccCheckFlowLogDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFlowLogConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckFlowLogExists("aws_flow_log.test_flow_log", &flowLog), - testAccCheckAWSFlowLogAttributes(&flowLog), - ), - }, - }, - }) -} - -func TestAccAWSFlowLog_subnet(t *testing.T) { - var 
flowLog ec2.FlowLog - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_flow_log.test_flow_log_subnet", - Providers: testAccProviders, - CheckDestroy: testAccCheckFlowLogDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFlowLogConfig_subnet(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckFlowLogExists("aws_flow_log.test_flow_log_subnet", &flowLog), - testAccCheckAWSFlowLogAttributes(&flowLog), - ), - }, - }, - }) -} - -func testAccCheckFlowLogExists(n string, flowLog *ec2.FlowLog) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Flow Log ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - describeOpts := &ec2.DescribeFlowLogsInput{ - FlowLogIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeFlowLogs(describeOpts) - if err != nil { - return err - } - - if len(resp.FlowLogs) > 0 { - *flowLog = *resp.FlowLogs[0] - return nil - } - return fmt.Errorf("No Flow Logs found for id (%s)", rs.Primary.ID) - } -} - -func testAccCheckAWSFlowLogAttributes(flowLog *ec2.FlowLog) resource.TestCheckFunc { - return func(s *terraform.State) error { - if flowLog.FlowLogStatus != nil && *flowLog.FlowLogStatus == "ACTIVE" { - return nil - } - if flowLog.FlowLogStatus == nil { - return fmt.Errorf("Flow Log status is not ACTIVE, is nil") - } else { - return fmt.Errorf("Flow Log status is not ACTIVE, got: %s", *flowLog.FlowLogStatus) - } - } -} - -func testAccCheckFlowLogDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_flow_log" { - continue - } - - return nil - } - - return nil -} - -func testAccFlowLogConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "default" { - cidr_block = 
"10.0.0.0/16" - tags { - Name = "tf-flow-log-test" - } -} - -resource "aws_subnet" "test_subnet" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.1.0/24" - - tags { - Name = "tf-flow-test" - } -} - -resource "aws_iam_role" "test_role" { - name = "tf_test_flow_log_basic_%d" - assume_role_policy = < 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, - }, - - "location": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "access_policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateJsonString, - StateFunc: func(v interface{}) string { - json, _ := normalizeJsonString(v) - return json - }, - }, - - "notification": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "sns_topic": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn - - input := &glacier.CreateVaultInput{ - VaultName: aws.String(d.Get("name").(string)), - } - - out, err := glacierconn.CreateVault(input) - if err != nil { - return fmt.Errorf("Error creating Glacier Vault: %s", err) - } - - d.SetId(d.Get("name").(string)) - d.Set("location", *out.Location) - - return resourceAwsGlacierVaultUpdate(d, meta) -} - -func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn - - if err := setGlacierVaultTags(glacierconn, d); err != nil { - return err - } - - if d.HasChange("access_policy") { - if err := 
resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil { - return err - } - } - - if d.HasChange("notification") { - if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil { - return err - } - } - - return resourceAwsGlacierVaultRead(d, meta) -} - -func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn - - input := &glacier.DescribeVaultInput{ - VaultName: aws.String(d.Id()), - } - - out, err := glacierconn.DescribeVault(input) - if err != nil { - return fmt.Errorf("Error reading Glacier Vault: %s", err.Error()) - } - - awsClient := meta.(*AWSClient) - d.Set("name", out.VaultName) - d.Set("arn", out.VaultARN) - - location, err := buildGlacierVaultLocation(awsClient.accountid, d.Id()) - if err != nil { - return err - } - d.Set("location", location) - - tags, err := getGlacierVaultTags(glacierconn, d.Id()) - if err != nil { - return err - } - d.Set("tags", tags) - - log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id()) - pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{ - VaultName: aws.String(d.Id()), - }) - - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { - d.Set("access_policy", "") - } else if pol != nil { - policy, err := normalizeJsonString(*pol.Policy.Policy) - if err != nil { - return errwrap.Wrapf("access policy contains an invalid JSON: {{err}}", err) - } - d.Set("access_policy", policy) - } else { - return err - } - - notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { - d.Set("notification", "") - } else if pol != nil { - d.Set("notification", notifications) - } else { - return err - } - - return nil -} - -func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn - - 
log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id()) - _, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{ - VaultName: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error()) - } - return nil -} - -func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { - - if v, ok := d.GetOk("notification"); ok { - settings := v.([]interface{}) - - if len(settings) > 1 { - return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault") - } else if len(settings) == 1 { - s := settings[0].(map[string]interface{}) - var events []*string - for _, id := range s["events"].(*schema.Set).List() { - events = append(events, aws.String(id.(string))) - } - - _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ - VaultName: aws.String(d.Id()), - VaultNotificationConfig: &glacier.VaultNotificationConfig{ - SNSTopic: aws.String(s["sns_topic"].(string)), - Events: events, - }, - }) - - if err != nil { - return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) - } - } - } else { - _, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ - VaultName: aws.String(d.Id()), - }) - - if err != nil { - return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error()) - } - - } - - return nil -} - -func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { - vaultName := d.Id() - policyContents := d.Get("access_policy").(string) - - policy := &glacier.VaultAccessPolicy{ - Policy: aws.String(policyContents), - } - - if policyContents != "" { - log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName) - - _, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ - VaultName: aws.String(d.Id()), - Policy: policy, - }) - - if err != nil { - return fmt.Errorf("Error putting Glacier Vault policy: %s", 
err.Error()) - } - } else { - log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy) - _, err := glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ - VaultName: aws.String(d.Id()), - }) - - if err != nil { - return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error()) - } - } - - return nil -} - -func setGlacierVaultTags(conn *glacier.Glacier, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffGlacierVaultTags(mapGlacierVaultTags(o), mapGlacierVaultTags(n)) - - // Set tags - if len(remove) > 0 { - tagsToRemove := &glacier.RemoveTagsFromVaultInput{ - VaultName: aws.String(d.Id()), - TagKeys: glacierStringsToPointyString(remove), - } - - log.Printf("[DEBUG] Removing tags: from %s", d.Id()) - _, err := conn.RemoveTagsFromVault(tagsToRemove) - if err != nil { - return err - } - } - if len(create) > 0 { - tagsToAdd := &glacier.AddTagsToVaultInput{ - VaultName: aws.String(d.Id()), - Tags: glacierVaultTagsFromMap(create), - } - - log.Printf("[DEBUG] Creating tags: for %s", d.Id()) - _, err := conn.AddTagsToVault(tagsToAdd) - if err != nil { - return err - } - } - } - - return nil -} - -func mapGlacierVaultTags(m map[string]interface{}) map[string]string { - results := make(map[string]string) - for k, v := range m { - results[k] = v.(string) - } - - return results -} - -func diffGlacierVaultTags(oldTags, newTags map[string]string) (map[string]string, []string) { - - create := make(map[string]string) - for k, v := range newTags { - create[k] = v - } - - // Build the list of what to remove - var remove []string - for k, v := range oldTags { - old, ok := create[k] - if !ok || old != v { - // Delete it! 
- remove = append(remove, k) - } - } - - return create, remove -} - -func getGlacierVaultTags(glacierconn *glacier.Glacier, vaultName string) (map[string]string, error) { - request := &glacier.ListTagsForVaultInput{ - VaultName: aws.String(vaultName), - } - - log.Printf("[DEBUG] Getting the tags: for %s", vaultName) - response, err := glacierconn.ListTagsForVault(request) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "NoSuchTagSet" { - return map[string]string{}, nil - } else if err != nil { - return nil, err - } - - return glacierVaultTagsToMap(response.Tags), nil -} - -func glacierVaultTagsToMap(responseTags map[string]*string) map[string]string { - results := make(map[string]string, len(responseTags)) - for k, v := range responseTags { - results[k] = *v - } - - return results -} - -func glacierVaultTagsFromMap(responseTags map[string]string) map[string]*string { - results := make(map[string]*string, len(responseTags)) - for k, v := range responseTags { - results[k] = aws.String(v) - } - - return results -} - -func glacierStringsToPointyString(s []string) []*string { - results := make([]*string, len(s)) - for i, x := range s { - results[i] = aws.String(x) - } - - return results -} - -func glacierPointersToStringList(pointers []*string) []interface{} { - list := make([]interface{}, len(pointers)) - for i, v := range pointers { - list[i] = *v - } - return list -} - -func buildGlacierVaultLocation(accountId, vaultName string) (string, error) { - if accountId == "" { - return "", errors.New("AWS account ID unavailable - failed to construct Vault location") - } - return fmt.Sprintf("/" + accountId + "/vaults/" + vaultName), nil -} - -func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) { - request := &glacier.GetVaultNotificationsInput{ - VaultName: aws.String(vaultName), - } - - response, err := glacierconn.GetVaultNotifications(request) - if err != nil { - return nil, fmt.Errorf("Error 
reading Glacier Vault Notifications: %s", err.Error()) - } - - notifications := make(map[string]interface{}, 0) - - log.Print("[DEBUG] Flattening Glacier Vault Notifications") - - notifications["events"] = schema.NewSet(schema.HashString, glacierPointersToStringList(response.VaultNotificationConfig.Events)) - notifications["sns_topic"] = *response.VaultNotificationConfig.SNSTopic - - return []map[string]interface{}{notifications}, nil -} diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go deleted file mode 100644 index 011284c1d..000000000 --- a/builtin/providers/aws/resource_aws_glacier_vault_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/glacier" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSGlacierVault_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGlacierVaultDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGlacierVault_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists("aws_glacier_vault.test"), - ), - }, - }, - }) -} - -func TestAccAWSGlacierVault_full(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGlacierVaultDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGlacierVault_full(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists("aws_glacier_vault.full"), - ), - }, - }, - }) -} - -func 
TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGlacierVaultDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGlacierVault_full(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists("aws_glacier_vault.full"), - ), - }, - resource.TestStep{ - Config: testAccGlacierVault_withoutNotification(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists("aws_glacier_vault.full"), - testAccCheckVaultNotificationsMissing("aws_glacier_vault.full"), - ), - }, - }, - }) -} - -func TestDiffGlacierVaultTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create map[string]string - Remove []string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: []string{ - "foo", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: []string{ - "foo", - }, - }, - } - - for i, tc := range cases { - c, r := diffGlacierVaultTags(mapGlacierVaultTags(tc.Old), mapGlacierVaultTags(tc.New)) - - if !reflect.DeepEqual(c, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, c) - } - if !reflect.DeepEqual(r, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, r) - } - } -} - -func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn - out, err := 
glacierconn.DescribeVault(&glacier.DescribeVaultInput{ - VaultName: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if out.VaultARN == nil { - return fmt.Errorf("No Glacier Vault Found") - } - - if *out.VaultName != rs.Primary.ID { - return fmt.Errorf("Glacier Vault Mismatch - existing: %q, state: %q", - *out.VaultName, rs.Primary.ID) - } - - return nil - } -} - -func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn - out, err := glacierconn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{ - VaultName: aws.String(rs.Primary.ID), - }) - - if awserr, ok := err.(awserr.Error); ok && awserr.Code() != "ResourceNotFoundException" { - return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, awserr.Code()) - } - - if out.VaultNotificationConfig != nil { - return fmt.Errorf("Vault Notification Block has been found for %s", rs.Primary.ID) - } - - return nil - } - -} - -func testAccCheckGlacierVaultDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).glacierconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_glacier_vault" { - continue - } - - input := &glacier.DescribeVaultInput{ - VaultName: aws.String(rs.Primary.ID), - } - if _, err := conn.DescribeVault(input); err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "ResourceNotFoundException" { - continue - } - - return err - } - return fmt.Errorf("still exists") - } - return nil -} - -func testAccGlacierVault_basic(rInt int) string { - return fmt.Sprintf(` -resource "aws_glacier_vault" "test" { - name = "my_test_vault_%d" -} -`, 
rInt) -} - -func testAccGlacierVault_full(rInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "glacier-sns-topic-%d" -} - -resource "aws_glacier_vault" "full" { - name = "my_test_vault_%d" - notification { - sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" - events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] - } - tags { - Test="Test1" - } -} -`, rInt, rInt) -} - -func testAccGlacierVault_withoutNotification(rInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "glacier-sns-topic-%d" -} - -resource "aws_glacier_vault" "full" { - name = "my_test_vault_%d" - tags { - Test="Test1" - } -} -`, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_iam_access_key.go b/builtin/providers/aws/resource_aws_iam_access_key.go deleted file mode 100644 index 515069c03..000000000 --- a/builtin/providers/aws/resource_aws_iam_access_key.go +++ /dev/null @@ -1,178 +0,0 @@ -package aws - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/encryption" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamAccessKey() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamAccessKeyCreate, - Read: resourceAwsIamAccessKeyRead, - Delete: resourceAwsIamAccessKeyDelete, - - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "secret": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Deprecated: "Please use a PGP key to encrypt", - }, - "ses_smtp_password": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "pgp_key": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, 
- "key_fingerprint": { - Type: schema.TypeString, - Computed: true, - }, - "encrypted_secret": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsIamAccessKeyCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.CreateAccessKeyInput{ - UserName: aws.String(d.Get("user").(string)), - } - - createResp, err := iamconn.CreateAccessKey(request) - if err != nil { - return fmt.Errorf( - "Error creating access key for user %s: %s", - *request.UserName, - err, - ) - } - - d.SetId(*createResp.AccessKey.AccessKeyId) - - if createResp.AccessKey == nil || createResp.AccessKey.SecretAccessKey == nil { - return fmt.Errorf("[ERR] CreateAccessKey response did not contain a Secret Access Key as expected") - } - - if v, ok := d.GetOk("pgp_key"); ok { - pgpKey := v.(string) - encryptionKey, err := encryption.RetrieveGPGKey(pgpKey) - if err != nil { - return err - } - fingerprint, encrypted, err := encryption.EncryptValue(encryptionKey, *createResp.AccessKey.SecretAccessKey, "IAM Access Key Secret") - if err != nil { - return err - } - - d.Set("key_fingerprint", fingerprint) - d.Set("encrypted_secret", encrypted) - } else { - if err := d.Set("secret", createResp.AccessKey.SecretAccessKey); err != nil { - return err - } - } - - d.Set("ses_smtp_password", - sesSmtpPasswordFromSecretKey(createResp.AccessKey.SecretAccessKey)) - - return resourceAwsIamAccessKeyReadResult(d, &iam.AccessKeyMetadata{ - AccessKeyId: createResp.AccessKey.AccessKeyId, - CreateDate: createResp.AccessKey.CreateDate, - Status: createResp.AccessKey.Status, - UserName: createResp.AccessKey.UserName, - }) -} - -func resourceAwsIamAccessKeyRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.ListAccessKeysInput{ - UserName: aws.String(d.Get("user").(string)), - } - - getResp, err := iamconn.ListAccessKeys(request) - if err != nil { - if iamerr, ok := 
err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX TEST ME - // the user does not exist, so the key can't exist. - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM acces key: %s", err) - } - - for _, key := range getResp.AccessKeyMetadata { - if key.AccessKeyId != nil && *key.AccessKeyId == d.Id() { - return resourceAwsIamAccessKeyReadResult(d, key) - } - } - - // Guess the key isn't around anymore. - d.SetId("") - return nil -} - -func resourceAwsIamAccessKeyReadResult(d *schema.ResourceData, key *iam.AccessKeyMetadata) error { - d.SetId(*key.AccessKeyId) - if err := d.Set("user", key.UserName); err != nil { - return err - } - if err := d.Set("status", key.Status); err != nil { - return err - } - return nil -} - -func resourceAwsIamAccessKeyDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.DeleteAccessKeyInput{ - AccessKeyId: aws.String(d.Id()), - UserName: aws.String(d.Get("user").(string)), - } - - if _, err := iamconn.DeleteAccessKey(request); err != nil { - return fmt.Errorf("Error deleting access key %s: %s", d.Id(), err) - } - return nil -} - -func sesSmtpPasswordFromSecretKey(key *string) string { - if key == nil { - return "" - } - version := byte(0x02) - message := []byte("SendRawEmail") - hmacKey := []byte(*key) - h := hmac.New(sha256.New, hmacKey) - h.Write(message) - rawSig := h.Sum(nil) - versionedSig := make([]byte, 0, len(rawSig)+1) - versionedSig = append(versionedSig, version) - versionedSig = append(versionedSig, rawSig...) 
- return base64.StdEncoding.EncodeToString(versionedSig) -} diff --git a/builtin/providers/aws/resource_aws_iam_access_key_test.go b/builtin/providers/aws/resource_aws_iam_access_key_test.go deleted file mode 100644 index 4cf8a1dd7..000000000 --- a/builtin/providers/aws/resource_aws_iam_access_key_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/vault/helper/pgpkeys" -) - -func TestAccAWSAccessKey_basic(t *testing.T) { - var conf iam.AccessKeyMetadata - rName := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAccessKeyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAccessKeyConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), - testAccCheckAWSAccessKeyAttributes(&conf), - resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), - ), - }, - }, - }) -} - -func TestAccAWSAccessKey_encrypted(t *testing.T) { - var conf iam.AccessKeyMetadata - rName := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAccessKeyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAccessKeyConfig_encrypted(rName, testPubAccessKey1), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), - testAccCheckAWSAccessKeyAttributes(&conf), - 
testDecryptSecretKeyAndTest("aws_iam_access_key.a_key", testPrivKey1), - resource.TestCheckNoResourceAttr( - "aws_iam_access_key.a_key", "secret"), - resource.TestCheckResourceAttrSet( - "aws_iam_access_key.a_key", "encrypted_secret"), - resource.TestCheckResourceAttrSet( - "aws_iam_access_key.a_key", "key_fingerprint"), - ), - }, - }, - }) -} - -func testAccCheckAWSAccessKeyDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_access_key" { - continue - } - - // Try to get access key - resp, err := iamconn.ListAccessKeys(&iam.ListAccessKeysInput{ - UserName: aws.String(rs.Primary.ID), - }) - if err == nil { - if len(resp.AccessKeyMetadata) > 0 { - return fmt.Errorf("still exist.") - } - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "NoSuchEntity" { - return err - } - } - - return nil -} - -func testAccCheckAWSAccessKeyExists(n string, res *iam.AccessKeyMetadata) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Role name is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - name := rs.Primary.Attributes["user"] - - resp, err := iamconn.ListAccessKeys(&iam.ListAccessKeysInput{ - UserName: aws.String(name), - }) - if err != nil { - return err - } - - if len(resp.AccessKeyMetadata) != 1 || - *resp.AccessKeyMetadata[0].UserName != name { - return fmt.Errorf("User not found not found") - } - - *res = *resp.AccessKeyMetadata[0] - - return nil - } -} - -func testAccCheckAWSAccessKeyAttributes(accessKeyMetadata *iam.AccessKeyMetadata) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.Contains(*accessKeyMetadata.UserName, "test-user") { - return fmt.Errorf("Bad 
username: %s", *accessKeyMetadata.UserName) - } - - if *accessKeyMetadata.Status != "Active" { - return fmt.Errorf("Bad status: %s", *accessKeyMetadata.Status) - } - - return nil - } -} - -func testDecryptSecretKeyAndTest(nAccessKey, key string) resource.TestCheckFunc { - return func(s *terraform.State) error { - keyResource, ok := s.RootModule().Resources[nAccessKey] - if !ok { - return fmt.Errorf("Not found: %s", nAccessKey) - } - - password, ok := keyResource.Primary.Attributes["encrypted_secret"] - if !ok { - return errors.New("No password in state") - } - - // We can't verify that the decrypted password is correct, because we don't - // have it. We can verify that decrypting it does not error - _, err := pgpkeys.DecryptBytes(password, key) - if err != nil { - return fmt.Errorf("Error decrypting password: %s", err) - } - - return nil - } -} - -func testAccAWSAccessKeyConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_user" "a_user" { - name = "%s" -} - -resource "aws_iam_access_key" "a_key" { - user = "${aws_iam_user.a_user.name}" -} -`, rName) -} - -func testAccAWSAccessKeyConfig_encrypted(rName, key string) string { - return fmt.Sprintf(` -resource "aws_iam_user" "a_user" { - name = "%s" -} - -resource "aws_iam_access_key" "a_key" { - user = "${aws_iam_user.a_user.name}" - pgp_key = < 0 { - return fmt.Errorf("Bad: Account alias still exists: %q", rs.Primary.ID) - } - } - - return nil - -} - -func testAccCheckAWSIAMAccountAliasDataExists(n string, a *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.Attributes["account_alias"] != *a { - return fmt.Errorf("Data Source account_alias didn't match, expected (%s), got (%s)", *a, rs.Primary.Attributes["account_alias"]) - } - - return nil - } -} - -func testAccCheckAWSIAMAccountAliasExists(n string, a *string) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - params := &iam.ListAccountAliasesInput{} - - resp, err := conn.ListAccountAliases(params) - - if err != nil || resp == nil { - return nil - } - - if len(resp.AccountAliases) == 0 { - return fmt.Errorf("Bad: Account alias %q does not exist", rs.Primary.ID) - } - - *a = aws.StringValue(resp.AccountAliases[0]) - - return nil - } -} - -func testAccCheckAwsIamAccountAlias(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find Account Alias resource: %s", n) - } - - if rs.Primary.Attributes["account_alias"] == "" { - return fmt.Errorf("Missing Account Alias") - } - - return nil - } -} - -func testAccAWSIAMAccountAliasConfig_with_datasource(rstring string) string { - return fmt.Sprintf(` -resource "aws_iam_account_alias" "test" { - account_alias = "terraform-%s-alias" -} - -data "aws_iam_account_alias" "current" { - depends_on = ["aws_iam_account_alias.test"] -}`, rstring) -} - -func testAccAWSIAMAccountAliasConfig(rstring string) string { - return fmt.Sprintf(` -resource "aws_iam_account_alias" "test" { - account_alias = "terraform-%s-alias" -}`, rstring) -} diff --git a/builtin/providers/aws/resource_aws_iam_account_password_policy.go b/builtin/providers/aws/resource_aws_iam_account_password_policy.go deleted file mode 100644 index 71dfbf0c8..000000000 --- a/builtin/providers/aws/resource_aws_iam_account_password_policy.go +++ /dev/null @@ -1,168 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamAccountPasswordPolicy() *schema.Resource { - return &schema.Resource{ - Create: 
resourceAwsIamAccountPasswordPolicyUpdate, - Read: resourceAwsIamAccountPasswordPolicyRead, - Update: resourceAwsIamAccountPasswordPolicyUpdate, - Delete: resourceAwsIamAccountPasswordPolicyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "allow_users_to_change_password": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "expire_passwords": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - "hard_expiry": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "max_password_age": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "minimum_password_length": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 6, - }, - "password_reuse_prevention": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "require_lowercase_characters": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "require_numbers": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "require_symbols": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "require_uppercase_characters": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsIamAccountPasswordPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.UpdateAccountPasswordPolicyInput{} - - if v, ok := d.GetOk("allow_users_to_change_password"); ok { - input.AllowUsersToChangePassword = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("hard_expiry"); ok { - input.HardExpiry = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("max_password_age"); ok { - input.MaxPasswordAge = aws.Int64(int64(v.(int))) - } - if v, ok := d.GetOk("minimum_password_length"); ok { - input.MinimumPasswordLength = 
aws.Int64(int64(v.(int))) - } - if v, ok := d.GetOk("password_reuse_prevention"); ok { - input.PasswordReusePrevention = aws.Int64(int64(v.(int))) - } - if v, ok := d.GetOk("require_lowercase_characters"); ok { - input.RequireLowercaseCharacters = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("require_numbers"); ok { - input.RequireNumbers = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("require_symbols"); ok { - input.RequireSymbols = aws.Bool(v.(bool)) - } - if v, ok := d.GetOk("require_uppercase_characters"); ok { - input.RequireUppercaseCharacters = aws.Bool(v.(bool)) - } - - log.Printf("[DEBUG] Updating IAM account password policy: %s", input) - _, err := iamconn.UpdateAccountPasswordPolicy(input) - if err != nil { - return fmt.Errorf("Error updating IAM Password Policy: %s", err) - } - log.Println("[DEBUG] IAM account password policy updated") - - d.SetId("iam-account-password-policy") - - return resourceAwsIamAccountPasswordPolicyRead(d, meta) -} - -func resourceAwsIamAccountPasswordPolicyRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.GetAccountPasswordPolicyInput{} - resp, err := iamconn.GetAccountPasswordPolicy(input) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] IAM account password policy is gone (i.e. 
default)") - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM account password policy: %s", err) - } - - log.Printf("[DEBUG] Received IAM account password policy: %s", resp) - - policy := resp.PasswordPolicy - - d.Set("allow_users_to_change_password", policy.AllowUsersToChangePassword) - d.Set("expire_passwords", policy.ExpirePasswords) - d.Set("hard_expiry", policy.HardExpiry) - d.Set("max_password_age", policy.MaxPasswordAge) - d.Set("minimum_password_length", policy.MinimumPasswordLength) - d.Set("password_reuse_prevention", policy.PasswordReusePrevention) - d.Set("require_lowercase_characters", policy.RequireLowercaseCharacters) - d.Set("require_numbers", policy.RequireNumbers) - d.Set("require_symbols", policy.RequireSymbols) - d.Set("require_uppercase_characters", policy.RequireUppercaseCharacters) - - return nil -} - -func resourceAwsIamAccountPasswordPolicyDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - log.Println("[DEBUG] Deleting IAM account password policy") - input := &iam.DeleteAccountPasswordPolicyInput{} - if _, err := iamconn.DeleteAccountPasswordPolicy(input); err != nil { - return fmt.Errorf("Error deleting IAM Password Policy: %s", err) - } - d.SetId("") - log.Println("[DEBUG] Deleted IAM account password policy") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_account_password_policy_test.go b/builtin/providers/aws/resource_aws_iam_account_password_policy_test.go deleted file mode 100644 index b909fc05a..000000000 --- a/builtin/providers/aws/resource_aws_iam_account_password_policy_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIAMAccountPasswordPolicy_basic(t *testing.T) { - var policy 
iam.GetAccountPasswordPolicyOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIAMAccountPasswordPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSIAMAccountPasswordPolicy, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSIAMAccountPasswordPolicyExists("aws_iam_account_password_policy.default", &policy), - resource.TestCheckResourceAttr("aws_iam_account_password_policy.default", "minimum_password_length", "8"), - ), - }, - resource.TestStep{ - Config: testAccAWSIAMAccountPasswordPolicy_modified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSIAMAccountPasswordPolicyExists("aws_iam_account_password_policy.default", &policy), - resource.TestCheckResourceAttr("aws_iam_account_password_policy.default", "minimum_password_length", "7"), - ), - }, - }, - }) -} - -func testAccCheckAWSIAMAccountPasswordPolicyDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_account_password_policy" { - continue - } - - // Try to get policy - _, err := iamconn.GetAccountPasswordPolicy(&iam.GetAccountPasswordPolicyInput{}) - if err == nil { - return fmt.Errorf("still exist.") - } - - // Verify the error is what we want - awsErr, ok := err.(awserr.Error) - if !ok { - return err - } - if awsErr.Code() != "NoSuchEntity" { - return err - } - } - - return nil -} - -func testAccCheckAWSIAMAccountPasswordPolicyExists(n string, res *iam.GetAccountPasswordPolicyOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No policy ID is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - resp, err := 
iamconn.GetAccountPasswordPolicy(&iam.GetAccountPasswordPolicyInput{}) - if err != nil { - return err - } - - *res = *resp - - return nil - } -} - -const testAccAWSIAMAccountPasswordPolicy = ` -resource "aws_iam_account_password_policy" "default" { - allow_users_to_change_password = true - minimum_password_length = 8 - require_numbers = true -} -` -const testAccAWSIAMAccountPasswordPolicy_modified = ` -resource "aws_iam_account_password_policy" "default" { - allow_users_to_change_password = true - minimum_password_length = 7 - require_numbers = false - require_symbols = true - require_uppercase_characters = true -} -` diff --git a/builtin/providers/aws/resource_aws_iam_group.go b/builtin/providers/aws/resource_aws_iam_group.go deleted file mode 100644 index 967f055cd..000000000 --- a/builtin/providers/aws/resource_aws_iam_group.go +++ /dev/null @@ -1,141 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamGroupCreate, - Read: resourceAwsIamGroupRead, - Update: resourceAwsIamGroupUpdate, - Delete: resourceAwsIamGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "unique_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAwsIamGroupName, - }, - "path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - }, - }, - } -} - -func resourceAwsIamGroupCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - name := d.Get("name").(string) - path := d.Get("path").(string) 
- - request := &iam.CreateGroupInput{ - Path: aws.String(path), - GroupName: aws.String(name), - } - - createResp, err := iamconn.CreateGroup(request) - if err != nil { - return fmt.Errorf("Error creating IAM Group %s: %s", name, err) - } - d.SetId(*createResp.Group.GroupName) - - return resourceAwsIamGroupReadResult(d, createResp.Group) -} - -func resourceAwsIamGroupRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.GetGroupInput{ - GroupName: aws.String(d.Id()), - } - - getResp, err := iamconn.GetGroup(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM Group %s: %s", d.Id(), err) - } - return resourceAwsIamGroupReadResult(d, getResp.Group) -} - -func resourceAwsIamGroupReadResult(d *schema.ResourceData, group *iam.Group) error { - if err := d.Set("name", group.GroupName); err != nil { - return err - } - if err := d.Set("arn", group.Arn); err != nil { - return err - } - if err := d.Set("path", group.Path); err != nil { - return err - } - if err := d.Set("unique_id", group.GroupId); err != nil { - return err - } - return nil -} - -func resourceAwsIamGroupUpdate(d *schema.ResourceData, meta interface{}) error { - if d.HasChange("name") || d.HasChange("path") { - iamconn := meta.(*AWSClient).iamconn - on, nn := d.GetChange("name") - _, np := d.GetChange("path") - - request := &iam.UpdateGroupInput{ - GroupName: aws.String(on.(string)), - NewGroupName: aws.String(nn.(string)), - NewPath: aws.String(np.(string)), - } - _, err := iamconn.UpdateGroup(request) - if err != nil { - return fmt.Errorf("Error updating IAM Group %s: %s", d.Id(), err) - } - return resourceAwsIamGroupRead(d, meta) - } - return nil -} - -func resourceAwsIamGroupDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.DeleteGroupInput{ - GroupName: 
aws.String(d.Id()), - } - - if _, err := iamconn.DeleteGroup(request); err != nil { - return fmt.Errorf("Error deleting IAM Group %s: %s", d.Id(), err) - } - return nil -} - -func validateAwsIamGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z=,.@\-_+]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters, hyphens, underscores, commas, periods, @ symbols, plus and equals signs allowed in %q: %q", - k, value)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_iam_group_membership.go b/builtin/providers/aws/resource_aws_iam_group_membership.go deleted file mode 100644 index 7977bbfb7..000000000 --- a/builtin/providers/aws/resource_aws_iam_group_membership.go +++ /dev/null @@ -1,169 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamGroupMembership() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamGroupMembershipCreate, - Read: resourceAwsIamGroupMembershipRead, - Update: resourceAwsIamGroupMembershipUpdate, - Delete: resourceAwsIamGroupMembershipDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "users": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "group": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsIamGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - group := d.Get("group").(string) - userList := expandStringList(d.Get("users").(*schema.Set).List()) - - if err := addUsersToGroup(conn, userList, group); err != nil { - 
return err - } - - d.SetId(d.Get("name").(string)) - return resourceAwsIamGroupMembershipRead(d, meta) -} - -func resourceAwsIamGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - group := d.Get("group").(string) - - var ul []string - var marker *string - for { - resp, err := conn.GetGroup(&iam.GetGroupInput{ - GroupName: aws.String(group), - Marker: marker, - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // aws specific error - if awsErr.Code() == "NoSuchEntity" { - // group not found - d.SetId("") - return nil - } - } - return err - } - - for _, u := range resp.Users { - ul = append(ul, *u.UserName) - } - - if !*resp.IsTruncated { - break - } - marker = resp.Marker - } - - if err := d.Set("users", ul); err != nil { - return fmt.Errorf("[WARN] Error setting user list from IAM Group Membership (%s), error: %s", group, err) - } - - return nil -} - -func resourceAwsIamGroupMembershipUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - if d.HasChange("users") { - group := d.Get("group").(string) - - o, n := d.GetChange("users") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if err := removeUsersFromGroup(conn, remove, group); err != nil { - return err - } - - if err := addUsersToGroup(conn, add, group); err != nil { - return err - } - } - - return resourceAwsIamGroupMembershipRead(d, meta) -} - -func resourceAwsIamGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - userList := expandStringList(d.Get("users").(*schema.Set).List()) - group := d.Get("group").(string) - - if err := removeUsersFromGroup(conn, userList, group); err != nil { - return err - } - - return nil -} - -func 
removeUsersFromGroup(conn *iam.IAM, users []*string, group string) error { - for _, u := range users { - _, err := conn.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{ - UserName: u, - GroupName: aws.String(group), - }) - - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - return nil - } - return err - } - } - return nil -} - -func addUsersToGroup(conn *iam.IAM, users []*string, group string) error { - for _, u := range users { - _, err := conn.AddUserToGroup(&iam.AddUserToGroupInput{ - UserName: u, - GroupName: aws.String(group), - }) - - if err != nil { - return err - } - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_group_membership_test.go b/builtin/providers/aws/resource_aws_iam_group_membership_test.go deleted file mode 100644 index 102d29630..000000000 --- a/builtin/providers/aws/resource_aws_iam_group_membership_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSGroupMembership_basic(t *testing.T) { - var group iam.GetGroupOutput - - rString := acctest.RandStringFromCharSet(10, acctest.CharSetAlpha) - configBase := fmt.Sprintf(testAccAWSGroupMemberConfig, rString, rString, rString) - configUpdate := fmt.Sprintf(testAccAWSGroupMemberConfigUpdate, rString, rString, rString, rString, rString) - configUpdateDown := fmt.Sprintf(testAccAWSGroupMemberConfigUpdateDown, rString, rString, rString) - - testUser := fmt.Sprintf("test-user-%s", rString) - testUserTwo := fmt.Sprintf("test-user-two-%s", rString) - testUserThree := fmt.Sprintf("test-user-three-%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckAWSGroupMembershipDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: configBase, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupMembershipExists("aws_iam_group_membership.team", &group), - testAccCheckAWSGroupMembershipAttributes(&group, []string{testUser}), - ), - }, - - resource.TestStep{ - Config: configUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupMembershipExists("aws_iam_group_membership.team", &group), - testAccCheckAWSGroupMembershipAttributes(&group, []string{testUserTwo, testUserThree}), - ), - }, - - resource.TestStep{ - Config: configUpdateDown, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupMembershipExists("aws_iam_group_membership.team", &group), - testAccCheckAWSGroupMembershipAttributes(&group, []string{testUserThree}), - ), - }, - }, - }) -} - -func TestAccAWSGroupMembership_paginatedUserList(t *testing.T) { - var group iam.GetGroupOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSGroupMembershipDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSGroupMemberConfigPaginatedUserList, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupMembershipExists("aws_iam_group_membership.team", &group), - resource.TestCheckResourceAttr( - "aws_iam_group_membership.team", "users.#", "101"), - ), - }, - }, - }) -} - -func testAccCheckAWSGroupMembershipDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_group_membership" { - continue - } - - group := rs.Primary.Attributes["group"] - - _, err := conn.GetGroup(&iam.GetGroupInput{ - GroupName: aws.String(group), - }) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "NoSuchEntity" { - 
continue - } - return err - } - - return fmt.Errorf("still exists") - } - - return nil -} - -func testAccCheckAWSGroupMembershipExists(n string, g *iam.GetGroupOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No User name is set") - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - gn := rs.Primary.Attributes["group"] - - resp, err := conn.GetGroup(&iam.GetGroupInput{ - GroupName: aws.String(gn), - }) - - if err != nil { - return fmt.Errorf("Error: Group (%s) not found", gn) - } - - *g = *resp - - return nil - } -} - -func testAccCheckAWSGroupMembershipAttributes(group *iam.GetGroupOutput, users []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.Contains(*group.Group.GroupName, "test-group") { - return fmt.Errorf("Bad group membership: expected %s, got %s", "test-group", *group.Group.GroupName) - } - - uc := len(users) - for _, u := range users { - for _, gu := range group.Users { - if u == *gu.UserName { - uc-- - } - } - } - - if uc > 0 { - return fmt.Errorf("Bad group membership count, expected (%d), but only (%d) found", len(users), uc) - } - return nil - } -} - -const testAccAWSGroupMemberConfig = ` -resource "aws_iam_group" "group" { - name = "test-group-%s" -} - -resource "aws_iam_user" "user" { - name = "test-user-%s" -} - -resource "aws_iam_group_membership" "team" { - name = "tf-testing-group-membership-%s" - users = ["${aws_iam_user.user.name}"] - group = "${aws_iam_group.group.name}" -} -` - -const testAccAWSGroupMemberConfigUpdate = ` -resource "aws_iam_group" "group" { - name = "test-group-%s" -} - -resource "aws_iam_user" "user" { - name = "test-user-%s" -} - -resource "aws_iam_user" "user_two" { - name = "test-user-two-%s" -} - -resource "aws_iam_user" "user_three" { - name = "test-user-three-%s" -} - -resource 
"aws_iam_group_membership" "team" { - name = "tf-testing-group-membership-%s" - users = [ - "${aws_iam_user.user_two.name}", - "${aws_iam_user.user_three.name}", - ] - group = "${aws_iam_group.group.name}" -} -` - -const testAccAWSGroupMemberConfigUpdateDown = ` -resource "aws_iam_group" "group" { - name = "test-group-%s" -} - -resource "aws_iam_user" "user_three" { - name = "test-user-three-%s" -} - -resource "aws_iam_group_membership" "team" { - name = "tf-testing-group-membership-%s" - users = [ - "${aws_iam_user.user_three.name}", - ] - group = "${aws_iam_group.group.name}" -} -` - -const testAccAWSGroupMemberConfigPaginatedUserList = ` -resource "aws_iam_group" "group" { - name = "test-paginated-group" -} - -resource "aws_iam_group_membership" "team" { - name = "tf-testing-paginated-group-membership" - users = ["${aws_iam_user.user.*.name}"] - group = "${aws_iam_group.group.name}" -} - -resource "aws_iam_user" "user" { - count = 101 - name = "${format("paged-test-user-%d", count.index + 1)}" -} -` diff --git a/builtin/providers/aws/resource_aws_iam_group_policy.go b/builtin/providers/aws/resource_aws_iam_group_policy.go deleted file mode 100644 index 1bdf72545..000000000 --- a/builtin/providers/aws/resource_aws_iam_group_policy.go +++ /dev/null @@ -1,129 +0,0 @@ -package aws - -import ( - "fmt" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamGroupPolicy() *schema.Resource { - return &schema.Resource{ - // PutGroupPolicy API is idempotent, so these can be the same. 
- Create: resourceAwsIamGroupPolicyPut, - Update: resourceAwsIamGroupPolicyPut, - - Read: resourceAwsIamGroupPolicyRead, - Delete: resourceAwsIamGroupPolicyDelete, - - Schema: map[string]*schema.Schema{ - "policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "group": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsIamGroupPolicyPut(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.PutGroupPolicyInput{ - GroupName: aws.String(d.Get("group").(string)), - PolicyDocument: aws.String(d.Get("policy").(string)), - } - - var policyName string - if v, ok := d.GetOk("name"); ok { - policyName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - policyName = resource.PrefixedUniqueId(v.(string)) - } else { - policyName = resource.UniqueId() - } - request.PolicyName = aws.String(policyName) - - if _, err := iamconn.PutGroupPolicy(request); err != nil { - return fmt.Errorf("Error putting IAM group policy %s: %s", *request.PolicyName, err) - } - - d.SetId(fmt.Sprintf("%s:%s", *request.GroupName, *request.PolicyName)) - return nil -} - -func resourceAwsIamGroupPolicyRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - group, name := resourceAwsIamGroupPolicyParseId(d.Id()) - - request := &iam.GetGroupPolicyInput{ - PolicyName: aws.String(name), - GroupName: aws.String(group), - } - - var err error - getResp, err := iamconn.GetGroupPolicy(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM 
policy %s from group %s: %s", name, group, err) - } - - if getResp.PolicyDocument == nil { - return fmt.Errorf("GetGroupPolicy returned a nil policy document") - } - - policy, err := url.QueryUnescape(*getResp.PolicyDocument) - if err != nil { - return err - } - return d.Set("policy", policy) -} - -func resourceAwsIamGroupPolicyDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - group, name := resourceAwsIamGroupPolicyParseId(d.Id()) - - request := &iam.DeleteGroupPolicyInput{ - PolicyName: aws.String(name), - GroupName: aws.String(group), - } - - if _, err := iamconn.DeleteGroupPolicy(request); err != nil { - return fmt.Errorf("Error deleting IAM group policy %s: %s", d.Id(), err) - } - return nil -} - -func resourceAwsIamGroupPolicyParseId(id string) (groupName, policyName string) { - parts := strings.SplitN(id, ":", 2) - groupName = parts[0] - policyName = parts[1] - return -} diff --git a/builtin/providers/aws/resource_aws_iam_group_policy_attachment.go b/builtin/providers/aws/resource_aws_iam_group_policy_attachment.go deleted file mode 100644 index cf9595232..000000000 --- a/builtin/providers/aws/resource_aws_iam_group_policy_attachment.go +++ /dev/null @@ -1,124 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamGroupPolicyAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamGroupPolicyAttachmentCreate, - Read: resourceAwsIamGroupPolicyAttachmentRead, - Delete: resourceAwsIamGroupPolicyAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "group": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} 
- -func resourceAwsIamGroupPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - group := d.Get("group").(string) - arn := d.Get("policy_arn").(string) - - err := attachPolicyToGroup(conn, group, arn) - if err != nil { - return fmt.Errorf("[WARN] Error attaching policy %s to IAM group %s: %v", arn, group, err) - } - - d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", group))) - return resourceAwsIamGroupPolicyAttachmentRead(d, meta) -} - -func resourceAwsIamGroupPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - group := d.Get("group").(string) - arn := d.Get("policy_arn").(string) - - _, err := conn.GetGroup(&iam.GetGroupInput{ - GroupName: aws.String(group), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] No such entity found for Policy Attachment (%s)", group) - d.SetId("") - return nil - } - } - return err - } - - attachedPolicies, err := conn.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ - GroupName: aws.String(group), - }) - if err != nil { - return err - } - - var policy string - for _, p := range attachedPolicies.AttachedPolicies { - if *p.PolicyArn == arn { - policy = *p.PolicyArn - } - } - - if policy == "" { - log.Printf("[WARN] No such policy found for Group Policy Attachment (%s)", group) - d.SetId("") - } - - return nil -} - -func resourceAwsIamGroupPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - group := d.Get("group").(string) - arn := d.Get("policy_arn").(string) - - err := detachPolicyFromGroup(conn, group, arn) - if err != nil { - return fmt.Errorf("[WARN] Error removing policy %s from IAM Group %s: %v", arn, group, err) - } - return nil -} - -func attachPolicyToGroup(conn *iam.IAM, group string, arn string) error { - _, err := 
conn.AttachGroupPolicy(&iam.AttachGroupPolicyInput{ - GroupName: aws.String(group), - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - return nil -} - -func detachPolicyFromGroup(conn *iam.IAM, group string, arn string) error { - _, err := conn.DetachGroupPolicy(&iam.DetachGroupPolicyInput{ - GroupName: aws.String(group), - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_group_policy_attachment_test.go b/builtin/providers/aws/resource_aws_iam_group_policy_attachment_test.go deleted file mode 100644 index a63bd2079..000000000 --- a/builtin/providers/aws/resource_aws_iam_group_policy_attachment_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIamGroupPolicyAttachment_basic(t *testing.T) { - var out iam.ListAttachedGroupPoliciesOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSGroupPolicyAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSGroupPolicyAttachConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupPolicyAttachmentExists("aws_iam_group_policy_attachment.test-attach", 1, &out), - testAccCheckAWSGroupPolicyAttachmentAttributes([]string{"test-policy"}, &out), - ), - }, - resource.TestStep{ - Config: testAccAWSGroupPolicyAttachConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSGroupPolicyAttachmentExists("aws_iam_group_policy_attachment.test-attach", 2, &out), - testAccCheckAWSGroupPolicyAttachmentAttributes([]string{"test-policy2", "test-policy3"}, &out), - ), - }, - }, - }) -} -func testAccCheckAWSGroupPolicyAttachmentDestroy(s 
*terraform.State) error { - return nil -} - -func testAccCheckAWSGroupPolicyAttachmentExists(n string, c int, out *iam.ListAttachedGroupPoliciesOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No policy name is set") - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - group := rs.Primary.Attributes["group"] - - attachedPolicies, err := conn.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ - GroupName: aws.String(group), - }) - if err != nil { - return fmt.Errorf("Error: Failed to get attached policies for group %s (%s)", group, n) - } - if c != len(attachedPolicies.AttachedPolicies) { - return fmt.Errorf("Error: Group (%s) has wrong number of policies attached on initial creation", n) - } - - *out = *attachedPolicies - return nil - } -} -func testAccCheckAWSGroupPolicyAttachmentAttributes(policies []string, out *iam.ListAttachedGroupPoliciesOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - matched := 0 - - for _, p := range policies { - for _, ap := range out.AttachedPolicies { - // *ap.PolicyArn like arn:aws:iam::111111111111:policy/test-policy - parts := strings.Split(*ap.PolicyArn, "/") - if len(parts) == 2 && p == parts[1] { - matched++ - } - } - } - if matched != len(policies) || matched != len(out.AttachedPolicies) { - return fmt.Errorf("Error: Number of attached policies was incorrect: expected %d matched policies, matched %d of %d", len(policies), matched, len(out.AttachedPolicies)) - } - return nil - } -} - -const testAccAWSGroupPolicyAttachConfig = ` -resource "aws_iam_group" "group" { - name = "test-group" -} - -resource "aws_iam_policy" "policy" { - name = "test-policy" - description = "A test policy" - policy = < 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - if 
!regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8196-L8201 - value := v.(string) - if len(value) > 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters, name is limited to 128", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - - "path": { - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - - "roles": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ConflictsWith: []string{"role"}, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Deprecated: "Use `role` instead. 
Only a single role can be passed to an IAM Instance Profile", - }, - - "role": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"roles"}, - }, - }, - } -} - -func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.UniqueId() - } - - _, hasRoles := d.GetOk("roles") - _, hasRole := d.GetOk("role") - - if hasRole == false && hasRoles == false { - return fmt.Errorf("Either `role` or `roles` (deprecated) must be specified when creating an IAM Instance Profile") - } - - request := &iam.CreateInstanceProfileInput{ - InstanceProfileName: aws.String(name), - Path: aws.String(d.Get("path").(string)), - } - - var err error - response, err := iamconn.CreateInstanceProfile(request) - if err == nil { - err = instanceProfileReadResult(d, response.InstanceProfile) - } - if err != nil { - return fmt.Errorf("Error creating IAM instance profile %s: %s", name, err) - } - - waiterRequest := &iam.GetInstanceProfileInput{ - InstanceProfileName: aws.String(name), - } - // don't return until the IAM service reports that the instance profile is ready. - // this ensures that terraform resources which rely on the instance profile will 'see' - // that the instance profile exists. 
- err = iamconn.WaitUntilInstanceProfileExists(waiterRequest) - if err != nil { - return fmt.Errorf("Timed out while waiting for instance profile %s: %s", name, err) - } - - return resourceAwsIamInstanceProfileUpdate(d, meta) -} - -func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) error { - request := &iam.AddRoleToInstanceProfileInput{ - InstanceProfileName: aws.String(profileName), - RoleName: aws.String(roleName), - } - - _, err := iamconn.AddRoleToInstanceProfile(request) - return err -} - -func instanceProfileRemoveRole(iamconn *iam.IAM, profileName, roleName string) error { - request := &iam.RemoveRoleFromInstanceProfileInput{ - InstanceProfileName: aws.String(profileName), - RoleName: aws.String(roleName), - } - - _, err := iamconn.RemoveRoleFromInstanceProfile(request) - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - return nil - } - return err -} - -func instanceProfileSetRoles(d *schema.ResourceData, iamconn *iam.IAM) error { - oldInterface, newInterface := d.GetChange("roles") - oldRoles := oldInterface.(*schema.Set) - newRoles := newInterface.(*schema.Set) - - currentRoles := schema.CopySet(oldRoles) - - d.Partial(true) - - for _, role := range oldRoles.Difference(newRoles).List() { - err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string)) - if err != nil { - return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err) - } - currentRoles.Remove(role) - d.Set("roles", currentRoles) - d.SetPartial("roles") - } - - for _, role := range newRoles.Difference(oldRoles).List() { - err := instanceProfileAddRole(iamconn, d.Id(), role.(string)) - if err != nil { - return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", role, d.Id(), err) - } - currentRoles.Add(role) - d.Set("roles", currentRoles) - d.SetPartial("roles") - } - - d.Partial(false) - - return nil -} - -func instanceProfileRemoveAllRoles(d *schema.ResourceData, iamconn *iam.IAM) error 
{ - for _, role := range d.Get("roles").(*schema.Set).List() { - err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string)) - if err != nil { - return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err) - } - } - return nil -} - -func resourceAwsIamInstanceProfileUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - d.Partial(true) - - if d.HasChange("role") { - oldRole, newRole := d.GetChange("role") - - if oldRole.(string) != "" { - err := instanceProfileRemoveRole(iamconn, d.Id(), oldRole.(string)) - if err != nil { - return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", oldRole.(string), d.Id(), err) - } - } - - if newRole.(string) != "" { - err := instanceProfileAddRole(iamconn, d.Id(), newRole.(string)) - if err != nil { - return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", newRole.(string), d.Id(), err) - } - } - - d.SetPartial("role") - } - - if d.HasChange("roles") { - return instanceProfileSetRoles(d, iamconn) - } - - d.Partial(false) - - return nil -} - -func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.GetInstanceProfileInput{ - InstanceProfileName: aws.String(d.Id()), - } - - result, err := iamconn.GetInstanceProfile(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM instance profile %s: %s", d.Id(), err) - } - - return instanceProfileReadResult(d, result.InstanceProfile) -} - -func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - if err := instanceProfileRemoveAllRoles(d, iamconn); err != nil { - return err - } - - request := &iam.DeleteInstanceProfileInput{ - InstanceProfileName: aws.String(d.Id()), - } - _, err := 
iamconn.DeleteInstanceProfile(request) - if err != nil { - return fmt.Errorf("Error deleting IAM instance profile %s: %s", d.Id(), err) - } - d.SetId("") - return nil -} - -func instanceProfileReadResult(d *schema.ResourceData, result *iam.InstanceProfile) error { - d.SetId(*result.InstanceProfileName) - if err := d.Set("name", result.InstanceProfileName); err != nil { - return err - } - if err := d.Set("arn", result.Arn); err != nil { - return err - } - if err := d.Set("path", result.Path); err != nil { - return err - } - d.Set("unique_id", result.InstanceProfileId) - - if result.Roles != nil && len(result.Roles) > 0 { - d.Set("role", result.Roles[0].RoleName) //there will only be 1 role returned - } - - roles := &schema.Set{F: schema.HashString} - for _, role := range result.Roles { - roles.Add(*role.RoleName) - } - if err := d.Set("roles", roles); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go b/builtin/providers/aws/resource_aws_iam_instance_profile_test.go deleted file mode 100644 index f60c4584f..000000000 --- a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIAMInstanceProfile_importBasic(t *testing.T) { - resourceName := "aws_iam_instance_profile.test" - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSInstanceProfileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSInstanceProfilePrefixNameConfig(rName), - }, - - { - ResourceName: resourceName, - 
ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"name_prefix"}, - }, - }, - }) -} - -func TestAccAWSIAMInstanceProfile_basic(t *testing.T) { - var conf iam.GetInstanceProfileOutput - - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsIamInstanceProfileConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInstanceProfileExists("aws_iam_instance_profile.test", &conf), - ), - }, - }, - }) -} - -func TestAccAWSIAMInstanceProfile_withRoleNotRoles(t *testing.T) { - var conf iam.GetInstanceProfileOutput - - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSInstanceProfileWithRoleSpecified(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInstanceProfileExists("aws_iam_instance_profile.test", &conf), - ), - }, - }, - }) -} - -func TestAccAWSIAMInstanceProfile_missingRoleThrowsError(t *testing.T) { - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAwsIamInstanceProfileConfigMissingRole(rName), - ExpectError: regexp.MustCompile(regexp.QuoteMeta("Either `role` or `roles` (deprecated) must be specified when creating an IAM Instance Profile")), - }, - }, - }) -} - -func TestAccAWSIAMInstanceProfile_namePrefix(t *testing.T) { - var conf iam.GetInstanceProfileOutput - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_iam_instance_profile.test", - IDRefreshIgnore: []string{"name_prefix"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSInstanceProfileDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAWSInstanceProfilePrefixNameConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInstanceProfileExists("aws_iam_instance_profile.test", &conf), - testAccCheckAWSInstanceProfileGeneratedNamePrefix( - "aws_iam_instance_profile.test", "test-"), - ), - }, - }, - }) -} - -func testAccCheckAWSInstanceProfileGeneratedNamePrefix(resource, prefix string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r, ok := s.RootModule().Resources[resource] - if !ok { - return fmt.Errorf("Resource not found") - } - name, ok := r.Primary.Attributes["name"] - if !ok { - return fmt.Errorf("Name attr not found: %#v", r.Primary.Attributes) - } - if !strings.HasPrefix(name, prefix) { - return fmt.Errorf("Name: %q, does not have prefix: %q", name, prefix) - } - return nil - } -} - -func testAccCheckAWSInstanceProfileDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_instance_profile" { - continue - } - - // Try to get role - _, err := iamconn.GetInstanceProfile(&iam.GetInstanceProfileInput{ - InstanceProfileName: aws.String(rs.Primary.ID), - }) - if err == nil { - return fmt.Errorf("still exist.") - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "NoSuchEntity" { - return err - } - } - - return nil -} - -func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Instance Profile name is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - resp, err := iamconn.GetInstanceProfile(&iam.GetInstanceProfileInput{ - InstanceProfileName: aws.String(rs.Primary.ID), - }) - 
if err != nil { - return err - } - - *res = *resp - - return nil - } -} - -func testAccAwsIamInstanceProfileConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test" { - name = "test-%s" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_instance_profile" "test" { - name = "test" - roles = ["${aws_iam_role.test.name}"] -}`, rName) -} - -func testAccAwsIamInstanceProfileConfigMissingRole(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_instance_profile" "test" { - name = "test-%s" -}`, rName) -} - -func testAccAWSInstanceProfilePrefixNameConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test" { - name = "test-%s" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_instance_profile" "test" { - name_prefix = "test-" - roles = ["${aws_iam_role.test.name}"] -}`, rName) -} - -func testAccAWSInstanceProfileWithRoleSpecified(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test" { - name = "test-%s" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_instance_profile" "test" { - name_prefix = "test-" - role = "${aws_iam_role.test.name}" -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_iam_openid_connect_provider.go b/builtin/providers/aws/resource_aws_iam_openid_connect_provider.go deleted file mode 100644 index 1791da4ec..000000000 --- a/builtin/providers/aws/resource_aws_iam_openid_connect_provider.go +++ /dev/null @@ -1,141 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamOpenIDConnectProvider() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamOpenIDConnectProviderCreate, - Read: resourceAwsIamOpenIDConnectProviderRead, - Update: resourceAwsIamOpenIDConnectProviderUpdate, - Delete: resourceAwsIamOpenIDConnectProviderDelete, - Exists: resourceAwsIamOpenIDConnectProviderExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: false, - Required: true, - ForceNew: true, - ValidateFunc: validateOpenIdURL, - DiffSuppressFunc: suppressOpenIdURL, - }, - "client_id_list": &schema.Schema{ - Elem: &schema.Schema{Type: schema.TypeString}, - Type: schema.TypeList, - Required: true, - ForceNew: true, - }, - "thumbprint_list": &schema.Schema{ - Elem: &schema.Schema{Type: schema.TypeString}, - Type: schema.TypeList, - Required: true, - }, - }, - } -} - -func resourceAwsIamOpenIDConnectProviderCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.CreateOpenIDConnectProviderInput{ - Url: aws.String(d.Get("url").(string)), - ClientIDList: expandStringList(d.Get("client_id_list").([]interface{})), - ThumbprintList: expandStringList(d.Get("thumbprint_list").([]interface{})), - } - - out, err := iamconn.CreateOpenIDConnectProvider(input) - if err != nil { - return err - } - - d.SetId(*out.OpenIDConnectProviderArn) - - return resourceAwsIamOpenIDConnectProviderRead(d, meta) -} - -func resourceAwsIamOpenIDConnectProviderRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.GetOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(d.Id()), - } - 
out, err := iamconn.GetOpenIDConnectProvider(input) - if err != nil { - return err - } - - d.Set("arn", d.Id()) - d.Set("url", out.Url) - d.Set("client_id_list", flattenStringList(out.ClientIDList)) - d.Set("thumbprint_list", flattenStringList(out.ThumbprintList)) - - return nil -} - -func resourceAwsIamOpenIDConnectProviderUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - if d.HasChange("thumbprint_list") { - input := &iam.UpdateOpenIDConnectProviderThumbprintInput{ - OpenIDConnectProviderArn: aws.String(d.Id()), - ThumbprintList: expandStringList(d.Get("thumbprint_list").([]interface{})), - } - - _, err := iamconn.UpdateOpenIDConnectProviderThumbprint(input) - if err != nil { - return err - } - } - - return resourceAwsIamOpenIDConnectProviderRead(d, meta) -} - -func resourceAwsIamOpenIDConnectProviderDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.DeleteOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(d.Id()), - } - _, err := iamconn.DeleteOpenIDConnectProvider(input) - - if err != nil { - if err, ok := err.(awserr.Error); ok && err.Code() == "NoSuchEntity" { - return nil - } - return fmt.Errorf("Error deleting platform application %s", err) - } - - return nil -} - -func resourceAwsIamOpenIDConnectProviderExists(d *schema.ResourceData, meta interface{}) (bool, error) { - iamconn := meta.(*AWSClient).iamconn - - input := &iam.GetOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(d.Id()), - } - _, err := iamconn.GetOpenIDConnectProvider(input) - if err != nil { - if err, ok := err.(awserr.Error); ok && err.Code() == "NoSuchEntity" { - return false, nil - } - return true, err - } - - return true, nil -} diff --git a/builtin/providers/aws/resource_aws_iam_openid_connect_provider_test.go b/builtin/providers/aws/resource_aws_iam_openid_connect_provider_test.go deleted file mode 100644 index 6cf10d8b8..000000000 --- 
a/builtin/providers/aws/resource_aws_iam_openid_connect_provider_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIAMOpenIDConnectProvider_basic(t *testing.T) { - rString := acctest.RandString(5) - url := "accounts.google.com/" + rString - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMOpenIDConnectProviderDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccIAMOpenIDConnectProviderConfig(rString), - Check: resource.ComposeTestCheckFunc( - testAccCheckIAMOpenIDConnectProvider("aws_iam_openid_connect_provider.goog"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "url", url), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "client_id_list.#", "1"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "client_id_list.0", - "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.googleusercontent.com"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "thumbprint_list.#", "0"), - ), - }, - resource.TestStep{ - Config: testAccIAMOpenIDConnectProviderConfig_modified(rString), - Check: resource.ComposeTestCheckFunc( - testAccCheckIAMOpenIDConnectProvider("aws_iam_openid_connect_provider.goog"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "url", url), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "client_id_list.#", "1"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "client_id_list.0", - "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.googleusercontent.com"), - 
resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "thumbprint_list.#", "2"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "thumbprint_list.0", "cf23df2207d99a74fbe169e3eba035e633b65d94"), - resource.TestCheckResourceAttr("aws_iam_openid_connect_provider.goog", "thumbprint_list.1", "c784713d6f9cb67b55dd84f4e4af7832d42b8f55"), - ), - }, - }, - }) -} - -func TestAccAWSIAMOpenIDConnectProvider_importBasic(t *testing.T) { - resourceName := "aws_iam_openid_connect_provider.goog" - rString := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMOpenIDConnectProviderDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccIAMOpenIDConnectProviderConfig_modified(rString), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSIAMOpenIDConnectProvider_disappears(t *testing.T) { - rString := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMOpenIDConnectProviderDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccIAMOpenIDConnectProviderConfig(rString), - Check: resource.ComposeTestCheckFunc( - testAccCheckIAMOpenIDConnectProvider("aws_iam_openid_connect_provider.goog"), - testAccCheckIAMOpenIDConnectProviderDisappears("aws_iam_openid_connect_provider.goog"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckIAMOpenIDConnectProviderDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_openid_connect_provider" { - continue - } - - input := &iam.GetOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(rs.Primary.ID), - } - out, 
err := iamconn.GetOpenIDConnectProvider(input) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - // none found, that's good - return nil - } - return fmt.Errorf("Error reading IAM OpenID Connect Provider, out: %s, err: %s", out, err) - } - - if out != nil { - return fmt.Errorf("Found IAM OpenID Connect Provider, expected none: %s", out) - } - } - - return nil -} - -func testAccCheckIAMOpenIDConnectProviderDisappears(id string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not Found: %s", id) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - _, err := iamconn.DeleteOpenIDConnectProvider(&iam.DeleteOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(rs.Primary.ID), - }) - return err - } -} - -func testAccCheckIAMOpenIDConnectProvider(id string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not Found: %s", id) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - _, err := iamconn.GetOpenIDConnectProvider(&iam.GetOpenIDConnectProviderInput{ - OpenIDConnectProviderArn: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - return nil - } -} - -func testAccIAMOpenIDConnectProviderConfig(rString string) string { - return fmt.Sprintf(` -resource "aws_iam_openid_connect_provider" "goog" { - url="https://accounts.google.com/%s" - client_id_list = [ - "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.googleusercontent.com" - ] - thumbprint_list = [] -} -`, rString) -} - -func testAccIAMOpenIDConnectProviderConfig_modified(rString string) string { - return fmt.Sprintf(` -resource "aws_iam_openid_connect_provider" "goog" { - 
url="https://accounts.google.com/%s" - client_id_list = [ - "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.googleusercontent.com" - ] - thumbprint_list = ["cf23df2207d99a74fbe169e3eba035e633b65d94", "c784713d6f9cb67b55dd84f4e4af7832d42b8f55"] -} -`, rString) -} diff --git a/builtin/providers/aws/resource_aws_iam_policy.go b/builtin/providers/aws/resource_aws_iam_policy.go deleted file mode 100644 index b3fdf1c5f..000000000 --- a/builtin/providers/aws/resource_aws_iam_policy.go +++ /dev/null @@ -1,296 +0,0 @@ -package aws - -import ( - "fmt" - "net/url" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamPolicyCreate, - Read: resourceAwsIamPolicyRead, - Update: resourceAwsIamPolicyUpdate, - Delete: resourceAwsIamPolicyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - "policy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateIAMPolicyJson, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 - value := v.(string) - if len(value) > 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - if 
!regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 - value := v.(string) - if len(value) > 96 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 96 characters, name is limited to 128", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsIamPolicyCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.UniqueId() - } - - request := &iam.CreatePolicyInput{ - Description: aws.String(d.Get("description").(string)), - Path: aws.String(d.Get("path").(string)), - PolicyDocument: aws.String(d.Get("policy").(string)), - PolicyName: aws.String(name), - } - - response, err := iamconn.CreatePolicy(request) - if err != nil { - return fmt.Errorf("Error creating IAM policy %s: %s", name, err) - } - - return readIamPolicy(d, response.Policy) -} - -func resourceAwsIamPolicyRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - getPolicyRequest := &iam.GetPolicyInput{ - PolicyArn: aws.String(d.Id()), - } - - getPolicyResponse, err := iamconn.GetPolicy(getPolicyRequest) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return 
fmt.Errorf("Error reading IAM policy %s: %s", d.Id(), err) - } - - getPolicyVersionRequest := &iam.GetPolicyVersionInput{ - PolicyArn: aws.String(d.Id()), - VersionId: getPolicyResponse.Policy.DefaultVersionId, - } - - getPolicyVersionResponse, err := iamconn.GetPolicyVersion(getPolicyVersionRequest) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM policy version %s: %s", d.Id(), err) - } - - policy, err := url.QueryUnescape(*getPolicyVersionResponse.PolicyVersion.Document) - if err != nil { - return err - } - if err := d.Set("policy", policy); err != nil { - return err - } - - return readIamPolicy(d, getPolicyResponse.Policy) -} - -func resourceAwsIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - if err := iamPolicyPruneVersions(d.Id(), iamconn); err != nil { - return err - } - - if !d.HasChange("policy") { - return nil - } - request := &iam.CreatePolicyVersionInput{ - PolicyArn: aws.String(d.Id()), - PolicyDocument: aws.String(d.Get("policy").(string)), - SetAsDefault: aws.Bool(true), - } - - if _, err := iamconn.CreatePolicyVersion(request); err != nil { - return fmt.Errorf("Error updating IAM policy %s: %s", d.Id(), err) - } - return nil -} - -func resourceAwsIamPolicyDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - if err := iamPolicyDeleteNondefaultVersions(d.Id(), iamconn); err != nil { - return err - } - - request := &iam.DeletePolicyInput{ - PolicyArn: aws.String(d.Id()), - } - - _, err := iamconn.DeletePolicy(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - return nil - } - return fmt.Errorf("Error reading IAM policy %s: %#v", d.Id(), err) - } - return nil -} - -// iamPolicyPruneVersions deletes the oldest versions. 
-// -// Old versions are deleted until there are 4 or less remaining, which means at -// least one more can be created before hitting the maximum of 5. -// -// The default version is never deleted. - -func iamPolicyPruneVersions(arn string, iamconn *iam.IAM) error { - versions, err := iamPolicyListVersions(arn, iamconn) - if err != nil { - return err - } - if len(versions) < 5 { - return nil - } - - var oldestVersion *iam.PolicyVersion - - for _, version := range versions { - if *version.IsDefaultVersion { - continue - } - if oldestVersion == nil || - version.CreateDate.Before(*oldestVersion.CreateDate) { - oldestVersion = version - } - } - - if err := iamPolicyDeleteVersion(arn, *oldestVersion.VersionId, iamconn); err != nil { - return err - } - return nil -} - -func iamPolicyDeleteNondefaultVersions(arn string, iamconn *iam.IAM) error { - versions, err := iamPolicyListVersions(arn, iamconn) - if err != nil { - return err - } - - for _, version := range versions { - if *version.IsDefaultVersion { - continue - } - if err := iamPolicyDeleteVersion(arn, *version.VersionId, iamconn); err != nil { - return err - } - } - - return nil -} - -func iamPolicyDeleteVersion(arn, versionID string, iamconn *iam.IAM) error { - request := &iam.DeletePolicyVersionInput{ - PolicyArn: aws.String(arn), - VersionId: aws.String(versionID), - } - - _, err := iamconn.DeletePolicyVersion(request) - if err != nil { - return fmt.Errorf("Error deleting version %s from IAM policy %s: %s", versionID, arn, err) - } - return nil -} - -func iamPolicyListVersions(arn string, iamconn *iam.IAM) ([]*iam.PolicyVersion, error) { - request := &iam.ListPolicyVersionsInput{ - PolicyArn: aws.String(arn), - } - - response, err := iamconn.ListPolicyVersions(request) - if err != nil { - return nil, fmt.Errorf("Error listing versions for IAM policy %s: %s", arn, err) - } - return response.Versions, nil -} - -func readIamPolicy(d *schema.ResourceData, policy *iam.Policy) error { - d.SetId(*policy.Arn) - if 
policy.Description != nil { - // the description isn't present in the response to CreatePolicy. - if err := d.Set("description", policy.Description); err != nil { - return err - } - } - if err := d.Set("path", policy.Path); err != nil { - return err - } - if err := d.Set("name", policy.PolicyName); err != nil { - return err - } - if err := d.Set("arn", policy.Arn); err != nil { - return err - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_policy_attachment.go b/builtin/providers/aws/resource_aws_iam_policy_attachment.go deleted file mode 100644 index adbd81b20..000000000 --- a/builtin/providers/aws/resource_aws_iam_policy_attachment.go +++ /dev/null @@ -1,374 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamPolicyAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamPolicyAttachmentCreate, - Read: resourceAwsIamPolicyAttachmentRead, - Update: resourceAwsIamPolicyAttachmentUpdate, - Delete: resourceAwsIamPolicyAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if v.(string) == "" { - errors = append(errors, fmt.Errorf( - "%q cannot be an empty string", k)) - } - return - }, - }, - "users": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "roles": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "groups": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: 
schema.TypeString}, - Set: schema.HashString, - }, - "policy_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsIamPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - name := d.Get("name").(string) - arn := d.Get("policy_arn").(string) - users := expandStringList(d.Get("users").(*schema.Set).List()) - roles := expandStringList(d.Get("roles").(*schema.Set).List()) - groups := expandStringList(d.Get("groups").(*schema.Set).List()) - - if len(users) == 0 && len(roles) == 0 && len(groups) == 0 { - return fmt.Errorf("[WARN] No Users, Roles, or Groups specified for IAM Policy Attachment %s", name) - } else { - var userErr, roleErr, groupErr error - if users != nil { - userErr = attachPolicyToUsers(conn, users, arn) - } - if roles != nil { - roleErr = attachPolicyToRoles(conn, roles, arn) - } - if groups != nil { - groupErr = attachPolicyToGroups(conn, groups, arn) - } - if userErr != nil || roleErr != nil || groupErr != nil { - return composeErrors(fmt.Sprint("[WARN] Error attaching policy with IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr) - } - } - d.SetId(d.Get("name").(string)) - return resourceAwsIamPolicyAttachmentRead(d, meta) -} - -func resourceAwsIamPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - arn := d.Get("policy_arn").(string) - name := d.Get("name").(string) - - _, err := conn.GetPolicy(&iam.GetPolicyInput{ - PolicyArn: aws.String(arn), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] No such entity found for Policy Attachment (%s)", d.Id()) - d.SetId("") - return nil - } - } - return err - } - - ul := make([]string, 0) - rl := make([]string, 0) - gl := make([]string, 0) - - args := iam.ListEntitiesForPolicyInput{ - PolicyArn: aws.String(arn), - } - err = 
conn.ListEntitiesForPolicyPages(&args, func(page *iam.ListEntitiesForPolicyOutput, lastPage bool) bool { - for _, u := range page.PolicyUsers { - ul = append(ul, *u.UserName) - } - - for _, r := range page.PolicyRoles { - rl = append(rl, *r.RoleName) - } - - for _, g := range page.PolicyGroups { - gl = append(gl, *g.GroupName) - } - return true - }) - if err != nil { - return err - } - - userErr := d.Set("users", ul) - roleErr := d.Set("roles", rl) - groupErr := d.Set("groups", gl) - - if userErr != nil || roleErr != nil || groupErr != nil { - return composeErrors(fmt.Sprint("[WARN} Error setting user, role, or group list from IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr) - } - - return nil -} -func resourceAwsIamPolicyAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - name := d.Get("name").(string) - var userErr, roleErr, groupErr error - - if d.HasChange("users") { - userErr = updateUsers(conn, d, meta) - } - if d.HasChange("roles") { - roleErr = updateRoles(conn, d, meta) - } - if d.HasChange("groups") { - groupErr = updateGroups(conn, d, meta) - } - if userErr != nil || roleErr != nil || groupErr != nil { - return composeErrors(fmt.Sprint("[WARN] Error updating user, role, or group list from IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr) - } - return resourceAwsIamPolicyAttachmentRead(d, meta) -} - -func resourceAwsIamPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - name := d.Get("name").(string) - arn := d.Get("policy_arn").(string) - users := expandStringList(d.Get("users").(*schema.Set).List()) - roles := expandStringList(d.Get("roles").(*schema.Set).List()) - groups := expandStringList(d.Get("groups").(*schema.Set).List()) - - var userErr, roleErr, groupErr error - if len(users) != 0 { - userErr = detachPolicyFromUsers(conn, users, arn) - } - if len(roles) != 0 { - roleErr = detachPolicyFromRoles(conn, 
roles, arn) - } - if len(groups) != 0 { - groupErr = detachPolicyFromGroups(conn, groups, arn) - } - if userErr != nil || roleErr != nil || groupErr != nil { - return composeErrors(fmt.Sprint("[WARN] Error removing user, role, or group list from IAM Policy Detach ", name, ":"), userErr, roleErr, groupErr) - } - return nil -} - -func composeErrors(desc string, uErr error, rErr error, gErr error) error { - errMsg := fmt.Sprintf(desc) - errs := []error{uErr, rErr, gErr} - for _, e := range errs { - if e != nil { - errMsg = errMsg + "\n– " + e.Error() - } - } - return fmt.Errorf(errMsg) -} - -func attachPolicyToUsers(conn *iam.IAM, users []*string, arn string) error { - for _, u := range users { - _, err := conn.AttachUserPolicy(&iam.AttachUserPolicyInput{ - UserName: u, - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - } - return nil -} -func attachPolicyToRoles(conn *iam.IAM, roles []*string, arn string) error { - for _, r := range roles { - _, err := conn.AttachRolePolicy(&iam.AttachRolePolicyInput{ - RoleName: r, - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - - var attachmentErr error - attachmentErr = resource.Retry(2*time.Minute, func() *resource.RetryError { - - input := iam.ListRolePoliciesInput{ - RoleName: r, - } - - attachedPolicies, err := conn.ListRolePolicies(&input) - if err != nil { - return resource.NonRetryableError(err) - } - - if len(attachedPolicies.PolicyNames) > 0 { - var foundPolicy bool - for _, policyName := range attachedPolicies.PolicyNames { - if strings.HasSuffix(arn, *policyName) { - foundPolicy = true - break - } - } - - if !foundPolicy { - return resource.NonRetryableError(err) - } - } - - return nil - }) - - if attachmentErr != nil { - return attachmentErr - } - } - return nil -} -func attachPolicyToGroups(conn *iam.IAM, groups []*string, arn string) error { - for _, g := range groups { - _, err := conn.AttachGroupPolicy(&iam.AttachGroupPolicyInput{ - GroupName: g, - PolicyArn: 
aws.String(arn), - }) - if err != nil { - return err - } - } - return nil -} -func updateUsers(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error { - arn := d.Get("policy_arn").(string) - o, n := d.GetChange("users") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if rErr := detachPolicyFromUsers(conn, remove, arn); rErr != nil { - return rErr - } - if aErr := attachPolicyToUsers(conn, add, arn); aErr != nil { - return aErr - } - return nil -} -func updateRoles(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error { - arn := d.Get("policy_arn").(string) - o, n := d.GetChange("roles") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if rErr := detachPolicyFromRoles(conn, remove, arn); rErr != nil { - return rErr - } - if aErr := attachPolicyToRoles(conn, add, arn); aErr != nil { - return aErr - } - return nil -} -func updateGroups(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error { - arn := d.Get("policy_arn").(string) - o, n := d.GetChange("groups") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - if rErr := detachPolicyFromGroups(conn, remove, arn); rErr != nil { - return rErr - } - if aErr := attachPolicyToGroups(conn, add, arn); aErr != nil { - return aErr - } - return nil - -} -func detachPolicyFromUsers(conn *iam.IAM, users []*string, arn string) error { - for _, u := range users { - _, err := conn.DetachUserPolicy(&iam.DetachUserPolicyInput{ - UserName: 
u, - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - } - return nil -} -func detachPolicyFromRoles(conn *iam.IAM, roles []*string, arn string) error { - for _, r := range roles { - _, err := conn.DetachRolePolicy(&iam.DetachRolePolicyInput{ - RoleName: r, - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - } - return nil -} -func detachPolicyFromGroups(conn *iam.IAM, groups []*string, arn string) error { - for _, g := range groups { - _, err := conn.DetachGroupPolicy(&iam.DetachGroupPolicyInput{ - GroupName: g, - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go b/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go deleted file mode 100644 index 6ae57b1d5..000000000 --- a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSPolicyAttachment_basic(t *testing.T) { - var out iam.ListEntitiesForPolicyOutput - - user1 := fmt.Sprintf("test-user-%d", acctest.RandInt()) - user2 := fmt.Sprintf("test-user-%d", acctest.RandInt()) - user3 := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSPolicyAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSPolicyAttachConfig(user1), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSPolicyAttachmentExists("aws_iam_policy_attachment.test-attach", 3, &out), - testAccCheckAWSPolicyAttachmentAttributes([]string{user1}, []string{"test-role"}, 
[]string{"test-group"}, &out), - ), - }, - resource.TestStep{ - Config: testAccAWSPolicyAttachConfigUpdate(user1, user2, user3), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSPolicyAttachmentExists("aws_iam_policy_attachment.test-attach", 6, &out), - testAccCheckAWSPolicyAttachmentAttributes([]string{user3, user3}, []string{"test-role2", "test-role3"}, []string{"test-group2", "test-group3"}, &out), - ), - }, - }, - }) -} - -func TestAccAWSPolicyAttachment_paginatedEntities(t *testing.T) { - var out iam.ListEntitiesForPolicyOutput - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSPolicyAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSPolicyPaginatedAttachConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSPolicyAttachmentExists("aws_iam_policy_attachment.test-paginated-attach", 101, &out), - ), - }, - }, - }) -} - -func testAccCheckAWSPolicyAttachmentDestroy(s *terraform.State) error { - return nil -} - -func testAccCheckAWSPolicyAttachmentExists(n string, c int64, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No policy name is set") - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - arn := rs.Primary.Attributes["policy_arn"] - - resp, err := conn.GetPolicy(&iam.GetPolicyInput{ - PolicyArn: aws.String(arn), - }) - if err != nil { - return fmt.Errorf("Error: Policy (%s) not found", n) - } - if c != *resp.Policy.AttachmentCount { - return fmt.Errorf("Error: Policy (%s) has wrong number of entities attached on initial creation", n) - } - resp2, err := conn.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{ - PolicyArn: aws.String(arn), - }) - if err != nil { - 
return fmt.Errorf("Error: Failed to get entities for Policy (%s)", arn) - } - - *out = *resp2 - return nil - } -} - -func testAccCheckAWSPolicyAttachmentAttributes(users []string, roles []string, groups []string, out *iam.ListEntitiesForPolicyOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - uc := len(users) - rc := len(roles) - gc := len(groups) - - for _, u := range users { - for _, pu := range out.PolicyUsers { - if u == *pu.UserName { - uc-- - } - } - } - for _, r := range roles { - for _, pr := range out.PolicyRoles { - if r == *pr.RoleName { - rc-- - } - } - } - for _, g := range groups { - for _, pg := range out.PolicyGroups { - if g == *pg.GroupName { - gc-- - } - } - } - if uc != 0 || rc != 0 || gc != 0 { - return fmt.Errorf("Error: Number of attached users, roles, or groups was incorrect:\n expected %d users and found %d\nexpected %d roles and found %d\nexpected %d groups and found %d", len(users), len(users)-uc, len(roles), len(roles)-rc, len(groups), len(groups)-gc) - } - return nil - } -} - -func testAccAWSPolicyAttachConfig(u1 string) string { - return fmt.Sprintf(` -resource "aws_iam_user" "user" { - name = "%s" -} -resource "aws_iam_role" "role" { - name = "test-role" - assume_role_policy = < 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 - value := v.(string) - if len(value) > 32 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 32 characters, name is limited to 64", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) 
{ - errors = append(errors, fmt.Errorf( - "%q must match [\\w+=,.@-]", k)) - } - return - }, - }, - - "path": { - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateIamRoleDescription, - }, - - "assume_role_policy": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - ValidateFunc: validateJsonString, - }, - - "create_date": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.UniqueId() - } - - request := &iam.CreateRoleInput{ - Path: aws.String(d.Get("path").(string)), - RoleName: aws.String(name), - AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)), - } - - if v, ok := d.GetOk("description"); ok { - request.Description = aws.String(v.(string)) - } - - var createResp *iam.CreateRoleOutput - err := resource.Retry(30*time.Second, func() *resource.RetryError { - var err error - createResp, err = iamconn.CreateRole(request) - // IAM users (referenced in Principal field of assume policy) - // can take ~30 seconds to propagate in AWS - if isAWSErr(err, "MalformedPolicyDocument", "Invalid principal in policy") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - }) - if err != nil { - return fmt.Errorf("Error creating IAM Role %s: %s", name, err) - } - d.SetId(*createResp.Role.RoleName) - return resourceAwsIamRoleRead(d, meta) -} - -func resourceAwsIamRoleRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.GetRoleInput{ - RoleName: aws.String(d.Id()), - } 
- - getResp, err := iamconn.GetRole(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM Role %s: %s", d.Id(), err) - } - - role := getResp.Role - - if err := d.Set("name", role.RoleName); err != nil { - return err - } - if err := d.Set("arn", role.Arn); err != nil { - return err - } - if err := d.Set("path", role.Path); err != nil { - return err - } - if err := d.Set("unique_id", role.RoleId); err != nil { - return err - } - if err := d.Set("create_date", role.CreateDate.Format(time.RFC3339)); err != nil { - return err - } - - if role.Description != nil { - // the description isn't present in the response to CreateRole. - if err := d.Set("description", role.Description); err != nil { - return err - } - } - - assumRolePolicy, err := url.QueryUnescape(*role.AssumeRolePolicyDocument) - if err != nil { - return err - } - if err := d.Set("assume_role_policy", assumRolePolicy); err != nil { - return err - } - return nil -} - -func resourceAwsIamRoleUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - if d.HasChange("assume_role_policy") { - assumeRolePolicyInput := &iam.UpdateAssumeRolePolicyInput{ - RoleName: aws.String(d.Id()), - PolicyDocument: aws.String(d.Get("assume_role_policy").(string)), - } - _, err := iamconn.UpdateAssumeRolePolicy(assumeRolePolicyInput) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return fmt.Errorf("Error Updating IAM Role (%s) Assume Role Policy: %s", d.Id(), err) - } - } - - if d.HasChange("description") { - roleDescriptionInput := &iam.UpdateRoleDescriptionInput{ - RoleName: aws.String(d.Id()), - Description: aws.String(d.Get("description").(string)), - } - _, err := iamconn.UpdateRoleDescription(roleDescriptionInput) - if err != nil { - if iamerr, ok := err.(awserr.Error); 
ok && iamerr.Code() == "NoSuchEntity" { - d.SetId("") - return nil - } - return fmt.Errorf("Error Updating IAM Role (%s) Assume Role Policy: %s", d.Id(), err) - } - } - - return nil -} - -func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - // Roles cannot be destroyed when attached to an existing Instance Profile - resp, err := iamconn.ListInstanceProfilesForRole(&iam.ListInstanceProfilesForRoleInput{ - RoleName: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("Error listing Profiles for IAM Role (%s) when trying to delete: %s", d.Id(), err) - } - - // Loop and remove this Role from any Profiles - if len(resp.InstanceProfiles) > 0 { - for _, i := range resp.InstanceProfiles { - _, err := iamconn.RemoveRoleFromInstanceProfile(&iam.RemoveRoleFromInstanceProfileInput{ - InstanceProfileName: i.InstanceProfileName, - RoleName: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("Error deleting IAM Role %s: %s", d.Id(), err) - } - } - } - - request := &iam.DeleteRoleInput{ - RoleName: aws.String(d.Id()), - } - - // IAM is eventually consistent and deletion of attached policies may take time - return resource.Retry(30*time.Second, func() *resource.RetryError { - _, err := iamconn.DeleteRole(request) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "DeleteConflict" { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(fmt.Errorf("Error deleting IAM Role %s: %s", d.Id(), err)) - } - return nil - }) -} diff --git a/builtin/providers/aws/resource_aws_iam_role_policy.go b/builtin/providers/aws/resource_aws_iam_role_policy.go deleted file mode 100644 index ec05a2259..000000000 --- a/builtin/providers/aws/resource_aws_iam_role_policy.go +++ /dev/null @@ -1,152 +0,0 @@ -package aws - -import ( - "fmt" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/iam" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamRolePolicy() *schema.Resource { - return &schema.Resource{ - // PutRolePolicy API is idempotent, so these can be the same. - Create: resourceAwsIamRolePolicyPut, - Update: resourceAwsIamRolePolicyPut, - - Read: resourceAwsIamRolePolicyRead, - Delete: resourceAwsIamRolePolicyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "policy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateIAMPolicyJson, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateIamRolePolicyName, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateIamRolePolicyNamePrefix, - }, - "role": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsIamRolePolicyPut(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - request := &iam.PutRolePolicyInput{ - RoleName: aws.String(d.Get("role").(string)), - PolicyDocument: aws.String(d.Get("policy").(string)), - } - - var policyName string - if v, ok := d.GetOk("name"); ok { - policyName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - policyName = resource.PrefixedUniqueId(v.(string)) - } else { - policyName = resource.UniqueId() - } - request.PolicyName = aws.String(policyName) - - if _, err := iamconn.PutRolePolicy(request); err != nil { - return fmt.Errorf("Error putting IAM role policy %s: %s", *request.PolicyName, err) - } - - d.SetId(fmt.Sprintf("%s:%s", *request.RoleName, *request.PolicyName)) - return nil -} - -func resourceAwsIamRolePolicyRead(d 
*schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - role, name, err := resourceAwsIamRolePolicyParseId(d.Id()) - if err != nil { - return err - } - - request := &iam.GetRolePolicyInput{ - PolicyName: aws.String(name), - RoleName: aws.String(role), - } - - getResp, err := iamconn.GetRolePolicy(request) - if err != nil { - if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me - d.SetId("") - return nil - } - return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err) - } - - if getResp.PolicyDocument == nil { - return fmt.Errorf("GetRolePolicy returned a nil policy document") - } - - policy, err := url.QueryUnescape(*getResp.PolicyDocument) - if err != nil { - return err - } - if err := d.Set("policy", policy); err != nil { - return err - } - if err := d.Set("name", name); err != nil { - return err - } - return d.Set("role", role) -} - -func resourceAwsIamRolePolicyDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - role, name, err := resourceAwsIamRolePolicyParseId(d.Id()) - if err != nil { - return err - } - - request := &iam.DeleteRolePolicyInput{ - PolicyName: aws.String(name), - RoleName: aws.String(role), - } - - if _, err := iamconn.DeleteRolePolicy(request); err != nil { - return fmt.Errorf("Error deleting IAM role policy %s: %s", d.Id(), err) - } - return nil -} - -func resourceAwsIamRolePolicyParseId(id string) (roleName, policyName string, err error) { - parts := strings.SplitN(id, ":", 2) - if len(parts) != 2 { - err = fmt.Errorf("role_policy id must be of the form :") - return - } - - roleName = parts[0] - policyName = parts[1] - return -} diff --git a/builtin/providers/aws/resource_aws_iam_role_policy_attachment.go b/builtin/providers/aws/resource_aws_iam_role_policy_attachment.go deleted file mode 100644 index bb72f879a..000000000 --- a/builtin/providers/aws/resource_aws_iam_role_policy_attachment.go +++ 
/dev/null @@ -1,126 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsIamRolePolicyAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsIamRolePolicyAttachmentCreate, - Read: resourceAwsIamRolePolicyAttachmentRead, - Delete: resourceAwsIamRolePolicyAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "role": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsIamRolePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - role := d.Get("role").(string) - arn := d.Get("policy_arn").(string) - - err := attachPolicyToRole(conn, role, arn) - if err != nil { - return fmt.Errorf("[WARN] Error attaching policy %s to IAM Role %s: %v", arn, role, err) - } - - d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", role))) - return resourceAwsIamRolePolicyAttachmentRead(d, meta) -} - -func resourceAwsIamRolePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - role := d.Get("role").(string) - arn := d.Get("policy_arn").(string) - - _, err := conn.GetRole(&iam.GetRoleInput{ - RoleName: aws.String(role), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] No such entity found for Policy Attachment (%s)", role) - d.SetId("") - return nil - } - } - return err - } - - args := iam.ListAttachedRolePoliciesInput{ - RoleName: aws.String(role), - } - var policy string - err = conn.ListAttachedRolePoliciesPages(&args, func(page *iam.ListAttachedRolePoliciesOutput, lastPage 
bool) bool { - for _, p := range page.AttachedPolicies { - if *p.PolicyArn == arn { - policy = *p.PolicyArn - } - } - - return policy == "" - }) - if err != nil { - return err - } - if policy == "" { - log.Printf("[WARN] No such policy found for Role Policy Attachment (%s)", role) - d.SetId("") - } - - return nil -} - -func resourceAwsIamRolePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - role := d.Get("role").(string) - arn := d.Get("policy_arn").(string) - - err := detachPolicyFromRole(conn, role, arn) - if err != nil { - return fmt.Errorf("[WARN] Error removing policy %s from IAM Role %s: %v", arn, role, err) - } - return nil -} - -func attachPolicyToRole(conn *iam.IAM, role string, arn string) error { - _, err := conn.AttachRolePolicy(&iam.AttachRolePolicyInput{ - RoleName: aws.String(role), - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - return nil -} - -func detachPolicyFromRole(conn *iam.IAM, role string, arn string) error { - _, err := conn.DetachRolePolicy(&iam.DetachRolePolicyInput{ - RoleName: aws.String(role), - PolicyArn: aws.String(arn), - }) - if err != nil { - return err - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_role_policy_attachment_test.go b/builtin/providers/aws/resource_aws_iam_role_policy_attachment_test.go deleted file mode 100644 index 7a723bc07..000000000 --- a/builtin/providers/aws/resource_aws_iam_role_policy_attachment_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRolePolicyAttachment_basic(t *testing.T) { - var out iam.ListAttachedRolePoliciesOutput - rInt := acctest.RandInt() - testPolicy := fmt.Sprintf("tf-acctest-%d", rInt) - 
testPolicy2 := fmt.Sprintf("tf-acctest2-%d", rInt) - testPolicy3 := fmt.Sprintf("tf-acctest3-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRolePolicyAttachmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRolePolicyAttachConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 1, &out), - testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy}, &out), - ), - }, - { - Config: testAccAWSRolePolicyAttachConfigUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 2, &out), - testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy2, testPolicy3}, &out), - ), - }, - }, - }) -} -func testAccCheckAWSRolePolicyAttachmentDestroy(s *terraform.State) error { - return nil -} - -func testAccCheckAWSRolePolicyAttachmentExists(n string, c int, out *iam.ListAttachedRolePoliciesOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No policy name is set") - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - role := rs.Primary.Attributes["role"] - - attachedPolicies, err := conn.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{ - RoleName: aws.String(role), - }) - if err != nil { - return fmt.Errorf("Error: Failed to get attached policies for role %s (%s)", role, n) - } - if c != len(attachedPolicies.AttachedPolicies) { - return fmt.Errorf("Error: Role (%s) has wrong number of policies attached on initial creation", n) - } - - *out = *attachedPolicies - return nil - } -} -func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.ListAttachedRolePoliciesOutput) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - matched := 0 - - for _, p := range policies { - for _, ap := range out.AttachedPolicies { - // *ap.PolicyArn like arn:aws:iam::111111111111:policy/test-policy - parts := strings.Split(*ap.PolicyArn, "/") - if len(parts) == 2 && p == parts[1] { - matched++ - } - } - } - if matched != len(policies) || matched != len(out.AttachedPolicies) { - return fmt.Errorf("Error: Number of attached policies was incorrect: expected %d matched policies, matched %d of %d", len(policies), matched, len(out.AttachedPolicies)) - } - return nil - } -} - -func testAccAWSRolePolicyAttachConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_iam_role" "role" { - name = "test-role-%d" - assume_role_policy = < 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 30 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 30 characters, name is limited to 128", k)) - } - return - }, - }, - - "arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsIAMServerCertificateCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - - var sslCertName string - if v, ok := d.GetOk("name"); ok { - sslCertName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - sslCertName = resource.PrefixedUniqueId(v.(string)) - } else { - sslCertName = resource.UniqueId() - } - - createOpts := &iam.UploadServerCertificateInput{ - CertificateBody: aws.String(d.Get("certificate_body").(string)), - PrivateKey: aws.String(d.Get("private_key").(string)), - ServerCertificateName: aws.String(sslCertName), - } - - if v, ok := d.GetOk("certificate_chain"); ok { - 
createOpts.CertificateChain = aws.String(v.(string)) - } - - if v, ok := d.GetOk("path"); ok { - createOpts.Path = aws.String(v.(string)) - } - - log.Printf("[DEBUG] Creating IAM Server Certificate with opts: %s", createOpts) - resp, err := conn.UploadServerCertificate(createOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error uploading server certificate, error: %s: %s", awsErr.Code(), awsErr.Message()) - } - return fmt.Errorf("[WARN] Error uploading server certificate, error: %s", err) - } - - d.SetId(*resp.ServerCertificateMetadata.ServerCertificateId) - d.Set("name", sslCertName) - - return resourceAwsIAMServerCertificateRead(d, meta) -} - -func resourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - resp, err := conn.GetServerCertificate(&iam.GetServerCertificateInput{ - ServerCertificateName: aws.String(d.Get("name").(string)), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] IAM Server Cert (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("[WARN] Error reading IAM Server Certificate: %s: %s", awsErr.Code(), awsErr.Message()) - } - return fmt.Errorf("[WARN] Error reading IAM Server Certificate: %s", err) - } - - d.SetId(*resp.ServerCertificate.ServerCertificateMetadata.ServerCertificateId) - - // these values should always be present, and have a default if not set in - // configuration, and so safe to reference with nil checks - d.Set("certificate_body", normalizeCert(resp.ServerCertificate.CertificateBody)) - - c := normalizeCert(resp.ServerCertificate.CertificateChain) - if c != "" { - d.Set("certificate_chain", c) - } - - d.Set("path", resp.ServerCertificate.ServerCertificateMetadata.Path) - d.Set("arn", resp.ServerCertificate.ServerCertificateMetadata.Arn) - - return nil -} - -func 
resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).iamconn - log.Printf("[INFO] Deleting IAM Server Certificate: %s", d.Id()) - err := resource.Retry(10*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteServerCertificate(&iam.DeleteServerCertificateInput{ - ServerCertificateName: aws.String(d.Get("name").(string)), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DeleteConflict" && strings.Contains(awsErr.Message(), "currently in use by arn") { - log.Printf("[WARN] Conflict deleting server certificate: %s, retrying", awsErr.Message()) - return resource.RetryableError(err) - } - if awsErr.Code() == "NoSuchEntity" { - log.Printf("[WARN] IAM Server Certificate (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceAwsIAMServerCertificateImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - // private_key can't be fetched from any API call - return []*schema.ResourceData{d}, nil -} - -func normalizeCert(cert interface{}) string { - if cert == nil || cert == (*string)(nil) { - return "" - } - - var rawCert string - switch cert.(type) { - case string: - rawCert = cert.(string) - case *string: - rawCert = *cert.(*string) - default: - return "" - } - - cleanVal := sha1.Sum(stripCR([]byte(strings.TrimSpace(rawCert)))) - return hex.EncodeToString(cleanVal[:]) -} - -// strip CRs from raw literals. 
Lifted from go/scanner/scanner.go -// See https://github.com/golang/go/blob/release-branch.go1.6/src/go/scanner/scanner.go#L479 -func stripCR(b []byte) []byte { - c := make([]byte, len(b)) - i := 0 - for _, ch := range b { - if ch != '\r' { - c[i] = ch - i++ - } - } - return c[:i] -} diff --git a/builtin/providers/aws/resource_aws_iam_server_certificate_test.go b/builtin/providers/aws/resource_aws_iam_server_certificate_test.go deleted file mode 100644 index 1dad7b829..000000000 --- a/builtin/providers/aws/resource_aws_iam_server_certificate_test.go +++ /dev/null @@ -1,358 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSIAMServerCertificate_basic(t *testing.T) { - var cert iam.ServerCertificate - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccIAMServerCertConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), - testAccCheckAWSServerCertAttributes(&cert), - ), - }, - }, - }) -} - -func TestAccAWSIAMServerCertificate_name_prefix(t *testing.T) { - var cert iam.ServerCertificate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccIAMServerCertConfig_random, - Check: resource.ComposeTestCheckFunc( - testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), - testAccCheckAWSServerCertAttributes(&cert), - ), - }, - }, - }) -} - -func 
TestAccAWSIAMServerCertificate_disappears(t *testing.T) { - var cert iam.ServerCertificate - - testDestroyCert := func(*terraform.State) error { - // reach out and DELETE the Cert - conn := testAccProvider.Meta().(*AWSClient).iamconn - _, err := conn.DeleteServerCertificate(&iam.DeleteServerCertificateInput{ - ServerCertificateName: cert.ServerCertificateMetadata.ServerCertificateName, - }) - - if err != nil { - return fmt.Errorf("Error destroying cert in test: %s", err) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccIAMServerCertConfig_random, - Check: resource.ComposeTestCheckFunc( - testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), - testAccCheckAWSServerCertAttributes(&cert), - testDestroyCert, - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSIAMServerCertificate_file(t *testing.T) { - var cert iam.ServerCertificate - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckIAMServerCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccIAMServerCertConfig_file(rInt, "iam-ssl-unix-line-endings"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), - testAccCheckAWSServerCertAttributes(&cert), - ), - }, - - { - Config: testAccIAMServerCertConfig_file(rInt, "iam-ssl-windows-line-endings"), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), - testAccCheckAWSServerCertAttributes(&cert), - ), - }, - }, - }) -} - -func testAccCheckCertExists(n string, cert *iam.ServerCertificate) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] 
- if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Server Cert ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).iamconn - describeOpts := &iam.GetServerCertificateInput{ - ServerCertificateName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.GetServerCertificate(describeOpts) - if err != nil { - return err - } - - *cert = *resp.ServerCertificate - - return nil - } -} - -func testAccCheckAWSServerCertAttributes(cert *iam.ServerCertificate) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.Contains(*cert.ServerCertificateMetadata.ServerCertificateName, "terraform-test-cert") { - return fmt.Errorf("Bad Server Cert Name: %s", *cert.ServerCertificateMetadata.ServerCertificateName) - } - - if *cert.CertificateBody != strings.TrimSpace(certBody) { - return fmt.Errorf("Bad Server Cert body\n\t expected: %s\n\tgot: %s\n", certBody, *cert.CertificateBody) - } - return nil - } -} - -func testAccCheckIAMServerCertificateDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_server_certificate" { - continue - } - - // Try to find the Cert - opts := &iam.GetServerCertificateInput{ - ServerCertificateName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.GetServerCertificate(opts) - if err == nil { - if resp.ServerCertificate != nil { - return fmt.Errorf("Error: Server Cert still exists") - } - - return nil - } - - } - - return nil -} - -var certBody = fmt.Sprintf(` ------BEGIN CERTIFICATE----- -MIIDBjCCAe4CCQCGWwBmOiHQdTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTE2MDYyMTE2MzM0MVoXDTE3MDYyMTE2MzM0MVowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 
-AL+LFlsCJG5txZp4yuu+lQnuUrgBXRG+irQqcTXlV91Bp5hpmRIyhnGCtWxxDBUL -xrh4WN3VV/0jDzKT976oLgOy3hj56Cdqf+JlZ1qgMN5bHB3mm3aVWnrnsLbBsfwZ -SEbk3Kht/cE1nK2toNVW+rznS3m+eoV3Zn/DUNwGlZr42hGNs6ETn2jURY78ETqR -mW47xvjf86eIo7vULHJaY6xyarPqkL8DZazOmvY06hUGvGwGBny7gugfXqDG+I8n -cPBsGJGSAmHmVV8o0RCB9UjY+TvSMQRpEDoVlvyrGuglsD8to/4+7UcsuDGlRYN6 -jmIOC37mOi/jwRfWL1YUa4MCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAPDxTH0oQ -JjKXoJgkmQxurB81RfnK/NrswJVzWbOv6ejcbhwh+/ZgJTMc15BrYcxU6vUW1V/i -Z7APU0qJ0icECACML+a2fRI7YdLCTiPIOmY66HY8MZHAn3dGjU5TeiUflC0n0zkP -mxKJe43kcYLNDItbfvUDo/GoxTXrC3EFVZyU0RhFzoVJdODlTHXMVFCzcbQEBrBJ -xKdShCEc8nFMneZcGFeEU488ntZoWzzms8/QpYrKa5S0Sd7umEU2Kwu4HTkvUFg/ -CqDUFjhydXxYRsxXBBrEiLOE5BdtJR1sH/QHxIJe23C9iHI2nS1NbLziNEApLwC4 -GnSud83VUo9G9w== ------END CERTIFICATE-----`) - -func testAccIAMServerCertConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_iam_server_certificate" "test_cert" { - name = "terraform-test-cert-%d" - certificate_body = < 128 { - es = append(es, errors.New("maximum password_length is 128 characters")) - } - return -} - -// generatePassword generates a random password of a given length using -// characters that are likely to satisfy any possible AWS password policy -// (given sufficient length). 
-func generatePassword(length int) string { - charsets := []string{ - "abcdefghijklmnopqrstuvwxyz", - "ABCDEFGHIJKLMNOPQRSTUVWXYZ", - "012346789", - "!@#$%^&*()_+-=[]{}|'", - } - - // Use all character sets - random := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) - components := make(map[int]byte, length) - for i := 0; i < length; i++ { - charset := charsets[i%len(charsets)] - components[i] = charset[random.Intn(len(charset))] - } - - // Randomise the ordering so we don't end up with a predictable - // lower case, upper case, numeric, symbol pattern - result := make([]byte, length) - i := 0 - for _, b := range components { - result[i] = b - i = i + 1 - } - - return string(result) -} - -func resourceAwsIamUserLoginProfileCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn - - encryptionKey, err := encryption.RetrieveGPGKey(d.Get("pgp_key").(string)) - if err != nil { - return err - } - - username := d.Get("user").(string) - passwordResetRequired := d.Get("password_reset_required").(bool) - passwordLength := d.Get("password_length").(int) - - _, err = iamconn.GetLoginProfile(&iam.GetLoginProfileInput{ - UserName: aws.String(username), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchEntity" { - // If there is already a login profile, bring it under management (to prevent - // resource creation diffs) - we will never modify it, but obviously cannot - // set the password. 
- d.SetId(username) - d.Set("key_fingerprint", "") - d.Set("encrypted_password", "") - return nil - } - } - - initialPassword := generatePassword(passwordLength) - fingerprint, encrypted, err := encryption.EncryptValue(encryptionKey, initialPassword, "Password") - if err != nil { - return err - } - - request := &iam.CreateLoginProfileInput{ - UserName: aws.String(username), - Password: aws.String(initialPassword), - PasswordResetRequired: aws.Bool(passwordResetRequired), - } - - log.Println("[DEBUG] Create IAM User Login Profile request:", request) - createResp, err := iamconn.CreateLoginProfile(request) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "EntityAlreadyExists" { - // If there is already a login profile, bring it under management (to prevent - // resource creation diffs) - we will never modify it, but obviously cannot - // set the password. - d.SetId(username) - d.Set("key_fingerprint", "") - d.Set("encrypted_password", "") - return nil - } - return errwrap.Wrapf(fmt.Sprintf("Error creating IAM User Login Profile for %q: {{err}}", username), err) - } - - d.SetId(*createResp.LoginProfile.UserName) - d.Set("key_fingerprint", fingerprint) - d.Set("encrypted_password", encrypted) - return nil -} diff --git a/builtin/providers/aws/resource_aws_iam_user_login_profile_test.go b/builtin/providers/aws/resource_aws_iam_user_login_profile_test.go deleted file mode 100644 index 2755f917c..000000000 --- a/builtin/providers/aws/resource_aws_iam_user_login_profile_test.go +++ /dev/null @@ -1,325 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "testing" - "time" - - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - 
"github.com/hashicorp/vault/helper/pgpkeys" -) - -func TestAccAWSUserLoginProfile_basic(t *testing.T) { - var conf iam.GetLoginProfileOutput - - username := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSUserLoginProfileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSUserLoginProfileConfig(username, "/", testPubKey1), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSUserLoginProfileExists("aws_iam_user_login_profile.user", &conf), - testDecryptPasswordAndTest("aws_iam_user_login_profile.user", "aws_iam_access_key.user", testPrivKey1), - ), - }, - }, - }) -} - -func TestAccAWSUserLoginProfile_keybase(t *testing.T) { - var conf iam.GetLoginProfileOutput - - username := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSUserLoginProfileDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSUserLoginProfileConfig(username, "/", "keybase:terraformacctest"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSUserLoginProfileExists("aws_iam_user_login_profile.user", &conf), - resource.TestCheckResourceAttrSet("aws_iam_user_login_profile.user", "encrypted_password"), - resource.TestCheckResourceAttrSet("aws_iam_user_login_profile.user", "key_fingerprint"), - ), - }, - }, - }) -} - -func TestAccAWSUserLoginProfile_keybaseDoesntExist(t *testing.T) { - username := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSUserLoginProfileDestroy, - Steps: []resource.TestStep{ - { - // We own this account but it doesn't have any key associated with it - Config: testAccAWSUserLoginProfileConfig(username, "/", 
"keybase:terraform_nope"), - ExpectError: regexp.MustCompile(`Error retrieving Public Key`), - }, - }, - }) -} - -func TestAccAWSUserLoginProfile_notAKey(t *testing.T) { - username := fmt.Sprintf("test-user-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSUserLoginProfileDestroy, - Steps: []resource.TestStep{ - { - // We own this account but it doesn't have any key associated with it - Config: testAccAWSUserLoginProfileConfig(username, "/", "lolimnotakey"), - ExpectError: regexp.MustCompile(`Error encrypting Password`), - }, - }, - }) -} - -func testAccCheckAWSUserLoginProfileDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_iam_user_login_profile" { - continue - } - - // Try to get user - _, err := iamconn.GetLoginProfile(&iam.GetLoginProfileInput{ - UserName: aws.String(rs.Primary.ID), - }) - if err == nil { - return fmt.Errorf("still exists.") - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "NoSuchEntity" { - return err - } - } - - return nil -} - -func testDecryptPasswordAndTest(nProfile, nAccessKey, key string) resource.TestCheckFunc { - return func(s *terraform.State) error { - profileResource, ok := s.RootModule().Resources[nProfile] - if !ok { - return fmt.Errorf("Not found: %s", nProfile) - } - - password, ok := profileResource.Primary.Attributes["encrypted_password"] - if !ok { - return errors.New("No password in state") - } - - accessKeyResource, ok := s.RootModule().Resources[nAccessKey] - if !ok { - return fmt.Errorf("Not found: %s", nAccessKey) - } - - accessKeyId := accessKeyResource.Primary.ID - secretAccessKey, ok := accessKeyResource.Primary.Attributes["secret"] - if !ok { - return errors.New("No secret access key in state") - } - - 
decryptedPassword, err := pgpkeys.DecryptBytes(password, key) - if err != nil { - return fmt.Errorf("Error decrypting password: %s", err) - } - - iamAsCreatedUserSession := session.New(&aws.Config{ - Region: aws.String("us-west-2"), - Credentials: credentials.NewStaticCredentials(accessKeyId, secretAccessKey, ""), - }) - _, err = iamAsCreatedUserSession.Config.Credentials.Get() - if err != nil { - return fmt.Errorf("Error getting session credentials: %s", err) - } - - return resource.Retry(2*time.Minute, func() *resource.RetryError { - iamAsCreatedUser := iam.New(iamAsCreatedUserSession) - _, err = iamAsCreatedUser.ChangePassword(&iam.ChangePasswordInput{ - OldPassword: aws.String(decryptedPassword.String()), - NewPassword: aws.String(generatePassword(20)), - }) - if err != nil { - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidClientTokenId" { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(fmt.Errorf("Error changing decrypted password: %s", err)) - } - - return nil - }) - } -} - -func testAccCheckAWSUserLoginProfileExists(n string, res *iam.GetLoginProfileOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No UserName is set") - } - - iamconn := testAccProvider.Meta().(*AWSClient).iamconn - resp, err := iamconn.GetLoginProfile(&iam.GetLoginProfileInput{ - UserName: aws.String(rs.Primary.ID), - }) - if err != nil { - return err - } - - *res = *resp - - return nil - } -} - -func testAccAWSUserLoginProfileConfig(r, p, key string) string { - return fmt.Sprintf(` -resource "aws_iam_user" "user" { - name = "%s" - path = "%s" - force_destroy = true -} - -data "aws_caller_identity" "current" {} - -data "aws_iam_policy_document" "user" { - statement { - effect = "Allow" - actions = ["iam:GetAccountPasswordPolicy"] - resources = ["*"] - } - statement { - 
effect = "Allow" - actions = ["iam:ChangePassword"] - resources = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:user/&{aws:username}"] - } -} - -resource "aws_iam_user_policy" "user" { - name = "AllowChangeOwnPassword" - user = "${aws_iam_user.user.name}" - policy = "${data.aws_iam_policy_document.user.json}" -} - -resource "aws_iam_access_key" "user" { - user = "${aws_iam_user.user.name}" -} - -resource "aws_iam_user_login_profile" "user" { - user = "${aws_iam_user.user.name}" - pgp_key = < 0 { - d.Set("name", resp.AssessmentTargets[0].Name) - } - - return nil -} - -func resourceAwsInspectorAssessmentTargetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - input := inspector.UpdateAssessmentTargetInput{ - AssessmentTargetArn: aws.String(d.Id()), - AssessmentTargetName: aws.String(d.Get("name").(string)), - ResourceGroupArn: aws.String(d.Get("resource_group_arn").(string)), - } - - _, err := conn.UpdateAssessmentTarget(&input) - if err != nil { - return err - } - - log.Println("[DEBUG] Inspector Assessment Target updated") - - return resourceAwsInspectorAssessmentTargetRead(d, meta) -} - -func resourceAwsInspectorAssessmentTargetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - return resource.Retry(60*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteAssessmentTarget(&inspector.DeleteAssessmentTargetInput{ - AssessmentTargetArn: aws.String(d.Id()), - }) - if err != nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "AssessmentRunInProgressException" { - log.Printf("[ERROR] Assement Run in progress: %s", err) - return resource.RetryableError(err) - } else { - log.Printf("[ERROR] Error deleting Assement Target: %s", err) - return resource.NonRetryableError(err) - } - } - return nil - }) - -} diff --git a/builtin/providers/aws/resource_aws_inspector_assessment_target_test.go 
b/builtin/providers/aws/resource_aws_inspector_assessment_target_test.go deleted file mode 100644 index 5f8f019e8..000000000 --- a/builtin/providers/aws/resource_aws_inspector_assessment_target_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSInspectorTarget_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSInspectorTargetAssessmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSInspectorTargetAssessment, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTargetExists("aws_inspector_assessment_target.foo"), - ), - }, - { - Config: testAccCheckAWSInspectorTargetAssessmentModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTargetExists("aws_inspector_assessment_target.foo"), - ), - }, - { - Config: testAccCheckAWSInspectorTargetAssessmentUpdatedResourceGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTargetExists("aws_inspector_assessment_target.foo"), - ), - }, - }, - }) -} - -func testAccCheckAWSInspectorTargetAssessmentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).inspectorconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_inspector_assessment_target" { - continue - } - - resp, err := conn.DescribeAssessmentTargets(&inspector.DescribeAssessmentTargetsInput{ - AssessmentTargetArns: []*string{ - aws.String(rs.Primary.ID), - }, - }) - - if err != nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" { - return nil - } else { - return fmt.Errorf("Error finding Inspector Assessment Target: %s", 
err) - } - } - - if len(resp.AssessmentTargets) > 0 { - return fmt.Errorf("Found Target, expected none: %s", resp) - } - } - - return nil -} - -func testAccCheckAWSInspectorTargetExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - } -} - -var testAccAWSInspectorTargetAssessment = ` - -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "bar" - } -} - -resource "aws_inspector_assessment_target" "foo" { - name = "foo" - resource_group_arn = "${aws_inspector_resource_group.foo.arn}" -}` - -var testAccCheckAWSInspectorTargetAssessmentModified = ` - -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "bar" - } -} - -resource "aws_inspector_assessment_target" "foo" { - name = "bar" - resource_group_arn = "${aws_inspector_resource_group.foo.arn}" -}` - -var testAccCheckAWSInspectorTargetAssessmentUpdatedResourceGroup = ` - -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "bar" - } -} - -resource "aws_inspector_resource_group" "bar" { - tags { - Name = "test" - } -} - -resource "aws_inspector_assessment_target" "foo" { - name = "bar" - resource_group_arn = "${aws_inspector_resource_group.bar.arn}" -}` diff --git a/builtin/providers/aws/resource_aws_inspector_assessment_template.go b/builtin/providers/aws/resource_aws_inspector_assessment_template.go deleted file mode 100644 index 4856a000c..000000000 --- a/builtin/providers/aws/resource_aws_inspector_assessment_template.go +++ /dev/null @@ -1,121 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAWSInspectorAssessmentTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsInspectorAssessmentTemplateCreate, - Read: 
resourceAwsInspectorAssessmentTemplateRead, - Delete: resourceAwsInspectorAssessmentTemplateDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "target_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: true, - }, - "duration": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "rules_package_arns": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsInspectorAssessmentTemplateCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - rules := []*string{} - if attr := d.Get("rules_package_arns").(*schema.Set); attr.Len() > 0 { - rules = expandStringList(attr.List()) - } - - targetArn := d.Get("target_arn").(string) - templateName := d.Get("name").(string) - duration := int64(d.Get("duration").(int)) - - resp, err := conn.CreateAssessmentTemplate(&inspector.CreateAssessmentTemplateInput{ - AssessmentTargetArn: aws.String(targetArn), - AssessmentTemplateName: aws.String(templateName), - DurationInSeconds: aws.Int64(duration), - RulesPackageArns: rules, - }) - if err != nil { - return err - } - log.Printf("[DEBUG] Inspector Assessment Template %s created", *resp.AssessmentTemplateArn) - - d.Set("arn", resp.AssessmentTemplateArn) - - d.SetId(*resp.AssessmentTemplateArn) - - return resourceAwsInspectorAssessmentTemplateRead(d, meta) -} - -func resourceAwsInspectorAssessmentTemplateRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - resp, err := conn.DescribeAssessmentTemplates(&inspector.DescribeAssessmentTemplatesInput{ - AssessmentTemplateArns: []*string{ - aws.String(d.Id()), - }, - }, - ) - if err != 
nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" { - return nil - } else { - log.Printf("[ERROR] Error finding Inspector Assessment Template: %s", err) - return err - } - } - - if resp.AssessmentTemplates != nil && len(resp.AssessmentTemplates) > 0 { - d.Set("name", resp.AssessmentTemplates[0].Name) - } - return nil -} - -func resourceAwsInspectorAssessmentTemplateDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - _, err := conn.DeleteAssessmentTemplate(&inspector.DeleteAssessmentTemplateInput{ - AssessmentTemplateArn: aws.String(d.Id()), - }) - if err != nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "AssessmentRunInProgressException" { - log.Printf("[ERROR] Assement Run in progress: %s", err) - return err - } else { - log.Printf("[ERROR] Error deleting Assement Template: %s", err) - return err - } - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_inspector_assessment_template_test.go b/builtin/providers/aws/resource_aws_inspector_assessment_template_test.go deleted file mode 100644 index 2e7a1c0e8..000000000 --- a/builtin/providers/aws/resource_aws_inspector_assessment_template_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSInspectorTemplate_basic(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSInspectorTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSInspectorTemplateAssessment(rInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTemplateExists("aws_inspector_assessment_template.foo"), - ), - }, - resource.TestStep{ - Config: testAccCheckAWSInspectorTemplatetModified(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTargetExists("aws_inspector_assessment_template.foo"), - ), - }, - }, - }) -} - -func testAccCheckAWSInspectorTemplateDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).inspectorconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_inspector_assessment_template" { - continue - } - - resp, err := conn.DescribeAssessmentTemplates(&inspector.DescribeAssessmentTemplatesInput{ - AssessmentTemplateArns: []*string{ - aws.String(rs.Primary.ID), - }, - }) - - if err != nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" { - return nil - } else { - return fmt.Errorf("Error finding Inspector Assessment Template: %s", err) - } - } - - if len(resp.AssessmentTemplates) > 0 { - return fmt.Errorf("Found Template, expected none: %s", resp) - } - } - - return nil -} - -func testAccCheckAWSInspectorTemplateExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - } -} - -func testAccAWSInspectorTemplateAssessment(rInt int) string { - return fmt.Sprintf(` -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "tf-acc-test-%d" - } -} - -resource "aws_inspector_assessment_target" "foo" { - name = "tf-acc-test-basic-%d" - resource_group_arn = "${aws_inspector_resource_group.foo.arn}" -} - -resource "aws_inspector_assessment_template" "foo" { - name = "tf-acc-test-basic-tpl-%d" - target_arn = "${aws_inspector_assessment_target.foo.arn}" - duration = 3600 - - rules_package_arns = [ - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-9hgA516p", - 
"arn:aws:inspector:us-west-2:758058086616:rulespackage/0-H5hpSawc", - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ", - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-vg5GGHSD", - ] -}`, rInt, rInt, rInt) -} - -func testAccCheckAWSInspectorTemplatetModified(rInt int) string { - return fmt.Sprintf(` -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "tf-acc-test-%d" - } -} - -resource "aws_inspector_assessment_target" "foo" { - name = "tf-acc-test-basic-%d" - resource_group_arn = "${aws_inspector_resource_group.foo.arn}" -} - -resource "aws_inspector_assessment_template" "foo" { - name = "tf-acc-test-basic-tpl-%d" - target_arn = "${aws_inspector_assessment_target.foo.arn}" - duration = 3600 - - rules_package_arns = [ - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-9hgA516p", - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-H5hpSawc", - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ", - "arn:aws:inspector:us-west-2:758058086616:rulespackage/0-vg5GGHSD", - ] -}`, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_inspector_resource_group.go b/builtin/providers/aws/resource_aws_inspector_resource_group.go deleted file mode 100644 index 55f56696c..000000000 --- a/builtin/providers/aws/resource_aws_inspector_resource_group.go +++ /dev/null @@ -1,76 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/inspector" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAWSInspectorResourceGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsInspectorResourceGroupCreate, - Read: resourceAwsInspectorResourceGroupRead, - Delete: resourceAwsInspectorResourceGroupDelete, - - Schema: map[string]*schema.Schema{ - "tags": &schema.Schema{ - ForceNew: true, - Type: schema.TypeMap, - Required: true, - }, - "arn": &schema.Schema{ - Type: 
schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsInspectorResourceGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - resp, err := conn.CreateResourceGroup(&inspector.CreateResourceGroupInput{ - ResourceGroupTags: tagsFromMapInspector(d.Get("tags").(map[string]interface{})), - }) - - if err != nil { - return err - } - - d.Set("arn", *resp.ResourceGroupArn) - - d.SetId(*resp.ResourceGroupArn) - - return resourceAwsInspectorResourceGroupRead(d, meta) -} - -func resourceAwsInspectorResourceGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).inspectorconn - - _, err := conn.DescribeResourceGroups(&inspector.DescribeResourceGroupsInput{ - ResourceGroupArns: []*string{ - aws.String(d.Id()), - }, - }) - - if err != nil { - if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" { - return nil - } else { - log.Printf("[ERROR] Error finding Inspector resource group: %s", err) - return err - } - } - - return nil -} - -func resourceAwsInspectorResourceGroupDelete(d *schema.ResourceData, meta interface{}) error { - d.Set("arn", "") - d.SetId("") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_inspector_resource_group_test.go b/builtin/providers/aws/resource_aws_inspector_resource_group_test.go deleted file mode 100644 index 0024cccfe..000000000 --- a/builtin/providers/aws/resource_aws_inspector_resource_group_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSInspectorResourceGroup_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSInspectorResourceGroup, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSInspectorResourceGroupExists("aws_inspector_resource_group.foo"), - ), - }, - resource.TestStep{ - Config: testAccCheckAWSInspectorResourceGroupModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSInspectorTargetExists("aws_inspector_resource_group.foo"), - ), - }, - }, - }) -} - -func testAccCheckAWSInspectorResourceGroupExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - } -} - -var testAccAWSInspectorResourceGroup = ` -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "foo" - } -}` - -var testAccCheckAWSInspectorResourceGroupModified = ` -resource "aws_inspector_resource_group" "foo" { - tags { - Name = "bar" - } -}` diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go deleted file mode 100644 index 400aaf2e0..000000000 --- a/builtin/providers/aws/resource_aws_instance.go +++ /dev/null @@ -1,1691 +0,0 @@ -package aws - -import ( - "bytes" - "crypto/sha1" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsInstanceCreate, - Read: resourceAwsInstanceRead, - Update: resourceAwsInstanceUpdate, - Delete: resourceAwsInstanceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsInstanceMigrateState, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - 
Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "ami": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "associate_public_ip_address": { - Type: schema.TypeBool, - ForceNew: true, - Computed: true, - Optional: true, - }, - - "availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "placement_group": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "instance_type": { - Type: schema.TypeString, - Required: true, - }, - - "key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "subnet_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "private_ip": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "source_dest_check": { - Type: schema.TypeBool, - Optional: true, - Default: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Suppress diff if network_interface is set - _, ok := d.GetOk("network_interface") - return ok - }, - }, - - "user_data": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - return userDataHashSum(v.(string)) - default: - return "" - } - }, - }, - - "security_groups": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "vpc_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "public_dns": { - Type: schema.TypeString, - Computed: true, - }, - - // TODO: Deprecate me v0.10.0 - "network_interface_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Please use `primary_network_interface_id` 
instead", - }, - - "primary_network_interface_id": { - Type: schema.TypeString, - Computed: true, - }, - - "network_interface": { - ConflictsWith: []string{"associate_public_ip_address", "subnet_id", "private_ip", "vpc_security_group_ids", "security_groups", "ipv6_addresses", "ipv6_address_count", "source_dest_check"}, - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Default: false, - Optional: true, - ForceNew: true, - }, - "network_interface_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "device_index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "public_ip": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_state": { - Type: schema.TypeString, - Computed: true, - }, - - "private_dns": { - Type: schema.TypeString, - Computed: true, - }, - - "ebs_optimized": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "disable_api_termination": { - Type: schema.TypeBool, - Optional: true, - }, - - "instance_initiated_shutdown_behavior": { - Type: schema.TypeString, - Optional: true, - }, - - "monitoring": { - Type: schema.TypeBool, - Optional: true, - }, - - "iam_instance_profile": { - Type: schema.TypeString, - Optional: true, - }, - - "ipv6_address_count": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "ipv6_addresses": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "tenancy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - - "volume_tags": tagsSchemaComputed(), - - "block_device": { - Type: schema.TypeMap, - Optional: true, - Removed: "Split out into three sub-types; see Changelog and Docs", - }, - - "ebs_block_device": { - Type: 
schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "device_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "encrypted": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "iops": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "snapshot_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) - return hashcode.String(buf.String()) - }, - }, - - "ephemeral_block_device": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - }, - - "virtual_name": { - Type: schema.TypeString, - Optional: true, - }, - - "no_device": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) - if v, ok := m["no_device"].(bool); ok && v { - buf.WriteString(fmt.Sprintf("%t-", v)) - } - return hashcode.String(buf.String()) - }, - }, - - "root_block_device": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - // 
"You can only modify the volume size, volume type, and Delete on - // Termination flag on the block device mapping entry for the root - // device volume." - bit.ly/ec2bdmap - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "iops": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - instanceOpts, err := buildAwsInstanceOpts(d, meta) - if err != nil { - return err - } - - // Build the creation struct - runOpts := &ec2.RunInstancesInput{ - BlockDeviceMappings: instanceOpts.BlockDeviceMappings, - DisableApiTermination: instanceOpts.DisableAPITermination, - EbsOptimized: instanceOpts.EBSOptimized, - Monitoring: instanceOpts.Monitoring, - IamInstanceProfile: instanceOpts.IAMInstanceProfile, - ImageId: instanceOpts.ImageID, - InstanceInitiatedShutdownBehavior: instanceOpts.InstanceInitiatedShutdownBehavior, - InstanceType: instanceOpts.InstanceType, - Ipv6AddressCount: instanceOpts.Ipv6AddressCount, - Ipv6Addresses: instanceOpts.Ipv6Addresses, - KeyName: instanceOpts.KeyName, - MaxCount: aws.Int64(int64(1)), - MinCount: aws.Int64(int64(1)), - NetworkInterfaces: instanceOpts.NetworkInterfaces, - Placement: instanceOpts.Placement, - PrivateIpAddress: instanceOpts.PrivateIPAddress, - SecurityGroupIds: instanceOpts.SecurityGroupIDs, - SecurityGroups: instanceOpts.SecurityGroups, - SubnetId: instanceOpts.SubnetID, - UserData: instanceOpts.UserData64, - } - - _, ipv6CountOk := d.GetOk("ipv6_address_count") - _, ipv6AddressOk := d.GetOk("ipv6_addresses") - - if ipv6AddressOk && ipv6CountOk 
{ - return fmt.Errorf("Only 1 of `ipv6_address_count` or `ipv6_addresses` can be specified") - } - - restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() - if !restricted { - tagsSpec := make([]*ec2.TagSpecification, 0) - - if v, ok := d.GetOk("tags"); ok { - tags := tagsFromMap(v.(map[string]interface{})) - - spec := &ec2.TagSpecification{ - ResourceType: aws.String("instance"), - Tags: tags, - } - - tagsSpec = append(tagsSpec, spec) - } - - if v, ok := d.GetOk("volume_tags"); ok { - tags := tagsFromMap(v.(map[string]interface{})) - - spec := &ec2.TagSpecification{ - ResourceType: aws.String("volume"), - Tags: tags, - } - - tagsSpec = append(tagsSpec, spec) - } - - if len(tagsSpec) > 0 { - runOpts.TagSpecifications = tagsSpec - } - } - - // Create the instance - log.Printf("[DEBUG] Run configuration: %s", runOpts) - - var runResp *ec2.Reservation - err = resource.Retry(30*time.Second, func() *resource.RetryError { - var err error - runResp, err = conn.RunInstances(runOpts) - // IAM instance profiles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { - log.Print("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") - return resource.RetryableError(err) - } - // IAM roles can also take time to propagate in AWS: - if isAWSErr(err, "InvalidParameterValue", " has no associated IAM Roles") { - log.Print("[DEBUG] IAM Instance Profile appears to have no IAM roles, retrying...") - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - }) - // Warn if the AWS Error involves group ids, to help identify situation - // where a user uses group ids in security_groups for the Default VPC. 
- // See https://github.com/hashicorp/terraform/issues/3798 - if isAWSErr(err, "InvalidParameterValue", "groupId is invalid") { - return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", err.(awserr.Error).Message()) - } - if err != nil { - return fmt.Errorf("Error launching source instance: %s", err) - } - if runResp == nil || len(runResp.Instances) == 0 { - return errors.New("Error launching source instance: no instances returned in response") - } - - instance := runResp.Instances[0] - log.Printf("[INFO] Instance ID: %s", *instance.InstanceId) - - // Store the resulting ID so we can look this up later - d.SetId(*instance.InstanceId) - - // Wait for the instance to become running so we can get some attributes - // that aren't available until later. - log.Printf( - "[DEBUG] Waiting for instance (%s) to become running", - *instance.InstanceId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"running"}, - Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId, "terminated"), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - instanceRaw, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to become ready: %s", - *instance.InstanceId, err) - } - - instance = instanceRaw.(*ec2.Instance) - - // Initialize the connection info - if instance.PublicIpAddress != nil { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": *instance.PublicIpAddress, - }) - } else if instance.PrivateIpAddress != nil { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": *instance.PrivateIpAddress, - }) - } - - // Update if we need to - return resourceAwsInstanceUpdate(d, meta) -} - -func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn 
:= meta.(*AWSClient).ec2conn

	resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String(d.Id())},
	})
	if err != nil {
		// If the instance was not found, return nil so that we can show
		// that the instance is gone.
		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" {
			d.SetId("")
			return nil
		}

		// Some other error, report it
		return err
	}

	// If nothing was found, then return no state
	if len(resp.Reservations) == 0 {
		d.SetId("")
		return nil
	}

	instance := resp.Reservations[0].Instances[0]

	if instance.State != nil {
		// If the instance is terminated, then it is gone
		if *instance.State.Name == "terminated" {
			d.SetId("")
			return nil
		}

		d.Set("instance_state", instance.State.Name)
	}

	if instance.Placement != nil {
		d.Set("availability_zone", instance.Placement.AvailabilityZone)
		// FIX: the Tenancy check was previously outside the Placement nil
		// guard, dereferencing instance.Placement even when it is nil.
		if instance.Placement.Tenancy != nil {
			d.Set("tenancy", instance.Placement.Tenancy)
		}
	}

	d.Set("ami", instance.ImageId)
	d.Set("instance_type", instance.InstanceType)
	d.Set("key_name", instance.KeyName)
	d.Set("public_dns", instance.PublicDnsName)
	d.Set("public_ip", instance.PublicIpAddress)
	d.Set("private_dns", instance.PrivateDnsName)
	d.Set("private_ip", instance.PrivateIpAddress)
	d.Set("iam_instance_profile", iamInstanceProfileArnToName(instance.IamInstanceProfile))

	// Set configured Network Interface Device Index Slice
	// We only want to read, and populate state for the configured network_interface attachments. Otherwise, other
	// resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy
	// diff. We should only read on changes configured for this specific resource because of this.
	var configuredDeviceIndexes []int
	if v, ok := d.GetOk("network_interface"); ok {
		vL := v.(*schema.Set).List()
		for _, vi := range vL {
			mVi := vi.(map[string]interface{})
			configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int))
		}
	}

	var ipv6Addresses []string
	if len(instance.NetworkInterfaces) > 0 {
		var primaryNetworkInterface ec2.InstanceNetworkInterface
		var networkInterfaces []map[string]interface{}
		for _, iNi := range instance.NetworkInterfaces {
			ni := make(map[string]interface{})
			// NOTE(review): iNi.Attachment is assumed non-nil here; during
			// detach/shutdown it can be nil — TODO confirm and guard upstream.
			if *iNi.Attachment.DeviceIndex == 0 {
				primaryNetworkInterface = *iNi
			}
			// If the attached network device is inside our configuration, refresh state with values found.
			// Otherwise, assume the network device was attached via an outside resource.
			for _, index := range configuredDeviceIndexes {
				if index == int(*iNi.Attachment.DeviceIndex) {
					ni["device_index"] = *iNi.Attachment.DeviceIndex
					ni["network_interface_id"] = *iNi.NetworkInterfaceId
					ni["delete_on_termination"] = *iNi.Attachment.DeleteOnTermination
				}
			}
			// Don't add empty network interfaces to schema
			if len(ni) == 0 {
				continue
			}
			networkInterfaces = append(networkInterfaces, ni)
		}
		if err := d.Set("network_interface", networkInterfaces); err != nil {
			return fmt.Errorf("Error setting network_interfaces: %v", err)
		}

		// Set primary network interface details
		// If an instance is shutting down, network interfaces are detached, and attributes may be nil,
		// need to protect against nil pointer dereferences
		if primaryNetworkInterface.SubnetId != nil {
			d.Set("subnet_id", primaryNetworkInterface.SubnetId)
		}
		if primaryNetworkInterface.NetworkInterfaceId != nil {
			d.Set("network_interface_id", primaryNetworkInterface.NetworkInterfaceId) // TODO: Deprecate me v0.10.0
			d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId)
		}
		if primaryNetworkInterface.Ipv6Addresses != nil {
			d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses))
		}
		if primaryNetworkInterface.SourceDestCheck != nil {
			d.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck)
		}

		d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil)

		for _, address := range primaryNetworkInterface.Ipv6Addresses {
			ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address)
		}

	} else {
		d.Set("subnet_id", instance.SubnetId)
		d.Set("network_interface_id", "") // TODO: Deprecate me v0.10.0
		d.Set("primary_network_interface_id", "")
	}

	if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
		log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err)
	}

	d.Set("ebs_optimized", instance.EbsOptimized)
	if instance.SubnetId != nil && *instance.SubnetId != "" {
		d.Set("source_dest_check", instance.SourceDestCheck)
	}

	if instance.Monitoring != nil && instance.Monitoring.State != nil {
		monitoringState := *instance.Monitoring.State
		d.Set("monitoring", monitoringState == "enabled" || monitoringState == "pending")
	}

	d.Set("tags", tagsToMap(instance.Tags))

	if err := readVolumeTags(conn, d); err != nil {
		return err
	}

	if err := readSecurityGroups(d, instance); err != nil {
		return err
	}

	if err := readBlockDevices(d, instance, conn); err != nil {
		return err
	}
	if _, ok := d.GetOk("ephemeral_block_device"); !ok {
		d.Set("ephemeral_block_device", []interface{}{})
	}

	// Instance attributes
	{
		attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
			Attribute:  aws.String("disableApiTermination"),
			InstanceId: aws.String(d.Id()),
		})
		if err != nil {
			return err
		}
		d.Set("disable_api_termination", attr.DisableApiTermination.Value)
	}
	{
		attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
			Attribute:  aws.String(ec2.InstanceAttributeNameUserData),
			InstanceId: aws.String(d.Id()),
		})
		if err != nil {
return err
		}
		if attr.UserData.Value != nil {
			d.Set("user_data", userDataHashSum(*attr.UserData.Value))
		}
	}

	return nil
}

// resourceAwsInstanceUpdate applies in-place changes (tags, IAM instance
// profile, source/dest check, security groups, instance type, termination
// protection, shutdown behavior, monitoring) and re-reads the instance.
func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	d.Partial(true)

	// In GovCloud/China, tags cannot be attached via TagSpecifications at
	// launch time, so a freshly created instance still needs them set here.
	restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud()

	if d.HasChange("tags") {
		if !d.IsNewResource() || restricted {
			if err := setTags(conn, d); err != nil {
				return err
			}
			d.SetPartial("tags")
		}
	}
	if d.HasChange("volume_tags") {
		// FIX: this previously tested `!restricted`, which skipped volume
		// tagging for new instances in restricted partitions — exactly the
		// case where Create could NOT apply them via TagSpecifications
		// (mirror of the `tags` branch above).
		if !d.IsNewResource() || restricted {
			if err := setVolumeTags(conn, d); err != nil {
				return err
			}
			d.SetPartial("volume_tags")
		}
	}

	if d.HasChange("iam_instance_profile") && !d.IsNewResource() {
		request := &ec2.DescribeIamInstanceProfileAssociationsInput{
			Filters: []*ec2.Filter{
				{
					Name:   aws.String("instance-id"),
					Values: []*string{aws.String(d.Id())},
				},
			},
		}

		resp, err := conn.DescribeIamInstanceProfileAssociations(request)
		if err != nil {
			return err
		}

		// An Iam Instance Profile has been provided and is pending a change
		// This means it is an association or a replacement to an association
		if _, ok := d.GetOk("iam_instance_profile"); ok {
			// Does not have an Iam Instance Profile associated with it, need to associate
			if len(resp.IamInstanceProfileAssociations) == 0 {
				_, err := conn.AssociateIamInstanceProfile(&ec2.AssociateIamInstanceProfileInput{
					InstanceId: aws.String(d.Id()),
					IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
						Name: aws.String(d.Get("iam_instance_profile").(string)),
					},
				})
				if err != nil {
					return err
				}

			} else {
				// Has an Iam Instance Profile associated with it, need to replace the association
				associationId := resp.IamInstanceProfileAssociations[0].AssociationId

				_, err := conn.ReplaceIamInstanceProfileAssociation(&ec2.ReplaceIamInstanceProfileAssociationInput{
					AssociationId: associationId,
					IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
						Name: aws.String(d.Get("iam_instance_profile").(string)),
					},
				})
				if err != nil {
					return err
				}
			}
			// An Iam Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal
		} else {
			if len(resp.IamInstanceProfileAssociations) > 0 {
				// Has an Iam Instance Profile associated with it, need to remove the association
				associationId := resp.IamInstanceProfileAssociations[0].AssociationId

				_, err := conn.DisassociateIamInstanceProfile(&ec2.DisassociateIamInstanceProfileInput{
					AssociationId: associationId,
				})
				if err != nil {
					return err
				}
			}
		}
	}

	// SourceDestCheck can only be modified on an instance without manually specified network interfaces.
	// SourceDestCheck, in that case, is configured at the network interface level
	if _, ok := d.GetOk("network_interface"); !ok {

		// If we have a new resource and source_dest_check is still true, don't modify
		sourceDestCheck := d.Get("source_dest_check").(bool)

		if d.HasChange("source_dest_check") || d.IsNewResource() && !sourceDestCheck {
			// SourceDestCheck can only be set on VPC instances
			// AWS will return an error of InvalidParameterCombination if we attempt
			// to modify the source_dest_check of an instance in EC2 Classic
			log.Printf("[INFO] Modifying `source_dest_check` on Instance %s", d.Id())
			_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
				InstanceId: aws.String(d.Id()),
				SourceDestCheck: &ec2.AttributeBooleanValue{
					Value: aws.Bool(sourceDestCheck),
				},
			})
			if err != nil {
				if ec2err, ok := err.(awserr.Error); ok {
					// Tolerate InvalidParameterCombination error in Classic, otherwise
					// return the error
					if "InvalidParameterCombination" != ec2err.Code() {
						return err
					}
					log.Printf("[WARN] Attempted to modify SourceDestCheck on non VPC instance: %s", ec2err.Message())
				}
			}
		}
	}

	if d.HasChange("vpc_security_group_ids") {
- var groups []*string - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - for _, v := range v.List() { - groups = append(groups, aws.String(v.(string))) - } - } - // If a user has multiple network interface attachments on the target EC2 instance, simply modifying the - // instance attributes via a `ModifyInstanceAttributes()` request would fail with the following error message: - // "There are multiple interfaces attached to instance 'i-XX'. Please specify an interface ID for the operation instead." - // Thus, we need to actually modify the primary network interface for the new security groups, as the primary - // network interface is where we modify/create security group assignments during Create. - log.Printf("[INFO] Modifying `vpc_security_group_ids` on Instance %q", d.Id()) - instances, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - return err - } - instance := instances.Reservations[0].Instances[0] - var primaryInterface ec2.InstanceNetworkInterface - for _, ni := range instance.NetworkInterfaces { - if *ni.Attachment.DeviceIndex == 0 { - primaryInterface = *ni - } - } - - if primaryInterface.NetworkInterfaceId == nil { - log.Print("[Error] Attempted to set vpc_security_group_ids on an instance without a primary network interface") - return fmt.Errorf( - "Failed to update vpc_security_group_ids on %q, which does not contain a primary network interface", - d.Id()) - } - - if _, err := conn.ModifyNetworkInterfaceAttribute(&ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: primaryInterface.NetworkInterfaceId, - Groups: groups, - }); err != nil { - return err - } - } - - if d.HasChange("instance_type") && !d.IsNewResource() { - log.Printf("[INFO] Stopping Instance %q for instance_type change", d.Id()) - _, err := conn.StopInstances(&ec2.StopInstancesInput{ - InstanceIds: []*string{aws.String(d.Id())}, - }) - - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, - Target: []string{"stopped"}, - Refresh: InstanceStateRefreshFunc(conn, d.Id(), ""), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to stop: %s", d.Id(), err) - } - - log.Printf("[INFO] Modifying instance type %s", d.Id()) - _, err = conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ - InstanceId: aws.String(d.Id()), - InstanceType: &ec2.AttributeValue{ - Value: aws.String(d.Get("instance_type").(string)), - }, - }) - if err != nil { - return err - } - - log.Printf("[INFO] Starting Instance %q after instance_type change", d.Id()) - _, err = conn.StartInstances(&ec2.StartInstancesInput{ - InstanceIds: []*string{aws.String(d.Id())}, - }) - - stateConf = &resource.StateChangeConf{ - Pending: []string{"pending", "stopped"}, - Target: []string{"running"}, - Refresh: InstanceStateRefreshFunc(conn, d.Id(), "terminated"), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to become ready: %s", - d.Id(), err) - } - } - - if d.HasChange("disable_api_termination") { - _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ - InstanceId: aws.String(d.Id()), - DisableApiTermination: &ec2.AttributeBooleanValue{ - Value: aws.Bool(d.Get("disable_api_termination").(bool)), - }, - }) - if err != nil { - return err - } - } - - if d.HasChange("instance_initiated_shutdown_behavior") { - log.Printf("[INFO] Modifying instance %s", d.Id()) - _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ - InstanceId: aws.String(d.Id()), - InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ - Value: 
aws.String(d.Get("instance_initiated_shutdown_behavior").(string)), - }, - }) - if err != nil { - return err - } - } - - if d.HasChange("monitoring") { - var mErr error - if d.Get("monitoring").(bool) { - log.Printf("[DEBUG] Enabling monitoring for Instance (%s)", d.Id()) - _, mErr = conn.MonitorInstances(&ec2.MonitorInstancesInput{ - InstanceIds: []*string{aws.String(d.Id())}, - }) - } else { - log.Printf("[DEBUG] Disabling monitoring for Instance (%s)", d.Id()) - _, mErr = conn.UnmonitorInstances(&ec2.UnmonitorInstancesInput{ - InstanceIds: []*string{aws.String(d.Id())}, - }) - } - if mErr != nil { - return fmt.Errorf("[WARN] Error updating Instance monitoring: %s", mErr) - } - } - - // TODO(mitchellh): wait for the attributes we modified to - // persist the change... - - d.Partial(false) - - return resourceAwsInstanceRead(d, meta) -} - -func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if err := awsTerminateInstance(conn, d.Id(), d); err != nil { - return err - } - - d.SetId("") - return nil -} - -// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an EC2 instance. -func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID, failState string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(instanceID)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { - // Set this to nil as if we didn't find anything. - resp = nil - } else { - log.Printf("Error on InstanceStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil || len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - i := resp.Reservations[0].Instances[0] - state := *i.State.Name - - if state == failState { - return i, state, fmt.Errorf("Failed to reach target state. Reason: %s", - stringifyStateReason(i.StateReason)) - - } - - return i, state, nil - } -} - -func stringifyStateReason(sr *ec2.StateReason) string { - if sr.Message != nil { - return *sr.Message - } - if sr.Code != nil { - return *sr.Code - } - - return sr.String() -} - -func readBlockDevices(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error { - ibds, err := readBlockDevicesFromInstance(instance, conn) - if err != nil { - return err - } - - if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil { - return err - } - - // This handles the import case which needs to be defaulted to empty - if _, ok := d.GetOk("root_block_device"); !ok { - if err := d.Set("root_block_device", []interface{}{}); err != nil { - return err - } - } - - if ibds["root"] != nil { - roots := []interface{}{ibds["root"]} - if err := d.Set("root_block_device", roots); err != nil { - return err - } - } - - return nil -} - -func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[string]interface{}, error) { - blockDevices := make(map[string]interface{}) - blockDevices["ebs"] = make([]map[string]interface{}, 0) - blockDevices["root"] = nil - - instanceBlockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) - for _, bd := range instance.BlockDeviceMappings { - if bd.Ebs != nil { - instanceBlockDevices[*bd.Ebs.VolumeId] = bd - } - } - - if len(instanceBlockDevices) == 0 { - return nil, nil - } - - volIDs := make([]*string, 0, len(instanceBlockDevices)) - for volID := range instanceBlockDevices { - volIDs = append(volIDs, aws.String(volID)) - } - - // Need to call DescribeVolumes to get volume_size and volume_type for each - // EBS block device - volResp, err := conn.DescribeVolumes(&ec2.DescribeVolumesInput{ - VolumeIds: volIDs, - }) - if err != nil { - return nil, 
err - } - - for _, vol := range volResp.Volumes { - instanceBd := instanceBlockDevices[*vol.VolumeId] - bd := make(map[string]interface{}) - - if instanceBd.Ebs != nil && instanceBd.Ebs.DeleteOnTermination != nil { - bd["delete_on_termination"] = *instanceBd.Ebs.DeleteOnTermination - } - if vol.Size != nil { - bd["volume_size"] = *vol.Size - } - if vol.VolumeType != nil { - bd["volume_type"] = *vol.VolumeType - } - if vol.Iops != nil { - bd["iops"] = *vol.Iops - } - - if blockDeviceIsRoot(instanceBd, instance) { - blockDevices["root"] = bd - } else { - if instanceBd.DeviceName != nil { - bd["device_name"] = *instanceBd.DeviceName - } - if vol.Encrypted != nil { - bd["encrypted"] = *vol.Encrypted - } - if vol.SnapshotId != nil { - bd["snapshot_id"] = *vol.SnapshotId - } - - blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) - } - } - - return blockDevices, nil -} - -func blockDeviceIsRoot(bd *ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool { - return bd.DeviceName != nil && - instance.RootDeviceName != nil && - *bd.DeviceName == *instance.RootDeviceName -} - -func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) { - if ami == "" { - return nil, errors.New("Cannot fetch root device name for blank AMI ID.") - } - - log.Printf("[DEBUG] Describing AMI %q to get root block device name", ami) - res, err := conn.DescribeImages(&ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(ami)}, - }) - if err != nil { - return nil, err - } - - // For a bad image, we just return nil so we don't block a refresh - if len(res.Images) == 0 { - return nil, nil - } - - image := res.Images[0] - rootDeviceName := image.RootDeviceName - - // Instance store backed AMIs do not provide a root device name. 
- if *image.RootDeviceType == ec2.DeviceTypeInstanceStore { - return nil, nil - } - - // Some AMIs have a RootDeviceName like "/dev/sda1" that does not appear as a - // DeviceName in the BlockDeviceMapping list (which will instead have - // something like "/dev/sda") - // - // While this seems like it breaks an invariant of AMIs, it ends up working - // on the AWS side, and AMIs like this are common enough that we need to - // special case it so Terraform does the right thing. - // - // Our heuristic is: if the RootDeviceName does not appear in the - // BlockDeviceMapping, assume that the DeviceName of the first - // BlockDeviceMapping entry serves as the root device. - rootDeviceNameInMapping := false - for _, bdm := range image.BlockDeviceMappings { - if bdm.DeviceName == image.RootDeviceName { - rootDeviceNameInMapping = true - } - } - - if !rootDeviceNameInMapping && len(image.BlockDeviceMappings) > 0 { - rootDeviceName = image.BlockDeviceMappings[0].DeviceName - } - - if rootDeviceName == nil { - return nil, fmt.Errorf("[WARN] Error finding Root Device Name for AMI (%s)", ami) - } - - return rootDeviceName, nil -} - -func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []*string, nInterfaces interface{}) []*ec2.InstanceNetworkInterfaceSpecification { - networkInterfaces := []*ec2.InstanceNetworkInterfaceSpecification{} - // Get necessary items - associatePublicIPAddress := d.Get("associate_public_ip_address").(bool) - subnet, hasSubnet := d.GetOk("subnet_id") - - if hasSubnet && associatePublicIPAddress { - // If we have a non-default VPC / Subnet specified, we can flag - // AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided. 
- // You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise - // you get: Network interfaces and an instance-level subnet ID may not be specified on the same request - // You also need to attach Security Groups to the NetworkInterface instead of the instance, - // to avoid: Network interfaces and an instance-level security groups may not be specified on - // the same request - ni := &ec2.InstanceNetworkInterfaceSpecification{ - AssociatePublicIpAddress: aws.Bool(associatePublicIPAddress), - DeviceIndex: aws.Int64(int64(0)), - SubnetId: aws.String(subnet.(string)), - Groups: groups, - } - - if v, ok := d.GetOk("private_ip"); ok { - ni.PrivateIpAddress = aws.String(v.(string)) - } - - if v, ok := d.GetOk("ipv6_address_count"); ok { - ni.Ipv6AddressCount = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("ipv6_addresses"); ok { - ipv6Addresses := make([]*ec2.InstanceIpv6Address, len(v.([]interface{}))) - for _, address := range v.([]interface{}) { - ipv6Address := &ec2.InstanceIpv6Address{ - Ipv6Address: aws.String(address.(string)), - } - - ipv6Addresses = append(ipv6Addresses, ipv6Address) - } - - ni.Ipv6Addresses = ipv6Addresses - } - - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - for _, v := range v.List() { - ni.Groups = append(ni.Groups, aws.String(v.(string))) - } - } - - networkInterfaces = append(networkInterfaces, ni) - } else { - // If we have manually specified network interfaces, build and attach those here. 
- vL := nInterfaces.(*schema.Set).List() - for _, v := range vL { - ini := v.(map[string]interface{}) - ni := &ec2.InstanceNetworkInterfaceSpecification{ - DeviceIndex: aws.Int64(int64(ini["device_index"].(int))), - NetworkInterfaceId: aws.String(ini["network_interface_id"].(string)), - DeleteOnTermination: aws.Bool(ini["delete_on_termination"].(bool)), - } - networkInterfaces = append(networkInterfaces, ni) - } - } - - return networkInterfaces -} - -func readBlockDeviceMappingsFromConfig( - d *schema.ResourceData, conn *ec2.EC2) ([]*ec2.BlockDeviceMapping, error) { - blockDevices := make([]*ec2.BlockDeviceMapping, 0) - - if v, ok := d.GetOk("ebs_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["snapshot_id"].(string); ok && v != "" { - ebs.SnapshotId = aws.String(v) - } - - if v, ok := bd["encrypted"].(bool); ok && v { - ebs.Encrypted = aws.Bool(v) - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - if "io1" == strings.ToLower(v) { - // Condition: This parameter is required for requests to create io1 - // volumes; it is not used in requests to create gp2, st1, sc1, or - // standard volumes. 
- // See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - } - } - - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - Ebs: ebs, - }) - } - } - - if v, ok := d.GetOk("ephemeral_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - bdm := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - VirtualName: aws.String(bd["virtual_name"].(string)), - } - if v, ok := bd["no_device"].(bool); ok && v { - bdm.NoDevice = aws.String("") - // When NoDevice is true, just ignore VirtualName since it's not needed - bdm.VirtualName = nil - } - - if bdm.NoDevice == nil && aws.StringValue(bdm.VirtualName) == "" { - return nil, errors.New("virtual_name cannot be empty when no_device is false or undefined.") - } - - blockDevices = append(blockDevices, bdm) - } - } - - if v, ok := d.GetOk("root_block_device"); ok { - vL := v.([]interface{}) - if len(vL) > 1 { - return nil, errors.New("Cannot specify more than one root_block_device.") - } - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType == "io1" { - // Only set the iops attribute if the volume type is io1. Setting otherwise - // can trigger a refresh/plan loop based on the computed value that is given - // from AWS, and prevent us from specifying 0 as a valid iops. 
- // See https://github.com/hashicorp/terraform/pull/4146 - // See https://github.com/hashicorp/terraform/issues/7765 - ebs.Iops = aws.Int64(int64(v)) - } else if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType != "io1" { - // Message user about incompatibility - log.Print("[WARN] IOPs is only valid for storate type io1 for EBS Volumes") - } - - if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil { - if dn == nil { - return nil, fmt.Errorf( - "Expected 1 AMI for ID: %s, got none", - d.Get("ami").(string)) - } - - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: dn, - Ebs: ebs, - }) - } else { - return nil, err - } - } - } - - return blockDevices, nil -} - -func readVolumeTags(conn *ec2.EC2, d *schema.ResourceData) error { - volumeIds, err := getAwsInstanceVolumeIds(conn, d) - if err != nil { - return err - } - - tagsResp, err := conn.DescribeTags(&ec2.DescribeTagsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("resource-id"), - Values: volumeIds, - }, - }, - }) - if err != nil { - return err - } - - var tags []*ec2.Tag - - for _, t := range tagsResp.Tags { - tag := &ec2.Tag{ - Key: t.Key, - Value: t.Value, - } - tags = append(tags, tag) - } - - d.Set("volume_tags", tagsToMap(tags)) - - return nil -} - -// Determine whether we're referring to security groups with -// IDs or names. We use a heuristic to figure this out. By default, -// we use IDs if we're in a VPC. However, if we previously had an -// all-name list of security groups, we use names. Or, if we had any -// IDs, we use IDs. 
-func readSecurityGroups(d *schema.ResourceData, instance *ec2.Instance) error { - useID := instance.SubnetId != nil && *instance.SubnetId != "" - if v := d.Get("security_groups"); v != nil { - match := useID - sgs := v.(*schema.Set).List() - if len(sgs) > 0 { - match = false - for _, v := range v.(*schema.Set).List() { - if strings.HasPrefix(v.(string), "sg-") { - match = true - break - } - } - } - - useID = match - } - - // Build up the security groups - sgs := make([]string, 0, len(instance.SecurityGroups)) - if useID { - for _, sg := range instance.SecurityGroups { - sgs = append(sgs, *sg.GroupId) - } - log.Printf("[DEBUG] Setting Security Group IDs: %#v", sgs) - if err := d.Set("vpc_security_group_ids", sgs); err != nil { - return err - } - if err := d.Set("security_groups", []string{}); err != nil { - return err - } - } else { - for _, sg := range instance.SecurityGroups { - sgs = append(sgs, *sg.GroupName) - } - log.Printf("[DEBUG] Setting Security Group Names: %#v", sgs) - if err := d.Set("security_groups", sgs); err != nil { - return err - } - if err := d.Set("vpc_security_group_ids", []string{}); err != nil { - return err - } - } - return nil -} - -type awsInstanceOpts struct { - BlockDeviceMappings []*ec2.BlockDeviceMapping - DisableAPITermination *bool - EBSOptimized *bool - Monitoring *ec2.RunInstancesMonitoringEnabled - IAMInstanceProfile *ec2.IamInstanceProfileSpecification - ImageID *string - InstanceInitiatedShutdownBehavior *string - InstanceType *string - Ipv6AddressCount *int64 - Ipv6Addresses []*ec2.InstanceIpv6Address - KeyName *string - NetworkInterfaces []*ec2.InstanceNetworkInterfaceSpecification - Placement *ec2.Placement - PrivateIPAddress *string - SecurityGroupIDs []*string - SecurityGroups []*string - SpotPlacement *ec2.SpotPlacement - SubnetID *string - UserData64 *string -} - -func buildAwsInstanceOpts( - d *schema.ResourceData, meta interface{}) (*awsInstanceOpts, error) { - conn := meta.(*AWSClient).ec2conn - - opts := 
&awsInstanceOpts{ - DisableAPITermination: aws.Bool(d.Get("disable_api_termination").(bool)), - EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)), - ImageID: aws.String(d.Get("ami").(string)), - InstanceType: aws.String(d.Get("instance_type").(string)), - } - - if v := d.Get("instance_initiated_shutdown_behavior").(string); v != "" { - opts.InstanceInitiatedShutdownBehavior = aws.String(v) - } - - opts.Monitoring = &ec2.RunInstancesMonitoringEnabled{ - Enabled: aws.Bool(d.Get("monitoring").(bool)), - } - - opts.IAMInstanceProfile = &ec2.IamInstanceProfileSpecification{ - Name: aws.String(d.Get("iam_instance_profile").(string)), - } - - user_data := d.Get("user_data").(string) - - opts.UserData64 = aws.String(base64Encode([]byte(user_data))) - - // check for non-default Subnet, and cast it to a String - subnet, hasSubnet := d.GetOk("subnet_id") - subnetID := subnet.(string) - - // Placement is used for aws_instance; SpotPlacement is used for - // aws_spot_instance_request. They represent the same data. :-| - opts.Placement = &ec2.Placement{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - GroupName: aws.String(d.Get("placement_group").(string)), - } - - opts.SpotPlacement = &ec2.SpotPlacement{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - GroupName: aws.String(d.Get("placement_group").(string)), - } - - if v := d.Get("tenancy").(string); v != "" { - opts.Placement.Tenancy = aws.String(v) - } - - associatePublicIPAddress := d.Get("associate_public_ip_address").(bool) - - var groups []*string - if v := d.Get("security_groups"); v != nil { - // Security group names. - // For a nondefault VPC, you must use security group IDs instead. - // See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html - sgs := v.(*schema.Set).List() - if len(sgs) > 0 && hasSubnet { - log.Print("[WARN] Deprecated. Attempting to use 'security_groups' within a VPC instance. 
Use 'vpc_security_group_ids' instead.") - } - for _, v := range sgs { - str := v.(string) - groups = append(groups, aws.String(str)) - } - } - - networkInterfaces, interfacesOk := d.GetOk("network_interface") - - // If setting subnet and public address, OR manual network interfaces, populate those now. - if hasSubnet && associatePublicIPAddress || interfacesOk { - // Otherwise we're attaching (a) network interface(s) - opts.NetworkInterfaces = buildNetworkInterfaceOpts(d, groups, networkInterfaces) - } else { - // If simply specifying a subnetID, privateIP, Security Groups, or VPC Security Groups, build these now - if subnetID != "" { - opts.SubnetID = aws.String(subnetID) - } - - if v, ok := d.GetOk("private_ip"); ok { - opts.PrivateIPAddress = aws.String(v.(string)) - } - if opts.SubnetID != nil && - *opts.SubnetID != "" { - opts.SecurityGroupIDs = groups - } else { - opts.SecurityGroups = groups - } - - if v, ok := d.GetOk("ipv6_address_count"); ok { - opts.Ipv6AddressCount = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("ipv6_addresses"); ok { - ipv6Addresses := make([]*ec2.InstanceIpv6Address, len(v.([]interface{}))) - for _, address := range v.([]interface{}) { - ipv6Address := &ec2.InstanceIpv6Address{ - Ipv6Address: aws.String(address.(string)), - } - - ipv6Addresses = append(ipv6Addresses, ipv6Address) - } - - opts.Ipv6Addresses = ipv6Addresses - } - - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - for _, v := range v.List() { - opts.SecurityGroupIDs = append(opts.SecurityGroupIDs, aws.String(v.(string))) - } - } - } - - if v, ok := d.GetOk("key_name"); ok { - opts.KeyName = aws.String(v.(string)) - } - - blockDevices, err := readBlockDeviceMappingsFromConfig(d, conn) - if err != nil { - return nil, err - } - if len(blockDevices) > 0 { - opts.BlockDeviceMappings = blockDevices - } - return opts, nil -} - -func awsTerminateInstance(conn *ec2.EC2, id string, d *schema.ResourceData) error { - log.Printf("[INFO] Terminating 
instance: %s", id) - req := &ec2.TerminateInstancesInput{ - InstanceIds: []*string{aws.String(id)}, - } - if _, err := conn.TerminateInstances(req); err != nil { - return fmt.Errorf("Error terminating instance: %s", err) - } - - log.Printf("[DEBUG] Waiting for instance (%s) to become terminated", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, - Target: []string{"terminated"}, - Refresh: InstanceStateRefreshFunc(conn, id, ""), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to terminate: %s", id, err) - } - - return nil -} - -func iamInstanceProfileArnToName(ip *ec2.IamInstanceProfile) string { - if ip == nil || ip.Arn == nil { - return "" - } - parts := strings.Split(*ip.Arn, "/") - return parts[len(parts)-1] -} - -func userDataHashSum(user_data string) string { - // Check whether the user_data is not Base64 encoded. 
- // Always calculate hash of base64 decoded value since we - // check against double-encoding when setting it - v, base64DecodeError := base64.StdEncoding.DecodeString(user_data) - if base64DecodeError != nil { - v = []byte(user_data) - } - - hash := sha1.Sum(v) - return hex.EncodeToString(hash[:]) -} - -func getAwsInstanceVolumeIds(conn *ec2.EC2, d *schema.ResourceData) ([]*string, error) { - volumeIds := make([]*string, 0) - - opts := &ec2.DescribeVolumesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("attachment.instance-id"), - Values: []*string{aws.String(d.Id())}, - }, - }, - } - - resp, err := conn.DescribeVolumes(opts) - if err != nil { - return nil, err - } - - for _, v := range resp.Volumes { - volumeIds = append(volumeIds, v.VolumeId) - } - - return volumeIds, nil -} diff --git a/builtin/providers/aws/resource_aws_instance_migrate.go b/builtin/providers/aws/resource_aws_instance_migrate.go deleted file mode 100644 index 31f28b39f..000000000 --- a/builtin/providers/aws/resource_aws_instance_migrate.go +++ /dev/null @@ -1,111 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsInstanceMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Instance State v0; migrating to v1") - return migrateAwsInstanceStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateAwsInstanceStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() || is.Attributes == nil { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - // Delete old count - delete(is.Attributes, "block_device.#") - - oldBds, err := readV0BlockDevices(is) - if 
err != nil { - return is, err - } - // seed count fields for new types - is.Attributes["ebs_block_device.#"] = "0" - is.Attributes["ephemeral_block_device.#"] = "0" - // depending on if state was v0.3.7 or an earlier version, it might have - // root_block_device defined already - if _, ok := is.Attributes["root_block_device.#"]; !ok { - is.Attributes["root_block_device.#"] = "0" - } - for _, oldBd := range oldBds { - if err := writeV1BlockDevice(is, oldBd); err != nil { - return is, err - } - } - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func readV0BlockDevices(is *terraform.InstanceState) (map[string]map[string]string, error) { - oldBds := make(map[string]map[string]string) - for k, v := range is.Attributes { - if !strings.HasPrefix(k, "block_device.") { - continue - } - path := strings.Split(k, ".") - if len(path) != 3 { - return oldBds, fmt.Errorf("Found unexpected block_device field: %#v", k) - } - hashcode, attribute := path[1], path[2] - oldBd, ok := oldBds[hashcode] - if !ok { - oldBd = make(map[string]string) - oldBds[hashcode] = oldBd - } - oldBd[attribute] = v - delete(is.Attributes, k) - } - return oldBds, nil -} - -func writeV1BlockDevice( - is *terraform.InstanceState, oldBd map[string]string) error { - code := hashcode.String(oldBd["device_name"]) - bdType := "ebs_block_device" - if vn, ok := oldBd["virtual_name"]; ok && strings.HasPrefix(vn, "ephemeral") { - bdType = "ephemeral_block_device" - } else if dn, ok := oldBd["device_name"]; ok && dn == "/dev/sda1" { - bdType = "root_block_device" - } - - switch bdType { - case "ebs_block_device": - delete(oldBd, "virtual_name") - case "root_block_device": - delete(oldBd, "virtual_name") - delete(oldBd, "encrypted") - delete(oldBd, "snapshot_id") - case "ephemeral_block_device": - delete(oldBd, "delete_on_termination") - delete(oldBd, "encrypted") - delete(oldBd, "iops") - delete(oldBd, "volume_size") - delete(oldBd, "volume_type") - } - for attr, val := 
range oldBd { - attrKey := fmt.Sprintf("%s.%d.%s", bdType, code, attr) - is.Attributes[attrKey] = val - } - - countAttr := fmt.Sprintf("%s.#", bdType) - count, _ := strconv.Atoi(is.Attributes[countAttr]) - is.Attributes[countAttr] = strconv.Itoa(count + 1) - return nil -} diff --git a/builtin/providers/aws/resource_aws_instance_migrate_test.go b/builtin/providers/aws/resource_aws_instance_migrate_test.go deleted file mode 100644 index d39294331..000000000 --- a/builtin/providers/aws/resource_aws_instance_migrate_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSInstanceMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0.3.6 and earlier": { - StateVersion: 0, - Attributes: map[string]string{ - // EBS - "block_device.#": "2", - "block_device.3851383343.delete_on_termination": "true", - "block_device.3851383343.device_name": "/dev/sdx", - "block_device.3851383343.encrypted": "false", - "block_device.3851383343.snapshot_id": "", - "block_device.3851383343.virtual_name": "", - "block_device.3851383343.volume_size": "5", - "block_device.3851383343.volume_type": "standard", - // Ephemeral - "block_device.3101711606.delete_on_termination": "false", - "block_device.3101711606.device_name": "/dev/sdy", - "block_device.3101711606.encrypted": "false", - "block_device.3101711606.snapshot_id": "", - "block_device.3101711606.virtual_name": "ephemeral0", - "block_device.3101711606.volume_size": "", - "block_device.3101711606.volume_type": "", - // Root - "block_device.56575650.delete_on_termination": "true", - "block_device.56575650.device_name": "/dev/sda1", - "block_device.56575650.encrypted": "false", - "block_device.56575650.snapshot_id": "", - "block_device.56575650.volume_size": "10", - "block_device.56575650.volume_type": "standard", - }, - Expected: map[string]string{ 
- "ebs_block_device.#": "1", - "ebs_block_device.3851383343.delete_on_termination": "true", - "ebs_block_device.3851383343.device_name": "/dev/sdx", - "ebs_block_device.3851383343.encrypted": "false", - "ebs_block_device.3851383343.snapshot_id": "", - "ebs_block_device.3851383343.volume_size": "5", - "ebs_block_device.3851383343.volume_type": "standard", - "ephemeral_block_device.#": "1", - "ephemeral_block_device.2458403513.device_name": "/dev/sdy", - "ephemeral_block_device.2458403513.virtual_name": "ephemeral0", - "root_block_device.#": "1", - "root_block_device.3018388612.delete_on_termination": "true", - "root_block_device.3018388612.device_name": "/dev/sda1", - "root_block_device.3018388612.snapshot_id": "", - "root_block_device.3018388612.volume_size": "10", - "root_block_device.3018388612.volume_type": "standard", - }, - }, - "v0.3.7": { - StateVersion: 0, - Attributes: map[string]string{ - // EBS - "block_device.#": "2", - "block_device.3851383343.delete_on_termination": "true", - "block_device.3851383343.device_name": "/dev/sdx", - "block_device.3851383343.encrypted": "false", - "block_device.3851383343.snapshot_id": "", - "block_device.3851383343.virtual_name": "", - "block_device.3851383343.volume_size": "5", - "block_device.3851383343.volume_type": "standard", - "block_device.3851383343.iops": "", - // Ephemeral - "block_device.3101711606.delete_on_termination": "false", - "block_device.3101711606.device_name": "/dev/sdy", - "block_device.3101711606.encrypted": "false", - "block_device.3101711606.snapshot_id": "", - "block_device.3101711606.virtual_name": "ephemeral0", - "block_device.3101711606.volume_size": "", - "block_device.3101711606.volume_type": "", - "block_device.3101711606.iops": "", - // Root - "root_block_device.#": "1", - "root_block_device.3018388612.delete_on_termination": "true", - "root_block_device.3018388612.device_name": "/dev/sda1", - "root_block_device.3018388612.snapshot_id": "", - "root_block_device.3018388612.volume_size": 
"10", - "root_block_device.3018388612.volume_type": "io1", - "root_block_device.3018388612.iops": "1000", - }, - Expected: map[string]string{ - "ebs_block_device.#": "1", - "ebs_block_device.3851383343.delete_on_termination": "true", - "ebs_block_device.3851383343.device_name": "/dev/sdx", - "ebs_block_device.3851383343.encrypted": "false", - "ebs_block_device.3851383343.snapshot_id": "", - "ebs_block_device.3851383343.volume_size": "5", - "ebs_block_device.3851383343.volume_type": "standard", - "ephemeral_block_device.#": "1", - "ephemeral_block_device.2458403513.device_name": "/dev/sdy", - "ephemeral_block_device.2458403513.virtual_name": "ephemeral0", - "root_block_device.#": "1", - "root_block_device.3018388612.delete_on_termination": "true", - "root_block_device.3018388612.device_name": "/dev/sda1", - "root_block_device.3018388612.snapshot_id": "", - "root_block_device.3018388612.volume_size": "10", - "root_block_device.3018388612.volume_type": "io1", - "root_block_device.3018388612.iops": "1000", - }, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceAwsInstanceMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestAWSInstanceMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceAwsInstanceMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceAwsInstanceMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", 
err) - } -} diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go deleted file mode 100644 index 3ef332cb8..000000000 --- a/builtin/providers/aws/resource_aws_instance_test.go +++ /dev/null @@ -1,2307 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSInstance_basic(t *testing.T) { - var v ec2.Instance - var vol *ec2.Volume - - testCheck := func(*terraform.State) error { - if *v.Placement.AvailabilityZone != "us-west-2a" { - return fmt.Errorf("bad availability zone: %#v", *v.Placement.AvailabilityZone) - } - - if len(v.SecurityGroups) == 0 { - return fmt.Errorf("no security groups: %#v", v.SecurityGroups) - } - if *v.SecurityGroups[0].GroupName != "tf_test_foo" { - return fmt.Errorf("no security groups: %#v", v.SecurityGroups) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - - // We ignore security groups because even with EC2 classic - // we'll import as VPC security groups, which is fine. 
We verify - // VPC security group import in other tests - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{"security_groups", "vpc_security_group_ids"}, - - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - // Create a volume to cover #1249 - { - // Need a resource in this config so the provisioner will be available - Config: testAccInstanceConfig_pre, - Check: func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - var err error - vol, err = conn.CreateVolume(&ec2.CreateVolumeInput{ - AvailabilityZone: aws.String("us-west-2a"), - Size: aws.Int64(int64(5)), - }) - return err - }, - }, - - { - Config: testAccInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - testCheck, - resource.TestCheckResourceAttr( - "aws_instance.foo", - "user_data", - "3dc39dda39be1205215e776bad998da361a5955d"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), - ), - }, - - // We repeat the exact same test so that we can be sure - // that the user data hash stuff is working without generating - // an incorrect diff. 
- { - Config: testAccInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - testCheck, - resource.TestCheckResourceAttr( - "aws_instance.foo", - "user_data", - "3dc39dda39be1205215e776bad998da361a5955d"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), - ), - }, - - // Clean up volume created above - { - Config: testAccInstanceConfig, - Check: func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.DeleteVolume(&ec2.DeleteVolumeInput{VolumeId: vol.VolumeId}) - return err - }, - }, - }, - }) -} - -func TestAccAWSInstance_GP2IopsDevice(t *testing.T) { - var v ec2.Instance - - testCheck := func() resource.TestCheckFunc { - return func(*terraform.State) error { - - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) - for _, blockDevice := range v.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. 
- if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{ - "ephemeral_block_device", "user_data", "security_groups", "vpc_security_groups"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceGP2IopsDevice, - //Config: testAccInstanceConfigBlockDevices, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.iops", "100"), - testCheck(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_blockDevices(t *testing.T) { - var v ec2.Instance - - testCheck := func() resource.TestCheckFunc { - return func(*terraform.State) error { - - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) - for _, blockDevice := range v.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. - if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } - - // Check if the secondary block device exists. - if _, ok := blockDevices["/dev/sdb"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdb") - } - - // Check if the third block device exists. 
- if _, ok := blockDevices["/dev/sdc"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdc") - } - - // Check if the encrypted block device exists - if _, ok := blockDevices["/dev/sdd"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdd") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{ - "ephemeral_block_device", "security_groups", "vpc_security_groups"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigBlockDevices, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "3"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.device_name", "/dev/sdb"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.volume_size", "9"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.volume_type", "standard"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.device_name", "/dev/sdc"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.volume_size", "10"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.volume_type", "io1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.iops", "100"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.device_name", "/dev/sdd"), - 
resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.encrypted", "true"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.volume_size", "12"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.1692014856.device_name", "/dev/sde"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.1692014856.virtual_name", "ephemeral0"), - testCheck(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_rootInstanceStore(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: ` - resource "aws_instance" "foo" { - # us-west-2 - # Amazon Linux HVM Instance Store 64-bit (2016.09.0) - # https://aws.amazon.com/amazon-linux-ami - ami = "ami-44c36524" - - # Only certain instance types support ephemeral root instance stores. 
- # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html - instance_type = "m3.medium" - }`, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ami", "ami-44c36524"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_optimized", "false"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "instance_type", "m3.medium"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "0"), - ), - }, - }, - }) -} - -func TestAcctABSInstance_noAMIEphemeralDevices(t *testing.T) { - var v ec2.Instance - - testCheck := func() resource.TestCheckFunc { - return func(*terraform.State) error { - - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) - for _, blockDevice := range v.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. - if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } - - // Check if the secondary block not exists. - if _, ok := blockDevices["/dev/sdb"]; ok { - return fmt.Errorf("block device exist: /dev/sdb") - } - - // Check if the third block device not exists. 
- if _, ok := blockDevices["/dev/sdc"]; ok { - return fmt.Errorf("block device exist: /dev/sdc") - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{ - "ephemeral_block_device", "security_groups", "vpc_security_groups"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: ` - resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-01f05461" // This AMI (Ubuntu) contains two ephemerals - - instance_type = "c3.large" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ephemeral_block_device { - device_name = "/dev/sdb" - no_device = true - } - ephemeral_block_device { - device_name = "/dev/sdc" - no_device = true - } - }`, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ami", "ami-01f05461"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_optimized", "false"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "instance_type", "c3.large"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.#", "2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.172787947.device_name", "/dev/sdb"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.172787947.no_device", "true"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.3336996981.device_name", "/dev/sdc"), - 
resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.3336996981.no_device", "true"), - testCheck(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_sourceDestCheck(t *testing.T) { - var v ec2.Instance - - testCheck := func(enabled bool) resource.TestCheckFunc { - return func(*terraform.State) error { - if v.SourceDestCheck == nil { - return fmt.Errorf("bad source_dest_check: got nil") - } - if *v.SourceDestCheck != enabled { - return fmt.Errorf("bad source_dest_check: %#v", *v.SourceDestCheck) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigSourceDestDisable, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheck(false), - ), - }, - - { - Config: testAccInstanceConfigSourceDestEnable, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheck(true), - ), - }, - - { - Config: testAccInstanceConfigSourceDestDisable, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheck(false), - ), - }, - }, - }) -} - -func TestAccAWSInstance_disableApiTermination(t *testing.T) { - var v ec2.Instance - - checkDisableApiTermination := func(expected bool) resource.TestCheckFunc { - return func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - r, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ - InstanceId: v.InstanceId, - Attribute: aws.String("disableApiTermination"), - }) - if err != nil { - return err - } - got := *r.DisableApiTermination.Value - if got != expected { - return fmt.Errorf("expected: %t, got: %t", expected, got) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigDisableAPITermination(true), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - checkDisableApiTermination(true), - ), - }, - - { - Config: testAccInstanceConfigDisableAPITermination(false), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - checkDisableApiTermination(false), - ), - }, - }, - }) -} - -func TestAccAWSInstance_vpc(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{"associate_public_ip_address"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigVPC, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", - "user_data", - "562a3e32810edf6ff09994f050f12e799452379d"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_ipv6_supportAddressCount(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigIpv6Support, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", - "ipv6_address_count", - "1"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigIpv6ErrorConfig, - ExpectError: regexp.MustCompile("Only 1 of `ipv6_address_count` or `ipv6_addresses` can be specified"), - }, - }, - }) -} - -func TestAccAWSInstance_ipv6_supportAddressCountWithIpv4(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigIpv6SupportWithIpv4, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", - "ipv6_address_count", - "1"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_multipleRegions(t *testing.T) { - var v ec2.Instance - - // record the initialized providers so that we can use them to - // check for the instances in each region - var providers []*schema.Provider - providerFactories := map[string]terraform.ResourceProviderFactory{ - "aws": func() (terraform.ResourceProvider, error) { - p := Provider() - providers = append(providers, p.(*schema.Provider)) - return p, nil - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: providerFactories, - CheckDestroy: testAccCheckInstanceDestroyWithProviders(&providers), - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigMultipleRegions, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExistsWithProviders( - "aws_instance.foo", &v, &providers), - testAccCheckInstanceExistsWithProviders( - "aws_instance.bar", &v, &providers), - ), - }, - }, - }) -} - -func TestAccAWSInstance_NetworkInstanceSecurityGroups(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo_instance", - IDRefreshIgnore: 
[]string{"associate_public_ip_address"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceNetworkInstanceSecurityGroups, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), - ), - }, - }, - }) -} - -func TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo_instance", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceNetworkInstanceVPCSecurityGroupIDs, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "security_groups.#", "0"), - resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "vpc_security_group_ids.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_tags(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckInstanceConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testAccCheckTags(&v.Tags, "foo", "bar"), - // Guard against regression of https://github.com/hashicorp/terraform/issues/914 - testAccCheckTags(&v.Tags, "#", ""), - ), - }, - { - Config: testAccCheckInstanceConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testAccCheckTags(&v.Tags, "foo", ""), - testAccCheckTags(&v.Tags, "bar", "baz"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_volumeTags(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckInstanceConfigNoVolumeTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - resource.TestCheckNoResourceAttr( - "aws_instance.foo", "volume_tags"), - ), - }, - { - Config: testAccCheckInstanceConfigWithVolumeTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.%", "1"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Name", "acceptance-test-volume-tag"), - ), - }, - { - Config: testAccCheckInstanceConfigWithVolumeTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.%", "2"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Name", "acceptance-test-volume-tag"), - resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Environment", "dev"), - ), - }, - { - Config: testAccCheckInstanceConfigNoVolumeTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - resource.TestCheckNoResourceAttr( - "aws_instance.foo", "volume_tags"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckInstanceConfigWithAttachedVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - ), - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func TestAccAWSInstance_instanceProfileChange(t *testing.T) { - var v ec2.Instance - rName := 
acctest.RandString(5) - - testCheckInstanceProfile := func() resource.TestCheckFunc { - return func(*terraform.State) error { - if v.IamInstanceProfile == nil { - return fmt.Errorf("Instance Profile is nil - we expected an InstanceProfile associated with the Instance") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigWithoutInstanceProfile(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - ), - }, - { - Config: testAccInstanceConfigWithInstanceProfile(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheckInstanceProfile(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) { - var v ec2.Instance - rName := acctest.RandString(5) - - testCheckInstanceProfile := func() resource.TestCheckFunc { - return func(*terraform.State) error { - if v.IamInstanceProfile == nil { - return fmt.Errorf("Instance Profile is nil - we expected an InstanceProfile associated with the Instance") - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigWithInstanceProfile(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheckInstanceProfile(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_privateIP(t *testing.T) { - var v ec2.Instance - - testCheckPrivateIP := func() resource.TestCheckFunc { - return func(*terraform.State) error { - if *v.PrivateIpAddress != "10.1.1.42" { - return fmt.Errorf("bad private IP: %s", 
*v.PrivateIpAddress) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigPrivateIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheckPrivateIP(), - ), - }, - }, - }) -} - -func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { - var v ec2.Instance - - testCheckPrivateIP := func() resource.TestCheckFunc { - return func(*terraform.State) error { - if *v.PrivateIpAddress != "10.1.1.42" { - return fmt.Errorf("bad private IP: %s", *v.PrivateIpAddress) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - IDRefreshIgnore: []string{"associate_public_ip_address"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigAssociatePublicIPAndPrivateIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheckPrivateIP(), - ), - }, - }, - }) -} - -// Guard against regression with KeyPairs -// https://github.com/hashicorp/terraform/issues/2302 -func TestAccAWSInstance_keyPairCheck(t *testing.T) { - var v ec2.Instance - - testCheckKeyPair := func(keyName string) resource.TestCheckFunc { - return func(*terraform.State) error { - if v.KeyName == nil { - return fmt.Errorf("No Key Pair found, expected(%s)", keyName) - } - if v.KeyName != nil && *v.KeyName != keyName { - return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, *v.KeyName) - } - - return nil - } - } - - keyPairName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: 
"aws_instance.foo", - IDRefreshIgnore: []string{"source_dest_check"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigKeyPair(keyPairName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testCheckKeyPair(keyPairName), - ), - }, - }, - }) -} - -func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigRootBlockDeviceMismatch, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "13"), - ), - }, - }, - }) -} - -// This test reproduces the bug here: -// https://github.com/hashicorp/terraform/issues/1752 -// -// I wish there were a way to exercise resources built with helper.Schema in a -// unit context, in which case this test could be moved there, but for now this -// will cover the bugfix. -// -// The following triggers "diffs didn't match during apply" without the fix in to -// set NewRemoved on the .# field when it changes to 0. 
-func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) { - var v ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigForceNewAndTagsDrift, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - driftTags(&v), - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccInstanceConfigForceNewAndTagsDrift_Update, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - ), - }, - }, - }) -} - -func TestAccAWSInstance_changeInstanceType(t *testing.T) { - var before ec2.Instance - var after ec2.Instance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigWithSmallInstanceType, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), - ), - }, - { - Config: testAccInstanceConfigUpdateInstanceType, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), - testAccCheckInstanceNotRecreated( - t, &before, &after), - ), - }, - }, - }) -} - -func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) { - var instance ec2.Instance - var ini ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigPrimaryNetworkInterface, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &instance), - testAccCheckAWSENIExists("aws_network_interface.bar", &ini), - resource.TestCheckResourceAttr("aws_instance.foo", 
"network_interface.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { - var instance ec2.Instance - var ini ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &instance), - testAccCheckAWSENIExists("aws_network_interface.bar", &ini), - resource.TestCheckResourceAttr("aws_instance.foo", "source_dest_check", "false"), - ), - }, - }, - }) -} - -func TestAccAWSInstance_addSecondaryInterface(t *testing.T) { - var before ec2.Instance - var after ec2.Instance - var iniPrimary ec2.NetworkInterface - var iniSecondary ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigAddSecondaryNetworkInterfaceBefore, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), - testAccCheckAWSENIExists("aws_network_interface.primary", &iniPrimary), - resource.TestCheckResourceAttr("aws_instance.foo", "network_interface.#", "1"), - ), - }, - { - Config: testAccInstanceConfigAddSecondaryNetworkInterfaceAfter, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), - testAccCheckAWSENIExists("aws_network_interface.secondary", &iniSecondary), - resource.TestCheckResourceAttr("aws_instance.foo", "network_interface.#", "1"), - ), - }, - }, - }) -} - -// https://github.com/hashicorp/terraform/issues/3205 -func TestAccAWSInstance_addSecurityGroupNetworkInterface(t *testing.T) { - var before ec2.Instance - var after ec2.Instance - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigAddSecurityGroupBefore, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), - resource.TestCheckResourceAttr("aws_instance.foo", "vpc_security_group_ids.#", "1"), - ), - }, - { - Config: testAccInstanceConfigAddSecurityGroupAfter, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), - resource.TestCheckResourceAttr("aws_instance.foo", "vpc_security_group_ids.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckInstanceNotRecreated(t *testing.T, - before, after *ec2.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.InstanceId != *after.InstanceId { - t.Fatalf("AWS Instance IDs have changed. Before %s. After %s", *before.InstanceId, *after.InstanceId) - } - return nil - } -} - -func testAccCheckInstanceDestroy(s *terraform.State) error { - return testAccCheckInstanceDestroyWithProvider(s, testAccProvider) -} - -func testAccCheckInstanceDestroyWithProviders(providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckInstanceDestroyWithProvider(s, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckInstanceDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_instance" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - for _, r := range resp.Reservations { - for _, i := 
range r.Instances { - if i.State != nil && *i.State.Name != "terminated" { - return fmt.Errorf("Found unterminated instance: %s", i) - } - } - } - } - - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidInstanceID.NotFound" { - continue - } - - return err - } - - return nil -} - -func testAccCheckInstanceExists(n string, i *ec2.Instance) resource.TestCheckFunc { - providers := []*schema.Provider{testAccProvider} - return testAccCheckInstanceExistsWithProviders(n, i, &providers) -} - -func testAccCheckInstanceExistsWithProviders(n string, i *ec2.Instance, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - for _, provider := range *providers { - // Ignore if Meta is empty, this can happen for validation providers - if provider.Meta() == nil { - continue - } - - conn := provider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(rs.Primary.ID)}, - }) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { - continue - } - if err != nil { - return err - } - - if len(resp.Reservations) > 0 { - *i = *resp.Reservations[0].Instances[0] - return nil - } - } - - return fmt.Errorf("Instance not found") - } -} - -func TestInstanceTenancySchema(t *testing.T) { - actualSchema := resourceAwsInstance().Schema["tenancy"] - expectedSchema := &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - } - if !reflect.DeepEqual(actualSchema, expectedSchema) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - actualSchema, - expectedSchema) - } -} - -func driftTags(instance *ec2.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := 
testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.CreateTags(&ec2.CreateTagsInput{ - Resources: []*string{instance.InstanceId}, - Tags: []*ec2.Tag{ - { - Key: aws.String("Drift"), - Value: aws.String("Happens"), - }, - }, - }) - return err - } -} - -const testAccInstanceConfig_pre = ` -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} -` - -const testAccInstanceConfig = ` -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - availability_zone = "us-west-2a" - - instance_type = "m1.small" - security_groups = ["${aws_security_group.tf_test_foo.name}"] - user_data = "foo:-with-character's" -} -` - -const testAccInstanceConfigWithSmallInstanceType = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - availability_zone = "us-west-2a" - - instance_type = "m3.medium" - - tags { - Name = "tf-acctest" - } -} -` - -const testAccInstanceConfigUpdateInstanceType = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - availability_zone = "us-west-2a" - - instance_type = "m3.large" - - tags { - Name = "tf-acctest" - } -} -` - -const testAccInstanceGP2IopsDevice = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - - # In order to attach an encrypted volume to an instance you need to have an - # m3.medium or larger. 
See "Supported Instance Types" in: - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } -} -` - -const testAccInstanceConfigBlockDevices = ` -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-55a7ea65" - - # In order to attach an encrypted volume to an instance you need to have an - # m3.medium or larger. See "Supported Instance Types" in: - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - - # Encrypted ebs block device - ebs_block_device { - device_name = "/dev/sdd" - volume_size = 12 - encrypted = true - } - - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } -} -` - -const testAccInstanceConfigSourceDestEnable = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigSourceDestEnable" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" -} -` - -const testAccInstanceConfigSourceDestDisable = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigSourceDestDisable" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" - source_dest_check = false -} -` - -func testAccInstanceConfigDisableAPITermination(val bool) string { - return fmt.Sprintf(` - resource 
"aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigDisableAPITermination" - } - } - - resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - } - - resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" - disable_api_termination = %t - } - `, val) -} - -const testAccInstanceConfigVPC = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigVPC" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" - associate_public_ip_address = true - tenancy = "dedicated" - # pre-encoded base64 data - user_data = "3dc39dda39be1205215e776bad998da361a5955d" -} -` - -const testAccInstanceConfigIpv6ErrorConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - tags { - Name = "tf-ipv6-instance-acc-test" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" - tags { - Name = "tf-ipv6-instance-acc-test" - } -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - ipv6_addresses = ["2600:1f14:bb2:e501::10"] - ipv6_address_count = 1 - tags { - Name = "tf-ipv6-instance-acc-test" - } -} -` - -const testAccInstanceConfigIpv6Support = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - tags { - Name = "tf-ipv6-instance-acc-test" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" - tags { - Name = 
"tf-ipv6-instance-acc-test" - } -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - - ipv6_address_count = 1 - tags { - Name = "tf-ipv6-instance-acc-test" - } -} -` - -const testAccInstanceConfigIpv6SupportWithIpv4 = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - tags { - Name = "tf-ipv6-instance-acc-test" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" - tags { - Name = "tf-ipv6-instance-acc-test" - } -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - - associate_public_ip_address = true - ipv6_address_count = 1 - tags { - Name = "tf-ipv6-instance-acc-test" - } -} -` - -const testAccInstanceConfigMultipleRegions = ` -provider "aws" { - alias = "west" - region = "us-west-2" -} - -provider "aws" { - alias = "east" - region = "us-east-1" -} - -resource "aws_instance" "foo" { - # us-west-2 - provider = "aws.west" - ami = "ami-4fccb37f" - instance_type = "m1.small" -} - -resource "aws_instance" "bar" { - # us-east-1 - provider = "aws.east" - ami = "ami-8c6ea9e4" - instance_type = "m1.small" -} -` - -const testAccCheckInstanceConfigTags = ` -resource "aws_instance" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - tags { - foo = "bar" - } -} -` - -const testAccCheckInstanceConfigWithAttachedVolume = ` -data "aws_ami" "debian_jessie_latest" { - most_recent = true - - filter { - name = "name" - values = ["debian-jessie-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = ["x86_64"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } - - owners = ["379101102735"] # Debian -} - -resource "aws_instance" "foo" { - ami = 
"${data.aws_ami.debian_jessie_latest.id}" - associate_public_ip_address = true - count = 1 - instance_type = "t2.medium" - - root_block_device { - volume_size = "10" - volume_type = "standard" - delete_on_termination = true - } - - tags { - Name = "test-terraform" - } -} - -resource "aws_ebs_volume" "test" { - depends_on = ["aws_instance.foo"] - availability_zone = "${aws_instance.foo.availability_zone}" - type = "gp2" - size = "10" - - tags { - Name = "test-terraform" - } -} - -resource "aws_volume_attachment" "test" { - depends_on = ["aws_ebs_volume.test"] - device_name = "/dev/xvdg" - volume_id = "${aws_ebs_volume.test.id}" - instance_id = "${aws_instance.foo.id}" -} -` - -const testAccCheckInstanceConfigNoVolumeTags = ` -resource "aws_instance" "foo" { - ami = "ami-55a7ea65" - - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - - ebs_block_device { - device_name = "/dev/sdd" - volume_size = 12 - encrypted = true - } - - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } -} -` - -const testAccCheckInstanceConfigWithVolumeTags = ` -resource "aws_instance" "foo" { - ami = "ami-55a7ea65" - - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - - ebs_block_device { - device_name = "/dev/sdd" - volume_size = 12 - encrypted = true - } - - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } - - volume_tags { - Name = "acceptance-test-volume-tag" - } -} -` - -const testAccCheckInstanceConfigWithVolumeTagsUpdate = ` -resource "aws_instance" "foo" { - ami = 
"ami-55a7ea65" - - instance_type = "m3.medium" - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - - ebs_block_device { - device_name = "/dev/sdd" - volume_size = 12 - encrypted = true - } - - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } - - volume_tags { - Name = "acceptance-test-volume-tag" - Environment = "dev" - } -} -` - -const testAccCheckInstanceConfigTagsUpdate = ` -resource "aws_instance" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - tags { - bar = "baz" - } -} -` - -func testAccInstanceConfigWithoutInstanceProfile(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test" { - name = "test-%s" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_instance_profile" "test" { - name = "test-%s" - roles = ["${aws_iam_role.test.name}"] -} - -resource "aws_instance" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - tags { - bar = "baz" - } -}`, rName, rName) -} - -func testAccInstanceConfigWithInstanceProfile(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test" { - name = "test-%s" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" -} - -resource "aws_iam_instance_profile" "test" { - name = "test-%s" - roles = ["${aws_iam_role.test.name}"] -} - -resource "aws_instance" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - iam_instance_profile = "${aws_iam_instance_profile.test.name}" - tags { - bar = "baz" - } -}`, rName, rName) -} - -const testAccInstanceConfigPrivateIP = ` -resource 
"aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigPrivateIP" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - private_ip = "10.1.1.42" -} -` - -const testAccInstanceConfigAssociatePublicIPAndPrivateIP = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigAssociatePublicIPAndPrivateIP" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - associate_public_ip_address = true - private_ip = "10.1.1.42" -} -` - -const testAccInstanceNetworkInstanceSecurityGroups = ` -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "tf-network-test" - } -} - -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id="${aws_vpc.foo.id}" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo_instance" { - ami = "ami-21f78e11" - instance_type = "t1.micro" - vpc_security_group_ids = ["${aws_security_group.tf_test_foo.id}"] - subnet_id = "${aws_subnet.foo.id}" - associate_public_ip_address = true - depends_on = ["aws_internet_gateway.gw"] -} - -resource "aws_eip" "foo_eip" { - instance = "${aws_instance.foo_instance.id}" - vpc = true - depends_on = ["aws_internet_gateway.gw"] -} -` - -const testAccInstanceNetworkInstanceVPCSecurityGroupIDs = ` -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpc" "foo" { - 
cidr_block = "10.1.0.0/16" - tags { - Name = "tf-network-test" - } -} - -resource "aws_security_group" "tf_test_foo" { - name = "tf_test_foo" - description = "foo" - vpc_id="${aws_vpc.foo.id}" - - ingress { - protocol = "icmp" - from_port = -1 - to_port = -1 - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo_instance" { - ami = "ami-21f78e11" - instance_type = "t1.micro" - vpc_security_group_ids = ["${aws_security_group.tf_test_foo.id}"] - subnet_id = "${aws_subnet.foo.id}" - depends_on = ["aws_internet_gateway.gw"] -} - -resource "aws_eip" "foo_eip" { - instance = "${aws_instance.foo_instance.id}" - vpc = true - depends_on = ["aws_internet_gateway.gw"] -} -` - -func testAccInstanceConfigKeyPair(keyPairName string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_key_pair" "debugging" { - key_name = "%s" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} - -resource "aws_instance" "foo" { - ami = "ami-408c7f28" - instance_type = "t1.micro" - key_name = "${aws_key_pair.debugging.key_name}" - tags { - Name = "testAccInstanceConfigKeyPair_TestAMI" - } -} -`, keyPairName) -} - -const testAccInstanceConfigRootBlockDeviceMismatch = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigRootBlockDeviceMismatch" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - // This is an AMI with RootDeviceName: "/dev/sda1"; 
actual root: "/dev/sda" - ami = "ami-ef5b69df" - instance_type = "t1.micro" - subnet_id = "${aws_subnet.foo.id}" - root_block_device { - volume_size = 13 - } -} -` - -const testAccInstanceConfigForceNewAndTagsDrift = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigForceNewAndTagsDrift" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.nano" - subnet_id = "${aws_subnet.foo.id}" -} -` - -const testAccInstanceConfigForceNewAndTagsDrift_Update = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInstanceConfigForceNewAndTagsDrift_Update" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" -} -` - -const testAccInstanceConfigPrimaryNetworkInterface = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - tags { - Name = "primary_network_interface" - } -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.micro" - network_interface { - network_interface_id = "${aws_network_interface.bar.id}" - device_index = 0 - } -} -` - -const testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" 
- tags { - Name = "tf-instance-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - source_dest_check = false - tags { - Name = "primary_network_interface" - } -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.micro" - network_interface { - network_interface_id = "${aws_network_interface.bar.id}" - device_index = 0 - } -} -` - -const testAccInstanceConfigAddSecondaryNetworkInterfaceBefore = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_network_interface" "primary" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - tags { - Name = "primary_network_interface" - } -} - -resource "aws_network_interface" "secondary" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.101"] - tags { - Name = "secondary_network_interface" - } -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.micro" - network_interface { - network_interface_id = "${aws_network_interface.primary.id}" - device_index = 0 - } -} -` - -const testAccInstanceConfigAddSecondaryNetworkInterfaceAfter = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-instance-test" - } -} - -resource "aws_network_interface" "primary" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - tags { - Name = "primary_network_interface" - } -} - -// Attach previously created network interface, observe no state diff on instance resource -resource "aws_network_interface" "secondary" { - 
subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.101"] - tags { - Name = "secondary_network_interface" - } - attachment { - instance = "${aws_instance.foo.id}" - device_index = 1 - } -} - -resource "aws_instance" "foo" { - ami = "ami-22b9a343" - instance_type = "t2.micro" - network_interface { - network_interface_id = "${aws_network_interface.primary.id}" - device_index = 0 - } -} -` - -const testAccInstanceConfigAddSecurityGroupBefore = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-foo-instance-add-sg-test" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.11.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-bar-instance-add-sg-test" - } -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" -} - -resource "aws_security_group" "bar" { - vpc_id = "${aws_vpc.foo.id}" - description = "bar" - name = "bar" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" - associate_public_ip_address = false - vpc_security_group_ids = [ - "${aws_security_group.foo.id}" - ] - tags { - Name = "foo-instance-sg-add-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - attachment { - instance = "${aws_instance.foo.id}" - device_index = 1 - } - tags { - Name = "bar_interface" - } -} -` - -const testAccInstanceConfigAddSecurityGroupAfter = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = 
"us-west-2a" - tags { - Name = "tf-foo-instance-add-sg-test" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.11.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-bar-instance-add-sg-test" - } -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" -} - -resource "aws_security_group" "bar" { - vpc_id = "${aws_vpc.foo.id}" - description = "bar" - name = "bar" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" - associate_public_ip_address = false - vpc_security_group_ids = [ - "${aws_security_group.foo.id}", - "${aws_security_group.bar.id}" - ] - tags { - Name = "foo-instance-sg-add-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - attachment { - instance = "${aws_instance.foo.id}" - device_index = 1 - } - tags { - Name = "bar_interface" - } -} -` diff --git a/builtin/providers/aws/resource_aws_internet_gateway.go b/builtin/providers/aws/resource_aws_internet_gateway.go deleted file mode 100644 index 3834aa58e..000000000 --- a/builtin/providers/aws/resource_aws_internet_gateway.go +++ /dev/null @@ -1,354 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsInternetGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsInternetGatewayCreate, - Read: resourceAwsInternetGatewayRead, - Update: resourceAwsInternetGatewayUpdate, - Delete: resourceAwsInternetGatewayDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - 
Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Create the gateway - log.Printf("[DEBUG] Creating internet gateway") - var err error - resp, err := conn.CreateInternetGateway(nil) - if err != nil { - return fmt.Errorf("Error creating internet gateway: %s", err) - } - - // Get the ID and store it - ig := *resp.InternetGateway - d.SetId(*ig.InternetGatewayId) - log.Printf("[INFO] InternetGateway ID: %s", d.Id()) - - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - igRaw, _, err := IGStateRefreshFunc(conn, d.Id())() - if igRaw != nil { - return nil - } - if err == nil { - return resource.RetryableError(err) - } else { - return resource.NonRetryableError(err) - } - }) - - if err != nil { - return errwrap.Wrapf("{{err}}", err) - } - - err = setTags(conn, d) - if err != nil { - return err - } - - // Attach the new gateway to the correct vpc - return resourceAwsInternetGatewayAttach(d, meta) -} - -func resourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - igRaw, _, err := IGStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if igRaw == nil { - // Seems we have lost our internet gateway - d.SetId("") - return nil - } - - ig := igRaw.(*ec2.InternetGateway) - if len(ig.Attachments) == 0 { - // Gateway exists but not attached to the VPC - d.Set("vpc_id", "") - } else { - d.Set("vpc_id", ig.Attachments[0].VpcId) - } - - d.Set("tags", tagsToMap(ig.Tags)) - - return nil -} - -func resourceAwsInternetGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - if d.HasChange("vpc_id") { - // If we're already attached, detach it first - if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { - return err - } - - // Attach the 
gateway to the new vpc - if err := resourceAwsInternetGatewayAttach(d, meta); err != nil { - return err - } - } - - conn := meta.(*AWSClient).ec2conn - - if err := setTags(conn, d); err != nil { - return err - } - - d.SetPartial("tags") - - return nil -} - -func resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Detach if it is attached - if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { - return err - } - - log.Printf("[INFO] Deleting Internet Gateway: %s", d.Id()) - - return resource.Retry(10*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{ - InternetGatewayId: aws.String(d.Id()), - }) - if err == nil { - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidInternetGatewayID.NotFound": - return nil - case "DependencyViolation": - return resource.RetryableError(err) // retry - } - - return resource.NonRetryableError(err) - }) -} - -func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if d.Get("vpc_id").(string) == "" { - log.Printf( - "[DEBUG] Not attaching Internet Gateway '%s' as no VPC ID is set", - d.Id()) - return nil - } - - log.Printf( - "[INFO] Attaching Internet Gateway '%s' to VPC '%s'", - d.Id(), - d.Get("vpc_id").(string)) - - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ - InternetGatewayId: aws.String(d.Id()), - VpcId: aws.String(d.Get("vpc_id").(string)), - }) - if err == nil { - return nil - } - if ec2err, ok := err.(awserr.Error); ok { - switch ec2err.Code() { - case "InvalidInternetGatewayID.NotFound": - return resource.RetryableError(err) // retry - } - } - return resource.NonRetryableError(err) - }) - if err != nil { - return err - } 
- - // A note on the states below: the AWS docs (as of July, 2014) say - // that the states would be: attached, attaching, detached, detaching, - // but when running, I noticed that the state is usually "available" when - // it is attached. - - // Wait for it to be fully attached before continuing - log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"detached", "attaching"}, - Target: []string{"available"}, - Refresh: IGAttachStateRefreshFunc(conn, d.Id(), "available"), - Timeout: 4 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for internet gateway (%s) to attach: %s", - d.Id(), err) - } - - return nil -} - -func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Get the old VPC ID to detach from - vpcID, _ := d.GetChange("vpc_id") - - if vpcID.(string) == "" { - log.Printf( - "[DEBUG] Not detaching Internet Gateway '%s' as no VPC ID is set", - d.Id()) - return nil - } - - log.Printf( - "[INFO] Detaching Internet Gateway '%s' from VPC '%s'", - d.Id(), - vpcID.(string)) - - // Wait for it to be fully detached before continuing - log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"detaching"}, - Target: []string{"detached"}, - Refresh: detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)), - Timeout: 15 * time.Minute, - Delay: 10 * time.Second, - NotFoundChecks: 30, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for internet gateway (%s) to detach: %s", - d.Id(), err) - } - - return nil -} - -// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an EC2 instance. 
-func detachIGStateRefreshFunc(conn *ec2.EC2, gatewayID, vpcID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - _, err := conn.DetachInternetGateway(&ec2.DetachInternetGatewayInput{ - InternetGatewayId: aws.String(gatewayID), - VpcId: aws.String(vpcID), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - switch ec2err.Code() { - case "InvalidInternetGatewayID.NotFound": - log.Printf("[TRACE] Error detaching Internet Gateway '%s' from VPC '%s': %s", gatewayID, vpcID, err) - return nil, "Not Found", nil - - case "Gateway.NotAttached": - return "detached", "detached", nil - - case "DependencyViolation": - return nil, "detaching", nil - } - } - } - - // DetachInternetGateway only returns an error, so if it's nil, assume we're - // detached - return "detached", "detached", nil - } -} - -// IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an internet gateway. -func IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{ - InternetGatewayIds: []*string{aws.String(id)}, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidInternetGatewayID.NotFound" { - resp = nil - } else { - log.Printf("[ERROR] Error on IGStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - ig := resp.InternetGateways[0] - return ig, "available", nil - } -} - -// IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used -// watch the state of an internet gateway's attachment. 
-func IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc { - var start time.Time - return func() (interface{}, string, error) { - if start.IsZero() { - start = time.Now() - } - - resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{ - InternetGatewayIds: []*string{aws.String(id)}, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidInternetGatewayID.NotFound" { - resp = nil - } else { - log.Printf("[ERROR] Error on IGStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - ig := resp.InternetGateways[0] - - if time.Now().Sub(start) > 10*time.Second { - return ig, expected, nil - } - - if len(ig.Attachments) == 0 { - // No attachments, we're detached - return ig, "detached", nil - } - - return ig, *ig.Attachments[0].State, nil - } -} diff --git a/builtin/providers/aws/resource_aws_internet_gateway_test.go b/builtin/providers/aws/resource_aws_internet_gateway_test.go deleted file mode 100644 index e8f748b26..000000000 --- a/builtin/providers/aws/resource_aws_internet_gateway_test.go +++ /dev/null @@ -1,254 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSInternetGateway_basic(t *testing.T) { - var v, v2 ec2.InternetGateway - - testNotEqual := func(*terraform.State) error { - if len(v.Attachments) == 0 { - return fmt.Errorf("IG A is not attached") - } - if len(v2.Attachments) == 0 { - return fmt.Errorf("IG B is not attached") - } - - id1 := v.Attachments[0].VpcId - id2 := v2.Attachments[0].VpcId - if id1 == id2 { - return fmt.Errorf("Both attachment IDs are the same") - 
} - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_internet_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInternetGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInternetGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInternetGatewayExists( - "aws_internet_gateway.foo", &v), - ), - }, - - resource.TestStep{ - Config: testAccInternetGatewayConfigChangeVPC, - Check: resource.ComposeTestCheckFunc( - testAccCheckInternetGatewayExists( - "aws_internet_gateway.foo", &v2), - testNotEqual, - ), - }, - }, - }) -} - -func TestAccAWSInternetGateway_delete(t *testing.T) { - var ig ec2.InternetGateway - - testDeleted := func(r string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[r] - if ok { - return fmt.Errorf("Internet Gateway %q should have been deleted", r) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_internet_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInternetGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInternetGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckInternetGatewayExists("aws_internet_gateway.foo", &ig)), - }, - resource.TestStep{ - Config: testAccNoInternetGatewayConfig, - Check: resource.ComposeTestCheckFunc(testDeleted("aws_internet_gateway.foo")), - }, - }, - }) -} - -func TestAccAWSInternetGateway_tags(t *testing.T) { - var v ec2.InternetGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_internet_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckInternetGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckInternetGatewayConfigTags, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckInternetGatewayExists("aws_internet_gateway.foo", &v), - testAccCheckTags(&v.Tags, "foo", "bar"), - ), - }, - - resource.TestStep{ - Config: testAccCheckInternetGatewayConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckInternetGatewayExists("aws_internet_gateway.foo", &v), - testAccCheckTags(&v.Tags, "foo", ""), - testAccCheckTags(&v.Tags, "bar", "baz"), - ), - }, - }, - }) -} - -func testAccCheckInternetGatewayDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_internet_gateway" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{ - InternetGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.InternetGateways) > 0 { - return fmt.Errorf("still exists") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidInternetGatewayID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckInternetGatewayExists(n string, ig *ec2.InternetGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{ - InternetGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.InternetGateways) == 0 { - return fmt.Errorf("InternetGateway not found") - } - - *ig = *resp.InternetGateways[0] - - return nil - } -} - -const testAccNoInternetGatewayConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = 
"testAccNoInternetGatewayConfig" - } -} -` - -const testAccInternetGatewayConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInternetGatewayConfig" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} -` - -const testAccInternetGatewayConfigChangeVPC = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccInternetGatewayConfigChangeVPC" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" - tags { - Name = "testAccInternetGatewayConfigChangeVPC_other" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.bar.id}" -} -` - -const testAccCheckInternetGatewayConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccCheckInternetGatewayConfigTags" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" - tags { - foo = "bar" - } -} -` - -const testAccCheckInternetGatewayConfigTagsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccCheckInternetGatewayConfigTagsUpdate" - } -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" - tags { - bar = "baz" - } -} -` diff --git a/builtin/providers/aws/resource_aws_key_pair.go b/builtin/providers/aws/resource_aws_key_pair.go deleted file mode 100644 index 02050c7af..000000000 --- a/builtin/providers/aws/resource_aws_key_pair.go +++ /dev/null @@ -1,131 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func resourceAwsKeyPair() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsKeyPairCreate, - Read: resourceAwsKeyPairRead, - Update: nil, - Delete: resourceAwsKeyPairDelete, - Importer: &schema.ResourceImporter{ - State: 
schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsKeyPairMigrateState, - - Schema: map[string]*schema.Schema{ - "key_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"key_name_prefix"}, - }, - "key_name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters, name is limited to 255", k)) - } - return - }, - }, - "public_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - return strings.TrimSpace(v.(string)) - default: - return "" - } - }, - }, - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - var keyName string - if v, ok := d.GetOk("key_name"); ok { - keyName = v.(string) - } else if v, ok := d.GetOk("key_name_prefix"); ok { - keyName = resource.PrefixedUniqueId(v.(string)) - d.Set("key_name", keyName) - } else { - keyName = resource.UniqueId() - d.Set("key_name", keyName) - } - - publicKey := d.Get("public_key").(string) - req := &ec2.ImportKeyPairInput{ - KeyName: aws.String(keyName), - PublicKeyMaterial: []byte(publicKey), - } - resp, err := conn.ImportKeyPair(req) - if err != nil { - return fmt.Errorf("Error import KeyPair: %s", err) - } - - d.SetId(*resp.KeyName) - return nil -} - -func resourceAwsKeyPairRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - req := &ec2.DescribeKeyPairsInput{ - KeyNames: []*string{aws.String(d.Id())}, - } - resp, err := conn.DescribeKeyPairs(req) - if err != nil { - 
awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "InvalidKeyPair.NotFound" { - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving KeyPair: %s", err) - } - - for _, keyPair := range resp.KeyPairs { - if *keyPair.KeyName == d.Id() { - d.Set("key_name", keyPair.KeyName) - d.Set("fingerprint", keyPair.KeyFingerprint) - return nil - } - } - - return fmt.Errorf("Unable to find key pair within: %#v", resp.KeyPairs) -} - -func resourceAwsKeyPairDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{ - KeyName: aws.String(d.Id()), - }) - return err -} diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate.go b/builtin/providers/aws/resource_aws_key_pair_migrate.go deleted file mode 100644 index c937ac360..000000000 --- a/builtin/providers/aws/resource_aws_key_pair_migrate.go +++ /dev/null @@ -1,36 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsKeyPairMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Key Pair State v0; migrating to v1") - return migrateKeyPairStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - // replace public_key with a stripped version, removing `\n` from the end - // see https://github.com/hashicorp/terraform/issues/3455 - is.Attributes["public_key"] = strings.TrimSpace(is.Attributes["public_key"]) - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git 
a/builtin/providers/aws/resource_aws_key_pair_migrate_test.go b/builtin/providers/aws/resource_aws_key_pair_migrate_test.go deleted file mode 100644 index 825d3c40f..000000000 --- a/builtin/providers/aws/resource_aws_key_pair_migrate_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSKeyPairMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta interface{} - }{ - "v0_1": { - StateVersion: 0, - ID: "tf-testing-file", - Attributes: map[string]string{ - "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", - "key_name": "tf-testing-file", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", - }, - Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", - }, - "v0_2": { - StateVersion: 0, - ID: "tf-testing-file", - Attributes: map[string]string{ - "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", - "key_name": "tf-testing-file", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock\n", - }, - Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsKeyPairMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.Attributes["public_key"] != tc.Expected { - t.Fatalf("Bad public_key migration: %s\n\n expected: %s", is.Attributes["public_key"], tc.Expected) - } - } -} diff --git a/builtin/providers/aws/resource_aws_key_pair_test.go b/builtin/providers/aws/resource_aws_key_pair_test.go deleted file mode 100644 index e0c510d26..000000000 --- a/builtin/providers/aws/resource_aws_key_pair_test.go +++ 
/dev/null @@ -1,233 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func init() { - resource.AddTestSweepers("aws_key_pair", &resource.Sweeper{ - Name: "aws_key_pair", - F: testSweepKeyPairs, - }) -} - -func testSweepKeyPairs(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - ec2conn := client.(*AWSClient).ec2conn - - log.Printf("Destroying the tmp keys in (%s)", client.(*AWSClient).region) - - resp, err := ec2conn.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("key-name"), - Values: []*string{aws.String("tmp-key*")}, - }, - }, - }) - if err != nil { - return fmt.Errorf("Error describing key pairs in Sweeper: %s", err) - } - - keyPairs := resp.KeyPairs - for _, d := range keyPairs { - _, err := ec2conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{ - KeyName: d.KeyName, - }) - - if err != nil { - return fmt.Errorf("Error deleting key pairs in Sweeper: %s", err) - } - } - return nil -} - -func TestAccAWSKeyPair_basic(t *testing.T) { - var conf ec2.KeyPairInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKeyPairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSKeyPairConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKeyPairExists("aws_key_pair.a_key_pair", &conf), - testAccCheckAWSKeyPairFingerprint("d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", &conf), - ), - }, - }, - }) -} - -func TestAccAWSKeyPair_generatedName(t *testing.T) { - var conf ec2.KeyPairInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) 
}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKeyPairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSKeyPairConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKeyPairExists("aws_key_pair.a_key_pair", &conf), - testAccCheckAWSKeyPairFingerprint("d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", &conf), - func(s *terraform.State) error { - if conf.KeyName == nil { - return fmt.Errorf("bad: No SG name") - } - if !strings.HasPrefix(*conf.KeyName, "terraform-") { - return fmt.Errorf("No terraform- prefix: %s", *conf.KeyName) - } - return nil - }, - ), - }, - }, - }) -} - -func testAccCheckAWSKeyPairDestroy(s *terraform.State) error { - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_key_pair" { - continue - } - - // Try to find key pair - resp, err := ec2conn.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{ - KeyNames: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.KeyPairs) > 0 { - return fmt.Errorf("still exist.") - } - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidKeyPair.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSKeyPairFingerprint(expectedFingerprint string, conf *ec2.KeyPairInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.KeyFingerprint != expectedFingerprint { - return fmt.Errorf("incorrect fingerprint. 
expected %s, got %s", expectedFingerprint, *conf.KeyFingerprint) - } - return nil - } -} - -func testAccCheckAWSKeyPairExists(n string, res *ec2.KeyPairInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No KeyPair name is set") - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - resp, err := ec2conn.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{ - KeyNames: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.KeyPairs) != 1 || - *resp.KeyPairs[0].KeyName != rs.Primary.ID { - return fmt.Errorf("KeyPair not found") - } - - *res = *resp.KeyPairs[0] - - return nil - } -} - -func testAccCheckAWSKeyPair_namePrefix(t *testing.T) { - var conf ec2.KeyPairInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_key_pair.a_key_pair", - IDRefreshIgnore: []string{"key_name_prefix"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKeyPairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckAWSKeyPairPrefixNameConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKeyPairExists("aws_key_pair.a_key_pair", &conf), - testAccCheckAWSKeyPairGeneratedNamePrefix( - "aws_key_pair.a_key_pair", "baz-"), - ), - }, - }, - }) -} - -func testAccCheckAWSKeyPairGeneratedNamePrefix( - resource, prefix string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r, ok := s.RootModule().Resources[resource] - if !ok { - return fmt.Errorf("Resource not found") - } - name, ok := r.Primary.Attributes["name"] - if !ok { - return fmt.Errorf("Name attr not found: %#v", r.Primary.Attributes) - } - if !strings.HasPrefix(name, prefix) { - return fmt.Errorf("Name: %q, does not have prefix: %q", name, prefix) - } - return nil - } -} - -const testAccAWSKeyPairConfig = ` 
-resource "aws_key_pair" "a_key_pair" { - key_name = "tf-acc-key-pair" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} -` - -const testAccAWSKeyPairConfig_generatedName = ` -resource "aws_key_pair" "a_key_pair" { - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} -` - -const testAccCheckAWSKeyPairPrefixNameConfig = ` -resource "aws_key_pair" "a_key_pair" { - key_name_prefix = "baz-" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} -` diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go deleted file mode 100644 index 3cd476be9..000000000 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ /dev/null @@ -1,773 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func cloudWatchLoggingOptionsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "log_group_name": { - Type: schema.TypeString, - Optional: true, - }, - - "log_stream_name": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - } -} - -func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsKinesisFirehoseDeliveryStreamCreate, - Read: resourceAwsKinesisFirehoseDeliveryStreamRead, - Update: resourceAwsKinesisFirehoseDeliveryStreamUpdate, - Delete: resourceAwsKinesisFirehoseDeliveryStreamDelete, - - SchemaVersion: 1, - MigrateState: resourceAwsKinesisFirehoseMigrateState, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters", k)) - } - return - }, - }, - - "destination": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - value := v.(string) - return strings.ToLower(value) - }, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "s3" && value != "redshift" && value != "elasticsearch" { - errors = append(errors, fmt.Errorf( - "%q must be one of 's3', 'redshift', 'elasticsearch'", k)) - } - return - }, - }, - - "s3_configuration": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - 
Schema: map[string]*schema.Schema{ - "bucket_arn": { - Type: schema.TypeString, - Required: true, - }, - - "buffer_size": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "buffer_interval": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - }, - - "compression_format": { - Type: schema.TypeString, - Optional: true, - Default: "UNCOMPRESSED", - }, - - "kms_key_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateArn, - }, - - "role_arn": { - Type: schema.TypeString, - Required: true, - }, - - "prefix": { - Type: schema.TypeString, - Optional: true, - }, - - "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), - }, - }, - }, - - "redshift_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_jdbcurl": { - Type: schema.TypeString, - Required: true, - }, - - "username": { - Type: schema.TypeString, - Required: true, - }, - - "password": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - - "role_arn": { - Type: schema.TypeString, - Required: true, - }, - - "retry_duration": { - Type: schema.TypeInt, - Optional: true, - Default: 3600, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 0 || value > 7200 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 0 to 7200 seconds.", k)) - } - return - }, - }, - - "copy_options": { - Type: schema.TypeString, - Optional: true, - }, - - "data_table_columns": { - Type: schema.TypeString, - Optional: true, - }, - - "data_table_name": { - Type: schema.TypeString, - Required: true, - }, - - "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), - }, - }, - }, - - "elasticsearch_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "buffering_interval": { - Type: schema.TypeInt, - 
Optional: true, - Default: 300, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 60 || value > 900 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 60 to 900 seconds.", k)) - } - return - }, - }, - - "buffering_size": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 100 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 1 to 100 MB.", k)) - } - return - }, - }, - - "domain_arn": { - Type: schema.TypeString, - Required: true, - }, - - "index_name": { - Type: schema.TypeString, - Required: true, - }, - - "index_rotation_period": { - Type: schema.TypeString, - Optional: true, - Default: "OneDay", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "NoRotation" && value != "OneHour" && value != "OneDay" && value != "OneWeek" && value != "OneMonth" { - errors = append(errors, fmt.Errorf( - "%q must be one of 'NoRotation', 'OneHour', 'OneDay', 'OneWeek', 'OneMonth'", k)) - } - return - }, - }, - - "retry_duration": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 0 || value > 7200 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 0 to 7200 seconds.", k)) - } - return - }, - }, - - "role_arn": { - Type: schema.TypeString, - Required: true, - }, - - "s3_backup_mode": { - Type: schema.TypeString, - Optional: true, - Default: "FailedDocumentsOnly", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "FailedDocumentsOnly" && value != "AllDocuments" { - errors = append(errors, fmt.Errorf( - "%q must be one of 'FailedDocumentsOnly', 'AllDocuments'", k)) - } - return - }, - }, - - 
"type_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters", k)) - } - return - }, - }, - - "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), - }, - }, - }, - - "arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "version_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "destination_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func createS3Config(d *schema.ResourceData) *firehose.S3DestinationConfiguration { - s3 := d.Get("s3_configuration").([]interface{})[0].(map[string]interface{}) - - configuration := &firehose.S3DestinationConfiguration{ - BucketARN: aws.String(s3["bucket_arn"].(string)), - RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(int64(s3["buffer_interval"].(int))), - SizeInMBs: aws.Int64(int64(s3["buffer_size"].(int))), - }, - Prefix: extractPrefixConfiguration(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), - EncryptionConfiguration: extractEncryptionConfiguration(s3), - } - - if _, ok := s3["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) - } - - return configuration -} - -func updateS3Config(d *schema.ResourceData) *firehose.S3DestinationUpdate { - s3 := d.Get("s3_configuration").([]interface{})[0].(map[string]interface{}) - - configuration := &firehose.S3DestinationUpdate{ - BucketARN: aws.String(s3["bucket_arn"].(string)), - RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64((int64)(s3["buffer_interval"].(int))), - SizeInMBs: aws.Int64((int64)(s3["buffer_size"].(int))), - }, - Prefix: 
extractPrefixConfiguration(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), - EncryptionConfiguration: extractEncryptionConfiguration(s3), - CloudWatchLoggingOptions: extractCloudWatchLoggingConfiguration(s3), - } - - if _, ok := s3["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) - } - - return configuration -} - -func extractEncryptionConfiguration(s3 map[string]interface{}) *firehose.EncryptionConfiguration { - if key, ok := s3["kms_key_arn"]; ok && len(key.(string)) > 0 { - return &firehose.EncryptionConfiguration{ - KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ - AWSKMSKeyARN: aws.String(key.(string)), - }, - } - } - - return &firehose.EncryptionConfiguration{ - NoEncryptionConfig: aws.String("NoEncryption"), - } -} - -func extractCloudWatchLoggingConfiguration(s3 map[string]interface{}) *firehose.CloudWatchLoggingOptions { - config := s3["cloudwatch_logging_options"].(*schema.Set).List() - if len(config) == 0 { - return nil - } - - loggingConfig := config[0].(map[string]interface{}) - loggingOptions := &firehose.CloudWatchLoggingOptions{ - Enabled: aws.Bool(loggingConfig["enabled"].(bool)), - } - - if v, ok := loggingConfig["log_group_name"]; ok { - loggingOptions.LogGroupName = aws.String(v.(string)) - } - - if v, ok := loggingConfig["log_stream_name"]; ok { - loggingOptions.LogStreamName = aws.String(v.(string)) - } - - return loggingOptions - -} - -func extractPrefixConfiguration(s3 map[string]interface{}) *string { - if v, ok := s3["prefix"]; ok { - return aws.String(v.(string)) - } - - return nil -} - -func createRedshiftConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.RedshiftDestinationConfiguration, error) { - redshiftRaw, ok := d.GetOk("redshift_configuration") - if !ok { - return nil, fmt.Errorf("[ERR] Error loading Redshift Configuration for Kinesis Firehose: redshift_configuration not found") - } - rl := 
redshiftRaw.([]interface{}) - - redshift := rl[0].(map[string]interface{}) - - configuration := &firehose.RedshiftDestinationConfiguration{ - ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)), - RetryOptions: extractRedshiftRetryOptions(redshift), - Password: aws.String(redshift["password"].(string)), - Username: aws.String(redshift["username"].(string)), - RoleARN: aws.String(redshift["role_arn"].(string)), - CopyCommand: extractCopyCommandConfiguration(redshift), - S3Configuration: s3Config, - } - - if _, ok := redshift["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(redshift) - } - - return configuration, nil -} - -func updateRedshiftConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.RedshiftDestinationUpdate, error) { - redshiftRaw, ok := d.GetOk("redshift_configuration") - if !ok { - return nil, fmt.Errorf("[ERR] Error loading Redshift Configuration for Kinesis Firehose: redshift_configuration not found") - } - rl := redshiftRaw.([]interface{}) - - redshift := rl[0].(map[string]interface{}) - - configuration := &firehose.RedshiftDestinationUpdate{ - ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)), - RetryOptions: extractRedshiftRetryOptions(redshift), - Password: aws.String(redshift["password"].(string)), - Username: aws.String(redshift["username"].(string)), - RoleARN: aws.String(redshift["role_arn"].(string)), - CopyCommand: extractCopyCommandConfiguration(redshift), - S3Update: s3Update, - } - - if _, ok := redshift["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(redshift) - } - - return configuration, nil -} - -func createElasticsearchConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.ElasticsearchDestinationConfiguration, error) { - esConfig, ok := d.GetOk("elasticsearch_configuration") - if !ok { - return nil, 
fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") - } - esList := esConfig.([]interface{}) - - es := esList[0].(map[string]interface{}) - - config := &firehose.ElasticsearchDestinationConfiguration{ - BufferingHints: extractBufferingHints(es), - DomainARN: aws.String(es["domain_arn"].(string)), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: extractElasticSearchRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - TypeName: aws.String(es["type_name"].(string)), - S3Configuration: s3Config, - } - - if _, ok := es["cloudwatch_logging_options"]; ok { - config.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(es) - } - - if indexRotationPeriod, ok := es["index_rotation_period"]; ok { - config.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) - } - if s3BackupMode, ok := es["s3_backup_mode"]; ok { - config.S3BackupMode = aws.String(s3BackupMode.(string)) - } - - return config, nil -} - -func updateElasticsearchConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.ElasticsearchDestinationUpdate, error) { - esConfig, ok := d.GetOk("elasticsearch_configuration") - if !ok { - return nil, fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") - } - esList := esConfig.([]interface{}) - - es := esList[0].(map[string]interface{}) - - update := &firehose.ElasticsearchDestinationUpdate{ - BufferingHints: extractBufferingHints(es), - DomainARN: aws.String(es["domain_arn"].(string)), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: extractElasticSearchRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - TypeName: aws.String(es["type_name"].(string)), - S3Update: s3Update, - } - - if _, ok := es["cloudwatch_logging_options"]; ok { - update.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(es) - } - - if 
indexRotationPeriod, ok := es["index_rotation_period"]; ok { - update.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) - } - - return update, nil -} - -func extractBufferingHints(es map[string]interface{}) *firehose.ElasticsearchBufferingHints { - bufferingHints := &firehose.ElasticsearchBufferingHints{} - - if bufferingInterval, ok := es["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) - } - if bufferingSize, ok := es["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) - } - - return bufferingHints -} - -func extractElasticSearchRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions { - retryOptions := &firehose.ElasticsearchRetryOptions{} - - if retryDuration, ok := es["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) - } - - return retryOptions -} - -func extractRedshiftRetryOptions(redshift map[string]interface{}) *firehose.RedshiftRetryOptions { - retryOptions := &firehose.RedshiftRetryOptions{} - - if retryDuration, ok := redshift["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) - } - - return retryOptions -} - -func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose.CopyCommand { - cmd := &firehose.CopyCommand{ - DataTableName: aws.String(redshift["data_table_name"].(string)), - } - if copyOptions, ok := redshift["copy_options"]; ok { - cmd.CopyOptions = aws.String(copyOptions.(string)) - } - if columns, ok := redshift["data_table_columns"]; ok { - cmd.DataTableColumns = aws.String(columns.(string)) - } - - return cmd -} - -func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).firehoseconn - - sn := d.Get("name").(string) - s3Config := createS3Config(d) - - createInput := &firehose.CreateDeliveryStreamInput{ - DeliveryStreamName: 
aws.String(sn), - } - - if d.Get("destination").(string) == "s3" { - createInput.S3DestinationConfiguration = s3Config - } else if d.Get("destination").(string) == "elasticsearch" { - esConfig, err := createElasticsearchConfig(d, s3Config) - if err != nil { - return err - } - createInput.ElasticsearchDestinationConfiguration = esConfig - } else { - rc, err := createRedshiftConfig(d, s3Config) - if err != nil { - return err - } - createInput.RedshiftDestinationConfiguration = rc - } - - var lastError error - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.CreateDeliveryStream(createInput) - if err != nil { - log.Printf("[DEBUG] Error creating Firehose Delivery Stream: %s", err) - lastError = err - - if awsErr, ok := err.(awserr.Error); ok { - // IAM roles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if awsErr.Code() == "InvalidArgumentException" && strings.Contains(awsErr.Message(), "Firehose is unable to assume role") { - log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...") - return resource.RetryableError(awsErr) - } - } - // Not retryable - return resource.NonRetryableError(err) - } - - return nil - }) - if err != nil { - if awsErr, ok := lastError.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) - } - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"CREATING"}, - Target: []string{"ACTIVE"}, - Refresh: firehoseStreamStateRefreshFunc(conn, sn), - Timeout: 20 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - firehoseStream, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Kinesis Stream (%s) to become active: %s", - sn, err) - } - - s := 
firehoseStream.(*firehose.DeliveryStreamDescription) - d.SetId(*s.DeliveryStreamARN) - d.Set("arn", s.DeliveryStreamARN) - - return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) -} - -func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).firehoseconn - - sn := d.Get("name").(string) - s3Config := updateS3Config(d) - - updateInput := &firehose.UpdateDestinationInput{ - DeliveryStreamName: aws.String(sn), - CurrentDeliveryStreamVersionId: aws.String(d.Get("version_id").(string)), - DestinationId: aws.String(d.Get("destination_id").(string)), - } - - if d.Get("destination").(string) == "s3" { - updateInput.S3DestinationUpdate = s3Config - } else if d.Get("destination").(string) == "elasticsearch" { - esUpdate, err := updateElasticsearchConfig(d, s3Config) - if err != nil { - return err - } - updateInput.ElasticsearchDestinationUpdate = esUpdate - } else { - rc, err := updateRedshiftConfig(d, s3Config) - if err != nil { - return err - } - updateInput.RedshiftDestinationUpdate = rc - } - - _, err := conn.UpdateDestination(updateInput) - if err != nil { - return fmt.Errorf( - "Error Updating Kinesis Firehose Delivery Stream: \"%s\"\n%s", - sn, err) - } - - return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) -} - -func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).firehoseconn - - resp, err := conn.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{ - DeliveryStreamName: aws.String(d.Get("name").(string)), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ResourceNotFoundException" { - d.SetId("") - return nil - } - return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) - } - return err - } - - s := resp.DeliveryStreamDescription - d.Set("version_id", s.VersionId) - d.Set("arn", 
*s.DeliveryStreamARN) - if len(s.Destinations) > 0 { - destination := s.Destinations[0] - d.Set("destination_id", *destination.DestinationId) - } - - return nil -} - -func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).firehoseconn - - sn := d.Get("name").(string) - _, err := conn.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{ - DeliveryStreamName: aws.String(sn), - }) - - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DESTROYED"}, - Refresh: firehoseStreamStateRefreshFunc(conn, sn), - Timeout: 20 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Delivery Stream (%s) to be destroyed: %s", - sn, err) - } - - d.SetId("") - return nil -} - -func firehoseStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - describeOpts := &firehose.DescribeDeliveryStreamInput{ - DeliveryStreamName: aws.String(sn), - } - resp, err := conn.DescribeDeliveryStream(describeOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ResourceNotFoundException" { - return 42, "DESTROYED", nil - } - return nil, awsErr.Code(), err - } - return nil, "failed", err - } - - return resp.DeliveryStreamDescription, *resp.DeliveryStreamDescription.DeliveryStreamStatus, nil - } -} diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go deleted file mode 100644 index 7ed8bfa33..000000000 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go +++ /dev/null @@ -1,59 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" 
-) - -func resourceAwsKinesisFirehoseMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Kinesis Firehose Delivery Stream State v0; migrating to v1") - return migrateKinesisFirehoseV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateKinesisFirehoseV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty Kinesis Firehose Delivery State; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - // migrate flate S3 configuration to a s3_configuration block - // grab initial values - is.Attributes["s3_configuration.#"] = "1" - // Required parameters - is.Attributes["s3_configuration.0.role_arn"] = is.Attributes["role_arn"] - is.Attributes["s3_configuration.0.bucket_arn"] = is.Attributes["s3_bucket_arn"] - - // Optional parameters - if is.Attributes["s3_buffer_size"] != "" { - is.Attributes["s3_configuration.0.buffer_size"] = is.Attributes["s3_buffer_size"] - } - if is.Attributes["s3_data_compression"] != "" { - is.Attributes["s3_configuration.0.compression_format"] = is.Attributes["s3_data_compression"] - } - if is.Attributes["s3_buffer_interval"] != "" { - is.Attributes["s3_configuration.0.buffer_interval"] = is.Attributes["s3_buffer_interval"] - } - if is.Attributes["s3_prefix"] != "" { - is.Attributes["s3_configuration.0.prefix"] = is.Attributes["s3_prefix"] - } - - delete(is.Attributes, "role_arn") - delete(is.Attributes, "s3_bucket_arn") - delete(is.Attributes, "s3_buffer_size") - delete(is.Attributes, "s3_data_compression") - delete(is.Attributes, "s3_buffer_interval") - delete(is.Attributes, "s3_prefix") - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git 
a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate_test.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate_test.go deleted file mode 100644 index 6f6f0c1d5..000000000 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_migrate_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSKinesisFirehoseMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0.6.16 and earlier": { - StateVersion: 0, - Attributes: map[string]string{ - // EBS - "role_arn": "arn:aws:iam::somenumber:role/tf_acctest_4271506651559170635", - "s3_bucket_arn": "arn:aws:s3:::tf-test-bucket", - "s3_buffer_interval": "400", - "s3_buffer_size": "10", - "s3_data_compression": "GZIP", - }, - Expected: map[string]string{ - "s3_configuration.#": "1", - "s3_configuration.0.bucket_arn": "arn:aws:s3:::tf-test-bucket", - "s3_configuration.0.buffer_interval": "400", - "s3_configuration.0.buffer_size": "10", - "s3_configuration.0.compression_format": "GZIP", - "s3_configuration.0.role_arn": "arn:aws:iam::somenumber:role/tf_acctest_4271506651559170635", - }, - }, - "v0.6.16 and earlier, sparse": { - StateVersion: 0, - Attributes: map[string]string{ - // EBS - "role_arn": "arn:aws:iam::somenumber:role/tf_acctest_4271506651559170635", - "s3_bucket_arn": "arn:aws:s3:::tf-test-bucket", - }, - Expected: map[string]string{ - "s3_configuration.#": "1", - "s3_configuration.0.bucket_arn": "arn:aws:s3:::tf-test-bucket", - "s3_configuration.0.role_arn": "arn:aws:iam::somenumber:role/tf_acctest_4271506651559170635", - }, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceAwsKinesisFirehoseMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - 
t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestAWSKinesisFirehoseMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceAwsKinesisFirehoseMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceAwsInstanceMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go deleted file mode 100644 index 27f227883..000000000 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go +++ /dev/null @@ -1,591 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "os" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSKinesisFirehoseDeliveryStream_s3basic(t *testing.T) { - var stream firehose.DeliveryStreamDescription - ri := acctest.RandInt() - config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, - ri, os.Getenv("AWS_ACCOUNT_ID"), ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: testAccKinesisFirehosePreCheck(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), - ), - }, - }, - }) -} - -func TestAccAWSKinesisFirehoseDeliveryStream_s3WithCloudwatchLogging(t *testing.T) { - var stream firehose.DeliveryStreamDescription - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: testAccKinesisFirehosePreCheck(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKinesisFirehoseDeliveryStreamConfig_s3WithCloudwatchLogging(os.Getenv("AWS_ACCOUNT_ID"), ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), - ), - }, - }, - }) -} - -func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { - var stream firehose.DeliveryStreamDescription - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3basic, - ri, os.Getenv("AWS_ACCOUNT_ID"), ri, ri, ri) - postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates, - ri, os.Getenv("AWS_ACCOUNT_ID"), ri, ri, ri) - - updatedS3DestinationConfig := &firehose.S3DestinationDescription{ - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(400), - SizeInMBs: aws.Int64(10), - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: testAccKinesisFirehosePreCheck(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - 
testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil), - ), - }, - }, - }) -} - -func TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates(t *testing.T) { - var stream firehose.DeliveryStreamDescription - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_RedshiftBasic, - ri, os.Getenv("AWS_ACCOUNT_ID"), ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_RedshiftUpdates, - ri, os.Getenv("AWS_ACCOUNT_ID"), ri, ri, ri, ri) - - updatedRedshiftConfig := &firehose.RedshiftDestinationDescription{ - CopyCommand: &firehose.CopyCommand{ - CopyOptions: aws.String("GZIP"), - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: testAccKinesisFirehosePreCheck(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, updatedRedshiftConfig, nil), - ), - }, - }, - }) -} - -func TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates(t *testing.T) { - var stream firehose.DeliveryStreamDescription - - ri := acctest.RandInt() - awsAccountId := os.Getenv("AWS_ACCOUNT_ID") - preConfig 
:= fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchBasic, - ri, awsAccountId, ri, ri, ri, awsAccountId, awsAccountId, ri, ri) - postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchUpdate, - ri, awsAccountId, ri, ri, ri, awsAccountId, awsAccountId, ri, ri) - - updatedElasticSearchConfig := &firehose.ElasticsearchDestinationDescription{ - BufferingHints: &firehose.ElasticsearchBufferingHints{ - IntervalInSeconds: aws.Int64(500), - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: testAccKinesisFirehosePreCheck(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, updatedElasticSearchConfig), - ), - }, - }, - }) -} - -func testAccCheckKinesisFirehoseDeliveryStreamExists(n string, stream *firehose.DeliveryStreamDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - log.Printf("State: %#v", s.RootModule().Resources) - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Kinesis Firehose ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).firehoseconn - describeOpts := &firehose.DescribeDeliveryStreamInput{ - DeliveryStreamName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.DescribeDeliveryStream(describeOpts) - if err != nil { - return err - } - - *stream = 
*resp.DeliveryStreamDescription - - return nil - } -} - -func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.HasPrefix(*stream.DeliveryStreamName, "terraform-kinesis-firehose") { - return fmt.Errorf("Bad Stream name: %s", *stream.DeliveryStreamName) - } - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kinesis_firehose_delivery_stream" { - continue - } - if *stream.DeliveryStreamARN != rs.Primary.Attributes["arn"] { - return fmt.Errorf("Bad Delivery Stream ARN\n\t expected: %s\n\tgot: %s\n", rs.Primary.Attributes["arn"], *stream.DeliveryStreamARN) - } - - if s3config != nil { - s := s3config.(*firehose.S3DestinationDescription) - // Range over the Stream Destinations, looking for the matching S3 - // destination. For simplicity, our test only have a single S3 or - // Redshift destination, so at this time it's safe to match on the first - // one - var match bool - for _, d := range stream.Destinations { - if d.S3DestinationDescription != nil { - if *d.S3DestinationDescription.BufferingHints.SizeInMBs == *s.BufferingHints.SizeInMBs { - match = true - } - } - } - if !match { - return fmt.Errorf("Mismatch s3 buffer size, expected: %s, got: %s", s, stream.Destinations) - } - } - - if redshiftConfig != nil { - r := redshiftConfig.(*firehose.RedshiftDestinationDescription) - // Range over the Stream Destinations, looking for the matching Redshift - // destination - var match bool - for _, d := range stream.Destinations { - if d.RedshiftDestinationDescription != nil { - if *d.RedshiftDestinationDescription.CopyCommand.CopyOptions == *r.CopyCommand.CopyOptions { - match = true - } - } - } - if !match { - return fmt.Errorf("Mismatch Redshift CopyOptions, expected: %s, got: %s", r, stream.Destinations) - } - } - - if elasticsearchConfig != nil { 
- es := elasticsearchConfig.(*firehose.ElasticsearchDestinationDescription) - // Range over the Stream Destinations, looking for the matching Elasticsearch destination - var match bool - for _, d := range stream.Destinations { - if d.ElasticsearchDestinationDescription != nil { - match = true - } - } - if !match { - return fmt.Errorf("Mismatch Elasticsearch Buffering Interval, expected: %s, got: %s", es, stream.Destinations) - } - } - } - return nil - } -} - -func testAccCheckKinesisFirehoseDeliveryStreamDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kinesis_firehose_delivery_stream" { - continue - } - conn := testAccProvider.Meta().(*AWSClient).firehoseconn - describeOpts := &firehose.DescribeDeliveryStreamInput{ - DeliveryStreamName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.DescribeDeliveryStream(describeOpts) - if err == nil { - if resp.DeliveryStreamDescription != nil && *resp.DeliveryStreamDescription.DeliveryStreamStatus != "DELETING" { - return fmt.Errorf("Error: Delivery Stream still exists") - } - } - - return nil - - } - - return nil -} - -func testAccKinesisFirehosePreCheck(t *testing.T) func() { - return func() { - testAccPreCheck(t) - if os.Getenv("AWS_ACCOUNT_ID") == "" { - t.Fatal("AWS_ACCOUNT_ID must be set") - } - } -} - -const testAccKinesisFirehoseDeliveryStreamBaseConfig = ` -resource "aws_iam_role" "firehose" { - name = "tf_acctest_firehose_delivery_role_%d" - assume_role_policy = < 168 { - errors = append(errors, fmt.Errorf( - "%q must be between 24 and 168 hours", k)) - } - return - }, - }, - - "shard_level_metrics": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsKinesisStreamImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, 
error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil -} - -func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kinesisconn - sn := d.Get("name").(string) - createOpts := &kinesis.CreateStreamInput{ - ShardCount: aws.Int64(int64(d.Get("shard_count").(int))), - StreamName: aws.String(sn), - } - - _, err := conn.CreateStream(createOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error creating Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) - } - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"CREATING"}, - Target: []string{"ACTIVE"}, - Refresh: streamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - streamRaw, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Kinesis Stream (%s) to become active: %s", - sn, err) - } - - s := streamRaw.(*kinesisStreamState) - d.SetId(s.arn) - d.Set("arn", s.arn) - d.Set("shard_count", len(s.openShards)) - - return resourceAwsKinesisStreamUpdate(d, meta) -} - -func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kinesisconn - - d.Partial(true) - if err := setTagsKinesis(conn, d); err != nil { - return err - } - - d.SetPartial("tags") - d.Partial(false) - - if err := setKinesisRetentionPeriod(conn, d); err != nil { - return err - } - if err := updateKinesisShardLevelMetrics(conn, d); err != nil { - return err - } - - return resourceAwsKinesisStreamRead(d, meta) -} - -func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kinesisconn - sn := d.Get("name").(string) - - state, err := readKinesisStreamState(conn, sn) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ResourceNotFoundException" { - 
d.SetId("") - return nil - } - return fmt.Errorf("[WARN] Error reading Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) - } - return err - - } - d.SetId(state.arn) - d.Set("arn", state.arn) - d.Set("shard_count", len(state.openShards)) - d.Set("retention_period", state.retentionPeriod) - - if len(state.shardLevelMetrics) > 0 { - d.Set("shard_level_metrics", state.shardLevelMetrics) - } - - // set tags - describeTagsOpts := &kinesis.ListTagsForStreamInput{ - StreamName: aws.String(sn), - } - tagsResp, err := conn.ListTagsForStream(describeTagsOpts) - if err != nil { - log.Printf("[DEBUG] Error retrieving tags for Stream: %s. %s", sn, err) - } else { - d.Set("tags", tagsToMapKinesis(tagsResp.Tags)) - } - - return nil -} - -func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kinesisconn - sn := d.Get("name").(string) - _, err := conn.DeleteStream(&kinesis.DeleteStreamInput{ - StreamName: aws.String(sn), - }) - - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DESTROYED"}, - Refresh: streamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Stream (%s) to be destroyed: %s", - sn, err) - } - - d.SetId("") - return nil -} - -func setKinesisRetentionPeriod(conn *kinesis.Kinesis, d *schema.ResourceData) error { - sn := d.Get("name").(string) - - oraw, nraw := d.GetChange("retention_period") - o := oraw.(int) - n := nraw.(int) - - if n == 0 { - log.Printf("[DEBUG] Kinesis Stream (%q) Retention Period Not Changed", sn) - return nil - } - - if n > o { - log.Printf("[DEBUG] Increasing %s Stream Retention Period to %d", sn, n) - _, err := conn.IncreaseStreamRetentionPeriod(&kinesis.IncreaseStreamRetentionPeriodInput{ - StreamName: aws.String(sn), - 
RetentionPeriodHours: aws.Int64(int64(n)), - }) - if err != nil { - return err - } - - } else { - log.Printf("[DEBUG] Decreasing %s Stream Retention Period to %d", sn, n) - _, err := conn.DecreaseStreamRetentionPeriod(&kinesis.DecreaseStreamRetentionPeriodInput{ - StreamName: aws.String(sn), - RetentionPeriodHours: aws.Int64(int64(n)), - }) - if err != nil { - return err - } - } - - if err := waitForKinesisToBeActive(conn, sn); err != nil { - return err - } - - return nil -} - -func updateKinesisShardLevelMetrics(conn *kinesis.Kinesis, d *schema.ResourceData) error { - sn := d.Get("name").(string) - - o, n := d.GetChange("shard_level_metrics") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - disableMetrics := os.Difference(ns) - if disableMetrics.Len() != 0 { - metrics := disableMetrics.List() - log.Printf("[DEBUG] Disabling shard level metrics %v for stream %s", metrics, sn) - - props := &kinesis.DisableEnhancedMonitoringInput{ - StreamName: aws.String(sn), - ShardLevelMetrics: expandStringList(metrics), - } - - _, err := conn.DisableEnhancedMonitoring(props) - if err != nil { - return fmt.Errorf("Failure to disable shard level metrics for stream %s: %s", sn, err) - } - if err := waitForKinesisToBeActive(conn, sn); err != nil { - return err - } - } - - enabledMetrics := ns.Difference(os) - if enabledMetrics.Len() != 0 { - metrics := enabledMetrics.List() - log.Printf("[DEBUG] Enabling shard level metrics %v for stream %s", metrics, sn) - - props := &kinesis.EnableEnhancedMonitoringInput{ - StreamName: aws.String(sn), - ShardLevelMetrics: expandStringList(metrics), - } - - _, err := conn.EnableEnhancedMonitoring(props) - if err != nil { - return fmt.Errorf("Failure to enable shard level metrics for stream %s: %s", sn, err) - } - if err := waitForKinesisToBeActive(conn, sn); err != nil { - return err - } - } - - return nil -} - -type kinesisStreamState struct { - arn string - 
creationTimestamp int64 - status string - retentionPeriod int64 - openShards []string - closedShards []string - shardLevelMetrics []string -} - -func readKinesisStreamState(conn *kinesis.Kinesis, sn string) (*kinesisStreamState, error) { - describeOpts := &kinesis.DescribeStreamInput{ - StreamName: aws.String(sn), - } - - state := &kinesisStreamState{} - err := conn.DescribeStreamPages(describeOpts, func(page *kinesis.DescribeStreamOutput, last bool) (shouldContinue bool) { - state.arn = aws.StringValue(page.StreamDescription.StreamARN) - state.creationTimestamp = aws.TimeValue(page.StreamDescription.StreamCreationTimestamp).Unix() - state.status = aws.StringValue(page.StreamDescription.StreamStatus) - state.retentionPeriod = aws.Int64Value(page.StreamDescription.RetentionPeriodHours) - state.openShards = append(state.openShards, flattenShards(openShards(page.StreamDescription.Shards))...) - state.closedShards = append(state.closedShards, flattenShards(closedShards(page.StreamDescription.Shards))...) 
- state.shardLevelMetrics = flattenKinesisShardLevelMetrics(page.StreamDescription.EnhancedMonitoring) - return !last - }) - return state, err -} - -func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - state, err := readKinesisStreamState(conn, sn) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ResourceNotFoundException" { - return 42, "DESTROYED", nil - } - return nil, awsErr.Code(), err - } - return nil, "failed", err - } - - return state, state.status, nil - } -} - -func waitForKinesisToBeActive(conn *kinesis.Kinesis, sn string) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"UPDATING"}, - Target: []string{"ACTIVE"}, - Refresh: streamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Kinesis Stream (%s) to become active: %s", - sn, err) - } - return nil -} - -func openShards(shards []*kinesis.Shard) []*kinesis.Shard { - return filterShards(shards, true) -} - -func closedShards(shards []*kinesis.Shard) []*kinesis.Shard { - return filterShards(shards, false) -} - -// See http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html -func filterShards(shards []*kinesis.Shard, open bool) []*kinesis.Shard { - res := make([]*kinesis.Shard, 0, len(shards)) - for _, s := range shards { - if open && s.SequenceNumberRange.EndingSequenceNumber == nil { - res = append(res, s) - } else if !open && s.SequenceNumberRange.EndingSequenceNumber != nil { - res = append(res, s) - } - } - return res -} - -func flattenShards(shards []*kinesis.Shard) []string { - res := make([]string, len(shards)) - for i, s := range shards { - res[i] = aws.StringValue(s.ShardId) - } - return res -} diff --git 
a/builtin/providers/aws/resource_aws_kinesis_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_stream_test.go deleted file mode 100644 index e8afbdc6c..000000000 --- a/builtin/providers/aws/resource_aws_kinesis_stream_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package aws - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSKinesisStream_basic(t *testing.T) { - var stream kinesis.StreamDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKinesisStreamConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - ), - }, - }, - }) -} - -func TestAccAWSKinesisStream_importBasic(t *testing.T) { - rInt := acctest.RandInt() - resourceName := "aws_kinesis_stream.test_stream" - streamName := fmt.Sprintf("terraform-kinesis-test-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKinesisStreamConfig(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateId: streamName, - }, - }, - }) -} - -func TestAccAWSKinesisStream_shardCount(t *testing.T) { - var stream kinesis.StreamDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccKinesisStreamConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "shard_count", "2"), - ), - }, - - { - Config: testAccKinesisStreamConfigUpdateShardCount(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "shard_count", "4"), - ), - }, - }, - }) -} - -func TestAccAWSKinesisStream_retentionPeriod(t *testing.T) { - var stream kinesis.StreamDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKinesisStreamConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "retention_period", "24"), - ), - }, - - { - Config: testAccKinesisStreamConfigUpdateRetentionPeriod(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "retention_period", "100"), - ), - }, - - { - Config: testAccKinesisStreamConfigDecreaseRetentionPeriod(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - 
"aws_kinesis_stream.test_stream", "retention_period", "28"), - ), - }, - }, - }) -} - -func TestAccAWSKinesisStream_shardLevelMetrics(t *testing.T) { - var stream kinesis.StreamDescription - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKinesisStreamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKinesisStreamConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckNoResourceAttr( - "aws_kinesis_stream.test_stream", "shard_level_metrics"), - ), - }, - - { - Config: testAccKinesisStreamConfigAllShardLevelMetrics(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "shard_level_metrics.#", "7"), - ), - }, - - { - Config: testAccKinesisStreamConfigSingleShardLevelMetric(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), - testAccCheckAWSKinesisStreamAttributes(&stream), - resource.TestCheckResourceAttr( - "aws_kinesis_stream.test_stream", "shard_level_metrics.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckKinesisStreamExists(n string, stream *kinesis.StreamDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Kinesis ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).kinesisconn - describeOpts := &kinesis.DescribeStreamInput{ - StreamName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.DescribeStream(describeOpts) - if 
err != nil { - return err - } - - *stream = *resp.StreamDescription - - return nil - } -} - -func testAccCheckAWSKinesisStreamAttributes(stream *kinesis.StreamDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !strings.HasPrefix(*stream.StreamName, "terraform-kinesis-test") { - return fmt.Errorf("Bad Stream name: %s", *stream.StreamName) - } - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kinesis_stream" { - continue - } - if *stream.StreamARN != rs.Primary.Attributes["arn"] { - return fmt.Errorf("Bad Stream ARN\n\t expected: %s\n\tgot: %s\n", rs.Primary.Attributes["arn"], *stream.StreamARN) - } - shard_count := strconv.Itoa(len(stream.Shards)) - if shard_count != rs.Primary.Attributes["shard_count"] { - return fmt.Errorf("Bad Stream Shard Count\n\t expected: %s\n\tgot: %s\n", rs.Primary.Attributes["shard_count"], shard_count) - } - } - return nil - } -} - -func testAccCheckKinesisStreamDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kinesis_stream" { - continue - } - conn := testAccProvider.Meta().(*AWSClient).kinesisconn - describeOpts := &kinesis.DescribeStreamInput{ - StreamName: aws.String(rs.Primary.Attributes["name"]), - } - resp, err := conn.DescribeStream(describeOpts) - if err == nil { - if resp.StreamDescription != nil && *resp.StreamDescription.StreamStatus != "DELETING" { - return fmt.Errorf("Error: Stream still exists") - } - } - - return nil - - } - - return nil -} - -func testAccKinesisStreamConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 2 - tags { - Name = "tf-test" - } -}`, rInt) -} - -func testAccKinesisStreamConfigUpdateShardCount(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 4 - tags { - Name = "tf-test" - } -}`, rInt) -} - -func 
testAccKinesisStreamConfigUpdateRetentionPeriod(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 2 - retention_period = 100 - tags { - Name = "tf-test" - } -}`, rInt) -} - -func testAccKinesisStreamConfigDecreaseRetentionPeriod(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 2 - retention_period = 28 - tags { - Name = "tf-test" - } -}`, rInt) -} - -func testAccKinesisStreamConfigAllShardLevelMetrics(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 2 - tags { - Name = "tf-test" - } - shard_level_metrics = [ - "IncomingBytes", - "IncomingRecords", - "OutgoingBytes", - "OutgoingRecords", - "WriteProvisionedThroughputExceeded", - "ReadProvisionedThroughputExceeded", - "IteratorAgeMilliseconds" - ] -}`, rInt) -} - -func testAccKinesisStreamConfigSingleShardLevelMetric(rInt int) string { - return fmt.Sprintf(` -resource "aws_kinesis_stream" "test_stream" { - name = "terraform-kinesis-test-%d" - shard_count = 2 - tags { - Name = "tf-test" - } - shard_level_metrics = [ - "IncomingBytes" - ] -}`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_kms_alias.go b/builtin/providers/aws/resource_aws_kms_alias.go deleted file mode 100644 index 7e3f0f2f6..000000000 --- a/builtin/providers/aws/resource_aws_kms_alias.go +++ /dev/null @@ -1,184 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" -) - -func resourceAwsKmsAlias() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsKmsAliasCreate, - Read: resourceAwsKmsAliasRead, - Update: resourceAwsKmsAliasUpdate, - Delete: resourceAwsKmsAliasDelete, - - 
Importer: &schema.ResourceImporter{ - State: resourceAwsKmsAliasImport, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateAwsKmsName, - }, - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k)) - } - return - }, - }, - "target_key_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAwsKmsAliasCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.PrefixedUniqueId("alias/") - } - - targetKeyId := d.Get("target_key_id").(string) - - log.Printf("[DEBUG] KMS alias create name: %s, target_key: %s", name, targetKeyId) - - req := &kms.CreateAliasInput{ - AliasName: aws.String(name), - TargetKeyId: aws.String(targetKeyId), - } - _, err := conn.CreateAlias(req) - if err != nil { - return err - } - d.SetId(name) - return resourceAwsKmsAliasRead(d, meta) -} - -func resourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - alias, err := findKmsAliasByName(conn, d.Id(), nil) - if err != nil { - return err - } - if alias == nil { - log.Printf("[DEBUG] Removing KMS Alias (%s) as it's already gone", d.Id()) - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Found KMS Alias: %s", alias) - - d.Set("arn", 
alias.AliasArn) - d.Set("target_key_id", alias.TargetKeyId) - - return nil -} - -func resourceAwsKmsAliasUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - if d.HasChange("target_key_id") { - err := resourceAwsKmsAliasTargetUpdate(conn, d) - if err != nil { - return err - } - } - return nil -} - -func resourceAwsKmsAliasTargetUpdate(conn *kms.KMS, d *schema.ResourceData) error { - name := d.Get("name").(string) - targetKeyId := d.Get("target_key_id").(string) - - log.Printf("[DEBUG] KMS alias: %s, update target: %s", name, targetKeyId) - - req := &kms.UpdateAliasInput{ - AliasName: aws.String(name), - TargetKeyId: aws.String(targetKeyId), - } - _, err := conn.UpdateAlias(req) - - return err -} - -func resourceAwsKmsAliasDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - req := &kms.DeleteAliasInput{ - AliasName: aws.String(d.Id()), - } - _, err := conn.DeleteAlias(req) - if err != nil { - return err - } - - log.Printf("[DEBUG] KMS Alias: (%s) deleted.", d.Id()) - d.SetId("") - return nil -} - -// API by default limits results to 50 aliases -// This is how we make sure we won't miss any alias -// See http://docs.aws.amazon.com/kms/latest/APIReference/API_ListAliases.html -func findKmsAliasByName(conn *kms.KMS, name string, marker *string) (*kms.AliasListEntry, error) { - req := kms.ListAliasesInput{ - Limit: aws.Int64(int64(100)), - } - if marker != nil { - req.Marker = marker - } - - log.Printf("[DEBUG] Listing KMS aliases: %s", req) - resp, err := conn.ListAliases(&req) - if err != nil { - return nil, err - } - - for _, entry := range resp.Aliases { - if *entry.AliasName == name { - return entry, nil - } - } - if *resp.Truncated { - log.Printf("[DEBUG] KMS alias list is truncated, listing more via %s", *resp.NextMarker) - return findKmsAliasByName(conn, name, resp.NextMarker) - } - - return nil, nil -} - -func resourceAwsKmsAliasImport(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/aws/resource_aws_kms_alias_test.go b/builtin/providers/aws/resource_aws_kms_alias_test.go deleted file mode 100644 index ef10b2f96..000000000 --- a/builtin/providers/aws/resource_aws_kms_alias_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSKmsAlias_basic(t *testing.T) { - rInt := acctest.RandInt() - kmsAliasTimestamp := time.Now().Format(time.RFC1123) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsAliasDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsSingleAlias(rInt, kmsAliasTimestamp), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsAliasExists("aws_kms_alias.single"), - ), - }, - { - Config: testAccAWSKmsSingleAlias_modified(rInt, kmsAliasTimestamp), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsAliasExists("aws_kms_alias.single"), - ), - }, - }, - }) -} - -func TestAccAWSKmsAlias_name_prefix(t *testing.T) { - rInt := acctest.RandInt() - kmsAliasTimestamp := time.Now().Format(time.RFC1123) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsAliasDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsSingleAlias(rInt, kmsAliasTimestamp), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsAliasExists("aws_kms_alias.name_prefix"), - ), - }, - }, - }) -} - -func TestAccAWSKmsAlias_no_name(t *testing.T) { - rInt := acctest.RandInt() - kmsAliasTimestamp := time.Now().Format(time.RFC1123) - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsAliasDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsSingleAlias(rInt, kmsAliasTimestamp), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsAliasExists("aws_kms_alias.nothing"), - ), - }, - }, - }) -} - -func TestAccAWSKmsAlias_multiple(t *testing.T) { - rInt := acctest.RandInt() - kmsAliasTimestamp := time.Now().Format(time.RFC1123) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsAliasDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsMultipleAliases(rInt, kmsAliasTimestamp), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsAliasExists("aws_kms_alias.one"), - testAccCheckAWSKmsAliasExists("aws_kms_alias.two"), - ), - }, - }, - }) -} - -func testAccCheckAWSKmsAliasDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).kmsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kms_alias" { - continue - } - - entry, err := findKmsAliasByName(conn, rs.Primary.ID, nil) - if err != nil { - return err - } - if entry != nil { - return fmt.Errorf("KMS alias still exists:\n%#v", entry) - } - - return nil - } - - return nil -} - -func testAccCheckAWSKmsAliasExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - return nil - } -} - -func testAccAWSKmsSingleAlias(rInt int, timestamp string) string { - return fmt.Sprintf(` -resource "aws_kms_key" "one" { - description = "Terraform acc test One %s" - deletion_window_in_days = 7 -} -resource "aws_kms_key" "two" { - description = "Terraform acc test Two %s" - deletion_window_in_days = 7 -} - -resource "aws_kms_alias" "name_prefix" { - name_prefix = "alias/tf-acc-key-alias-%d" - target_key_id = 
"${aws_kms_key.one.key_id}" -} - -resource "aws_kms_alias" "nothing" { - target_key_id = "${aws_kms_key.one.key_id}" -} - -resource "aws_kms_alias" "single" { - name = "alias/tf-acc-key-alias-%d" - target_key_id = "${aws_kms_key.one.key_id}" -}`, timestamp, timestamp, rInt, rInt) -} - -func testAccAWSKmsSingleAlias_modified(rInt int, timestamp string) string { - return fmt.Sprintf(` -resource "aws_kms_key" "one" { - description = "Terraform acc test One %s" - deletion_window_in_days = 7 -} -resource "aws_kms_key" "two" { - description = "Terraform acc test Two %s" - deletion_window_in_days = 7 -} - -resource "aws_kms_alias" "single" { - name = "alias/tf-acc-key-alias-%d" - target_key_id = "${aws_kms_key.two.key_id}" -}`, timestamp, timestamp, rInt) -} - -func testAccAWSKmsMultipleAliases(rInt int, timestamp string) string { - return fmt.Sprintf(` -resource "aws_kms_key" "single" { - description = "Terraform acc test One %s" - deletion_window_in_days = 7 -} - -resource "aws_kms_alias" "one" { - name = "alias/tf-acc-alias-one-%d" - target_key_id = "${aws_kms_key.single.key_id}" -} -resource "aws_kms_alias" "two" { - name = "alias/tf-acc-alias-two-%d" - target_key_id = "${aws_kms_key.single.key_id}" -}`, timestamp, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_kms_key.go b/builtin/providers/aws/resource_aws_kms_key.go deleted file mode 100644 index f95f76d95..000000000 --- a/builtin/providers/aws/resource_aws_kms_key.go +++ /dev/null @@ -1,457 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsKmsKey() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsKmsKeyCreate, - Read: resourceAwsKmsKeyRead, - Update: resourceAwsKmsKeyUpdate, - Delete: 
resourceAwsKmsKeyDelete, - Exists: resourceAwsKmsKeyExists, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "arn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "key_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "key_usage": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !(value == "ENCRYPT_DECRYPT" || value == "") { - es = append(es, fmt.Errorf( - "%q must be ENCRYPT_DECRYPT or not specified", k)) - } - return - }, - }, - "policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - "is_enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "enable_key_rotation": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "deletion_window_in_days": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value > 30 || value < 7 { - es = append(es, fmt.Errorf( - "%q must be between 7 and 30 days inclusive", k)) - } - return - }, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsKmsKeyCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - // Allow aws to chose default values if we don't pass them - var req kms.CreateKeyInput - if v, exists := d.GetOk("description"); exists { - req.Description = aws.String(v.(string)) - } - if v, exists := d.GetOk("key_usage"); exists { - req.KeyUsage = aws.String(v.(string)) - } - if v, exists := d.GetOk("policy"); exists { - 
req.Policy = aws.String(v.(string)) - } - if v, exists := d.GetOk("tags"); exists { - req.Tags = tagsFromMapKMS(v.(map[string]interface{})) - } - - var resp *kms.CreateKeyOutput - // AWS requires any principal in the policy to exist before the key is created. - // The KMS service's awareness of principals is limited by "eventual consistency". - // They acknowledge this here: - // http://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html - err := resource.Retry(30*time.Second, func() *resource.RetryError { - var err error - resp, err = conn.CreateKey(&req) - if isAWSErr(err, "MalformedPolicyDocumentException", "") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - }) - if err != nil { - return err - } - - d.SetId(*resp.KeyMetadata.KeyId) - d.Set("key_id", resp.KeyMetadata.KeyId) - - return _resourceAwsKmsKeyUpdate(d, meta, true) -} - -func resourceAwsKmsKeyRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - - req := &kms.DescribeKeyInput{ - KeyId: aws.String(d.Id()), - } - resp, err := conn.DescribeKey(req) - if err != nil { - return err - } - metadata := resp.KeyMetadata - - if *metadata.KeyState == "PendingDeletion" { - log.Printf("[WARN] Removing KMS key %s because it's already gone", d.Id()) - d.SetId("") - return nil - } - - d.SetId(*metadata.KeyId) - - d.Set("arn", metadata.Arn) - d.Set("key_id", metadata.KeyId) - d.Set("description", metadata.Description) - d.Set("key_usage", metadata.KeyUsage) - d.Set("is_enabled", metadata.Enabled) - - p, err := conn.GetKeyPolicy(&kms.GetKeyPolicyInput{ - KeyId: metadata.KeyId, - PolicyName: aws.String("default"), - }) - if err != nil { - return err - } - - policy, err := normalizeJsonString(*p.Policy) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - d.Set("policy", policy) - - krs, err := conn.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{ - KeyId: metadata.KeyId, - }) - if err != 
nil { - return err - } - d.Set("enable_key_rotation", krs.KeyRotationEnabled) - - tagList, err := conn.ListResourceTags(&kms.ListResourceTagsInput{ - KeyId: metadata.KeyId, - }) - if err != nil { - return fmt.Errorf("Failed to get KMS key tags (key: %s): %s", d.Get("key_id").(string), err) - } - d.Set("tags", tagsToMapKMS(tagList.Tags)) - - return nil -} - -func resourceAwsKmsKeyUpdate(d *schema.ResourceData, meta interface{}) error { - return _resourceAwsKmsKeyUpdate(d, meta, false) -} - -// We expect new keys to be enabled already -// but there is no easy way to differentiate between Update() -// called from Create() and regular update, so we have this wrapper -func _resourceAwsKmsKeyUpdate(d *schema.ResourceData, meta interface{}, isFresh bool) error { - conn := meta.(*AWSClient).kmsconn - - if d.HasChange("is_enabled") && d.Get("is_enabled").(bool) && !isFresh { - // Enable before any attributes will be modified - if err := updateKmsKeyStatus(conn, d.Id(), d.Get("is_enabled").(bool)); err != nil { - return err - } - } - - if d.HasChange("enable_key_rotation") { - if err := updateKmsKeyRotationStatus(conn, d); err != nil { - return err - } - } - - if d.HasChange("description") { - if err := resourceAwsKmsKeyDescriptionUpdate(conn, d); err != nil { - return err - } - } - if d.HasChange("policy") { - if err := resourceAwsKmsKeyPolicyUpdate(conn, d); err != nil { - return err - } - } - - if d.HasChange("is_enabled") && !d.Get("is_enabled").(bool) { - // Only disable when all attributes are modified - // because we cannot modify disabled keys - if err := updateKmsKeyStatus(conn, d.Id(), d.Get("is_enabled").(bool)); err != nil { - return err - } - } - - if err := setTagsKMS(conn, d, d.Id()); err != nil { - return err - } - - return resourceAwsKmsKeyRead(d, meta) -} - -func resourceAwsKmsKeyDescriptionUpdate(conn *kms.KMS, d *schema.ResourceData) error { - description := d.Get("description").(string) - keyId := d.Get("key_id").(string) - - log.Printf("[DEBUG] KMS key: 
%s, update description: %s", keyId, description) - - req := &kms.UpdateKeyDescriptionInput{ - Description: aws.String(description), - KeyId: aws.String(keyId), - } - _, err := conn.UpdateKeyDescription(req) - return err -} - -func resourceAwsKmsKeyPolicyUpdate(conn *kms.KMS, d *schema.ResourceData) error { - policy, err := normalizeJsonString(d.Get("policy").(string)) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - keyId := d.Get("key_id").(string) - - log.Printf("[DEBUG] KMS key: %s, update policy: %s", keyId, policy) - - req := &kms.PutKeyPolicyInput{ - KeyId: aws.String(keyId), - Policy: aws.String(policy), - PolicyName: aws.String("default"), - } - _, err = conn.PutKeyPolicy(req) - return err -} - -func updateKmsKeyStatus(conn *kms.KMS, id string, shouldBeEnabled bool) error { - var err error - - if shouldBeEnabled { - log.Printf("[DEBUG] Enabling KMS key %q", id) - _, err = conn.EnableKey(&kms.EnableKeyInput{ - KeyId: aws.String(id), - }) - } else { - log.Printf("[DEBUG] Disabling KMS key %q", id) - _, err = conn.DisableKey(&kms.DisableKeyInput{ - KeyId: aws.String(id), - }) - } - - if err != nil { - return fmt.Errorf("Failed to set KMS key %q status to %t: %q", - id, shouldBeEnabled, err.Error()) - } - - // Wait for propagation since KMS is eventually consistent - wait := resource.StateChangeConf{ - Pending: []string{fmt.Sprintf("%t", !shouldBeEnabled)}, - Target: []string{fmt.Sprintf("%t", shouldBeEnabled)}, - Timeout: 20 * time.Minute, - MinTimeout: 2 * time.Second, - ContinuousTargetOccurence: 10, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if KMS key %s enabled status is %t", - id, shouldBeEnabled) - resp, err := conn.DescribeKey(&kms.DescribeKeyInput{ - KeyId: aws.String(id), - }) - if err != nil { - return resp, "FAILED", err - } - status := fmt.Sprintf("%t", *resp.KeyMetadata.Enabled) - log.Printf("[DEBUG] KMS key %s status received: %s, retrying", id, status) - - 
return resp, status, nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return fmt.Errorf("Failed setting KMS key status to %t: %s", shouldBeEnabled, err) - } - - return nil -} - -func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error { - shouldEnableRotation := d.Get("enable_key_rotation").(bool) - - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - var err error - if shouldEnableRotation { - log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id()) - _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{ - KeyId: aws.String(d.Id()), - }) - } else { - log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id()) - _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{ - KeyId: aws.String(d.Id()), - }) - } - - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "DisabledException" { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - - return nil - }) - - if err != nil { - return fmt.Errorf("Failed to set key rotation for %q to %t: %q", - d.Id(), shouldEnableRotation, err.Error()) - } - - // Wait for propagation since KMS is eventually consistent - wait := resource.StateChangeConf{ - Pending: []string{fmt.Sprintf("%t", !shouldEnableRotation)}, - Target: []string{fmt.Sprintf("%t", shouldEnableRotation)}, - Timeout: 5 * time.Minute, - MinTimeout: 1 * time.Second, - ContinuousTargetOccurence: 5, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if KMS key %s rotation status is %t", - d.Id(), shouldEnableRotation) - resp, err := conn.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{ - KeyId: aws.String(d.Id()), - }) - if err != nil { - return resp, "FAILED", err - } - status := fmt.Sprintf("%t", *resp.KeyRotationEnabled) - log.Printf("[DEBUG] KMS key %s rotation status received: %s, retrying", d.Id(), status) - - return resp, status, nil - }, - } - - _, err = wait.WaitForState() - if err 
!= nil { - return fmt.Errorf("Failed setting KMS key rotation status to %t: %s", shouldEnableRotation, err) - } - - return nil -} - -func resourceAwsKmsKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*AWSClient).kmsconn - - req := &kms.DescribeKeyInput{ - KeyId: aws.String(d.Id()), - } - resp, err := conn.DescribeKey(req) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "NotFoundException" { - return false, nil - } - } - return false, err - } - metadata := resp.KeyMetadata - - if *metadata.KeyState == "PendingDeletion" { - return false, nil - } - - return true, nil -} - -func resourceAwsKmsKeyDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kmsconn - keyId := d.Get("key_id").(string) - - req := &kms.ScheduleKeyDeletionInput{ - KeyId: aws.String(keyId), - } - if v, exists := d.GetOk("deletion_window_in_days"); exists { - req.PendingWindowInDays = aws.Int64(int64(v.(int))) - } - _, err := conn.ScheduleKeyDeletion(req) - if err != nil { - return err - } - - // Wait for propagation since KMS is eventually consistent - wait := resource.StateChangeConf{ - Pending: []string{"Enabled", "Disabled"}, - Target: []string{"PendingDeletion"}, - Timeout: 20 * time.Minute, - MinTimeout: 2 * time.Second, - ContinuousTargetOccurence: 10, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if KMS key %s state is PendingDeletion", keyId) - resp, err := conn.DescribeKey(&kms.DescribeKeyInput{ - KeyId: aws.String(keyId), - }) - if err != nil { - return resp, "Failed", err - } - - metadata := *resp.KeyMetadata - log.Printf("[DEBUG] KMS key %s state is %s, retrying", keyId, *metadata.KeyState) - - return resp, *metadata.KeyState, nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return fmt.Errorf("Failed deactivating KMS key %s: %s", keyId, err) - } - - log.Printf("[DEBUG] KMS Key %s deactivated.", keyId) - d.SetId("") - return nil -} diff 
--git a/builtin/providers/aws/resource_aws_kms_key_test.go b/builtin/providers/aws/resource_aws_kms_key_test.go deleted file mode 100644 index b184fa30c..000000000 --- a/builtin/providers/aws/resource_aws_kms_key_test.go +++ /dev/null @@ -1,324 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAWSKmsKey_' -package aws - -import ( - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/awspolicyequivalence" -) - -func TestAccAWSKmsKey_basic(t *testing.T) { - var keyBefore, keyAfter kms.KeyMetadata - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsKey, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &keyBefore), - ), - }, - { - Config: testAccAWSKmsKey_removedPolicy, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &keyAfter), - ), - }, - }, - }) -} - -func TestAccAWSKmsKey_disappears(t *testing.T) { - var key kms.KeyMetadata - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsKey, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &key), - ), - }, - { - Config: testAccAWSKmsKey_other_region, - PlanOnly: true, - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSKmsKey_policy(t *testing.T) { - var key kms.KeyMetadata - expectedPolicyText := `{"Version":"2012-10-17","Id":"kms-tf-1","Statement":[{"Sid":"Enable IAM User Permissions","Effect":"Allow","Principal":{"AWS":"*"},"Action":"kms:*","Resource":"*"}]}` - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsKey, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &key), - testAccCheckAWSKmsKeyHasPolicy("aws_kms_key.foo", expectedPolicyText), - ), - }, - }, - }) -} - -func TestAccAWSKmsKey_isEnabled(t *testing.T) { - var key1, key2, key3 kms.KeyMetadata - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSKmsKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsKey_enabledRotation, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.bar", &key1), - resource.TestCheckResourceAttr("aws_kms_key.bar", "is_enabled", "true"), - testAccCheckAWSKmsKeyIsEnabled(&key1, true), - resource.TestCheckResourceAttr("aws_kms_key.bar", "enable_key_rotation", "true"), - ), - }, - { - Config: testAccAWSKmsKey_disabled, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.bar", &key2), - resource.TestCheckResourceAttr("aws_kms_key.bar", "is_enabled", "false"), - testAccCheckAWSKmsKeyIsEnabled(&key2, false), - resource.TestCheckResourceAttr("aws_kms_key.bar", "enable_key_rotation", "false"), - ), - }, - { - Config: testAccAWSKmsKey_enabled, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.bar", &key3), - resource.TestCheckResourceAttr("aws_kms_key.bar", "is_enabled", "true"), - testAccCheckAWSKmsKeyIsEnabled(&key3, true), - resource.TestCheckResourceAttr("aws_kms_key.bar", "enable_key_rotation", "true"), - ), - }, - }, - }) -} - -func TestAccAWSKmsKey_tags(t *testing.T) { - var keyBefore kms.KeyMetadata - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAWSKmsKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSKmsKey_tags, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &keyBefore), - resource.TestCheckResourceAttr("aws_kms_key.foo", "tags.%", "2"), - ), - }, - }, - }) -} - -func testAccCheckAWSKmsKeyHasPolicy(name string, expectedPolicyText string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No KMS Key ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).kmsconn - - out, err := conn.GetKeyPolicy(&kms.GetKeyPolicyInput{ - KeyId: aws.String(rs.Primary.ID), - PolicyName: aws.String("default"), - }) - if err != nil { - return err - } - - actualPolicyText := *out.Policy - - equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) - if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) - } - if !equivalent { - return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", - expectedPolicyText, actualPolicyText) - } - - return nil - } -} - -func testAccCheckAWSKmsKeyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).kmsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_kms_key" { - continue - } - - out, err := conn.DescribeKey(&kms.DescribeKeyInput{ - KeyId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *out.KeyMetadata.KeyState == "PendingDeletion" { - return nil - } - - return fmt.Errorf("KMS key still exists:\n%#v", out.KeyMetadata) - } - - return nil -} - -func testAccCheckAWSKmsKeyExists(name string, key *kms.KeyMetadata) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID 
== "" { - return fmt.Errorf("No KMS Key ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).kmsconn - - out, err := conn.DescribeKey(&kms.DescribeKeyInput{ - KeyId: aws.String(rs.Primary.ID), - }) - if err != nil { - return err - } - - *key = *out.KeyMetadata - - return nil - } -} - -func testAccCheckAWSKmsKeyIsEnabled(key *kms.KeyMetadata, isEnabled bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *key.Enabled != isEnabled { - return fmt.Errorf("Expected key %q to have is_enabled=%t, given %t", - *key.Arn, isEnabled, *key.Enabled) - } - - return nil - } -} - -var kmsTimestamp = time.Now().Format(time.RFC1123) -var testAccAWSKmsKey = fmt.Sprintf(` -resource "aws_kms_key" "foo" { - description = "Terraform acc test %s" - deletion_window_in_days = 7 - policy = < 1 { - return nil, errors.New("Only a single vpc_config block is expected") - } - - config, ok := configs[0].(map[string]interface{}) - - if !ok { - return nil, errors.New("vpc_config is ") - } - - // if subnet_ids and security_group_ids are both empty then the VPC is optional - if config["subnet_ids"].(*schema.Set).Len() == 0 && config["security_group_ids"].(*schema.Set).Len() == 0 { - return nil, nil - } - - if config["subnet_ids"].(*schema.Set).Len() == 0 { - return nil, errors.New("vpc_config.subnet_ids cannot be empty") - } - - if config["security_group_ids"].(*schema.Set).Len() == 0 { - return nil, errors.New("vpc_config.security_group_ids cannot be empty") - } - - return config, nil -} - -func validateRuntime(v interface{}, k string) (ws []string, errors []error) { - runtime := v.(string) - - if runtime == lambda.RuntimeNodejs { - errors = append(errors, fmt.Errorf( - "%s has reached end of life since October 2016 and has been deprecated in favor of %s.", - runtime, lambda.RuntimeNodejs43)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_lambda_function_test.go b/builtin/providers/aws/resource_aws_lambda_function_test.go deleted file mode 100644 
index 1b7249ba5..000000000 --- a/builtin/providers/aws/resource_aws_lambda_function_test.go +++ /dev/null @@ -1,1528 +0,0 @@ -package aws - -import ( - "archive/zip" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSLambdaFunction_basic(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigBasic(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_updateRuntime(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigBasic(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", "nodejs4.3"), - ), - }, - { - Config: testAccAWSLambdaConfigBasicUpdateRuntime(rName, rSt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", "nodejs4.3-edge"), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_expectFilenameAndS3Attributes(t *testing.T) { - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithoutFilenameAndS3Attributes(rName, rSt), - ExpectError: regexp.MustCompile(`filename or s3_\* attributes must be set`), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_envVariables(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigBasic(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckNoResourceAttr("aws_lambda_function.lambda_function_test", "environment"), - ), - }, - { - Config: testAccAWSLambdaConfigEnvVariables(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "environment.0.variables.foo", "bar"), - ), - }, - { - Config: 
testAccAWSLambdaConfigEnvVariablesModified(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "environment.0.variables.foo", "baz"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "environment.0.variables.foo1", "bar1"), - ), - }, - { - Config: testAccAWSLambdaConfigEnvVariablesModifiedWithoutEnvironment(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckNoResourceAttr("aws_lambda_function.lambda_function_test", "environment"), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_encryptedEnvVariables(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - keyRegex := regexp.MustCompile("^arn:aws:kms:") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigEncryptedEnvVariables(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "environment.0.variables.foo", "bar"), - resource.TestMatchResourceAttr("aws_lambda_function.lambda_function_test", "kms_key_arn", keyRegex), - ), - }, - { - Config: 
testAccAWSLambdaConfigEncryptedEnvVariablesModified(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "environment.0.variables.foo", "bar"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "kms_key_arn", ""), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_versioned(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigVersioned(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestMatchResourceAttr("aws_lambda_function.lambda_function_test", "version", - regexp.MustCompile("^[0-9]+$")), - resource.TestMatchResourceAttr("aws_lambda_function.lambda_function_test", "qualified_arn", - regexp.MustCompile(":"+rName+":[0-9]+$")), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_DeadLetterConfig(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithDeadLetterConfig(rName, rSt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - func(s *terraform.State) error { - if !strings.HasSuffix(*conf.Configuration.DeadLetterConfig.TargetArn, ":"+rName) { - return fmt.Errorf( - "Expected DeadLetterConfig.TargetArn %s to have suffix %s", *conf.Configuration.DeadLetterConfig.TargetArn, ":"+rName, - ) - } - return nil - }, - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_nilDeadLetterConfig(t *testing.T) { - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithNilDeadLetterConfig(rName, rSt), - ExpectError: regexp.MustCompile( - fmt.Sprintf("Nil dead_letter_config supplied for function: %s", rName)), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_tracingConfig(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithTracingConfig(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tracing_config.0.mode", "Active"), - ), - }, - { - Config: testAccAWSLambdaConfigWithTracingConfigUpdated(rName, rSt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tracing_config.0.mode", "PassThrough"), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_VPC(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithVPC(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - testAccCheckAWSLambdaFunctionVersion(&conf, "$LATEST"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "vpc_config.#", "1"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "vpc_config.0.subnet_ids.#", "1"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "vpc_config.0.security_group_ids.#", "1"), - resource.TestMatchResourceAttr("aws_lambda_function.lambda_function_test", "vpc_config.0.vpc_id", regexp.MustCompile("^vpc-")), - ), - }, - }, - }) -} - -// See https://github.com/hashicorp/terraform/issues/5767 -// and https://github.com/hashicorp/terraform/issues/10272 -func TestAccAWSLambdaFunction_VPC_withInvocation(t *testing.T) { - var conf lambda.GetFunctionOutput - - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigWithVPC(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccAwsInvokeLambdaFunction(&conf), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_s3(t *testing.T) { - var conf lambda.GetFunctionOutput - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigS3(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - testAccCheckAWSLambdaFunctionVersion(&conf, "$LATEST"), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_localUpdate(t *testing.T) { - var conf lambda.GetFunctionOutput - - path, zipFile, err := createTempFile("lambda_localUpdate") - if err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - rInt := acctest.RandInt() - rName := fmt.Sprintf("tf_acc_lambda_local_%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - PreConfig: func() { - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile) - }, - Config: genAWSLambdaFunctionConfig_local(path, rInt, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - 
testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="), - ), - }, - { - PreConfig: func() { - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile) - }, - Config: genAWSLambdaFunctionConfig_local(path, rInt, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_localUpdate_nameOnly(t *testing.T) { - var conf lambda.GetFunctionOutput - - rName := fmt.Sprintf("tf_test_iam_%d", acctest.RandInt()) - - path, zipFile, err := createTempFile("lambda_localUpdate") - if err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - updatedPath, updatedZipFile, err := createTempFile("lambda_localUpdate_name_change") - if err != nil { - t.Fatal(err) - } - defer os.Remove(updatedPath) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - PreConfig: func() { - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile) - }, - Config: genAWSLambdaFunctionConfig_local_name_only(path, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="), - ), - }, - { - PreConfig: func() { - 
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, updatedZipFile) - }, - Config: genAWSLambdaFunctionConfig_local_name_only(updatedPath, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_s3Update_basic(t *testing.T) { - var conf lambda.GetFunctionOutput - - path, zipFile, err := createTempFile("lambda_s3Update") - if err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger) - key := "lambda-func.zip" - - rInt := acctest.RandInt() - - rName := fmt.Sprintf("tf_acc_lambda_%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - PreConfig: func() { - // Upload 1st version - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile) - }, - Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="), - ), - }, - { - ExpectNonEmptyPlan: true, - PreConfig: func() { - // Upload 2nd version - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile) - }, - Config: genAWSLambdaFunctionConfig_s3(bucketName, key, 
path, rInt, rName), - }, - // Extra step because of missing ComputedWhen - // See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330 - { - Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, rName), - testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_s3Update_unversioned(t *testing.T) { - var conf lambda.GetFunctionOutput - - rName := fmt.Sprintf("tf_iam_lambda_%d", acctest.RandInt()) - - path, zipFile, err := createTempFile("lambda_s3Update") - if err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger) - key := "lambda-func.zip" - key2 := "lambda-func-modified.zip" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - PreConfig: func() { - // Upload 1st version - testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile) - }, - Config: testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key, path), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3_unversioned", &conf), - testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3_unversioned"), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3_unversioned"), - testAccCheckAwsLambdaSourceCodeHash(&conf, "8DPiX+G1l2LQ8hjBkwRchQFf1TSCEvPrYGRKlM9UoyY="), - ), - }, - { - PreConfig: func() { - // Upload 2nd version - 
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile) - }, - Config: testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key2, path), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3_unversioned", &conf), - testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3_unversioned"), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, "tf_acc_lambda_name_s3_unversioned"), - testAccCheckAwsLambdaSourceCodeHash(&conf, "0tdaP9H9hsk9c2CycSwOG/sa/x5JyAmSYunA/ce99Pg="), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_noRuntime(t *testing.T) { - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigNoRuntime(rName, rSt), - ExpectError: regexp.MustCompile(`\\"runtime\\": required field is not set`), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_nodeJs(t *testing.T) { - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigNodeJsRuntime(rName, rSt), - ExpectError: regexp.MustCompile(fmt.Sprintf("%s has reached end of life since October 2016 and has been deprecated in favor of %s", lambda.RuntimeNodejs, lambda.RuntimeNodejs43)), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_nodeJs43(t *testing.T) { - var conf lambda.GetFunctionOutput - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigNodeJs43Runtime(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", lambda.RuntimeNodejs43), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_python27(t *testing.T) { - var conf lambda.GetFunctionOutput - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigPython27Runtime(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", lambda.RuntimePython27), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_java8(t *testing.T) { - var conf lambda.GetFunctionOutput - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigJava8Runtime(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", lambda.RuntimeJava8), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_tags(t *testing.T) { - var conf lambda.GetFunctionOutput - 
- rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigBasic(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckNoResourceAttr("aws_lambda_function.lambda_function_test", "tags"), - ), - }, - { - Config: testAccAWSLambdaConfigTags(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.Key1", "Value One"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.Description", "Very interesting"), - ), - }, - { - Config: testAccAWSLambdaConfigTagsModified(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - testAccCheckAwsLambdaFunctionName(&conf, rName), - testAccCheckAwsLambdaFunctionArnHasSuffix(&conf, ":"+rName), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.Key1", "Value One Changed"), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.Key2", "Value Two"), - 
resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "tags.Key3", "Value Three"), - ), - }, - }, - }) -} - -func TestAccAWSLambdaFunction_runtimeValidation_python36(t *testing.T) { - var conf lambda.GetFunctionOutput - rSt := acctest.RandString(5) - rName := fmt.Sprintf("tf_test_%s", rSt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaFunctionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaConfigPython36Runtime(rName, rSt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_test", rName, &conf), - resource.TestCheckResourceAttr("aws_lambda_function.lambda_function_test", "runtime", lambda.RuntimePython36), - ), - }, - }, - }) -} - -func testAccCheckLambdaFunctionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_lambda_function" { - continue - } - - _, err := conn.GetFunction(&lambda.GetFunctionInput{ - FunctionName: aws.String(rs.Primary.ID), - }) - - if err == nil { - return fmt.Errorf("Lambda Function still exists") - } - - } - - return nil - -} - -func testAccCheckAwsLambdaFunctionExists(res, funcName string, function *lambda.GetFunctionOutput) resource.TestCheckFunc { - // Wait for IAM role - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[res] - if !ok { - return fmt.Errorf("Lambda function not found: %s", res) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Lambda function ID not set") - } - - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - params := &lambda.GetFunctionInput{ - FunctionName: aws.String(funcName), - } - - getFunction, err := conn.GetFunction(params) - if err != nil { - return err - } - - *function = *getFunction - - return nil - } -} - -func testAccAwsInvokeLambdaFunction(function 
*lambda.GetFunctionOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - f := function.Configuration - conn := testAccProvider.Meta().(*AWSClient).lambdaconn - - // If the function is VPC-enabled this will create ENI automatically - _, err := conn.Invoke(&lambda.InvokeInput{ - FunctionName: f.FunctionName, - }) - - return err - } -} - -func testAccCheckAwsLambdaFunctionName(function *lambda.GetFunctionOutput, expectedName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := function.Configuration - if *c.FunctionName != expectedName { - return fmt.Errorf("Expected function name %s, got %s", expectedName, *c.FunctionName) - } - - return nil - } -} - -func testAccCheckAWSLambdaFunctionVersion(function *lambda.GetFunctionOutput, expectedVersion string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := function.Configuration - if *c.Version != expectedVersion { - return fmt.Errorf("Expected version %s, got %s", expectedVersion, *c.Version) - } - return nil - } -} - -func testAccCheckAwsLambdaFunctionArnHasSuffix(function *lambda.GetFunctionOutput, arnSuffix string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := function.Configuration - if !strings.HasSuffix(*c.FunctionArn, arnSuffix) { - return fmt.Errorf("Expected function ARN %s to have suffix %s", *c.FunctionArn, arnSuffix) - } - - return nil - } -} - -func testAccCheckAwsLambdaSourceCodeHash(function *lambda.GetFunctionOutput, expectedHash string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := function.Configuration - if *c.CodeSha256 != expectedHash { - return fmt.Errorf("Expected code hash %s, got %s", expectedHash, *c.CodeSha256) - } - - return nil - } -} - -func testAccCreateZipFromFiles(files map[string]string, zipFile *os.File) error { - zipFile.Truncate(0) - zipFile.Seek(0, 0) - - w := zip.NewWriter(zipFile) - - for source, destination := range files { - f, err := 
w.Create(destination) - if err != nil { - return err - } - - fileContent, err := ioutil.ReadFile(source) - if err != nil { - return err - } - - _, err = f.Write(fileContent) - if err != nil { - return err - } - } - - err := w.Close() - if err != nil { - return err - } - - return w.Flush() -} - -func createTempFile(prefix string) (string, *os.File, error) { - f, err := ioutil.TempFile(os.TempDir(), prefix) - if err != nil { - return "", nil, err - } - - pathToFile, err := filepath.Abs(f.Name()) - if err != nil { - return "", nil, err - } - return pathToFile, f, nil -} - -func baseAccAWSLambdaConfig(rst string) string { - return fmt.Sprintf(` -resource "aws_iam_role_policy" "iam_policy_for_lambda" { - name = "iam_policy_for_lambda_%s" - role = "${aws_iam_role.iam_for_lambda.id}" - policy = < all statements deleted - return nil - } - } - if err != nil { - return fmt.Errorf("Unexpected error when checking existence of Lambda permission: %s\n%s", - rs.Primary.ID, err) - } - - policyInBytes := []byte(*resp.Policy) - policy := LambdaPolicy{} - err = json.Unmarshal(policyInBytes, &policy) - if err != nil { - return fmt.Errorf("Error unmarshalling Lambda policy (%s): %s", *resp.Policy, err) - } - - state, err := findLambdaPolicyStatementById(&policy, rs.Primary.ID) - if err != nil { - // statement not found => deleted - return nil - } - - return fmt.Errorf("Policy statement expected to be gone (%s):\n%s", - rs.Primary.ID, *state) -} - -func lambdaPermissionExists(rs *terraform.ResourceState, conn *lambda.Lambda) (*LambdaPolicyStatement, error) { - params := &lambda.GetPolicyInput{ - FunctionName: aws.String(rs.Primary.Attributes["function_name"]), - } - if v, ok := rs.Primary.Attributes["qualifier"]; ok && v != "" { - params.Qualifier = aws.String(v) - } - - resp, err := conn.GetPolicy(params) - if err != nil { - return nil, fmt.Errorf("Lambda policy not found: %q", err) - } - - if resp.Policy == nil { - return nil, fmt.Errorf("Received Lambda policy is empty") - } - - 
policyInBytes := []byte(*resp.Policy) - policy := LambdaPolicy{} - err = json.Unmarshal(policyInBytes, &policy) - if err != nil { - return nil, fmt.Errorf("Error unmarshalling Lambda policy: %s", err) - } - - return findLambdaPolicyStatementById(&policy, rs.Primary.ID) -} - -func testAccAWSLambdaPermissionConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_lambda_permission" "allow_cloudwatch" { - statement_id = "AllowExecutionFromCloudWatch" - action = "lambda:InvokeFunction" - function_name = "${aws_lambda_function.test_lambda.arn}" - principal = "events.amazonaws.com" -} - -resource "aws_lambda_function" "test_lambda" { - filename = "test-fixtures/lambdatest.zip" - function_name = "lambda_function_name_perm" - role = "${aws_iam_role.iam_for_lambda.arn}" - handler = "exports.handler" - runtime = "nodejs4.3" -} - -resource "aws_iam_role" "iam_for_lambda" { - name = "%s" - assume_role_policy = < 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1932-L1939 - // uuid is 26 characters, limit the prefix to 229. 
- value := v.(string) - if len(value) > 229 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 229 characters, name is limited to 255", k)) - } - return - }, - }, - - "image_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "iam_instance_profile": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "key_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "user_data": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - - "security_groups": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "vpc_classic_link_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "vpc_classic_link_security_groups": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "associate_public_ip_address": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "spot_price": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ebs_optimized": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "placement_tenancy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "enable_monitoring": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: true, - }, - - "ebs_block_device": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, 
- Optional: true, - Default: true, - ForceNew: true, - }, - - "device_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "iops": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "snapshot_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "encrypted": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - - "ephemeral_block_device": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - }, - - "virtual_name": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) - return hashcode.String(buf.String()) - }, - }, - - "root_block_device": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - // "You can only modify the volume size, volume type, and Delete on - // Termination flag on the block device mapping entry for the root - // device volume." 
- bit.ly/ec2bdmap - Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "iops": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface{}) error { - autoscalingconn := meta.(*AWSClient).autoscalingconn - ec2conn := meta.(*AWSClient).ec2conn - - createLaunchConfigurationOpts := autoscaling.CreateLaunchConfigurationInput{ - LaunchConfigurationName: aws.String(d.Get("name").(string)), - ImageId: aws.String(d.Get("image_id").(string)), - InstanceType: aws.String(d.Get("instance_type").(string)), - EbsOptimized: aws.Bool(d.Get("ebs_optimized").(bool)), - } - - if v, ok := d.GetOk("user_data"); ok { - userData := base64Encode([]byte(v.(string))) - createLaunchConfigurationOpts.UserData = aws.String(userData) - } - - createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{ - Enabled: aws.Bool(d.Get("enable_monitoring").(bool)), - } - - if v, ok := d.GetOk("iam_instance_profile"); ok { - createLaunchConfigurationOpts.IamInstanceProfile = aws.String(v.(string)) - } - - if v, ok := d.GetOk("placement_tenancy"); ok { - createLaunchConfigurationOpts.PlacementTenancy = aws.String(v.(string)) - } - - if v, ok := d.GetOk("associate_public_ip_address"); ok { - createLaunchConfigurationOpts.AssociatePublicIpAddress = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("key_name"); ok { - createLaunchConfigurationOpts.KeyName = aws.String(v.(string)) - } - if v, ok := d.GetOk("spot_price"); ok { - createLaunchConfigurationOpts.SpotPrice = aws.String(v.(string)) - } - - if v, ok := d.GetOk("security_groups"); ok { - 
createLaunchConfigurationOpts.SecurityGroups = expandStringList( - v.(*schema.Set).List(), - ) - } - - if v, ok := d.GetOk("vpc_classic_link_id"); ok { - createLaunchConfigurationOpts.ClassicLinkVPCId = aws.String(v.(string)) - } - - if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok { - createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList( - v.(*schema.Set).List(), - ) - } - - var blockDevices []*autoscaling.BlockDeviceMapping - - // We'll use this to detect if we're declaring it incorrectly as an ebs_block_device. - rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) - if err != nil { - return err - } - if rootDeviceName == nil { - // We do this so the value is empty so we don't have to do nil checks later - var blank string - rootDeviceName = &blank - } - - if v, ok := d.GetOk("ebs_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &autoscaling.Ebs{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["snapshot_id"].(string); ok && v != "" { - ebs.SnapshotId = aws.String(v) - } - - if v, ok := bd["encrypted"].(bool); ok && v { - ebs.Encrypted = aws.Bool(v) - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - if *aws.String(bd["device_name"].(string)) == *rootDeviceName { - return fmt.Errorf("Root device (%s) declared as an 'ebs_block_device'. 
Use 'root_block_device' keyword.", *rootDeviceName) - } - - blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - Ebs: ebs, - }) - } - } - - if v, ok := d.GetOk("ephemeral_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - VirtualName: aws.String(bd["virtual_name"].(string)), - }) - } - } - - if v, ok := d.GetOk("root_block_device"); ok { - vL := v.([]interface{}) - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &autoscaling.Ebs{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil { - if dn == nil { - return fmt.Errorf( - "Expected to find a Root Device name for AMI (%s), but got none", - d.Get("image_id").(string)) - } - blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ - DeviceName: dn, - Ebs: ebs, - }) - } else { - return err - } - } - } - - if len(blockDevices) > 0 { - createLaunchConfigurationOpts.BlockDeviceMappings = blockDevices - } - - var lcName string - if v, ok := d.GetOk("name"); ok { - lcName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - lcName = resource.PrefixedUniqueId(v.(string)) - } else { - lcName = resource.UniqueId() - } - createLaunchConfigurationOpts.LaunchConfigurationName = aws.String(lcName) - - log.Printf( - "[DEBUG] autoscaling create launch configuration: %s", createLaunchConfigurationOpts) - - // IAM profiles can take ~10 seconds to propagate 
in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - err = resource.Retry(90*time.Second, func() *resource.RetryError { - _, err := autoscalingconn.CreateLaunchConfiguration(&createLaunchConfigurationOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if strings.Contains(awsErr.Message(), "Invalid IamInstanceProfile") { - return resource.RetryableError(err) - } - if strings.Contains(awsErr.Message(), "You are not authorized to perform this operation") { - return resource.RetryableError(err) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating launch configuration: %s", err) - } - - d.SetId(lcName) - log.Printf("[INFO] launch configuration ID: %s", d.Id()) - - // We put a Retry here since sometimes eventual consistency bites - // us and we need to retry a few times to get the LC to load properly - return resource.Retry(30*time.Second, func() *resource.RetryError { - err := resourceAwsLaunchConfigurationRead(d, meta) - if err != nil { - return resource.RetryableError(err) - } - return nil - }) -} - -func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{}) error { - autoscalingconn := meta.(*AWSClient).autoscalingconn - ec2conn := meta.(*AWSClient).ec2conn - - describeOpts := autoscaling.DescribeLaunchConfigurationsInput{ - LaunchConfigurationNames: []*string{aws.String(d.Id())}, - } - - log.Printf("[DEBUG] launch configuration describe configuration: %s", describeOpts) - describConfs, err := autoscalingconn.DescribeLaunchConfigurations(&describeOpts) - if err != nil { - return fmt.Errorf("Error retrieving launch configuration: %s", err) - } - if len(describConfs.LaunchConfigurations) == 0 { - d.SetId("") - return nil - } - - // Verify AWS returned our launch configuration - if *describConfs.LaunchConfigurations[0].LaunchConfigurationName != d.Id() { - return fmt.Errorf( - 
"Unable to find launch configuration: %#v", - describConfs.LaunchConfigurations) - } - - lc := describConfs.LaunchConfigurations[0] - - d.Set("key_name", lc.KeyName) - d.Set("image_id", lc.ImageId) - d.Set("instance_type", lc.InstanceType) - d.Set("name", lc.LaunchConfigurationName) - - d.Set("iam_instance_profile", lc.IamInstanceProfile) - d.Set("ebs_optimized", lc.EbsOptimized) - d.Set("spot_price", lc.SpotPrice) - d.Set("enable_monitoring", lc.InstanceMonitoring.Enabled) - d.Set("security_groups", lc.SecurityGroups) - d.Set("associate_public_ip_address", lc.AssociatePublicIpAddress) - - d.Set("vpc_classic_link_id", lc.ClassicLinkVPCId) - d.Set("vpc_classic_link_security_groups", lc.ClassicLinkVPCSecurityGroups) - - if err := readLCBlockDevices(d, lc, ec2conn); err != nil { - return err - } - - return nil -} - -func resourceAwsLaunchConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - autoscalingconn := meta.(*AWSClient).autoscalingconn - - log.Printf("[DEBUG] Launch Configuration destroy: %v", d.Id()) - _, err := autoscalingconn.DeleteLaunchConfiguration( - &autoscaling.DeleteLaunchConfigurationInput{ - LaunchConfigurationName: aws.String(d.Id()), - }) - if err != nil { - autoscalingerr, ok := err.(awserr.Error) - if ok && (autoscalingerr.Code() == "InvalidConfiguration.NotFound" || autoscalingerr.Code() == "ValidationError") { - log.Printf("[DEBUG] Launch configuration (%s) not found", d.Id()) - return nil - } - - return err - } - - return nil -} - -func readLCBlockDevices(d *schema.ResourceData, lc *autoscaling.LaunchConfiguration, ec2conn *ec2.EC2) error { - ibds, err := readBlockDevicesFromLaunchConfiguration(d, lc, ec2conn) - if err != nil { - return err - } - - if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil { - return err - } - if err := d.Set("ephemeral_block_device", ibds["ephemeral"]); err != nil { - return err - } - if ibds["root"] != nil { - if err := d.Set("root_block_device", []interface{}{ibds["root"]}); err != nil 
{ - return err - } - } else { - d.Set("root_block_device", []interface{}{}) - } - - return nil -} - -func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autoscaling.LaunchConfiguration, ec2conn *ec2.EC2) ( - map[string]interface{}, error) { - blockDevices := make(map[string]interface{}) - blockDevices["ebs"] = make([]map[string]interface{}, 0) - blockDevices["ephemeral"] = make([]map[string]interface{}, 0) - blockDevices["root"] = nil - if len(lc.BlockDeviceMappings) == 0 { - return nil, nil - } - rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) - if err != nil { - return nil, err - } - if rootDeviceName == nil { - // We do this so the value is empty so we don't have to do nil checks later - var blank string - rootDeviceName = &blank - } - for _, bdm := range lc.BlockDeviceMappings { - bd := make(map[string]interface{}) - if bdm.Ebs != nil && bdm.Ebs.DeleteOnTermination != nil { - bd["delete_on_termination"] = *bdm.Ebs.DeleteOnTermination - } - if bdm.Ebs != nil && bdm.Ebs.VolumeSize != nil { - bd["volume_size"] = *bdm.Ebs.VolumeSize - } - if bdm.Ebs != nil && bdm.Ebs.VolumeType != nil { - bd["volume_type"] = *bdm.Ebs.VolumeType - } - if bdm.Ebs != nil && bdm.Ebs.Iops != nil { - bd["iops"] = *bdm.Ebs.Iops - } - - if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName { - blockDevices["root"] = bd - } else { - if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil { - bd["encrypted"] = *bdm.Ebs.Encrypted - } - if bdm.DeviceName != nil { - bd["device_name"] = *bdm.DeviceName - } - if bdm.VirtualName != nil { - bd["virtual_name"] = *bdm.VirtualName - blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) - } else { - if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { - bd["snapshot_id"] = *bdm.Ebs.SnapshotId - } - blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) - } - } - } - return blockDevices, nil -} diff --git 
a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go deleted file mode 100644 index 255414914..000000000 --- a/builtin/providers/aws/resource_aws_launch_configuration_test.go +++ /dev/null @@ -1,603 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "math/rand" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func init() { - resource.AddTestSweepers("aws_launch_configuration", &resource.Sweeper{ - Name: "aws_launch_configuration", - Dependencies: []string{"aws_autoscaling_group"}, - F: testSweepLaunchConfigurations, - }) -} - -func testSweepLaunchConfigurations(region string) error { - client, err := sharedClientForRegion(region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } - autoscalingconn := client.(*AWSClient).autoscalingconn - - resp, err := autoscalingconn.DescribeLaunchConfigurations(&autoscaling.DescribeLaunchConfigurationsInput{}) - if err != nil { - return fmt.Errorf("Error retrieving launch configuration: %s", err) - } - - if len(resp.LaunchConfigurations) == 0 { - log.Print("[DEBUG] No aws launch configurations to sweep") - return nil - } - - for _, lc := range resp.LaunchConfigurations { - var testOptGroup bool - for _, testName := range []string{"terraform-", "foobar"} { - if strings.HasPrefix(*lc.LaunchConfigurationName, testName) { - testOptGroup = true - } - } - - if !testOptGroup { - continue - } - - _, err := autoscalingconn.DeleteLaunchConfiguration( - &autoscaling.DeleteLaunchConfigurationInput{ - LaunchConfigurationName: lc.LaunchConfigurationName, - }) - if err != nil { - autoscalingerr, ok := err.(awserr.Error) - if ok && 
(autoscalingerr.Code() == "InvalidConfiguration.NotFound" || autoscalingerr.Code() == "ValidationError") { - log.Printf("[DEBUG] Launch configuration (%s) not found", *lc.LaunchConfigurationName) - return nil - } - - return err - } - } - - return nil -} - -func TestAccAWSLaunchConfiguration_basic(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationNoNameConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - testAccCheckAWSLaunchConfigurationGeneratedNamePrefix( - "aws_launch_configuration.bar", "terraform-"), - ), - }, - { - Config: testAccAWSLaunchConfigurationPrefixNameConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - testAccCheckAWSLaunchConfigurationGeneratedNamePrefix( - "aws_launch_configuration.baz", "baz-"), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_withBlockDevices(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - testAccCheckAWSLaunchConfigurationAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "image_id", "ami-21f78e11"), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "instance_type", "m1.small"), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", 
"associate_public_ip_address", "true"), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "spot_price", ""), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_updateRootBlockDevice(t *testing.T) { - var conf autoscaling.LaunchConfiguration - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationConfigWithRootBlockDevice(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "root_block_device.0.volume_size", "11"), - ), - }, - { - Config: testAccAWSLaunchConfigurationConfigWithRootBlockDeviceUpdated(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "root_block_device.0.volume_size", "20"), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationWithSpotPriceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.bar", "spot_price", "0.01"), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_withVpcClassicLink(t *testing.T) { - var vpc ec2.Vpc - var group ec2.SecurityGroup - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationConfig_withVpcClassicLink, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.foo", &conf), - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_withIAMProfile(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationConfig_withIAMProfile, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - ), - }, - }, - }) -} - -func testAccCheckAWSLaunchConfigurationWithEncryption(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*autoscaling.BlockDeviceMapping) - for _, blockDevice := range conf.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. - if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } else if blockDevices["/dev/sda1"].Ebs.Encrypted != nil { - return fmt.Errorf("root device should not include value for Encrypted") - } - - // Check if the secondary block device exists. 
- if _, ok := blockDevices["/dev/sdb"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdb") - } else if !*blockDevices["/dev/sdb"].Ebs.Encrypted { - return fmt.Errorf("block device isn't encrypted as expected: /dev/sdb") - } - - return nil - } -} - -func TestAccAWSLaunchConfiguration_withEncryption(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationWithEncryption, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - testAccCheckAWSLaunchConfigurationWithEncryption(&conf), - ), - }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_updateEbsBlockDevices(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationWithEncryption, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.baz", "ebs_block_device.2764618555.volume_size", "9"), - ), - }, - { - Config: testAccAWSLaunchConfigurationWithEncryptionUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - resource.TestCheckResourceAttr( - "aws_launch_configuration.baz", "ebs_block_device.3859927736.volume_size", "10"), - ), - }, - }, - }) -} - -func testAccCheckAWSLaunchConfigurationGeneratedNamePrefix( - resource, prefix string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r, ok := 
s.RootModule().Resources[resource] - if !ok { - return fmt.Errorf("Resource not found") - } - name, ok := r.Primary.Attributes["name"] - if !ok { - return fmt.Errorf("Name attr not found: %#v", r.Primary.Attributes) - } - if !strings.HasPrefix(name, prefix) { - return fmt.Errorf("Name: %q, does not have prefix: %q", name, prefix) - } - return nil - } -} - -func testAccCheckAWSLaunchConfigurationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_launch_configuration" { - continue - } - - describe, err := conn.DescribeLaunchConfigurations( - &autoscaling.DescribeLaunchConfigurationsInput{ - LaunchConfigurationNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.LaunchConfigurations) != 0 && - *describe.LaunchConfigurations[0].LaunchConfigurationName == rs.Primary.ID { - return fmt.Errorf("Launch Configuration still exists") - } - } - - // Verify the error - providerErr, ok := err.(awserr.Error) - if !ok { - return err - } - if providerErr.Code() != "InvalidLaunchConfiguration.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSLaunchConfigurationAttributes(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.ImageId != "ami-21f78e11" { - return fmt.Errorf("Bad image_id: %s", *conf.ImageId) - } - - if !strings.HasPrefix(*conf.LaunchConfigurationName, "terraform-") { - return fmt.Errorf("Bad name: %s", *conf.LaunchConfigurationName) - } - - if *conf.InstanceType != "m1.small" { - return fmt.Errorf("Bad instance_type: %s", *conf.InstanceType) - } - - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*autoscaling.BlockDeviceMapping) - for _, blockDevice := range conf.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. 
- if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } - - // Check if the secondary block device exists. - if _, ok := blockDevices["/dev/sdb"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdb") - } - - // Check if the third block device exists. - if _, ok := blockDevices["/dev/sdc"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdc") - } - - // Check if the secondary block device exists. - if _, ok := blockDevices["/dev/sdb"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdb") - } - - return nil - } -} - -func testAccCheckAWSLaunchConfigurationExists(n string, res *autoscaling.LaunchConfiguration) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Launch Configuration ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - describeOpts := autoscaling.DescribeLaunchConfigurationsInput{ - LaunchConfigurationNames: []*string{aws.String(rs.Primary.ID)}, - } - describe, err := conn.DescribeLaunchConfigurations(&describeOpts) - - if err != nil { - return err - } - - if len(describe.LaunchConfigurations) != 1 || - *describe.LaunchConfigurations[0].LaunchConfigurationName != rs.Primary.ID { - return fmt.Errorf("Launch Configuration Group not found") - } - - *res = *describe.LaunchConfigurations[0] - - return nil - } -} - -func testAccAWSLaunchConfigurationConfigWithRootBlockDevice(rInt int) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { - name_prefix = "tf-acc-test-%d" - image_id = "ami-21f78e11" - instance_type = "m1.small" - user_data = "foobar-user-data" - associate_public_ip_address = true - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - -} -`, rInt) -} - -func testAccAWSLaunchConfigurationConfigWithRootBlockDeviceUpdated(rInt 
int) string { - return fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { - name_prefix = "tf-acc-test-%d" - image_id = "ami-21f78e11" - instance_type = "m1.small" - user_data = "foobar-user-data" - associate_public_ip_address = true - - root_block_device { - volume_type = "gp2" - volume_size = 20 - } - -} -`, rInt) -} - -var testAccAWSLaunchConfigurationConfig = fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { - name = "terraform-test-%d" - image_id = "ami-21f78e11" - instance_type = "m1.small" - user_data = "foobar-user-data" - associate_public_ip_address = true - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - } - ebs_block_device { - device_name = "/dev/sdc" - volume_size = 10 - volume_type = "io1" - iops = 100 - } - ephemeral_block_device { - device_name = "/dev/sde" - virtual_name = "ephemeral0" - } -} -`, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) - -var testAccAWSLaunchConfigurationWithSpotPriceConfig = fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { - name = "terraform-test-%d" - image_id = "ami-21f78e11" - instance_type = "t1.micro" - spot_price = "0.01" -} -`, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) - -const testAccAWSLaunchConfigurationNoNameConfig = ` -resource "aws_launch_configuration" "bar" { - image_id = "ami-21f78e11" - instance_type = "t1.micro" - user_data = "foobar-user-data-change" - associate_public_ip_address = false -} -` - -const testAccAWSLaunchConfigurationPrefixNameConfig = ` -resource "aws_launch_configuration" "baz" { - name_prefix = "baz-" - image_id = "ami-21f78e11" - instance_type = "t1.micro" - user_data = "foobar-user-data-change" - associate_public_ip_address = false -} -` - -const testAccAWSLaunchConfigurationWithEncryption = ` -resource "aws_launch_configuration" "baz" { - image_id = "ami-5189a661" - instance_type = "t2.micro" - associate_public_ip_address = false - - root_block_device 
{ - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 9 - encrypted = true - } -} -` - -const testAccAWSLaunchConfigurationWithEncryptionUpdated = ` -resource "aws_launch_configuration" "baz" { - image_id = "ami-5189a661" - instance_type = "t2.micro" - associate_public_ip_address = false - - root_block_device { - volume_type = "gp2" - volume_size = 11 - } - ebs_block_device { - device_name = "/dev/sdb" - volume_size = 10 - encrypted = true - } -} -` - -const testAccAWSLaunchConfigurationConfig_withVpcClassicLink = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - enable_classiclink = true - tags { - Name = "testAccAWSLaunchConfigurationConfig_withVpcClassicLink" - } -} - -resource "aws_security_group" "foo" { - name = "foo" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_launch_configuration" "foo" { - name = "TestAccAWSLaunchConfiguration_withVpcClassicLink" - image_id = "ami-21f78e11" - instance_type = "t1.micro" - - vpc_classic_link_id = "${aws_vpc.foo.id}" - vpc_classic_link_security_groups = ["${aws_security_group.foo.id}"] -} -` - -const testAccAWSLaunchConfigurationConfig_withIAMProfile = ` -resource "aws_iam_role" "role" { - name = "TestAccAWSLaunchConfiguration-withIAMProfile" - assume_role_policy = < 0 { - lbspOpts.CookieExpirationPeriod = aws.Int64(int64(v)) - } - - log.Printf("[DEBUG] LB Cookie Stickiness Policy opts: %#v", lbspOpts) - if _, err := elbconn.CreateLBCookieStickinessPolicy(lbspOpts); err != nil { - return fmt.Errorf("Error creating LBCookieStickinessPolicy: %s", err) - } - - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{aws.String(d.Get("name").(string))}, - } - - log.Printf("[DEBUG] LB Cookie Stickiness create configuration: %#v", setLoadBalancerOpts) - if _, err := 
elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error setting LBCookieStickinessPolicy: %s", err) - } - - d.SetId(fmt.Sprintf("%s:%d:%s", - *lbspOpts.LoadBalancerName, - *setLoadBalancerOpts.LoadBalancerPort, - *lbspOpts.PolicyName)) - return nil -} - -func resourceAwsLBCookieStickinessPolicyRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, lbPort, policyName := resourceAwsLBCookieStickinessPolicyParseId(d.Id()) - - request := &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(lbName), - PolicyNames: []*string{aws.String(policyName)}, - } - - getResp, err := elbconn.DescribeLoadBalancerPolicies(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound" { - d.SetId("") - } - return nil - } - return fmt.Errorf("Error retrieving policy: %s", err) - } - - if len(getResp.PolicyDescriptions) != 1 { - return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) - } - - // we know the policy exists now, but we have to check if it's assigned to a listener - assigned, err := resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort, elbconn) - if err != nil { - return err - } - if !assigned { - // policy exists, but isn't assigned to a listener - log.Printf("[DEBUG] policy '%s' exists, but isn't assigned to a listener", policyName) - d.SetId("") - return nil - } - - // We can get away with this because there's only one attribute, the - // cookie expiration, in these descriptions. 
- policyDesc := getResp.PolicyDescriptions[0] - cookieAttr := policyDesc.PolicyAttributeDescriptions[0] - if *cookieAttr.AttributeName != "CookieExpirationPeriod" { - return fmt.Errorf("Unable to find cookie expiration period.") - } - d.Set("cookie_expiration_period", cookieAttr.AttributeValue) - - d.Set("name", policyName) - d.Set("load_balancer", lbName) - d.Set("lb_port", lbPort) - - return nil -} - -func resourceAwsLBCookieStickinessPolicyDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(d.Id()) - - // Perversely, if we Set an empty list of PolicyNames, we detach the - // policies attached to a listener, which is required to delete the - // policy itself. - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{}, - } - - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error removing LBCookieStickinessPolicy: %s", err) - } - - request := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(lbName), - PolicyName: aws.String(policyName), - } - - if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { - return fmt.Errorf("Error deleting LB stickiness policy %s: %s", d.Id(), err) - } - return nil -} - -// resourceAwsLBCookieStickinessPolicyParseId takes an ID and parses it into -// it's constituent parts. You need three axes (LB name, policy name, and LB -// port) to create or identify a stickiness policy in AWS's API. 
-func resourceAwsLBCookieStickinessPolicyParseId(id string) (string, string, string) { - parts := strings.SplitN(id, ":", 3) - return parts[0], parts[1], parts[2] -} diff --git a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go b/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go deleted file mode 100644 index a57660ac5..000000000 --- a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go +++ /dev/null @@ -1,243 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSLBCookieStickinessPolicy_basic(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBCookieStickinessPolicy( - "aws_elb.lb", - "aws_lb_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - Config: testAccLBCookieStickinessPolicyConfigUpdate(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBCookieStickinessPolicy( - "aws_elb.lb", - "aws_lb_cookie_stickiness_policy.foo", - ), - ), - }, - }, - }) -} - -func testAccCheckLBCookieStickinessPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_lb_cookie_stickiness_policy" { - continue - } - - lbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(rs.Primary.ID) - out, err := conn.DescribeLoadBalancerPolicies( - 
&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(lbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - - if len(out.PolicyDescriptions) > 0 { - return fmt.Errorf("Policy still exists") - } - } - - return nil -} - -func testAccCheckLBCookieStickinessPolicy(elbResource string, policyResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[elbResource] - if !ok { - return fmt.Errorf("Not found: %s", elbResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - policy, ok := s.RootModule().Resources[policyResource] - if !ok { - return fmt.Errorf("Not found: %s", policyResource) - } - - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - elbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(policy.Primary.ID) - _, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(elbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - - if err != nil { - return err - } - - return nil - } -} - -func TestAccCheckLBCookieStickinessPolicy_drift(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - // We only want to remove the reference to the policy from the listner, - // beacause that's all that can be done via the console. 
- removePolicy := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(80), - PolicyNames: []*string{}, - } - - if _, err := conn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - t.Fatalf("Error removing LBCookieStickinessPolicy: %s", err) - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBCookieStickinessPolicy( - "aws_elb.lb", - "aws_lb_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - PreConfig: removePolicy, - Config: testAccLBCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBCookieStickinessPolicy( - "aws_elb.lb", - "aws_lb_cookie_stickiness_policy.foo", - ), - ), - }, - }, - }) -} - -func TestAccAWSLBCookieStickinessPolicy_missingLB(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - // check that we can destroy the policy if the LB is missing - removeLB := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - deleteElbOpts := elb.DeleteLoadBalancerInput{ - LoadBalancerName: aws.String(lbName), - } - if _, err := conn.DeleteLoadBalancer(&deleteElbOpts); err != nil { - t.Fatalf("Error deleting ELB: %s", err) - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBCookieStickinessPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBCookieStickinessPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBCookieStickinessPolicy( - "aws_elb.lb", - 
"aws_lb_cookie_stickiness_policy.foo", - ), - ), - }, - resource.TestStep{ - PreConfig: removeLB, - Config: testAccLBCookieStickinessPolicyConfigDestroy(lbName), - }, - }, - }) -} - -func testAccLBCookieStickinessPolicyConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_lb_cookie_stickiness_policy" "foo" { - name = "foo-policy" - load_balancer = "${aws_elb.lb.id}" - lb_port = 80 -}`, rName) -} - -// Sets the cookie_expiration_period to 300s. -func testAccLBCookieStickinessPolicyConfigUpdate(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_lb_cookie_stickiness_policy" "foo" { - name = "foo-policy" - load_balancer = "${aws_elb.lb.id}" - lb_port = 80 - cookie_expiration_period = 300 -}`, rName) -} - -// attempt to destroy the policy, but we'll delete the LB in the PreConfig -func testAccLBCookieStickinessPolicyConfigDestroy(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go deleted file mode 100644 index 64a9f98ce..000000000 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go +++ /dev/null @@ -1,189 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - 
"github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsLBSSLNegotiationPolicy() *schema.Resource { - return &schema.Resource{ - // There is no concept of "updating" an LB policy in - // the AWS API. - Create: resourceAwsLBSSLNegotiationPolicyCreate, - Read: resourceAwsLBSSLNegotiationPolicyRead, - Delete: resourceAwsLBSSLNegotiationPolicyDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "load_balancer": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "lb_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "attribute": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - return hashcode.String(buf.String()) - }, - }, - }, - } -} - -func resourceAwsLBSSLNegotiationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - // Provision the SSLNegotiationPolicy - lbspOpts := &elb.CreateLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - PolicyName: aws.String(d.Get("name").(string)), - PolicyTypeName: aws.String("SSLNegotiationPolicyType"), - } - - // Check for Policy Attributes - if v, ok := d.GetOk("attribute"); ok { - var err error - // Expand the "attribute" set to aws-sdk-go compat []*elb.PolicyAttribute - lbspOpts.PolicyAttributes, err = expandPolicyAttributes(v.(*schema.Set).List()) - if err != nil { - return err - } - } - - log.Printf("[DEBUG] 
Load Balancer Policy opts: %#v", lbspOpts) - if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { - return fmt.Errorf("Error creating Load Balancer Policy: %s", err) - } - - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{aws.String(d.Get("name").(string))}, - } - - log.Printf("[DEBUG] SSL Negotiation create configuration: %#v", setLoadBalancerOpts) - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error setting SSLNegotiationPolicy: %s", err) - } - - d.SetId(fmt.Sprintf("%s:%d:%s", - *lbspOpts.LoadBalancerName, - *setLoadBalancerOpts.LoadBalancerPort, - *lbspOpts.PolicyName)) - return nil -} - -func resourceAwsLBSSLNegotiationPolicyRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, lbPort, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id()) - - request := &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(lbName), - PolicyNames: []*string{aws.String(policyName)}, - } - - getResp, err := elbconn.DescribeLoadBalancerPolicies(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { - // The policy is gone. 
- d.SetId("") - return nil - } else if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving policy: %s", err) - } - - if len(getResp.PolicyDescriptions) != 1 { - return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) - } - - // We can get away with this because there's only one policy returned - policyDesc := getResp.PolicyDescriptions[0] - attributes := flattenPolicyAttributes(policyDesc.PolicyAttributeDescriptions) - d.Set("attributes", attributes) - - d.Set("name", policyName) - d.Set("load_balancer", lbName) - d.Set("lb_port", lbPort) - - return nil -} - -func resourceAwsLBSSLNegotiationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - lbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id()) - - // Perversely, if we Set an empty list of PolicyNames, we detach the - // policies attached to a listener, which is required to delete the - // policy itself. - setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(d.Get("load_balancer").(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), - PolicyNames: []*string{}, - } - - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { - return fmt.Errorf("Error removing SSLNegotiationPolicy: %s", err) - } - - request := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(lbName), - PolicyName: aws.String(policyName), - } - - if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { - return fmt.Errorf("Error deleting SSL negotiation policy %s: %s", d.Id(), err) - } - return nil -} - -// resourceAwsLBSSLNegotiationPolicyParseId takes an ID and parses it into -// it's constituent parts. You need three axes (LB name, policy name, and LB -// port) to create or identify an SSL negotiation policy in AWS's API. 
-func resourceAwsLBSSLNegotiationPolicyParseId(id string) (string, string, string) { - parts := strings.SplitN(id, ":", 3) - return parts[0], parts[1], parts[2] -} diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go deleted file mode 100644 index 1eb702972..000000000 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSLBSSLNegotiationPolicy_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBSSLNegotiationPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSslNegotiationPolicyConfig( - fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5))), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBSSLNegotiationPolicy( - "aws_elb.lb", - "aws_lb_ssl_negotiation_policy.foo", - ), - resource.TestCheckResourceAttr( - "aws_lb_ssl_negotiation_policy.foo", "attribute.#", "7"), - ), - }, - }, - }) -} - -func TestAccAWSLBSSLNegotiationPolicy_missingLB(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - - // check that we can destroy the policy if the LB is missing - removeLB := func() { - conn := testAccProvider.Meta().(*AWSClient).elbconn - deleteElbOpts := elb.DeleteLoadBalancerInput{ - LoadBalancerName: aws.String(lbName), - } - if _, err := conn.DeleteLoadBalancer(&deleteElbOpts); err != nil { - t.Fatalf("Error deleting ELB: %s", err) - } - } - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBSSLNegotiationPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccSslNegotiationPolicyConfig(fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLBSSLNegotiationPolicy( - "aws_elb.lb", - "aws_lb_ssl_negotiation_policy.foo", - ), - resource.TestCheckResourceAttr( - "aws_lb_ssl_negotiation_policy.foo", "attribute.#", "7"), - ), - }, - resource.TestStep{ - PreConfig: removeLB, - Config: testAccSslNegotiationPolicyConfig(fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), lbName), - }, - }, - }) -} - -func testAccCheckLBSSLNegotiationPolicyDestroy(s *terraform.State) error { - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elb" && rs.Type != "aws_lb_ssl_negotiation_policy" { - continue - } - - // Check that the ELB is destroyed - if rs.Type == "aws_elb" { - describe, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err == nil { - if len(describe.LoadBalancerDescriptions) != 0 && - *describe.LoadBalancerDescriptions[0].LoadBalancerName == rs.Primary.ID { - return fmt.Errorf("ELB still exists") - } - } - - // Verify the error - providerErr, ok := err.(awserr.Error) - if !ok { - return err - } - - if providerErr.Code() != "LoadBalancerNotFound" { - return fmt.Errorf("Unexpected error: %s", err) - } - } else { - // Check that the SSL Negotiation Policy is destroyed - elbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(rs.Primary.ID) - _, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(elbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - - if err == nil { - return fmt.Errorf("ELB SSL Negotiation 
Policy still exists") - } - } - } - - return nil -} - -func testAccCheckLBSSLNegotiationPolicy(elbResource string, policyResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[elbResource] - if !ok { - return fmt.Errorf("Not found: %s", elbResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - policy, ok := s.RootModule().Resources[policyResource] - if !ok { - return fmt.Errorf("Not found: %s", policyResource) - } - - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - - elbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(policy.Primary.ID) - resp, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(elbName), - PolicyNames: []*string{aws.String(policyName)}, - }) - - if err != nil { - fmt.Printf("[ERROR] Problem describing load balancer policy '%s': %s", policyName, err) - return err - } - - if len(resp.PolicyDescriptions) != 1 { - return fmt.Errorf("Unable to find policy %#v", resp.PolicyDescriptions) - } - - attrmap := policyAttributesToMap(&resp.PolicyDescriptions[0].PolicyAttributeDescriptions) - if attrmap["Protocol-TLSv1"] != "false" { - return fmt.Errorf("Policy attribute 'Protocol-TLSv1' was of value %s instead of false!", attrmap["Protocol-TLSv1"]) - } - if attrmap["Protocol-TLSv1.1"] != "false" { - return fmt.Errorf("Policy attribute 'Protocol-TLSv1.1' was of value %s instead of false!", attrmap["Protocol-TLSv1.1"]) - } - if attrmap["Protocol-TLSv1.2"] != "true" { - return fmt.Errorf("Policy attribute 'Protocol-TLSv1.2' was of value %s instead of true!", attrmap["Protocol-TLSv1.2"]) - } - if attrmap["Server-Defined-Cipher-Order"] != "true" { - return fmt.Errorf("Policy attribute 'Server-Defined-Cipher-Order' was of value %s instead of true!", attrmap["Server-Defined-Cipher-Order"]) - } - if attrmap["ECDHE-RSA-AES128-GCM-SHA256"] != "true" { - return fmt.Errorf("Policy attribute 
'ECDHE-RSA-AES128-GCM-SHA256' was of value %s instead of true!", attrmap["ECDHE-RSA-AES128-GCM-SHA256"]) - } - if attrmap["AES128-GCM-SHA256"] != "true" { - return fmt.Errorf("Policy attribute 'AES128-GCM-SHA256' was of value %s instead of true!", attrmap["AES128-GCM-SHA256"]) - } - if attrmap["EDH-RSA-DES-CBC3-SHA"] != "false" { - return fmt.Errorf("Policy attribute 'EDH-RSA-DES-CBC3-SHA' was of value %s instead of false!", attrmap["EDH-RSA-DES-CBC3-SHA"]) - } - - return nil - } -} - -func policyAttributesToMap(attributes *[]*elb.PolicyAttributeDescription) map[string]string { - attrmap := make(map[string]string) - - for _, attrdef := range *attributes { - attrmap[*attrdef.AttributeName] = *attrdef.AttributeValue - } - - return attrmap -} - -// Sets the SSL Negotiation policy with attributes. -// The IAM Server Cert config is lifted from -// builtin/providers/aws/resource_aws_iam_server_certificate_test.go -func testAccSslNegotiationPolicyConfig(certName string, lbName string) string { - return fmt.Sprintf(` -resource "aws_iam_server_certificate" "test_cert" { - name = "%s" - certificate_body = < 0 { - return fmt.Errorf("Policy still exists") - } - case rs.Type == "aws_load_balancer_backend_policy": - loadBalancerName, policyName := resourceAwsLoadBalancerBackendServerPoliciesParseId(rs.Primary.ID) - out, err := conn.DescribeLoadBalancers( - &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - for _, backendServer := range out.LoadBalancerDescriptions[0].BackendServerDescriptions { - policyStrings := []string{} - for _, pol := range backendServer.PolicyNames { - policyStrings = append(policyStrings, *pol) - } - if policyInBackendServerPolicies(policyName, policyStrings) { - return fmt.Errorf("Policy still exists and is assigned") - } - } - default: - continue - } - } - return nil 
-} - -func testAccCheckAWSLoadBalancerBackendServerPolicyState(loadBalancerName string, loadBalancerBackendAuthPolicyName string, assigned bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - - loadBalancerDescription, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - }) - if err != nil { - return err - } - - for _, backendServer := range loadBalancerDescription.LoadBalancerDescriptions[0].BackendServerDescriptions { - policyStrings := []string{} - for _, pol := range backendServer.PolicyNames { - policyStrings = append(policyStrings, *pol) - } - if policyInBackendServerPolicies(loadBalancerBackendAuthPolicyName, policyStrings) != assigned { - if assigned { - return fmt.Errorf("Policy no longer assigned %s not in %+v", loadBalancerBackendAuthPolicyName, policyStrings) - } else { - return fmt.Errorf("Policy exists and is assigned") - } - } - } - - return nil - } -} - -const testAccAWSLoadBalancerBackendServerPolicyConfig_basic0 = ` -resource "tls_private_key" "example0" { - algorithm = "RSA" -} - -resource "tls_self_signed_cert" "test-cert0" { - key_algorithm = "RSA" - private_key_pem = "${tls_private_key.example0.private_key_pem}" - - subject { - common_name = "example.com" - organization = "ACME Examples, Inc" - } - - validity_period_hours = 12 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - ] -} - -resource "aws_iam_server_certificate" "test-iam-cert0" { - name_prefix = "test_cert_" - certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" - private_key = "${tls_private_key.example0.private_key_pem}" -} - -resource "aws_elb" "test-lb" { - name = "test-aws-policies-lb" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 443 - instance_protocol = "https" - lb_port = 443 - lb_protocol = "https" - ssl_certificate_id = 
"${aws_iam_server_certificate.test-iam-cert0.arn}" - } - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_load_balancer_policy" "test-pubkey-policy0" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-pubkey-policy0" - policy_type_name = "PublicKeyPolicyType" - policy_attribute = { - name = "PublicKey" - value = "${replace(replace(replace(tls_private_key.example0.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" - } -} - -resource "aws_load_balancer_policy" "test-backend-auth-policy0" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-backend-auth-policy0" - policy_type_name = "BackendServerAuthenticationPolicyType" - policy_attribute = { - name = "PublicKeyPolicyName" - value = "${aws_load_balancer_policy.test-pubkey-policy0.policy_name}" - } -} - -resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { - load_balancer_name = "${aws_elb.test-lb.name}" - instance_port = 443 - policy_names = [ - "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" - ] -} -` - -const testAccAWSLoadBalancerBackendServerPolicyConfig_basic1 = ` -resource "tls_private_key" "example0" { - algorithm = "RSA" -} - -resource "tls_self_signed_cert" "test-cert0" { - key_algorithm = "RSA" - private_key_pem = "${tls_private_key.example0.private_key_pem}" - - subject { - common_name = "example.com" - organization = "ACME Examples, Inc" - } - - validity_period_hours = 12 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - ] -} - -resource "tls_private_key" "example1" { - algorithm = "RSA" -} - -resource "tls_self_signed_cert" "test-cert1" { - key_algorithm = "RSA" - private_key_pem = "${tls_private_key.example1.private_key_pem}" - - subject { - common_name = "example.com" - organization = "ACME Examples, Inc" - } - - validity_period_hours = 12 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - 
] -} - -resource "aws_iam_server_certificate" "test-iam-cert0" { - name_prefix = "test_cert_" - certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" - private_key = "${tls_private_key.example0.private_key_pem}" -} - -resource "aws_elb" "test-lb" { - name = "test-aws-policies-lb" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 443 - instance_protocol = "https" - lb_port = 443 - lb_protocol = "https" - ssl_certificate_id = "${aws_iam_server_certificate.test-iam-cert0.arn}" - } - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_load_balancer_policy" "test-pubkey-policy0" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-pubkey-policy0" - policy_type_name = "PublicKeyPolicyType" - policy_attribute = { - name = "PublicKey" - value = "${replace(replace(replace(tls_private_key.example0.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" - } -} - -resource "aws_load_balancer_policy" "test-pubkey-policy1" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-pubkey-policy1" - policy_type_name = "PublicKeyPolicyType" - policy_attribute = { - name = "PublicKey" - value = "${replace(replace(replace(tls_private_key.example1.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" - } -} - -resource "aws_load_balancer_policy" "test-backend-auth-policy0" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-backend-auth-policy0" - policy_type_name = "BackendServerAuthenticationPolicyType" - policy_attribute = { - name = "PublicKeyPolicyName" - value = "${aws_load_balancer_policy.test-pubkey-policy1.policy_name}" - } -} - -resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { - load_balancer_name = "${aws_elb.test-lb.name}" - instance_port = 443 - policy_names = [ - "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" - ] -} -` - -const 
testAccAWSLoadBalancerBackendServerPolicyConfig_basic2 = ` -resource "tls_private_key" "example0" { - algorithm = "RSA" -} - -resource "tls_self_signed_cert" "test-cert0" { - key_algorithm = "RSA" - private_key_pem = "${tls_private_key.example0.private_key_pem}" - - subject { - common_name = "example.com" - organization = "ACME Examples, Inc" - } - - validity_period_hours = 12 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - ] -} - -resource "tls_private_key" "example1" { - algorithm = "RSA" -} - -resource "tls_self_signed_cert" "test-cert1" { - key_algorithm = "RSA" - private_key_pem = "${tls_private_key.example1.private_key_pem}" - - subject { - common_name = "example.com" - organization = "ACME Examples, Inc" - } - - validity_period_hours = 12 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - ] -} - -resource "aws_iam_server_certificate" "test-iam-cert0" { - name_prefix = "test_cert_" - certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" - private_key = "${tls_private_key.example0.private_key_pem}" -} - -resource "aws_elb" "test-lb" { - name = "test-aws-policies-lb" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 443 - instance_protocol = "https" - lb_port = 443 - lb_protocol = "https" - ssl_certificate_id = "${aws_iam_server_certificate.test-iam-cert0.arn}" - } - - tags { - Name = "tf-acc-test" - } -} -` diff --git a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go deleted file mode 100644 index d1c8cacbb..000000000 --- a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go +++ /dev/null @@ -1,138 +0,0 @@ -package aws - -import ( - "fmt" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" -) - -func 
resourceAwsLoadBalancerListenerPolicies() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsLoadBalancerListenerPoliciesCreate, - Read: resourceAwsLoadBalancerListenerPoliciesRead, - Update: resourceAwsLoadBalancerListenerPoliciesCreate, - Delete: resourceAwsLoadBalancerListenerPoliciesDelete, - - Schema: map[string]*schema.Schema{ - "load_balancer_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "policy_names": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Set: schema.HashString, - }, - - "load_balancer_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - } -} - -func resourceAwsLoadBalancerListenerPoliciesCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - loadBalancerName := d.Get("load_balancer_name") - - policyNames := []*string{} - if v, ok := d.GetOk("policy_names"); ok { - policyNames = expandStringList(v.(*schema.Set).List()) - } - - setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName.(string)), - LoadBalancerPort: aws.Int64(int64(d.Get("load_balancer_port").(int))), - PolicyNames: policyNames, - } - - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { - return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) - } - - d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.LoadBalancerPort, 10))) - return resourceAwsLoadBalancerListenerPoliciesRead(d, meta) -} - -func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) - - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - } - - describeResp, err := 
elbconn.DescribeLoadBalancers(describeElbOpts) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "LoadBalancerNotFound" { - d.SetId("") - return fmt.Errorf("LoadBalancerNotFound: %s", err) - } - } - return fmt.Errorf("Error retrieving ELB description: %s", err) - } - - if len(describeResp.LoadBalancerDescriptions) != 1 { - return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - lb := describeResp.LoadBalancerDescriptions[0] - - policyNames := []*string{} - for _, listener := range lb.ListenerDescriptions { - if loadBalancerPort != strconv.Itoa(int(*listener.Listener.LoadBalancerPort)) { - continue - } - - for _, name := range listener.PolicyNames { - policyNames = append(policyNames, name) - } - } - - d.Set("load_balancer_name", loadBalancerName) - d.Set("load_balancer_port", loadBalancerPort) - d.Set("policy_names", flattenStringList(policyNames)) - - return nil -} - -func resourceAwsLoadBalancerListenerPoliciesDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) - - loadBalancerPortInt, err := strconv.ParseInt(loadBalancerPort, 10, 64) - if err != nil { - return fmt.Errorf("Error parsing loadBalancerPort as integer: %s", err) - } - - setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName), - LoadBalancerPort: aws.Int64(loadBalancerPortInt), - PolicyNames: []*string{}, - } - - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { - return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) - } - - d.SetId("") - return nil -} - -func resourceAwsLoadBalancerListenerPoliciesParseId(id string) (string, string) { - parts := strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} diff --git a/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go 
b/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go deleted file mode 100644 index bd663a157..000000000 --- a/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSLoadBalancerListenerPolicy_basic(t *testing.T) { - rChar := acctest.RandStringFromCharSet(6, acctest.CharSetAlpha) - lbName := fmt.Sprintf("%s", rChar) - mcName := fmt.Sprintf("%s", rChar) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLoadBalancerListenerPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSLoadBalancerListenerPolicyConfig_basic0(lbName, mcName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"), - testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, true), - ), - }, - resource.TestStep{ - Config: testAccAWSLoadBalancerListenerPolicyConfig_basic1(lbName, mcName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"), - testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, true), - ), - }, - resource.TestStep{ - Config: testAccAWSLoadBalancerListenerPolicyConfig_basic2(lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerListenerPolicyState(lbName, int64(80), mcName, false), - ), - }, - }, - }) -} - -func policyInListenerPolicies(str string, list []string) bool { - for _, v := range list { - if v 
== str { - return true - } - } - return false -} - -func testAccCheckAWSLoadBalancerListenerPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - switch { - case rs.Type == "aws_load_balancer_policy": - loadBalancerName, policyName := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) - out, err := conn.DescribeLoadBalancerPolicies( - &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyNames: []*string{aws.String(policyName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - if len(out.PolicyDescriptions) > 0 { - return fmt.Errorf("Policy still exists") - } - case rs.Type == "aws_load_listener_policy": - loadBalancerName, _ := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) - out, err := conn.DescribeLoadBalancers( - &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - policyNames := []string{} - for k, _ := range rs.Primary.Attributes { - if strings.HasPrefix(k, "policy_names.") && strings.HasSuffix(k, ".name") { - value_key := fmt.Sprintf("%s.value", strings.TrimSuffix(k, ".name")) - policyNames = append(policyNames, rs.Primary.Attributes[value_key]) - } - } - for _, policyName := range policyNames { - for _, listener := range out.LoadBalancerDescriptions[0].ListenerDescriptions { - policyStrings := []string{} - for _, pol := range listener.PolicyNames { - policyStrings = append(policyStrings, *pol) - } - if policyInListenerPolicies(policyName, policyStrings) { - return fmt.Errorf("Policy still exists and is assigned") - } - } - } - default: - continue - } - } - return nil -} - -func 
testAccCheckAWSLoadBalancerListenerPolicyState(loadBalancerName string, loadBalancerListenerPort int64, loadBalancerListenerPolicyName string, assigned bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - - loadBalancerDescription, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - }) - if err != nil { - return err - } - - for _, listener := range loadBalancerDescription.LoadBalancerDescriptions[0].ListenerDescriptions { - if *listener.Listener.LoadBalancerPort != loadBalancerListenerPort { - continue - } - policyStrings := []string{} - for _, pol := range listener.PolicyNames { - policyStrings = append(policyStrings, *pol) - } - if policyInListenerPolicies(loadBalancerListenerPolicyName, policyStrings) != assigned { - if assigned { - return fmt.Errorf("Policy no longer assigned %s not in %+v", loadBalancerListenerPolicyName, policyStrings) - } else { - return fmt.Errorf("Policy exists and is assigned") - } - } - } - - return nil - } -} - -func testAccAWSLoadBalancerListenerPolicyConfig_basic0(lbName, mcName string) string { - return fmt.Sprintf(` -resource "aws_elb" "test-lb" { - name = "%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_load_balancer_policy" "magic-cookie-sticky" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "%s" - policy_type_name = "AppCookieStickinessPolicyType" - policy_attribute = { - name = "CookieName" - value = "magic_cookie" - } -} - -resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { - load_balancer_name = "${aws_elb.test-lb.name}" - load_balancer_port = 80 - policy_names = [ - "${aws_load_balancer_policy.magic-cookie-sticky.policy_name}", - ] -}`, lbName, mcName) -} - -func 
testAccAWSLoadBalancerListenerPolicyConfig_basic1(lbName, mcName string) string { - return fmt.Sprintf(` -resource "aws_elb" "test-lb" { - name = "%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_load_balancer_policy" "magic-cookie-sticky" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "%s" - policy_type_name = "AppCookieStickinessPolicyType" - policy_attribute = { - name = "CookieName" - value = "unicorn_cookie" - } -} - -resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { - load_balancer_name = "${aws_elb.test-lb.name}" - load_balancer_port = 80 - policy_names = [ - "${aws_load_balancer_policy.magic-cookie-sticky.policy_name}" - ] -}`, lbName, mcName) -} - -func testAccAWSLoadBalancerListenerPolicyConfig_basic2(lbName string) string { - return fmt.Sprintf(` -resource "aws_elb" "test-lb" { - name = "%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } -}`, lbName) -} diff --git a/builtin/providers/aws/resource_aws_load_balancer_policy.go b/builtin/providers/aws/resource_aws_load_balancer_policy.go deleted file mode 100644 index 8305cf992..000000000 --- a/builtin/providers/aws/resource_aws_load_balancer_policy.go +++ /dev/null @@ -1,352 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsLoadBalancerPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsLoadBalancerPolicyCreate, - Read: resourceAwsLoadBalancerPolicyRead, - Update: resourceAwsLoadBalancerPolicyUpdate, - Delete: resourceAwsLoadBalancerPolicyDelete, - - Schema: 
map[string]*schema.Schema{ - "load_balancer_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "policy_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "policy_type_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "policy_attribute": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsLoadBalancerPolicyCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - attributes := []*elb.PolicyAttribute{} - if attributedata, ok := d.GetOk("policy_attribute"); ok { - attributeSet := attributedata.(*schema.Set).List() - for _, attribute := range attributeSet { - data := attribute.(map[string]interface{}) - attributes = append(attributes, &elb.PolicyAttribute{ - AttributeName: aws.String(data["name"].(string)), - AttributeValue: aws.String(data["value"].(string)), - }) - } - } - - lbspOpts := &elb.CreateLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(d.Get("load_balancer_name").(string)), - PolicyName: aws.String(d.Get("policy_name").(string)), - PolicyTypeName: aws.String(d.Get("policy_type_name").(string)), - PolicyAttributes: attributes, - } - - if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { - return fmt.Errorf("Error creating LoadBalancerPolicy: %s", err) - } - - d.SetId(fmt.Sprintf("%s:%s", - *lbspOpts.LoadBalancerName, - *lbspOpts.PolicyName)) - return resourceAwsLoadBalancerPolicyRead(d, meta) -} - -func resourceAwsLoadBalancerPolicyRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - loadBalancerName, policyName := 
resourceAwsLoadBalancerPolicyParseId(d.Id()) - - request := &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyNames: []*string{aws.String(policyName)}, - } - - getResp, err := elbconn.DescribeLoadBalancerPolicies(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving policy: %s", err) - } - - if len(getResp.PolicyDescriptions) != 1 { - return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) - } - - policyDesc := getResp.PolicyDescriptions[0] - policyTypeName := policyDesc.PolicyTypeName - policyAttributes := policyDesc.PolicyAttributeDescriptions - - attributes := []map[string]string{} - for _, a := range policyAttributes { - pair := make(map[string]string) - pair["name"] = *a.AttributeName - pair["value"] = *a.AttributeValue - if (*policyTypeName == "SSLNegotiationPolicyType") && (*a.AttributeValue == "false") { - continue - } - attributes = append(attributes, pair) - } - - d.Set("policy_name", policyName) - d.Set("policy_type_name", policyTypeName) - d.Set("load_balancer_name", loadBalancerName) - d.Set("policy_attribute", attributes) - - return nil -} - -func resourceAwsLoadBalancerPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - reassignments := Reassignment{} - - loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) - - assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn) - if err != nil { - return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err) - } - - if assigned { - reassignments, err = resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn) - if err != nil { - return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err) - } - } - - request := 
&elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyName: aws.String(policyName), - } - - if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { - return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err) - } - - err = resourceAwsLoadBalancerPolicyCreate(d, meta) - - for _, listenerAssignment := range reassignments.listenerPolicies { - if _, err := elbconn.SetLoadBalancerPoliciesOfListener(listenerAssignment); err != nil { - return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) - } - } - - for _, backendServerAssignment := range reassignments.backendServerPolicies { - if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(backendServerAssignment); err != nil { - return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) - } - } - - return resourceAwsLoadBalancerPolicyRead(d, meta) -} - -func resourceAwsLoadBalancerPolicyDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - - loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) - - assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn) - if err != nil { - return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err) - } - - if assigned { - _, err := resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn) - if err != nil { - return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err) - } - } - - request := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyName: aws.String(policyName), - } - - if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { - return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err) - } - - d.SetId("") - return nil -} - -func resourceAwsLoadBalancerPolicyParseId(id string) (string, string) { - parts := 
strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} - -func resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName string, elbconn *elb.ELB) (bool, error) { - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "LoadBalancerNotFound" { - return false, nil - } - } - return false, fmt.Errorf("Error retrieving ELB description: %s", err) - } - - if len(describeResp.LoadBalancerDescriptions) != 1 { - return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - lb := describeResp.LoadBalancerDescriptions[0] - assigned := false - for _, backendServer := range lb.BackendServerDescriptions { - for _, name := range backendServer.PolicyNames { - if policyName == *name { - assigned = true - break - } - } - } - - for _, listener := range lb.ListenerDescriptions { - for _, name := range listener.PolicyNames { - if policyName == *name { - assigned = true - break - } - } - } - - return assigned, nil -} - -type Reassignment struct { - backendServerPolicies []*elb.SetLoadBalancerPoliciesForBackendServerInput - listenerPolicies []*elb.SetLoadBalancerPoliciesOfListenerInput -} - -func resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName string, elbconn *elb.ELB) (Reassignment, error) { - reassignments := Reassignment{} - - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "LoadBalancerNotFound" { - return reassignments, nil - } - } - return reassignments, fmt.Errorf("Error retrieving ELB description: %s", err) - } - - if len(describeResp.LoadBalancerDescriptions) != 1 { - 
return reassignments, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - lb := describeResp.LoadBalancerDescriptions[0] - - for _, backendServer := range lb.BackendServerDescriptions { - policies := []*string{} - - for _, name := range backendServer.PolicyNames { - if policyName != *name { - policies = append(policies, name) - } - } - - if len(backendServer.PolicyNames) != len(policies) { - setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - LoadBalancerName: aws.String(loadBalancerName), - InstancePort: aws.Int64(*backendServer.InstancePort), - PolicyNames: policies, - } - - reassignOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - LoadBalancerName: aws.String(loadBalancerName), - InstancePort: aws.Int64(*backendServer.InstancePort), - PolicyNames: backendServer.PolicyNames, - } - - reassignments.backendServerPolicies = append(reassignments.backendServerPolicies, reassignOpts) - - _, err = elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts) - if err != nil { - return reassignments, fmt.Errorf("Error Setting Load Balancer Policies for Backend Server: %s", err) - } - } - } - - for _, listener := range lb.ListenerDescriptions { - policies := []*string{} - - for _, name := range listener.PolicyNames { - if policyName != *name { - policies = append(policies, name) - } - } - - if len(listener.PolicyNames) != len(policies) { - setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName), - LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), - PolicyNames: policies, - } - - reassignOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName), - LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), - PolicyNames: listener.PolicyNames, - } - - reassignments.listenerPolicies = append(reassignments.listenerPolicies, reassignOpts) - - _, err = elbconn.SetLoadBalancerPoliciesOfListener(setOpts) - if err != 
nil { - return reassignments, fmt.Errorf("Error Setting Load Balancer Policies of Listener: %s", err) - } - } - } - - return reassignments, nil -} diff --git a/builtin/providers/aws/resource_aws_load_balancer_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_policy_test.go deleted file mode 100644 index cfdbec8ec..000000000 --- a/builtin/providers/aws/resource_aws_load_balancer_policy_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package aws - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/elb" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSLoadBalancerPolicy_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLoadBalancerPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLoadBalancerPolicyConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), - ), - }, - }, - }) -} - -func TestAccAWSLoadBalancerPolicy_updateWhileAssigned(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLoadBalancerPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), - ), - }, - { - Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1(rInt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), - ), - }, - }, - }) -} - -func testAccCheckAWSLoadBalancerPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_load_balancer_policy" { - continue - } - - loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(rs.Primary.ID) - out, err := conn.DescribeLoadBalancerPolicies( - &elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyNames: []*string{aws.String(policyName)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { - continue - } - return err - } - - if len(out.PolicyDescriptions) > 0 { - return fmt.Errorf("Policy still exists") - } - } - return nil -} - -func testAccCheckAWSLoadBalancerPolicyState(elbResource string, policyResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[elbResource] - if !ok { - return fmt.Errorf("Not found: %s", elbResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - policy, ok := s.RootModule().Resources[policyResource] - if !ok { - return fmt.Errorf("Not found: %s", policyResource) - } - - elbconn := testAccProvider.Meta().(*AWSClient).elbconn - loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(policy.Primary.ID) - loadBalancerPolicies, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ - LoadBalancerName: aws.String(loadBalancerName), - PolicyNames: []*string{aws.String(policyName)}, - }) - - if err != nil { - return err - } - - for _, loadBalancerPolicy := range loadBalancerPolicies.PolicyDescriptions { - if *loadBalancerPolicy.PolicyName == policyName { - if *loadBalancerPolicy.PolicyTypeName != 
policy.Primary.Attributes["policy_type_name"] { - return fmt.Errorf("PolicyTypeName does not match") - } - policyAttributeCount, err := strconv.Atoi(policy.Primary.Attributes["policy_attribute.#"]) - if err != nil { - return err - } - if len(loadBalancerPolicy.PolicyAttributeDescriptions) != policyAttributeCount { - return fmt.Errorf("PolicyAttributeDescriptions length mismatch") - } - policyAttributes := make(map[string]string) - for k, v := range policy.Primary.Attributes { - if strings.HasPrefix(k, "policy_attribute.") && strings.HasSuffix(k, ".name") { - key := v - value_key := fmt.Sprintf("%s.value", strings.TrimSuffix(k, ".name")) - policyAttributes[key] = policy.Primary.Attributes[value_key] - } - } - for _, policyAttribute := range loadBalancerPolicy.PolicyAttributeDescriptions { - if *policyAttribute.AttributeValue != policyAttributes[*policyAttribute.AttributeName] { - return fmt.Errorf("PollicyAttribute Value mismatch %s != %s: %s", *policyAttribute.AttributeValue, policyAttributes[*policyAttribute.AttributeName], policyAttributes) - } - } - } - } - - return nil - } -} - -func testAccAWSLoadBalancerPolicyConfig_basic(rInt int) string { - return fmt.Sprintf(` - resource "aws_elb" "test-lb" { - name = "test-lb-%d" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_load_balancer_policy" "test-policy" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-policy-%d" - policy_type_name = "AppCookieStickinessPolicyType" - policy_attribute = { - name = "CookieName" - value = "magic_cookie" - } - }`, rInt, rInt) -} - -func testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0(rInt int) string { - return fmt.Sprintf(` - resource "aws_elb" "test-lb" { - name = "test-lb-%d" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - 
lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_load_balancer_policy" "test-policy" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-policy-%d" - policy_type_name = "AppCookieStickinessPolicyType" - policy_attribute = { - name = "CookieName" - value = "magic_cookie" - } - } - - resource "aws_load_balancer_listener_policy" "test-lb-test-policy-80" { - load_balancer_name = "${aws_elb.test-lb.name}" - load_balancer_port = 80 - policy_names = [ - "${aws_load_balancer_policy.test-policy.policy_name}" - ] - }`, rInt, rInt) -} - -func testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1(rInt int) string { - return fmt.Sprintf(` - resource "aws_elb" "test-lb" { - name = "test-lb-%d" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_load_balancer_policy" "test-policy" { - load_balancer_name = "${aws_elb.test-lb.name}" - policy_name = "test-policy-%d" - policy_type_name = "AppCookieStickinessPolicyType" - policy_attribute = { - name = "CookieName" - value = "unicorn_cookie" - } - } - - resource "aws_load_balancer_listener_policy" "test-lb-test-policy-80" { - load_balancer_name = "${aws_elb.test-lb.name}" - load_balancer_port = 80 - policy_names = [ - "${aws_load_balancer_policy.test-policy.policy_name}" - ] - }`, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_main_route_table_association.go b/builtin/providers/aws/resource_aws_main_route_table_association.go deleted file mode 100644 index aabecda54..000000000 --- a/builtin/providers/aws/resource_aws_main_route_table_association.go +++ /dev/null @@ -1,169 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsMainRouteTableAssociation() *schema.Resource { 
- return &schema.Resource{ - Create: resourceAwsMainRouteTableAssociationCreate, - Read: resourceAwsMainRouteTableAssociationRead, - Update: resourceAwsMainRouteTableAssociationUpdate, - Delete: resourceAwsMainRouteTableAssociationDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "route_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - // We use this field to record the main route table that is automatically - // created when the VPC is created. We need this to be able to "destroy" - // our main route table association, which we do by returning this route - // table to its original place as the Main Route Table for the VPC. - "original_route_table_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - vpcId := d.Get("vpc_id").(string) - routeTableId := d.Get("route_table_id").(string) - - log.Printf("[INFO] Creating main route table association: %s => %s", vpcId, routeTableId) - - mainAssociation, err := findMainRouteTableAssociation(conn, vpcId) - if err != nil { - return err - } - - resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ - AssociationId: mainAssociation.RouteTableAssociationId, - RouteTableId: aws.String(routeTableId), - }) - if err != nil { - return err - } - - d.Set("original_route_table_id", mainAssociation.RouteTableId) - d.SetId(*resp.NewAssociationId) - log.Printf("[INFO] New main route table association ID: %s", d.Id()) - - return nil -} - -func resourceAwsMainRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - mainAssociation, err := findMainRouteTableAssociation( - conn, - d.Get("vpc_id").(string)) - if err != nil { - return err - } - - if mainAssociation == nil || 
*mainAssociation.RouteTableAssociationId != d.Id() { - // It seems it doesn't exist anymore, so clear the ID - d.SetId("") - } - - return nil -} - -// Update is almost exactly like Create, except we want to retain the -// original_route_table_id - this needs to stay recorded as the AWS-created -// table from VPC creation. -func resourceAwsMainRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - vpcId := d.Get("vpc_id").(string) - routeTableId := d.Get("route_table_id").(string) - - log.Printf("[INFO] Updating main route table association: %s => %s", vpcId, routeTableId) - - resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ - AssociationId: aws.String(d.Id()), - RouteTableId: aws.String(routeTableId), - }) - if err != nil { - return err - } - - d.SetId(*resp.NewAssociationId) - log.Printf("[INFO] New main route table association ID: %s", d.Id()) - - return nil -} - -func resourceAwsMainRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - vpcId := d.Get("vpc_id").(string) - originalRouteTableId := d.Get("original_route_table_id").(string) - - log.Printf("[INFO] Deleting main route table association by resetting Main Route Table for VPC: %s to its original Route Table: %s", - vpcId, - originalRouteTableId) - - resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ - AssociationId: aws.String(d.Id()), - RouteTableId: aws.String(originalRouteTableId), - }) - if err != nil { - return err - } - - log.Printf("[INFO] Resulting Association ID: %s", *resp.NewAssociationId) - - return nil -} - -func findMainRouteTableAssociation(conn *ec2.EC2, vpcId string) (*ec2.RouteTableAssociation, error) { - mainRouteTable, err := findMainRouteTable(conn, vpcId) - if err != nil { - return nil, err - } - if mainRouteTable == nil { - return nil, nil - } - - for _, a := range 
mainRouteTable.Associations { - if *a.Main { - return a, nil - } - } - return nil, fmt.Errorf("Could not find main routing table association for VPC: %s", vpcId) -} - -func findMainRouteTable(conn *ec2.EC2, vpcId string) (*ec2.RouteTable, error) { - mainFilter := &ec2.Filter{ - Name: aws.String("association.main"), - Values: []*string{aws.String("true")}, - } - vpcFilter := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(vpcId)}, - } - routeResp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - Filters: []*ec2.Filter{mainFilter, vpcFilter}, - }) - if err != nil { - return nil, err - } else if len(routeResp.RouteTables) != 1 { - return nil, nil - } - - return routeResp.RouteTables[0], nil -} diff --git a/builtin/providers/aws/resource_aws_main_route_table_association_test.go b/builtin/providers/aws/resource_aws_main_route_table_association_test.go deleted file mode 100644 index d93707953..000000000 --- a/builtin/providers/aws/resource_aws_main_route_table_association_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSMainRouteTableAssociation_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckMainRouteTableAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccMainRouteTableAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckMainRouteTableAssociation( - "aws_main_route_table_association.foo", - "aws_vpc.foo", - "aws_route_table.foo", - ), - ), - }, - resource.TestStep{ - Config: testAccMainRouteTableAssociationConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckMainRouteTableAssociation( - "aws_main_route_table_association.foo", - "aws_vpc.foo", - 
"aws_route_table.bar", - ), - ), - }, - }, - }) -} - -func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_main_route_table_association" { - continue - } - - mainAssociation, err := findMainRouteTableAssociation( - conn, - rs.Primary.Attributes["vpc_id"], - ) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" { - continue - } - return err - } - - if mainAssociation != nil { - return fmt.Errorf("still exists") - } - } - - return nil -} - -func testAccCheckMainRouteTableAssociation( - mainRouteTableAssociationResource string, - vpcResource string, - routeTableResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[mainRouteTableAssociationResource] - if !ok { - return fmt.Errorf("Not found: %s", mainRouteTableAssociationResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - vpc, ok := s.RootModule().Resources[vpcResource] - if !ok { - return fmt.Errorf("Not found: %s", vpcResource) - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - mainAssociation, err := findMainRouteTableAssociation(conn, vpc.Primary.ID) - if err != nil { - return err - } - - if *mainAssociation.RouteTableAssociationId != rs.Primary.ID { - return fmt.Errorf("Found wrong main association: %s", - *mainAssociation.RouteTableAssociationId) - } - - return nil - } -} - -const testAccMainRouteTableAssociationConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccMainRouteTableAssociationConfig" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.1.1.0/24" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = 
"${aws_vpc.foo.id}" - route { - cidr_block = "10.0.0.0/8" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_main_route_table_association" "foo" { - vpc_id = "${aws_vpc.foo.id}" - route_table_id = "${aws_route_table.foo.id}" -} -` - -const testAccMainRouteTableAssociationConfigUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccMainRouteTableAssociationConfigUpdate" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.1.1.0/24" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -// Need to keep the old route table around when we update the -// main_route_table_association, otherwise Terraform will try to destroy the -// route table too early, and will fail because it's still the main one -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - route { - cidr_block = "10.0.0.0/8" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_route_table" "bar" { - vpc_id = "${aws_vpc.foo.id}" - route { - cidr_block = "10.0.0.0/8" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_main_route_table_association" "foo" { - vpc_id = "${aws_vpc.foo.id}" - route_table_id = "${aws_route_table.bar.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_nat_gateway.go b/builtin/providers/aws/resource_aws_nat_gateway.go deleted file mode 100644 index 1ec5e986e..000000000 --- a/builtin/providers/aws/resource_aws_nat_gateway.go +++ /dev/null @@ -1,195 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsNatGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsNatGatewayCreate, - Read: resourceAwsNatGatewayRead, - Delete: 
resourceAwsNatGatewayDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "allocation_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_interface_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "public_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsNatGatewayCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Create the NAT Gateway - createOpts := &ec2.CreateNatGatewayInput{ - AllocationId: aws.String(d.Get("allocation_id").(string)), - SubnetId: aws.String(d.Get("subnet_id").(string)), - } - - log.Printf("[DEBUG] Create NAT Gateway: %s", *createOpts) - natResp, err := conn.CreateNatGateway(createOpts) - if err != nil { - return fmt.Errorf("Error creating NAT Gateway: %s", err) - } - - // Get the ID and store it - ng := natResp.NatGateway - d.SetId(*ng.NatGatewayId) - log.Printf("[INFO] NAT Gateway ID: %s", d.Id()) - - // Wait for the NAT Gateway to become available - log.Printf("[DEBUG] Waiting for NAT Gateway (%s) to become available", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: NGStateRefreshFunc(conn, d.Id()), - Timeout: 10 * time.Minute, - } - - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for NAT Gateway (%s) to become available: %s", d.Id(), err) - } - - // Update our attributes and return - return resourceAwsNatGatewayRead(d, meta) -} - -func resourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).ec2conn - - // Refresh the NAT Gateway state - ngRaw, state, err := NGStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - - status := map[string]bool{ - "deleted": true, - "deleting": true, - "failed": true, - } - - if _, ok := status[strings.ToLower(state)]; ngRaw == nil || ok { - log.Printf("[INFO] Removing %s from Terraform state as it is not found or in the deleted state.", d.Id()) - d.SetId("") - return nil - } - - // Set NAT Gateway attributes - ng := ngRaw.(*ec2.NatGateway) - d.Set("subnet_id", ng.SubnetId) - - // Address - address := ng.NatGatewayAddresses[0] - d.Set("allocation_id", address.AllocationId) - d.Set("network_interface_id", address.NetworkInterfaceId) - d.Set("private_ip", address.PrivateIp) - d.Set("public_ip", address.PublicIp) - - return nil -} - -func resourceAwsNatGatewayDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - deleteOpts := &ec2.DeleteNatGatewayInput{ - NatGatewayId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting NAT Gateway: %s", d.Id()) - - _, err := conn.DeleteNatGateway(deleteOpts) - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - - if ec2err.Code() == "NatGatewayNotFound" { - return nil - } - - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{"deleted"}, - Refresh: NGStateRefreshFunc(conn, d.Id()), - Timeout: 30 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - _, stateErr := stateConf.WaitForState() - if stateErr != nil { - return fmt.Errorf("Error waiting for NAT Gateway (%s) to delete: %s", d.Id(), err) - } - - return nil -} - -// NGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a NAT Gateway. 
-func NGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - opts := &ec2.DescribeNatGatewaysInput{ - NatGatewayIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeNatGateways(opts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NatGatewayNotFound" { - resp = nil - } else { - log.Printf("Error on NGStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - ng := resp.NatGateways[0] - return ng, *ng.State, nil - } -} diff --git a/builtin/providers/aws/resource_aws_nat_gateway_test.go b/builtin/providers/aws/resource_aws_nat_gateway_test.go deleted file mode 100644 index 4790cb7cc..000000000 --- a/builtin/providers/aws/resource_aws_nat_gateway_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSNatGateway_basic(t *testing.T) { - var natGateway ec2.NatGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_nat_gateway.gateway", - Providers: testAccProviders, - CheckDestroy: testAccCheckNatGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNatGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckNatGatewayExists("aws_nat_gateway.gateway", &natGateway), - ), - }, - }, - }) -} - -func testAccCheckNatGatewayDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_nat_gateway" { - continue - } - - // Try to find the resource - 
resp, err := conn.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{ - NatGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - status := map[string]bool{ - "deleted": true, - "deleting": true, - "failed": true, - } - if _, ok := status[strings.ToLower(*resp.NatGateways[0].State)]; len(resp.NatGateways) > 0 && !ok { - return fmt.Errorf("still exists") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "NatGatewayNotFound" { - return err - } - } - - return nil -} - -func testAccCheckNatGatewayExists(n string, ng *ec2.NatGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{ - NatGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.NatGateways) == 0 { - return fmt.Errorf("NatGateway not found") - } - - *ng = *resp.NatGateways[0] - - return nil - } -} - -const testAccNatGatewayConfig = ` -resource "aws_vpc" "vpc" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccNatGatewayConfig" - } -} - -resource "aws_subnet" "private" { - vpc_id = "${aws_vpc.vpc.id}" - cidr_block = "10.0.1.0/24" - map_public_ip_on_launch = false -} - -resource "aws_subnet" "public" { - vpc_id = "${aws_vpc.vpc.id}" - cidr_block = "10.0.2.0/24" - map_public_ip_on_launch = true -} - -resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.vpc.id}" -} - -resource "aws_eip" "nat_gateway" { - vpc = true -} - -// Actual SUT -resource "aws_nat_gateway" "gateway" { - allocation_id = "${aws_eip.nat_gateway.id}" - subnet_id = "${aws_subnet.public.id}" - - depends_on = ["aws_internet_gateway.gw"] -} - -resource 
"aws_route_table" "private" { - vpc_id = "${aws_vpc.vpc.id}" - - route { - cidr_block = "0.0.0.0/0" - nat_gateway_id = "${aws_nat_gateway.gateway.id}" - } -} - -resource "aws_route_table_association" "private" { - subnet_id = "${aws_subnet.private.id}" - route_table_id = "${aws_route_table.private.id}" -} - -resource "aws_route_table" "public" { - vpc_id = "${aws_vpc.vpc.id}" - - route { - cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.gw.id}" - } -} - -resource "aws_route_table_association" "public" { - subnet_id = "${aws_subnet.public.id}" - route_table_id = "${aws_route_table.public.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_network_acl.go b/builtin/providers/aws/resource_aws_network_acl.go deleted file mode 100644 index 4777f4707..000000000 --- a/builtin/providers/aws/resource_aws_network_acl.go +++ /dev/null @@ -1,648 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsNetworkAcl() *schema.Resource { - - return &schema.Resource{ - Create: resourceAwsNetworkAclCreate, - Read: resourceAwsNetworkAclRead, - Delete: resourceAwsNetworkAclDelete, - Update: resourceAwsNetworkAclUpdate, - Importer: &schema.ResourceImporter{ - State: resourceAwsNetworkAclImportState, - }, - - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Computed: false, - }, - "subnet_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: false, - Deprecated: "Attribute subnet_id is deprecated on network_acl resources. 
Use subnet_ids instead", - }, - "subnet_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ConflictsWith: []string{"subnet_id"}, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "ingress": { - Type: schema.TypeSet, - Required: false, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - "to_port": { - Type: schema.TypeInt, - Required: true, - }, - "rule_no": { - Type: schema.TypeInt, - Required: true, - }, - "action": { - Type: schema.TypeString, - Required: true, - }, - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - "icmp_type": { - Type: schema.TypeInt, - Optional: true, - }, - "icmp_code": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - Set: resourceAwsNetworkAclEntryHash, - }, - "egress": { - Type: schema.TypeSet, - Required: false, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - "to_port": { - Type: schema.TypeInt, - Required: true, - }, - "rule_no": { - Type: schema.TypeInt, - Required: true, - }, - "action": { - Type: schema.TypeString, - Required: true, - }, - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - "icmp_type": { - Type: schema.TypeInt, - Optional: true, - }, - "icmp_code": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - Set: resourceAwsNetworkAclEntryHash, - }, - "tags": tagsSchema(), - }, - } -} - -func resourceAwsNetworkAclCreate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AWSClient).ec2conn - - // Create 
the Network Acl - createOpts := &ec2.CreateNetworkAclInput{ - VpcId: aws.String(d.Get("vpc_id").(string)), - } - - log.Printf("[DEBUG] Network Acl create config: %#v", createOpts) - resp, err := conn.CreateNetworkAcl(createOpts) - if err != nil { - return fmt.Errorf("Error creating network acl: %s", err) - } - - // Get the ID and store it - networkAcl := resp.NetworkAcl - d.SetId(*networkAcl.NetworkAclId) - log.Printf("[INFO] Network Acl ID: %s", *networkAcl.NetworkAclId) - - // Update rules and subnet association once acl is created - return resourceAwsNetworkAclUpdate(d, meta) -} - -func resourceAwsNetworkAclRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(d.Id())}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "InvalidNetworkAclID.NotFound" { - log.Printf("[DEBUG] Network ACL (%s) not found", d.Id()) - d.SetId("") - return nil - } - } - return err - } - if resp == nil { - return nil - } - - networkAcl := resp.NetworkAcls[0] - var ingressEntries []*ec2.NetworkAclEntry - var egressEntries []*ec2.NetworkAclEntry - - // separate the ingress and egress rules - for _, e := range networkAcl.Entries { - // Skip the default rules added by AWS. They can be neither - // configured or deleted by users. 
- if *e.RuleNumber == awsDefaultAclRuleNumberIpv4 || - *e.RuleNumber == awsDefaultAclRuleNumberIpv6 { - continue - } - - if *e.Egress == true { - egressEntries = append(egressEntries, e) - } else { - ingressEntries = append(ingressEntries, e) - } - } - - d.Set("vpc_id", networkAcl.VpcId) - d.Set("tags", tagsToMap(networkAcl.Tags)) - - var s []string - for _, a := range networkAcl.Associations { - s = append(s, *a.SubnetId) - } - sort.Strings(s) - if err := d.Set("subnet_ids", s); err != nil { - return err - } - - if err := d.Set("ingress", networkAclEntriesToMapList(ingressEntries)); err != nil { - return err - } - if err := d.Set("egress", networkAclEntriesToMapList(egressEntries)); err != nil { - return err - } - - return nil -} - -func resourceAwsNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - d.Partial(true) - - if d.HasChange("ingress") { - err := updateNetworkAclEntries(d, "ingress", conn) - if err != nil { - return err - } - } - - if d.HasChange("egress") { - err := updateNetworkAclEntries(d, "egress", conn) - if err != nil { - return err - } - } - - if d.HasChange("subnet_id") { - //associate new subnet with the acl. 
- _, n := d.GetChange("subnet_id") - newSubnet := n.(string) - association, err := findNetworkAclAssociation(newSubnet, conn) - if err != nil { - return fmt.Errorf("Failed to update acl %s with subnet %s: %s", d.Id(), newSubnet, err) - } - _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: association.NetworkAclAssociationId, - NetworkAclId: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - - if d.HasChange("subnet_ids") { - o, n := d.GetChange("subnet_ids") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - if len(remove) > 0 { - // A Network ACL is required for each subnet. In order to disassociate a - // subnet from this ACL, we must associate it with the default ACL. - defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) - if err != nil { - return fmt.Errorf("Failed to find Default ACL for VPC %s", d.Get("vpc_id").(string)) - } - for _, r := range remove { - association, err := findNetworkAclAssociation(r.(string), conn) - if err != nil { - return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), r, err) - } - log.Printf("DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *association.NetworkAclAssociationId, *defaultAcl.NetworkAclId) - _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: association.NetworkAclAssociationId, - NetworkAclId: defaultAcl.NetworkAclId, - }) - if err != nil { - return err - } - } - } - - if len(add) > 0 { - for _, a := range add { - association, err := findNetworkAclAssociation(a.(string), conn) - if err != nil { - return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), a, err) - } - _, err = 
conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: association.NetworkAclAssociationId, - NetworkAclId: aws.String(d.Id()), - }) - if err != nil { - return err - } - } - } - - } - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - return resourceAwsNetworkAclRead(d, meta) -} - -func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error { - if d.HasChange(entryType) { - o, n := d.GetChange(entryType) - - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType) - if err != nil { - return err - } - for _, remove := range toBeDeleted { - // AWS includes default rules with all network ACLs that can be - // neither modified nor destroyed. They have a custom rule - // number that is out of bounds for any other rule. If we - // encounter it, just continue. There's no work to be done. - if *remove.RuleNumber == awsDefaultAclRuleNumberIpv4 || - *remove.RuleNumber == awsDefaultAclRuleNumberIpv6 { - continue - } - - // Delete old Acl - log.Printf("[DEBUG] Destroying Network ACL Entry number (%d)", int(*remove.RuleNumber)) - _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ - NetworkAclId: aws.String(d.Id()), - RuleNumber: remove.RuleNumber, - Egress: remove.Egress, - }) - if err != nil { - return fmt.Errorf("Error deleting %s entry: %s", entryType, err) - } - } - - toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType) - if err != nil { - return err - } - for _, add := range toBeCreated { - // Protocol -1 rules don't store ports in AWS. Thus, they'll always - // hash differently when being read out of the API. Force the user - // to set from_port and to_port to 0 for these rules, to keep the - // hashing consistent. 
- if *add.Protocol == "-1" { - to := *add.PortRange.To - from := *add.PortRange.From - expected := &expectedPortPair{ - to_port: 0, - from_port: 0, - } - if ok := validatePorts(to, from, *expected); !ok { - return fmt.Errorf( - "to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!", - to, from) - } - } - - if add.CidrBlock != nil && *add.CidrBlock != "" { - // AWS mutates the CIDR block into a network implied by the IP and - // mask provided. This results in hashing inconsistencies between - // the local config file and the state returned by the API. Error - // if the user provides a CIDR block with an inappropriate mask - if err := validateCIDRBlock(*add.CidrBlock); err != nil { - return err - } - } - - createOpts := &ec2.CreateNetworkAclEntryInput{ - NetworkAclId: aws.String(d.Id()), - Egress: add.Egress, - PortRange: add.PortRange, - Protocol: add.Protocol, - RuleAction: add.RuleAction, - RuleNumber: add.RuleNumber, - IcmpTypeCode: add.IcmpTypeCode, - } - - if add.CidrBlock != nil && *add.CidrBlock != "" { - createOpts.CidrBlock = add.CidrBlock - } - - if add.Ipv6CidrBlock != nil && *add.Ipv6CidrBlock != "" { - createOpts.Ipv6CidrBlock = add.Ipv6CidrBlock - } - - // Add new Acl entry - _, connErr := conn.CreateNetworkAclEntry(createOpts) - if connErr != nil { - return fmt.Errorf("Error creating %s entry: %s", entryType, connErr) - } - } - } - return nil -} - -func resourceAwsNetworkAclDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Deleting Network Acl: %s", d.Id()) - retryErr := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteNetworkAcl(&ec2.DeleteNetworkAclInput{ - NetworkAclId: aws.String(d.Id()), - }) - if err != nil { - ec2err := err.(awserr.Error) - switch ec2err.Code() { - case "InvalidNetworkAclID.NotFound": - return nil - case "DependencyViolation": - // In case of dependency violation, we remove the association between 
subnet and network acl. - // This means the subnet is attached to default acl of vpc. - var associations []*ec2.NetworkAclAssociation - if v, ok := d.GetOk("subnet_id"); ok { - - a, err := findNetworkAclAssociation(v.(string), conn) - if err != nil { - return resource.NonRetryableError(err) - } - associations = append(associations, a) - } else if v, ok := d.GetOk("subnet_ids"); ok { - ids := v.(*schema.Set).List() - for _, i := range ids { - a, err := findNetworkAclAssociation(i.(string), conn) - if err != nil { - return resource.NonRetryableError(err) - } - associations = append(associations, a) - } - } - - log.Printf("[DEBUG] Replacing network associations for Network ACL (%s): %s", d.Id(), associations) - defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) - if err != nil { - return resource.NonRetryableError(err) - } - - for _, a := range associations { - log.Printf("DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *a.NetworkAclAssociationId, *defaultAcl.NetworkAclId) - _, replaceErr := conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ - AssociationId: a.NetworkAclAssociationId, - NetworkAclId: defaultAcl.NetworkAclId, - }) - if replaceErr != nil { - if replaceEc2err, ok := replaceErr.(awserr.Error); ok { - // It's possible that during an attempt to replace this - // association, the Subnet in question has already been moved to - // another ACL. This can happen if you're destroying a network acl - // and simultaneously re-associating it's subnet(s) with another - // ACL; Terraform may have already re-associated the subnet(s) by - // the time we attempt to destroy them, even between the time we - // list them and then try to destroy them. In this case, the - // association we're trying to replace will no longer exist and - // this call will fail. Here we trap that error and fail - // gracefully; the association we tried to replace gone, we trust - // someone else has taken ownership. 
- if replaceEc2err.Code() == "InvalidAssociationID.NotFound" { - log.Printf("[WARN] Network Association (%s) no longer found; Network Association likely updated or removed externally, removing from state", *a.NetworkAclAssociationId) - continue - } - } - log.Printf("[ERR] Non retry-able error in replacing associations for Network ACL (%s): %s", d.Id(), replaceErr) - return resource.NonRetryableError(replaceErr) - } - } - return resource.RetryableError(fmt.Errorf("Dependencies found and cleaned up, retrying")) - default: - // Any other error, we want to quit the retry loop immediately - return resource.NonRetryableError(err) - } - } - log.Printf("[Info] Deleted network ACL %s successfully", d.Id()) - return nil - }) - - if retryErr != nil { - return fmt.Errorf("[ERR] Error destroying Network ACL (%s): %s", d.Id(), retryErr) - } - return nil -} - -func resourceAwsNetworkAclEntryHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["rule_no"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["action"].(string))) - - // The AWS network ACL API only speaks protocol numbers, and that's - // all we store. Never hash a protocol name. - protocol := m["protocol"].(string) - if _, err := strconv.Atoi(m["protocol"].(string)); err != nil { - // We're a protocol name. Look up the number. - buf.WriteString(fmt.Sprintf("%d-", protocolIntegers()[protocol])) - } else { - // We're a protocol number. Pass the value through. 
- buf.WriteString(fmt.Sprintf("%s-", protocol)) - } - - if v, ok := m["cidr_block"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["ipv6_cidr_block"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["ssl_certificate_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["icmp_type"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["icmp_code"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - - return hashcode.String(buf.String()) -} - -func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) { - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("default"), - Values: []*string{aws.String("true")}, - }, - { - Name: aws.String("vpc-id"), - Values: []*string{aws.String(vpc_id)}, - }, - }, - }) - - if err != nil { - return nil, err - } - return resp.NetworkAcls[0], nil -} - -func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) { - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("association.subnet-id"), - Values: []*string{aws.String(subnetId)}, - }, - }, - }) - - if err != nil { - return nil, err - } - if resp.NetworkAcls != nil && len(resp.NetworkAcls) > 0 { - for _, association := range resp.NetworkAcls[0].Associations { - if *association.SubnetId == subnetId { - return association, nil - } - } - } - return nil, fmt.Errorf("could not find association for subnet: %s ", subnetId) -} - -// networkAclEntriesToMapList turns ingress/egress rules read from AWS into a list -// of maps. 
-func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(networkAcls)) - for _, entry := range networkAcls { - acl := make(map[string]interface{}) - acl["rule_no"] = *entry.RuleNumber - acl["action"] = *entry.RuleAction - if entry.CidrBlock != nil { - acl["cidr_block"] = *entry.CidrBlock - } - if entry.Ipv6CidrBlock != nil { - acl["ipv6_cidr_block"] = *entry.Ipv6CidrBlock - } - // The AWS network ACL API only speaks protocol numbers, and - // that's all we record. - if _, err := strconv.Atoi(*entry.Protocol); err != nil { - // We're a protocol name. Look up the number. - acl["protocol"] = protocolIntegers()[*entry.Protocol] - } else { - // We're a protocol number. Pass through. - acl["protocol"] = *entry.Protocol - } - - acl["protocol"] = *entry.Protocol - if entry.PortRange != nil { - acl["from_port"] = *entry.PortRange.From - acl["to_port"] = *entry.PortRange.To - } - - if entry.IcmpTypeCode != nil { - acl["icmp_type"] = *entry.IcmpTypeCode.Type - acl["icmp_code"] = *entry.IcmpTypeCode.Code - } - - result = append(result, acl) - } - - return result -} diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go deleted file mode 100644 index d3aa099fc..000000000 --- a/builtin/providers/aws/resource_aws_network_acl_rule.go +++ /dev/null @@ -1,308 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsNetworkAclRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsNetworkAclRuleCreate, - Read: resourceAwsNetworkAclRuleRead, - Delete: resourceAwsNetworkAclRuleDelete, - - Schema: map[string]*schema.Schema{ - "network_acl_id": 
{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "rule_number": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "egress": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - "protocol": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old == "all" && new == "-1" || old == "-1" && new == "all" { - return true - } - return false - }, - }, - "rule_action": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cidr_block": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "from_port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "to_port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "icmp_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateICMPArgumentValue, - }, - "icmp_code": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateICMPArgumentValue, - }, - }, - } -} - -func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - protocol := d.Get("protocol").(string) - p, protocolErr := strconv.Atoi(protocol) - if protocolErr != nil { - var ok bool - p, ok = protocolIntegers()[protocol] - if !ok { - return fmt.Errorf("Invalid Protocol %s for rule %d", protocol, d.Get("rule_number").(int)) - } - } - log.Printf("[INFO] Transformed Protocol %s into %d", protocol, p) - - params := &ec2.CreateNetworkAclEntryInput{ - NetworkAclId: aws.String(d.Get("network_acl_id").(string)), - Egress: aws.Bool(d.Get("egress").(bool)), - RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))), - Protocol: aws.String(strconv.Itoa(p)), - RuleAction: aws.String(d.Get("rule_action").(string)), - 
PortRange: &ec2.PortRange{ - From: aws.Int64(int64(d.Get("from_port").(int))), - To: aws.Int64(int64(d.Get("to_port").(int))), - }, - } - - cidr, hasCidr := d.GetOk("cidr_block") - ipv6Cidr, hasIpv6Cidr := d.GetOk("ipv6_cidr_block") - - if hasCidr == false && hasIpv6Cidr == false { - return fmt.Errorf("Either `cidr_block` or `ipv6_cidr_block` must be defined") - } - - if hasCidr { - params.CidrBlock = aws.String(cidr.(string)) - } - - if hasIpv6Cidr { - params.Ipv6CidrBlock = aws.String(ipv6Cidr.(string)) - } - - // Specify additional required fields for ICMP. For the list - // of ICMP codes and types, see: http://www.nthelp.com/icmp.html - if p == 1 { - params.IcmpTypeCode = &ec2.IcmpTypeCode{} - if v, ok := d.GetOk("icmp_type"); ok { - icmpType, err := strconv.Atoi(v.(string)) - if err != nil { - return fmt.Errorf("Unable to parse ICMP type %s for rule %d", v, d.Get("rule_number").(int)) - } - params.IcmpTypeCode.Type = aws.Int64(int64(icmpType)) - log.Printf("[DEBUG] Got ICMP type %d for rule %d", icmpType, d.Get("rule_number").(int)) - } - if v, ok := d.GetOk("icmp_code"); ok { - icmpCode, err := strconv.Atoi(v.(string)) - if err != nil { - return fmt.Errorf("Unable to parse ICMP code %s for rule %d", v, d.Get("rule_number").(int)) - } - params.IcmpTypeCode.Code = aws.Int64(int64(icmpCode)) - log.Printf("[DEBUG] Got ICMP code %d for rule %d", icmpCode, d.Get("rule_number").(int)) - } - } - - log.Printf("[INFO] Creating Network Acl Rule: %d (%t)", d.Get("rule_number").(int), d.Get("egress").(bool)) - _, err := conn.CreateNetworkAclEntry(params) - if err != nil { - return fmt.Errorf("Error Creating Network Acl Rule: %s", err.Error()) - } - d.SetId(networkAclIdRuleNumberEgressHash(d.Get("network_acl_id").(string), d.Get("rule_number").(int), d.Get("egress").(bool), d.Get("protocol").(string))) - - // It appears it might be a while until the newly created rule is visible via the - // API (see issue GH-4721). 
Retry the `findNetworkAclRule` function until it is - // visible (which in most cases is likely immediately). - err = resource.Retry(3*time.Minute, func() *resource.RetryError { - r, findErr := findNetworkAclRule(d, meta) - if findErr != nil { - return resource.RetryableError(findErr) - } - if r == nil { - err := fmt.Errorf("Network ACL rule (%s) not found", d.Id()) - return resource.RetryableError(err) - } - - return nil - }) - if err != nil { - return fmt.Errorf("Created Network ACL Rule was not visible in API within 3 minute period. Running 'terraform apply' again will resume infrastructure creation.") - } - - return resourceAwsNetworkAclRuleRead(d, meta) -} - -func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) error { - resp, err := findNetworkAclRule(d, meta) - if err != nil { - return err - } - if resp == nil { - log.Printf("[DEBUG] Network ACL rule (%s) not found", d.Id()) - d.SetId("") - return nil - } - - d.Set("rule_number", resp.RuleNumber) - d.Set("cidr_block", resp.CidrBlock) - d.Set("ipv6_cidr_block", resp.Ipv6CidrBlock) - d.Set("egress", resp.Egress) - if resp.IcmpTypeCode != nil { - d.Set("icmp_code", resp.IcmpTypeCode.Code) - d.Set("icmp_type", resp.IcmpTypeCode.Type) - } - if resp.PortRange != nil { - d.Set("from_port", resp.PortRange.From) - d.Set("to_port", resp.PortRange.To) - } - - d.Set("rule_action", resp.RuleAction) - - p, protocolErr := strconv.Atoi(*resp.Protocol) - log.Printf("[INFO] Converting the protocol %v", p) - if protocolErr == nil { - var ok bool - protocol, ok := protocolStrings(protocolIntegers())[p] - if !ok { - return fmt.Errorf("Invalid Protocol %s for rule %d", *resp.Protocol, d.Get("rule_number").(int)) - } - log.Printf("[INFO] Transformed Protocol %s back into %s", *resp.Protocol, protocol) - d.Set("protocol", protocol) - } - - return nil -} - -func resourceAwsNetworkAclRuleDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - params := 
&ec2.DeleteNetworkAclEntryInput{ - NetworkAclId: aws.String(d.Get("network_acl_id").(string)), - RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))), - Egress: aws.Bool(d.Get("egress").(bool)), - } - - log.Printf("[INFO] Deleting Network Acl Rule: %s", d.Id()) - _, err := conn.DeleteNetworkAclEntry(params) - if err != nil { - return fmt.Errorf("Error Deleting Network Acl Rule: %s", err.Error()) - } - - return nil -} - -func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkAclEntry, error) { - conn := meta.(*AWSClient).ec2conn - - filters := make([]*ec2.Filter, 0, 2) - ruleNumberFilter := &ec2.Filter{ - Name: aws.String("entry.rule-number"), - Values: []*string{aws.String(fmt.Sprintf("%d", d.Get("rule_number").(int)))}, - } - filters = append(filters, ruleNumberFilter) - egressFilter := &ec2.Filter{ - Name: aws.String("entry.egress"), - Values: []*string{aws.String(fmt.Sprintf("%v", d.Get("egress").(bool)))}, - } - filters = append(filters, egressFilter) - params := &ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(d.Get("network_acl_id").(string))}, - Filters: filters, - } - - log.Printf("[INFO] Describing Network Acl: %s", d.Get("network_acl_id").(string)) - log.Printf("[INFO] Describing Network Acl with the Filters %#v", params) - resp, err := conn.DescribeNetworkAcls(params) - if err != nil { - return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", d.Get("rule_number").(int), err.Error()) - } - - if resp == nil || len(resp.NetworkAcls) == 0 || resp.NetworkAcls[0] == nil { - // Missing NACL rule. 
- return nil, nil - } - if len(resp.NetworkAcls) > 1 { - return nil, fmt.Errorf( - "Expected to find one Network ACL, got: %#v", - resp.NetworkAcls) - } - networkAcl := resp.NetworkAcls[0] - if networkAcl.Entries != nil { - for _, i := range networkAcl.Entries { - if *i.RuleNumber == int64(d.Get("rule_number").(int)) && *i.Egress == d.Get("egress").(bool) { - return i, nil - } - } - } - return nil, fmt.Errorf( - "Expected the Network ACL to have Entries, got: %#v", - networkAcl) - -} - -func networkAclIdRuleNumberEgressHash(networkAclId string, ruleNumber int, egress bool, protocol string) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", networkAclId)) - buf.WriteString(fmt.Sprintf("%d-", ruleNumber)) - buf.WriteString(fmt.Sprintf("%t-", egress)) - buf.WriteString(fmt.Sprintf("%s-", protocol)) - return fmt.Sprintf("nacl-%d", hashcode.String(buf.String())) -} - -func validateICMPArgumentValue(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := strconv.Atoi(value) - if len(value) == 0 || err != nil { - errors = append(errors, fmt.Errorf("%q must be an integer value: %q", k, value)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go deleted file mode 100644 index 357d37cbb..000000000 --- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go +++ /dev/null @@ -1,398 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "strconv" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSNetworkAclRule_basic(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclRuleBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.qux", &networkAcl), - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.wibble", &networkAcl), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAclRule_missingParam(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclRuleMissingParam, - ExpectError: regexp.MustCompile("Either `cidr_block` or `ipv6_cidr_block` must be defined"), - }, - }, - }) -} - -func TestAccAWSNetworkAclRule_ipv6(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclRuleIpv6Config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAclRule_allProtocol(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclRuleAllProtocolConfig, - ExpectNonEmptyPlan: false, - }, - { - Config: testAccAWSNetworkAclRuleAllProtocolConfigNoRealUpdate, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func TestResourceAWSNetworkAclRule_validateICMPArgumentValue(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: "", - ErrCount: 1, - }, - { - Value: "not-a-number", - 
ErrCount: 1, - }, - { - Value: "1.0", - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateICMPArgumentValue(tc.Value, "icmp_type") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: "0", - ErrCount: 0, - }, - { - Value: "-1", - ErrCount: 0, - }, - { - Value: "1", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateICMPArgumentValue(tc.Value, "icmp_type") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } - -} - -func TestAccAWSNetworkAclRule_deleteRule(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclRuleBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), - testAccCheckAWSNetworkAclRuleDelete("aws_network_acl_rule.baz"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error { - - for _, rs := range s.RootModule().Resources { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - if rs.Type != "aws_network_acl_rule" { - continue - } - - req := &ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeNetworkAcls(req) - if err == nil { - if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID { - networkAcl := resp.NetworkAcls[0] - if networkAcl.Entries != nil { - return fmt.Errorf("Network ACL Entries still exist") - } - } - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidNetworkAclID.NotFound" { - return err - } - } - - return nil -} 
- -func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Network ACL Rule Id is set") - } - - req := &ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(rs.Primary.Attributes["network_acl_id"])}, - } - resp, err := conn.DescribeNetworkAcls(req) - if err != nil { - return err - } - if len(resp.NetworkAcls) != 1 { - return fmt.Errorf("Network ACL not found") - } - egress, err := strconv.ParseBool(rs.Primary.Attributes["egress"]) - if err != nil { - return err - } - ruleNo, err := strconv.ParseInt(rs.Primary.Attributes["rule_number"], 10, 64) - if err != nil { - return err - } - for _, e := range resp.NetworkAcls[0].Entries { - if *e.RuleNumber == ruleNo && *e.Egress == egress { - return nil - } - } - return fmt.Errorf("Entry not found: %s", resp.NetworkAcls[0]) - } -} - -func testAccCheckAWSNetworkAclRuleDelete(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Network ACL Rule Id is set") - } - - egress, err := strconv.ParseBool(rs.Primary.Attributes["egress"]) - if err != nil { - return err - } - ruleNo, err := strconv.ParseInt(rs.Primary.Attributes["rule_number"], 10, 64) - if err != nil { - return err - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err = conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ - NetworkAclId: aws.String(rs.Primary.Attributes["network_acl_id"]), - RuleNumber: aws.Int64(ruleNo), - Egress: aws.Bool(egress), - }) - if err != nil { - return fmt.Errorf("Error deleting Network ACL Rule (%s) in testAccCheckAWSNetworkAclRuleDelete: %s", 
rs.Primary.ID, err) - } - - return nil - } -} - -const testAccAWSNetworkAclRuleBasicConfig = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "testAccAWSNetworkAclRuleBasicConfig" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -resource "aws_network_acl_rule" "baz" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 200 - egress = false - protocol = "tcp" - rule_action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 22 - to_port = 22 -} -resource "aws_network_acl_rule" "qux" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 300 - protocol = "icmp" - rule_action = "allow" - cidr_block = "0.0.0.0/0" - icmp_type = 0 - icmp_code = -1 -} -resource "aws_network_acl_rule" "wibble" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 400 - protocol = "icmp" - rule_action = "allow" - cidr_block = "0.0.0.0/0" - icmp_type = -1 - icmp_code = -1 -} -` - -const testAccAWSNetworkAclRuleMissingParam = ` -provider "aws" { - region = "us-east-1" -} -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "testAccAWSNetworkAclRuleMissingParam" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -resource "aws_network_acl_rule" "baz" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 200 - egress = false - protocol = "tcp" - rule_action = "allow" - from_port = 22 - to_port = 22 -} -` - -const testAccAWSNetworkAclRuleAllProtocolConfigNoRealUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "testAccAWSNetworkAclRuleAllProtocolConfigNoRealUpdate" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -resource "aws_network_acl_rule" "baz" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 150 - egress = false - protocol = "all" - rule_action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 22 - to_port = 22 -} -` - 
-const testAccAWSNetworkAclRuleAllProtocolConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "testAccAWSNetworkAclRuleAllProtocolConfig" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -resource "aws_network_acl_rule" "baz" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 150 - egress = false - protocol = "-1" - rule_action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 22 - to_port = 22 -} -` - -const testAccAWSNetworkAclRuleIpv6Config = ` -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "testAccAWSNetworkAclRuleIpv6Config" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -resource "aws_network_acl_rule" "baz" { - network_acl_id = "${aws_network_acl.bar.id}" - rule_number = 150 - egress = false - protocol = "tcp" - rule_action = "allow" - ipv6_cidr_block = "::/0" - from_port = 22 - to_port = 22 -} - -` diff --git a/builtin/providers/aws/resource_aws_network_acl_test.go b/builtin/providers/aws/resource_aws_network_acl_test.go deleted file mode 100644 index 253ca9fb7..000000000 --- a/builtin/providers/aws/resource_aws_network_acl_test.go +++ /dev/null @@ -1,793 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSNetworkAcl_EgressAndIngressRules(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclEgressNIngressConfig, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl), - 
resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.protocol", "6"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.rule_no", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.from_port", "80"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.to_port", "80"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.action", "allow"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "ingress.1871939009.cidr_block", "10.3.0.0/18"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.protocol", "6"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.rule_no", "2"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.from_port", "443"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.to_port", "443"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.cidr_block", "10.3.0.0/18"), - resource.TestCheckResourceAttr( - "aws_network_acl.bar", "egress.3111164687.action", "allow"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_OnlyIngressRules_basic(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.foos", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclIngressConfig, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.protocol", "6"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.rule_no", "2"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.from_port", "443"), - 
resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.to_port", "443"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.action", "deny"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_OnlyIngressRules_update(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.foos", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclIngressConfig, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl), - testIngressRuleLength(&networkAcl, 2), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.protocol", "6"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.rule_no", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.from_port", "0"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.to_port", "22"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.action", "deny"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.from_port", "443"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.4245812720.rule_no", "2"), - ), - }, - { - Config: testAccAWSNetworkAclIngressConfigChange, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl), - testIngressRuleLength(&networkAcl, 1), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.protocol", "6"), - 
resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.rule_no", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.from_port", "0"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.to_port", "22"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.action", "deny"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.401088754.cidr_block", "10.2.0.0/18"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_OnlyEgressRules(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.bond", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclEgressConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.bond", &networkAcl), - testAccCheckTags(&networkAcl.Tags, "foo", "bar"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_SubnetChange(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclSubnetConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"), - ), - }, - { - Config: testAccAWSNetworkAclSubnetConfigChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetIsNotAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.new"), - ), - }, - }, - }) - -} - -func TestAccAWSNetworkAcl_Subnets(t *testing.T) { - var networkAcl ec2.NetworkAcl - - checkACLSubnets := func(acl *ec2.NetworkAcl, count int) 
resource.TestCheckFunc { - return func(*terraform.State) (err error) { - if count != len(acl.Associations) { - return fmt.Errorf("ACL association count does not match, expected %d, got %d", count, len(acl.Associations)) - } - - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclSubnet_SubnetIds, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.one"), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.two"), - checkACLSubnets(&networkAcl, 2), - ), - }, - - { - Config: testAccAWSNetworkAclSubnet_SubnetIdsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.one"), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.three"), - testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.four"), - checkACLSubnets(&networkAcl, 3), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_ipv6Rules(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.foos", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclIpv6Config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.#", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.protocol", "6"), - 
resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.rule_no", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.from_port", "0"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.to_port", "22"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.action", "allow"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1976110835.ipv6_cidr_block", "::/0"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_ipv6VpcRules(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.foos", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclIpv6VpcConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.#", "1"), - resource.TestCheckResourceAttr( - "aws_network_acl.foos", "ingress.1296304962.ipv6_cidr_block", "2600:1f16:d1e:9a00::/56"), - ), - }, - }, - }) -} - -func TestAccAWSNetworkAcl_espProtocol(t *testing.T) { - var networkAcl ec2.NetworkAcl - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_acl.testesp", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSNetworkAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkAclEsp, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclExists("aws_network_acl.testesp", &networkAcl), - ), - }, - }, - }) -} - -func testAccCheckAWSNetworkAclDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_network" { - continue - } - - // Retrieve the network acl - resp, err 
:= conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID { - return fmt.Errorf("Network Acl (%s) still exists.", rs.Primary.ID) - } - - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - // Confirm error code is what we want - if ec2err.Code() != "InvalidNetworkAclID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSNetworkAclExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - - if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID { - *networkAcl = *resp.NetworkAcls[0] - return nil - } - - return fmt.Errorf("Network Acls not found") - } -} - -func testIngressRuleLength(networkAcl *ec2.NetworkAcl, length int) resource.TestCheckFunc { - return func(s *terraform.State) error { - var ingressEntries []*ec2.NetworkAclEntry - for _, e := range networkAcl.Entries { - if *e.Egress == false { - ingressEntries = append(ingressEntries, e) - } - } - // There is always a default rule (ALL Traffic ... 
DENY) - // so we have to increase the length by 1 - if len(ingressEntries) != length+1 { - return fmt.Errorf("Invalid number of ingress entries found; count = %d", len(ingressEntries)) - } - return nil - } -} - -func testAccCheckSubnetIsAssociatedWithAcl(acl string, sub string) resource.TestCheckFunc { - return func(s *terraform.State) error { - networkAcl := s.RootModule().Resources[acl] - subnet := s.RootModule().Resources[sub] - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)}, - Filters: []*ec2.Filter{ - { - Name: aws.String("association.subnet-id"), - Values: []*string{aws.String(subnet.Primary.ID)}, - }, - }, - }) - if err != nil { - return err - } - if len(resp.NetworkAcls) > 0 { - return nil - } - - return fmt.Errorf("Network Acl %s is not associated with subnet %s", acl, sub) - } -} - -func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resource.TestCheckFunc { - return func(s *terraform.State) error { - networkAcl := s.RootModule().Resources[acl] - subnet := s.RootModule().Resources[subnet] - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ - NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)}, - Filters: []*ec2.Filter{ - { - Name: aws.String("association.subnet-id"), - Values: []*string{aws.String(subnet.Primary.ID)}, - }, - }, - }) - - if err != nil { - return err - } - if len(resp.NetworkAcls) > 0 { - return fmt.Errorf("Network Acl %s is still associated with subnet %s", acl, subnet) - } - return nil - } -} - -const testAccAWSNetworkAclIpv6Config = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_ipv6Rules" - } -} -resource "aws_subnet" "blob" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource 
"aws_network_acl" "foos" { - vpc_id = "${aws_vpc.foo.id}" - ingress = { - protocol = "tcp" - rule_no = 1 - action = "allow" - ipv6_cidr_block = "::/0" - from_port = 0 - to_port = 22 - } - - subnet_ids = ["${aws_subnet.blob.id}"] -} -` - -const testAccAWSNetworkAclIpv6VpcConfig = ` -provider "aws" { - region = "us-east-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - - tags { - Name = "TestAccAWSNetworkAcl_ipv6VpcRules" - } -} - -resource "aws_network_acl" "foos" { - vpc_id = "${aws_vpc.foo.id}" - ingress = { - protocol = "tcp" - rule_no = 1 - action = "allow" - ipv6_cidr_block = "2600:1f16:d1e:9a00::/56" - from_port = 0 - to_port = 22 - } -} -` - -const testAccAWSNetworkAclIngressConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_OnlyIngressRules" - } -} -resource "aws_subnet" "blob" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "foos" { - vpc_id = "${aws_vpc.foo.id}" - ingress = { - protocol = "tcp" - rule_no = 1 - action = "deny" - cidr_block = "10.2.0.0/18" - from_port = 0 - to_port = 22 - } - ingress = { - protocol = "tcp" - rule_no = 2 - action = "deny" - cidr_block = "10.2.0.0/18" - from_port = 443 - to_port = 443 - } - - subnet_ids = ["${aws_subnet.blob.id}"] -} -` -const testAccAWSNetworkAclIngressConfigChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_OnlyIngressRules" - } -} -resource "aws_subnet" "blob" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "foos" { - vpc_id = "${aws_vpc.foo.id}" - ingress = { - protocol = "tcp" - rule_no = 1 - action = "deny" - cidr_block = "10.2.0.0/18" - from_port = 0 - to_port = 22 - } - subnet_ids = ["${aws_subnet.blob.id}"] -} -` - -const testAccAWSNetworkAclEgressConfig = ` -resource "aws_vpc" 
"foo" { - cidr_block = "10.2.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_OnlyEgressRules" - } -} -resource "aws_subnet" "blob" { - cidr_block = "10.2.0.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "bond" { - vpc_id = "${aws_vpc.foo.id}" - egress = { - protocol = "tcp" - rule_no = 2 - action = "allow" - cidr_block = "10.2.0.0/18" - from_port = 443 - to_port = 443 - } - - egress = { - protocol = "-1" - rule_no = 4 - action = "allow" - cidr_block = "0.0.0.0/0" - from_port = 0 - to_port = 0 - } - - egress = { - protocol = "tcp" - rule_no = 1 - action = "allow" - cidr_block = "10.2.0.0/18" - from_port = 80 - to_port = 80 - } - - egress = { - protocol = "tcp" - rule_no = 3 - action = "allow" - cidr_block = "10.2.0.0/18" - from_port = 22 - to_port = 22 - } - - tags { - foo = "bar" - } -} -` - -const testAccAWSNetworkAclEgressNIngressConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.3.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_EgressAndIngressRules" - } -} -resource "aws_subnet" "blob" { - cidr_block = "10.3.0.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - egress = { - protocol = "tcp" - rule_no = 2 - action = "allow" - cidr_block = "10.3.0.0/18" - from_port = 443 - to_port = 443 - } - - ingress = { - protocol = "tcp" - rule_no = 1 - action = "allow" - cidr_block = "10.3.0.0/18" - from_port = 80 - to_port = 80 - } -} -` -const testAccAWSNetworkAclSubnetConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_SubnetChange" - } -} -resource "aws_subnet" "old" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_subnet" "new" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "roll" { - vpc_id = "${aws_vpc.foo.id}" - 
subnet_ids = ["${aws_subnet.new.id}"] -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - subnet_ids = ["${aws_subnet.old.id}"] -} -` - -const testAccAWSNetworkAclSubnetConfigChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_SubnetChange" - } -} -resource "aws_subnet" "old" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_subnet" "new" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - subnet_ids = ["${aws_subnet.new.id}"] -} -` - -const testAccAWSNetworkAclSubnet_SubnetIds = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_Subnets" - } -} -resource "aws_subnet" "one" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "acl-subnets-test" - } -} -resource "aws_subnet" "two" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "acl-subnets-test" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - subnet_ids = ["${aws_subnet.one.id}", "${aws_subnet.two.id}"] - tags { - Name = "acl-subnets-test" - } -} -` - -const testAccAWSNetworkAclSubnet_SubnetIdsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "TestAccAWSNetworkAcl_Subnets" - } -} -resource "aws_subnet" "one" { - cidr_block = "10.1.111.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "acl-subnets-test" - } -} -resource "aws_subnet" "two" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "acl-subnets-test" - } -} - -resource "aws_subnet" "three" { - cidr_block = "10.1.222.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "acl-subnets-test" - } -} -resource "aws_subnet" "four" { - cidr_block = "10.1.4.0/24" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = 
"acl-subnets-test" - } -} -resource "aws_network_acl" "bar" { - vpc_id = "${aws_vpc.foo.id}" - subnet_ids = [ - "${aws_subnet.one.id}", - "${aws_subnet.three.id}", - "${aws_subnet.four.id}", - ] - tags { - Name = "acl-subnets-test" - } -} -` - -const testAccAWSNetworkAclEsp = ` -resource "aws_vpc" "testespvpc" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccAWSNetworkAclEsp" - } -} - -resource "aws_network_acl" "testesp" { - vpc_id = "${aws_vpc.testespvpc.id}" - - egress { - protocol = "esp" - rule_no = 5 - action = "allow" - cidr_block = "10.3.0.0/18" - from_port = 0 - to_port = 0 - } - - tags { - Name = "test_esp" - } -} -` diff --git a/builtin/providers/aws/resource_aws_network_interface.go b/builtin/providers/aws/resource_aws_network_interface.go deleted file mode 100644 index 857237141..000000000 --- a/builtin/providers/aws/resource_aws_network_interface.go +++ /dev/null @@ -1,429 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsNetworkInterface() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsNetworkInterfaceCreate, - Read: resourceAwsNetworkInterfaceRead, - Update: resourceAwsNetworkInterfaceUpdate, - Delete: resourceAwsNetworkInterfaceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "private_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ips": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, 
- Set: schema.HashString, - }, - - "private_ips_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "source_dest_check": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "attachment": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "device_index": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "attachment_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceAwsEniAttachmentHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AWSClient).ec2conn - - request := &ec2.CreateNetworkInterfaceInput{ - SubnetId: aws.String(d.Get("subnet_id").(string)), - } - - security_groups := d.Get("security_groups").(*schema.Set).List() - if len(security_groups) != 0 { - request.Groups = expandStringList(security_groups) - } - - private_ips := d.Get("private_ips").(*schema.Set).List() - if len(private_ips) != 0 { - request.PrivateIpAddresses = expandPrivateIPAddresses(private_ips) - } - - if v, ok := d.GetOk("description"); ok { - request.Description = aws.String(v.(string)) - } - - if v, ok := d.GetOk("private_ips_count"); ok { - request.SecondaryPrivateIpAddressCount = aws.Int64(int64(v.(int))) - } - - log.Printf("[DEBUG] Creating network interface") - resp, err := conn.CreateNetworkInterface(request) - if err != nil { - return fmt.Errorf("Error creating ENI: %s", err) - } - - 
d.SetId(*resp.NetworkInterface.NetworkInterfaceId) - log.Printf("[INFO] ENI ID: %s", d.Id()) - return resourceAwsNetworkInterfaceUpdate(d, meta) -} - -func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) error { - - conn := meta.(*AWSClient).ec2conn - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(d.Id())}, - } - describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidNetworkInterfaceID.NotFound" { - // The ENI is gone now, so just remove it from the state - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving ENI: %s", err) - } - if len(describeResp.NetworkInterfaces) != 1 { - return fmt.Errorf("Unable to find ENI: %#v", describeResp.NetworkInterfaces) - } - - eni := describeResp.NetworkInterfaces[0] - d.Set("subnet_id", eni.SubnetId) - d.Set("private_ip", eni.PrivateIpAddress) - d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddresses(eni.PrivateIpAddresses)) - d.Set("security_groups", flattenGroupIdentifiers(eni.Groups)) - d.Set("source_dest_check", eni.SourceDestCheck) - - if eni.Description != nil { - d.Set("description", eni.Description) - } - - // Tags - d.Set("tags", tagsToMap(eni.TagSet)) - - if eni.Attachment != nil { - attachment := []map[string]interface{}{flattenAttachment(eni.Attachment)} - d.Set("attachment", attachment) - } else { - d.Set("attachment", nil) - } - - return nil -} - -func networkInterfaceAttachmentRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(id)}, - } - describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - - if err != nil { - log.Printf("[ERROR] Could not find network 
interface %s. %s", id, err) - return nil, "", err - } - - eni := describeResp.NetworkInterfaces[0] - hasAttachment := strconv.FormatBool(eni.Attachment != nil) - log.Printf("[DEBUG] ENI %s has attachment state %s", id, hasAttachment) - return eni, hasAttachment, nil - } -} - -func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId string) error { - // if there was an old attachment, remove it - if oa != nil && len(oa.List()) > 0 { - old_attachment := oa.List()[0].(map[string]interface{}) - detach_request := &ec2.DetachNetworkInterfaceInput{ - AttachmentId: aws.String(old_attachment["attachment_id"].(string)), - Force: aws.Bool(true), - } - conn := meta.(*AWSClient).ec2conn - _, detach_err := conn.DetachNetworkInterface(detach_request) - if detach_err != nil { - if awsErr, _ := detach_err.(awserr.Error); awsErr.Code() != "InvalidAttachmentID.NotFound" { - return fmt.Errorf("Error detaching ENI: %s", detach_err) - } - } - - log.Printf("[DEBUG] Waiting for ENI (%s) to become dettached", eniId) - stateConf := &resource.StateChangeConf{ - Pending: []string{"true"}, - Target: []string{"false"}, - Refresh: networkInterfaceAttachmentRefreshFunc(conn, eniId), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for ENI (%s) to become dettached: %s", eniId, err) - } - } - - return nil -} - -func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - d.Partial(true) - - if d.HasChange("attachment") { - oa, na := d.GetChange("attachment") - - detach_err := resourceAwsNetworkInterfaceDetach(oa.(*schema.Set), meta, d.Id()) - if detach_err != nil { - return detach_err - } - - // if there is a new attachment, attach it - if na != nil && len(na.(*schema.Set).List()) > 0 { - new_attachment := na.(*schema.Set).List()[0].(map[string]interface{}) - di := new_attachment["device_index"].(int) - attach_request := 
&ec2.AttachNetworkInterfaceInput{ - DeviceIndex: aws.Int64(int64(di)), - InstanceId: aws.String(new_attachment["instance"].(string)), - NetworkInterfaceId: aws.String(d.Id()), - } - _, attach_err := conn.AttachNetworkInterface(attach_request) - if attach_err != nil { - return fmt.Errorf("Error attaching ENI: %s", attach_err) - } - } - - d.SetPartial("attachment") - } - - if d.HasChange("private_ips") { - o, n := d.GetChange("private_ips") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - // Unassign old IP addresses - unassignIps := os.Difference(ns) - if unassignIps.Len() != 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ - NetworkInterfaceId: aws.String(d.Id()), - PrivateIpAddresses: expandStringList(unassignIps.List()), - } - _, err := conn.UnassignPrivateIpAddresses(input) - if err != nil { - return fmt.Errorf("Failure to unassign Private IPs: %s", err) - } - } - - // Assign new IP addresses - assignIps := ns.Difference(os) - if assignIps.Len() != 0 { - input := &ec2.AssignPrivateIpAddressesInput{ - NetworkInterfaceId: aws.String(d.Id()), - PrivateIpAddresses: expandStringList(assignIps.List()), - } - _, err := conn.AssignPrivateIpAddresses(input) - if err != nil { - return fmt.Errorf("Failure to assign Private IPs: %s", err) - } - } - - d.SetPartial("private_ips") - } - - request := &ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: aws.String(d.Id()), - SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))}, - } - - _, err := conn.ModifyNetworkInterfaceAttribute(request) - if err != nil { - return fmt.Errorf("Failure updating ENI: %s", err) - } - - d.SetPartial("source_dest_check") - - if d.HasChange("private_ips_count") { - o, n := d.GetChange("private_ips_count") - private_ips := d.Get("private_ips").(*schema.Set).List() - private_ips_filtered := private_ips[:0] - primary_ip := d.Get("private_ip") - - for _, ip 
:= range private_ips { - if ip != primary_ip { - private_ips_filtered = append(private_ips_filtered, ip) - } - } - - if o != nil && o != 0 && n != nil && n != len(private_ips_filtered) { - - diff := n.(int) - o.(int) - - // Surplus of IPs, add the diff - if diff > 0 { - input := &ec2.AssignPrivateIpAddressesInput{ - NetworkInterfaceId: aws.String(d.Id()), - SecondaryPrivateIpAddressCount: aws.Int64(int64(diff)), - } - _, err := conn.AssignPrivateIpAddresses(input) - if err != nil { - return fmt.Errorf("Failure to assign Private IPs: %s", err) - } - } - - if diff < 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ - NetworkInterfaceId: aws.String(d.Id()), - PrivateIpAddresses: expandStringList(private_ips_filtered[0:int(math.Abs(float64(diff)))]), - } - _, err := conn.UnassignPrivateIpAddresses(input) - if err != nil { - return fmt.Errorf("Failure to unassign Private IPs: %s", err) - } - } - - d.SetPartial("private_ips_count") - } - } - - if d.HasChange("security_groups") { - request := &ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: aws.String(d.Id()), - Groups: expandStringList(d.Get("security_groups").(*schema.Set).List()), - } - - _, err := conn.ModifyNetworkInterfaceAttribute(request) - if err != nil { - return fmt.Errorf("Failure updating ENI: %s", err) - } - - d.SetPartial("security_groups") - } - - if d.HasChange("description") { - request := &ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: aws.String(d.Id()), - Description: &ec2.AttributeValue{Value: aws.String(d.Get("description").(string))}, - } - - _, err := conn.ModifyNetworkInterfaceAttribute(request) - if err != nil { - return fmt.Errorf("Failure updating ENI: %s", err) - } - - d.SetPartial("description") - } - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - - return resourceAwsNetworkInterfaceRead(d, meta) -} - -func resourceAwsNetworkInterfaceDelete(d *schema.ResourceData, meta interface{}) error 
{ - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Deleting ENI: %s", d.Id()) - - detach_err := resourceAwsNetworkInterfaceDetach(d.Get("attachment").(*schema.Set), meta, d.Id()) - if detach_err != nil { - return detach_err - } - - deleteEniOpts := ec2.DeleteNetworkInterfaceInput{ - NetworkInterfaceId: aws.String(d.Id()), - } - if _, err := conn.DeleteNetworkInterface(&deleteEniOpts); err != nil { - return fmt.Errorf("Error deleting ENI: %s", err) - } - - return nil -} - -func resourceAwsEniAttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["instance"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["device_index"].(int))) - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_network_interface_attachment.go b/builtin/providers/aws/resource_aws_network_interface_attachment.go deleted file mode 100644 index c37b0d18f..000000000 --- a/builtin/providers/aws/resource_aws_network_interface_attachment.go +++ /dev/null @@ -1,166 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsNetworkInterfaceAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsNetworkInterfaceAttachmentCreate, - Read: resourceAwsNetworkInterfaceAttachmentRead, - Delete: resourceAwsNetworkInterfaceAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "device_index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_interface_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "attachment_id": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { 
- Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsNetworkInterfaceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - device_index := d.Get("device_index").(int) - instance_id := d.Get("instance_id").(string) - network_interface_id := d.Get("network_interface_id").(string) - - opts := &ec2.AttachNetworkInterfaceInput{ - DeviceIndex: aws.Int64(int64(device_index)), - InstanceId: aws.String(instance_id), - NetworkInterfaceId: aws.String(network_interface_id), - } - - log.Printf("[DEBUG] Attaching network interface (%s) to instance (%s)", network_interface_id, instance_id) - resp, err := conn.AttachNetworkInterface(opts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("Error attaching network interface (%s) to instance (%s), message: \"%s\", code: \"%s\"", - network_interface_id, instance_id, awsErr.Message(), awsErr.Code()) - } - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"false"}, - Target: []string{"true"}, - Refresh: networkInterfaceAttachmentRefreshFunc(conn, network_interface_id), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Volume (%s) to attach to Instance: %s, error: %s", network_interface_id, instance_id, err) - } - - d.SetId(*resp.AttachmentId) - return resourceAwsNetworkInterfaceAttachmentRead(d, meta) -} - -func resourceAwsNetworkInterfaceAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - interfaceId := d.Get("network_interface_id").(string) - - req := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(interfaceId)}, - } - - resp, err := conn.DescribeNetworkInterfaces(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == 
"InvalidNetworkInterfaceID.NotFound" { - // The ENI is gone now, so just remove the attachment from the state - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving ENI: %s", err) - } - if len(resp.NetworkInterfaces) != 1 { - return fmt.Errorf("Unable to find ENI (%s): %#v", interfaceId, resp.NetworkInterfaces) - } - - eni := resp.NetworkInterfaces[0] - - if eni.Attachment == nil { - // Interface is no longer attached, remove from state - d.SetId("") - return nil - } - - d.Set("attachment_id", eni.Attachment.AttachmentId) - d.Set("device_index", eni.Attachment.DeviceIndex) - d.Set("instance_id", eni.Attachment.InstanceId) - d.Set("network_interface_id", eni.NetworkInterfaceId) - d.Set("status", eni.Attachment.Status) - - return nil -} - -func resourceAwsNetworkInterfaceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - interfaceId := d.Get("network_interface_id").(string) - - detach_request := &ec2.DetachNetworkInterfaceInput{ - AttachmentId: aws.String(d.Id()), - Force: aws.Bool(true), - } - - _, detach_err := conn.DetachNetworkInterface(detach_request) - if detach_err != nil { - if awsErr, _ := detach_err.(awserr.Error); awsErr.Code() != "InvalidAttachmentID.NotFound" { - return fmt.Errorf("Error detaching ENI: %s", detach_err) - } - } - - log.Printf("[DEBUG] Waiting for ENI (%s) to become dettached", interfaceId) - stateConf := &resource.StateChangeConf{ - Pending: []string{"true"}, - Target: []string{"false"}, - Refresh: networkInterfaceAttachmentRefreshFunc(conn, interfaceId), - Timeout: 10 * time.Minute, - } - - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for ENI (%s) to become dettached: %s", interfaceId, err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_network_interface_attacment_test.go b/builtin/providers/aws/resource_aws_network_interface_attacment_test.go deleted file mode 100644 index d5abd957a..000000000 --- 
a/builtin/providers/aws/resource_aws_network_interface_attacment_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSNetworkInterfaceAttachment_basic(t *testing.T) { - var conf ec2.NetworkInterface - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSNetworkInterfaceAttachmentConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - resource.TestCheckResourceAttr( - "aws_network_interface_attachment.test", "device_index", "1"), - resource.TestCheckResourceAttrSet( - "aws_network_interface_attachment.test", "instance_id"), - resource.TestCheckResourceAttrSet( - "aws_network_interface_attachment.test", "network_interface_id"), - resource.TestCheckResourceAttrSet( - "aws_network_interface_attachment.test", "attachment_id"), - resource.TestCheckResourceAttrSet( - "aws_network_interface_attachment.test", "status"), - ), - }, - }, - }) -} - -func testAccAWSNetworkInterfaceAttachmentConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccAWSNetworkInterfaceAttachmentConfig_basic" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo-%d" - - egress { - from_port = 0 - to_port = 0 - protocol = "tcp" - cidr_blocks = ["10.0.0.0/16"] - } -} - -resource "aws_network_interface" "bar" { - subnet_id = 
"${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - description = "Managed by Terraform" - tags { - Name = "bar_interface" - } -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" - tags { - Name = "foo-%d" - } -} - -resource "aws_network_interface_attachment" "test" { - device_index = 1 - instance_id = "${aws_instance.foo.id}" - network_interface_id = "${aws_network_interface.bar.id}" -} -`, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_network_interface_test.go b/builtin/providers/aws/resource_aws_network_interface_test.go deleted file mode 100644 index dce87de3d..000000000 --- a/builtin/providers/aws/resource_aws_network_interface_test.go +++ /dev/null @@ -1,516 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSENI_basic(t *testing.T) { - var conf ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - testAccCheckAWSENIAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "private_ips.#", "1"), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "tags.Name", "bar_interface"), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "description", "Managed by Terraform"), - ), - }, - }, - }) -} - -func TestAccAWSENI_updatedDescription(t *testing.T) { - var conf 
ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "description", "Managed by Terraform"), - ), - }, - - resource.TestStep{ - Config: testAccAWSENIConfigUpdatedDescription, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "description", "Updated ENI Description"), - ), - }, - }, - }) -} - -func TestAccAWSENI_attached(t *testing.T) { - var conf ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfigWithAttachment, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - testAccCheckAWSENIAttributesWithAttachment(&conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "private_ips.#", "1"), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "tags.Name", "bar_interface"), - ), - }, - }, - }) -} - -func TestAccAWSENI_ignoreExternalAttachment(t *testing.T) { - var conf ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfigExternalAttachment, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - testAccCheckAWSENIAttributes(&conf), - testAccCheckAWSENIMakeExternalAttachment("aws_instance.foo", &conf), - ), - }, - }, - }) -} - -func TestAccAWSENI_sourceDestCheck(t *testing.T) { - var conf ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfigWithSourceDestCheck, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "source_dest_check", "false"), - ), - }, - }, - }) -} - -func TestAccAWSENI_computedIPs(t *testing.T) { - var conf ec2.NetworkInterface - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_network_interface.bar", - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSENIDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSENIConfigWithNoPrivateIPs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSENIExists("aws_network_interface.bar", &conf), - resource.TestCheckResourceAttr( - "aws_network_interface.bar", "private_ips.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckAWSENIExists(n string, res *ec2.NetworkInterface) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ENI ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(rs.Primary.ID)}, - } - describeResp, err := 
conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - - if err != nil { - return err - } - - if len(describeResp.NetworkInterfaces) != 1 || - *describeResp.NetworkInterfaces[0].NetworkInterfaceId != rs.Primary.ID { - return fmt.Errorf("ENI not found") - } - - *res = *describeResp.NetworkInterfaces[0] - - return nil - } -} - -func testAccCheckAWSENIAttributes(conf *ec2.NetworkInterface) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if conf.Attachment != nil { - return fmt.Errorf("expected attachment to be nil") - } - - if *conf.AvailabilityZone != "us-west-2a" { - return fmt.Errorf("expected availability_zone to be us-west-2a, but was %s", *conf.AvailabilityZone) - } - - if len(conf.Groups) != 1 && *conf.Groups[0].GroupName != "foo" { - return fmt.Errorf("expected security group to be foo, but was %#v", conf.Groups) - } - - if *conf.PrivateIpAddress != "172.16.10.100" { - return fmt.Errorf("expected private ip to be 172.16.10.100, but was %s", *conf.PrivateIpAddress) - } - - if *conf.SourceDestCheck != true { - return fmt.Errorf("expected source_dest_check to be true, but was %t", *conf.SourceDestCheck) - } - - if len(conf.TagSet) == 0 { - return fmt.Errorf("expected tags") - } - - return nil - } -} - -func testAccCheckAWSENIAttributesWithAttachment(conf *ec2.NetworkInterface) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if conf.Attachment == nil { - return fmt.Errorf("expected attachment to be set, but was nil") - } - - if *conf.Attachment.DeviceIndex != 1 { - return fmt.Errorf("expected attachment device index to be 1, but was %d", *conf.Attachment.DeviceIndex) - } - - if *conf.AvailabilityZone != "us-west-2a" { - return fmt.Errorf("expected availability_zone to be us-west-2a, but was %s", *conf.AvailabilityZone) - } - - if len(conf.Groups) != 1 && *conf.Groups[0].GroupName != "foo" { - return fmt.Errorf("expected security group to be foo, but was %#v", conf.Groups) - } - - if 
*conf.PrivateIpAddress != "172.16.10.100" { - return fmt.Errorf("expected private ip to be 172.16.10.100, but was %s", *conf.PrivateIpAddress) - } - - return nil - } -} - -func testAccCheckAWSENIDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_network_interface" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(rs.Primary.ID)}, - } - _, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidNetworkInterfaceID.NotFound" { - return nil - } - - return err - } - } - - return nil -} - -func testAccCheckAWSENIMakeExternalAttachment(n string, conf *ec2.NetworkInterface) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok || rs.Primary.ID == "" { - return fmt.Errorf("Not found: %s", n) - } - attach_request := &ec2.AttachNetworkInterfaceInput{ - DeviceIndex: aws.Int64(2), - InstanceId: aws.String(rs.Primary.ID), - NetworkInterfaceId: conf.NetworkInterfaceId, - } - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, attach_err := conn.AttachNetworkInterface(attach_request) - if attach_err != nil { - return fmt.Errorf("Error attaching ENI: %s", attach_err) - } - return nil - } -} - -const testAccAWSENIConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccAWSENIConfig" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" - - egress { - from_port = 0 - to_port = 0 - protocol = "tcp" - cidr_blocks = ["10.0.0.0/16"] - } -} - -resource "aws_network_interface" "bar" { - 
subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - description = "Managed by Terraform" - tags { - Name = "bar_interface" - } -} -` - -const testAccAWSENIConfigUpdatedDescription = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccAWSENIConfigUpdatedDescription" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" - - egress { - from_port = 0 - to_port = 0 - protocol = "tcp" - cidr_blocks = ["10.0.0.0/16"] - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - description = "Updated ENI Description" - tags { - Name = "bar_interface" - } -} -` - -const testAccAWSENIConfigWithSourceDestCheck = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccAWSENIConfigWithSourceDestCheck" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - source_dest_check = false - private_ips = ["172.16.10.100"] -} -` - -const testAccAWSENIConfigWithNoPrivateIPs = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "testAccAWSENIConfigWithNoPrivateIPs" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - source_dest_check = false -} -` - -const testAccAWSENIConfigWithAttachment = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-eni-test" - } 
-} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-foo-eni-test" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.11.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-bar-eni-test" - } -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" - associate_public_ip_address = false - private_ip = "172.16.11.50" - tags { - Name = "foo-tf-eni-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - attachment { - instance = "${aws_instance.foo.id}" - device_index = 1 - } - tags { - Name = "bar_interface" - } -} -` - -const testAccAWSENIConfigExternalAttachment = ` -resource "aws_vpc" "foo" { - cidr_block = "172.16.0.0/16" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "172.16.11.0/24" - availability_zone = "us-west-2a" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" -} - -resource "aws_instance" "foo" { - ami = "ami-c5eabbf5" - instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" - associate_public_ip_address = false - private_ip = "172.16.11.50" - tags { - Name = "tf-eni-test" - } -} - -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" - private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] - tags { - 
Name = "bar_interface" - } -} -` diff --git a/builtin/providers/aws/resource_aws_opsworks_application.go b/builtin/providers/aws/resource_aws_opsworks_application.go deleted file mode 100644 index 7333018e5..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_application.go +++ /dev/null @@ -1,633 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksApplication() *schema.Resource { - return &schema.Resource{ - - Create: resourceAwsOpsworksApplicationCreate, - Read: resourceAwsOpsworksApplicationRead, - Update: resourceAwsOpsworksApplicationUpdate, - Delete: resourceAwsOpsworksApplicationDelete, - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "short_name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - // aws-flow-ruby | java | rails | php | nodejs | static | other - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - expected := [7]string{"aws-flow-ruby", "java", "rails", "php", "nodejs", "static", "other"} - - found := false - for _, b := range expected { - if b == value { - found = true - } - } - if !found { - errors = append(errors, fmt.Errorf( - "%q has to be one of [aws-flow-ruby, java, rails, php, nodejs, static, other]", k)) - } - return - }, - }, - "stack_id": { - Type: schema.TypeString, - Required: true, - }, - // TODO: the following 4 vals are really part of the Attributes array. We should validate that only ones relevant to the chosen type are set, perhaps. (what is the default type? how do they map?) 
- "document_root": { - Type: schema.TypeString, - Optional: true, - //Default: "public", - }, - "rails_env": { - Type: schema.TypeString, - Optional: true, - //Default: "production", - }, - "auto_bundle_on_deploy": { - Type: schema.TypeString, - Optional: true, - //Default: true, - }, - "aws_flow_ruby_settings": { - Type: schema.TypeString, - Optional: true, - }, - "app_source": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - - "url": { - Type: schema.TypeString, - Optional: true, - }, - - "username": { - Type: schema.TypeString, - Optional: true, - }, - - "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - - "revision": { - Type: schema.TypeString, - Optional: true, - }, - - "ssh_key": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - // AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance. - // anything beside auto select will lead into failure in case the instance doesn't exist - // XXX: validation? 
- "data_source_type": { - Type: schema.TypeString, - Optional: true, - }, - "data_source_database_name": { - Type: schema.TypeString, - Optional: true, - }, - "data_source_arn": { - Type: schema.TypeString, - Optional: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "domains": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "environment": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "secure": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - "enable_ssl": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "ssl_configuration": { - Type: schema.TypeList, - Optional: true, - //Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "certificate": { - Type: schema.TypeString, - Required: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - return strings.TrimSpace(v.(string)) - default: - return "" - } - }, - }, - "private_key": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - return strings.TrimSpace(v.(string)) - default: - return "" - } - }, - }, - "chain": { - Type: schema.TypeString, - Optional: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - return strings.TrimSpace(v.(string)) - default: - return "" - } - }, - }, - }, - }, - }, - }, - } -} - -func resourceAwsOpsworksApplicationValidate(d *schema.ResourceData) error { - appSourceCount := d.Get("app_source.#").(int) - if appSourceCount > 1 { - return fmt.Errorf("Only one app_source is permitted.") - } - - sslCount := d.Get("ssl_configuration.#").(int) - if sslCount > 1 { - return fmt.Errorf("Only one 
ssl_configuration is permitted.") - } - - if d.Get("type") == opsworks.AppTypeNodejs || d.Get("type") == opsworks.AppTypeJava { - // allowed attributes: none - if d.Get("document_root").(string) != "" || d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" || d.Get("aws_flow_ruby_settings").(string) != "" { - return fmt.Errorf("No additional attributes are allowed for app type '%s'.", d.Get("type").(string)) - } - } else if d.Get("type") == opsworks.AppTypeRails { - // allowed attributes: document_root, rails_env, auto_bundle_on_deploy - if d.Get("aws_flow_ruby_settings").(string) != "" { - return fmt.Errorf("Only 'document_root, rails_env, auto_bundle_on_deploy' are allowed for app type '%s'.", opsworks.AppTypeRails) - } - // rails_env is required - if _, ok := d.GetOk("rails_env"); !ok { - return fmt.Errorf("Set rails_env must be set if type is set to rails.") - } - } else if d.Get("type") == opsworks.AppTypePhp || d.Get("type") == opsworks.AppTypeStatic || d.Get("type") == opsworks.AppTypeOther { - log.Printf("[DEBUG] the app type is : %s", d.Get("type").(string)) - log.Printf("[DEBUG] the attributes are: document_root '%s', rails_env '%s', auto_bundle_on_deploy '%s', aws_flow_ruby_settings '%s'", d.Get("document_root").(string), d.Get("rails_env").(string), d.Get("auto_bundle_on_deploy").(string), d.Get("aws_flow_ruby_settings").(string)) - // allowed attributes: document_root - if d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" || d.Get("aws_flow_ruby_settings").(string) != "" { - return fmt.Errorf("Only 'document_root' is allowed for app type '%s'.", d.Get("type").(string)) - } - } else if d.Get("type") == opsworks.AppTypeAwsFlowRuby { - // allowed attributes: aws_flow_ruby_settings - if d.Get("document_root").(string) != "" || d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" { - return fmt.Errorf("Only 'aws_flow_ruby_settings' is allowed for app type '%s'.", 
d.Get("type").(string)) - } - } - - return nil -} - -func resourceAwsOpsworksApplicationRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.DescribeAppsInput{ - AppIds: []*string{ - aws.String(d.Id()), - }, - } - - log.Printf("[DEBUG] Reading OpsWorks app: %s", d.Id()) - - resp, err := client.DescribeApps(req) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - log.Printf("[INFO] App not found: %s", d.Id()) - d.SetId("") - return nil - } - } - return err - } - - app := resp.Apps[0] - - d.Set("name", app.Name) - d.Set("stack_id", app.StackId) - d.Set("type", app.Type) - d.Set("description", app.Description) - d.Set("domains", flattenStringList(app.Domains)) - d.Set("enable_ssl", app.EnableSsl) - resourceAwsOpsworksSetApplicationSsl(d, app.SslConfiguration) - resourceAwsOpsworksSetApplicationSource(d, app.AppSource) - resourceAwsOpsworksSetApplicationDataSources(d, app.DataSources) - resourceAwsOpsworksSetApplicationEnvironmentVariable(d, app.Environment) - resourceAwsOpsworksSetApplicationAttributes(d, app.Attributes) - return nil -} - -func resourceAwsOpsworksApplicationCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - err := resourceAwsOpsworksApplicationValidate(d) - if err != nil { - return err - } - - req := &opsworks.CreateAppInput{ - Name: aws.String(d.Get("name").(string)), - Shortname: aws.String(d.Get("short_name").(string)), - StackId: aws.String(d.Get("stack_id").(string)), - Type: aws.String(d.Get("type").(string)), - Description: aws.String(d.Get("description").(string)), - Domains: expandStringList(d.Get("domains").([]interface{})), - EnableSsl: aws.Bool(d.Get("enable_ssl").(bool)), - SslConfiguration: resourceAwsOpsworksApplicationSsl(d), - AppSource: resourceAwsOpsworksApplicationSource(d), - DataSources: resourceAwsOpsworksApplicationDataSources(d), - Environment: 
resourceAwsOpsworksApplicationEnvironmentVariable(d), - Attributes: resourceAwsOpsworksApplicationAttributes(d), - } - - var resp *opsworks.CreateAppOutput - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - resp, cerr = client.CreateApp(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return resource.RetryableError(cerr) - } - return resource.NonRetryableError(cerr) - } - return nil - }) - - if err != nil { - return err - } - - appID := *resp.AppId - d.SetId(appID) - d.Set("id", appID) - - return resourceAwsOpsworksApplicationRead(d, meta) -} - -func resourceAwsOpsworksApplicationUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - err := resourceAwsOpsworksApplicationValidate(d) - if err != nil { - return err - } - - req := &opsworks.UpdateAppInput{ - AppId: aws.String(d.Id()), - Name: aws.String(d.Get("name").(string)), - Type: aws.String(d.Get("type").(string)), - Description: aws.String(d.Get("description").(string)), - Domains: expandStringList(d.Get("domains").([]interface{})), - EnableSsl: aws.Bool(d.Get("enable_ssl").(bool)), - SslConfiguration: resourceAwsOpsworksApplicationSsl(d), - AppSource: resourceAwsOpsworksApplicationSource(d), - DataSources: resourceAwsOpsworksApplicationDataSources(d), - Environment: resourceAwsOpsworksApplicationEnvironmentVariable(d), - Attributes: resourceAwsOpsworksApplicationAttributes(d), - } - - log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id()) - - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - _, cerr := client.UpdateApp(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return 
resource.NonRetryableError(cerr) - } - return resource.RetryableError(cerr) - } - return nil - }) - - if err != nil { - return err - } - return resourceAwsOpsworksApplicationRead(d, meta) -} - -func resourceAwsOpsworksApplicationDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.DeleteAppInput{ - AppId: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting OpsWorks application: %s", d.Id()) - - _, err := client.DeleteApp(req) - return err -} - -func resourceAwsOpsworksSetApplicationEnvironmentVariable(d *schema.ResourceData, v []*opsworks.EnvironmentVariable) { - log.Printf("[DEBUG] envs: %s %d", v, len(v)) - if len(v) == 0 { - d.Set("environment", nil) - return - } - newValue := make([]*map[string]interface{}, len(v)) - - for i := 0; i < len(v); i++ { - config := v[i] - data := make(map[string]interface{}) - newValue[i] = &data - - if config.Key != nil { - data["key"] = *config.Key - } - if config.Value != nil { - data["value"] = *config.Value - } - if config.Secure != nil { - - if bool(*config.Secure) { - data["secure"] = &opsworksTrueString - } else { - data["secure"] = &opsworksFalseString - } - } - log.Printf("[DEBUG] v: %s", data) - } - - d.Set("environment", newValue) -} - -func resourceAwsOpsworksApplicationEnvironmentVariable(d *schema.ResourceData) []*opsworks.EnvironmentVariable { - environmentVariables := d.Get("environment").(*schema.Set).List() - result := make([]*opsworks.EnvironmentVariable, len(environmentVariables)) - - for i := 0; i < len(environmentVariables); i++ { - env := environmentVariables[i].(map[string]interface{}) - - result[i] = &opsworks.EnvironmentVariable{ - Key: aws.String(env["key"].(string)), - Value: aws.String(env["value"].(string)), - Secure: aws.Bool(env["secure"].(bool)), - } - } - return result -} - -func resourceAwsOpsworksApplicationSource(d *schema.ResourceData) *opsworks.Source { - count := d.Get("app_source.#").(int) - if count == 0 { - return nil 
- } - - return &opsworks.Source{ - Type: aws.String(d.Get("app_source.0.type").(string)), - Url: aws.String(d.Get("app_source.0.url").(string)), - Username: aws.String(d.Get("app_source.0.username").(string)), - Password: aws.String(d.Get("app_source.0.password").(string)), - Revision: aws.String(d.Get("app_source.0.revision").(string)), - SshKey: aws.String(d.Get("app_source.0.ssh_key").(string)), - } -} - -func resourceAwsOpsworksSetApplicationSource(d *schema.ResourceData, v *opsworks.Source) { - nv := make([]interface{}, 0, 1) - if v != nil { - m := make(map[string]interface{}) - if v.Type != nil { - m["type"] = *v.Type - } - if v.Url != nil { - m["url"] = *v.Url - } - if v.Username != nil { - m["username"] = *v.Username - } - if v.Password != nil { - m["password"] = *v.Password - } - if v.Revision != nil { - m["revision"] = *v.Revision - } - nv = append(nv, m) - } - - err := d.Set("app_source", nv) - if err != nil { - // should never happen - panic(err) - } -} - -func resourceAwsOpsworksApplicationDataSources(d *schema.ResourceData) []*opsworks.DataSource { - arn := d.Get("data_source_arn").(string) - databaseName := d.Get("data_source_database_name").(string) - databaseType := d.Get("data_source_type").(string) - - result := make([]*opsworks.DataSource, 1) - - if len(arn) > 0 || len(databaseName) > 0 || len(databaseType) > 0 { - result[0] = &opsworks.DataSource{ - Arn: aws.String(arn), - DatabaseName: aws.String(databaseName), - Type: aws.String(databaseType), - } - } - return result -} - -func resourceAwsOpsworksSetApplicationDataSources(d *schema.ResourceData, v []*opsworks.DataSource) { - d.Set("data_source_arn", nil) - d.Set("data_source_database_name", nil) - d.Set("data_source_type", nil) - - if len(v) == 0 { - return - } - - d.Set("data_source_arn", v[0].Arn) - d.Set("data_source_database_name", v[0].DatabaseName) - d.Set("data_source_type", v[0].Type) -} - -func resourceAwsOpsworksApplicationSsl(d *schema.ResourceData) *opsworks.SslConfiguration { - 
count := d.Get("ssl_configuration.#").(int) - if count == 0 { - return nil - } - - return &opsworks.SslConfiguration{ - PrivateKey: aws.String(d.Get("ssl_configuration.0.private_key").(string)), - Certificate: aws.String(d.Get("ssl_configuration.0.certificate").(string)), - Chain: aws.String(d.Get("ssl_configuration.0.chain").(string)), - } -} - -func resourceAwsOpsworksSetApplicationSsl(d *schema.ResourceData, v *opsworks.SslConfiguration) { - nv := make([]interface{}, 0, 1) - set := false - if v != nil { - m := make(map[string]interface{}) - if v.PrivateKey != nil { - m["private_key"] = *v.PrivateKey - set = true - } - if v.Certificate != nil { - m["certificate"] = *v.Certificate - set = true - } - if v.Chain != nil { - m["chain"] = *v.Chain - set = true - } - if set { - nv = append(nv, m) - } - } - - err := d.Set("ssl_configuration", nv) - if err != nil { - // should never happen - panic(err) - } -} - -func resourceAwsOpsworksApplicationAttributes(d *schema.ResourceData) map[string]*string { - attributes := make(map[string]*string) - - if val := d.Get("document_root").(string); len(val) > 0 { - attributes[opsworks.AppAttributesKeysDocumentRoot] = aws.String(val) - } - if val := d.Get("aws_flow_ruby_settings").(string); len(val) > 0 { - attributes[opsworks.AppAttributesKeysAwsFlowRubySettings] = aws.String(val) - } - if val := d.Get("rails_env").(string); len(val) > 0 { - attributes[opsworks.AppAttributesKeysRailsEnv] = aws.String(val) - } - if val := d.Get("auto_bundle_on_deploy").(string); len(val) > 0 { - if val == "1" { - val = "true" - } else if val == "0" { - val = "false" - } - attributes[opsworks.AppAttributesKeysAutoBundleOnDeploy] = aws.String(val) - } - - return attributes -} - -func resourceAwsOpsworksSetApplicationAttributes(d *schema.ResourceData, v map[string]*string) { - d.Set("document_root", nil) - d.Set("rails_env", nil) - d.Set("aws_flow_ruby_settings", nil) - d.Set("auto_bundle_on_deploy", nil) - - if d.Get("type") == opsworks.AppTypeNodejs 
|| d.Get("type") == opsworks.AppTypeJava { - return - } else if d.Get("type") == opsworks.AppTypeRails { - if val, ok := v[opsworks.AppAttributesKeysDocumentRoot]; ok { - d.Set("document_root", val) - } - if val, ok := v[opsworks.AppAttributesKeysRailsEnv]; ok { - d.Set("rails_env", val) - } - if val, ok := v[opsworks.AppAttributesKeysAutoBundleOnDeploy]; ok { - d.Set("auto_bundle_on_deploy", val) - } - return - } else if d.Get("type") == opsworks.AppTypePhp || d.Get("type") == opsworks.AppTypeStatic || d.Get("type") == opsworks.AppTypeOther { - if val, ok := v[opsworks.AppAttributesKeysDocumentRoot]; ok { - d.Set("document_root", val) - } - return - } else if d.Get("type") == opsworks.AppTypeAwsFlowRuby { - if val, ok := v[opsworks.AppAttributesKeysAwsFlowRubySettings]; ok { - d.Set("aws_flow_ruby_settings", val) - } - return - } - - return -} diff --git a/builtin/providers/aws/resource_aws_opsworks_application_test.go b/builtin/providers/aws/resource_aws_opsworks_application_test.go deleted file mode 100644 index 37d2df0b3..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_application_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSOpsworksApplication(t *testing.T) { - var opsapp opsworks.App - - rInt := acctest.RandInt() - name := fmt.Sprintf("tf-ops-acc-application-%d", rInt) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksApplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksApplicationCreate(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksApplicationExists( - 
"aws_opsworks_application.tf-acc-app", &opsapp), - testAccCheckAWSOpsworksCreateAppAttributes(&opsapp), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "name", name, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "type", "other", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "enable_ssl", "false", - ), - resource.TestCheckNoResourceAttr( - "aws_opsworks_application.tf-acc-app", "ssl_configuration", - ), - resource.TestCheckNoResourceAttr( - "aws_opsworks_application.tf-acc-app", "domains", - ), - resource.TestCheckNoResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.key", "key1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1", - ), - resource.TestCheckNoResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "document_root", "foo", - ), - ), - }, - { - Config: testAccAwsOpsworksApplicationUpdate(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksApplicationExists( - "aws_opsworks_application.tf-acc-app", &opsapp), - testAccCheckAWSOpsworksUpdateAppAttributes(&opsapp), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "name", name, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "type", "rails", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "enable_ssl", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "ssl_configuration.0.certificate", "-----BEGIN 
CERTIFICATE-----\nMIIBkDCB+gIJALoScFD0sJq3MA0GCSqGSIb3DQEBBQUAMA0xCzAJBgNVBAYTAkRF\nMB4XDTE1MTIxOTIwMzU1MVoXDTE2MDExODIwMzU1MVowDTELMAkGA1UEBhMCREUw\ngZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAKKQKbTTH/Julz16xY7ArYlzJYCP\nedTCx1bopuryCx/+d1gC94MtRdlPSpQl8mfc9iBdtXbJppp73Qh/DzLzO9Ns25xZ\n+kUQMhbIyLsaCBzuEGLgAaVdGpNvRBw++UoYtd0U7QczFAreTGLH8n8+FIzuI5Mc\n+MJ1TKbbt5gFfRSzAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEALARo96wCDmaHKCaX\nS0IGLGnZCfiIUfCmBxOXBSJxDBwter95QHR0dMGxYIujee5n4vvavpVsqZnfMC3I\nOZWPlwiUJbNIpK+04Bg2vd5m/NMMrvi75RfmyeMtSfq/NrIX2Q3+nyWI7DLq7yZI\nV/YEvOqdAiy5NEWBztHx8HvB9G4=\n-----END CERTIFICATE-----", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "ssl_configuration.0.private_key", "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCikCm00x/ybpc9esWOwK2JcyWAj3nUwsdW6Kbq8gsf/ndYAveD\nLUXZT0qUJfJn3PYgXbV2yaaae90Ifw8y8zvTbNucWfpFEDIWyMi7Gggc7hBi4AGl\nXRqTb0QcPvlKGLXdFO0HMxQK3kxix/J/PhSM7iOTHPjCdUym27eYBX0UswIDAQAB\nAoGBAIYcrvuqDboguI8U4TUjCkfSAgds1pLLWk79wu8jXkA329d1IyNKT0y3WIye\nPbyoEzmidZmZROQ/+ZsPz8c12Y0DrX73WSVzKNyJeP7XMk9HSzA1D9RX0U0S+5Kh\nFAMc2NEVVFIfQtVtoVmHdKDpnRYtOCHLW9rRpvqOOjd4mYk5AkEAzeiFr1mtlnsa\n67shMxzDaOTAFMchRz6G7aSovvCztxcB63ulFI/w9OTUMdTQ7ff7pet+lVihLc2W\nefIL0HvsjQJBAMocNTKaR/TnsV5GSk2kPAdR+zFP5sQy8sfMy0lEXTylc7zN4ajX\nMeHVoxp+GZgpfDcZ3ya808H1umyXh+xA1j8CQE9x9ZKQYT98RAjL7KVR5btk9w+N\nPTPF1j1+mHUDXfO4ds8qp6jlWKzEVXLcj7ghRADiebaZuaZ4eiSW1SQdjEkCQQC4\nwDhQ3X9RfEpCp3ZcqvjEqEg6t5N3XitYQPjDLN8eBRBbUsgpEy3iBuxl10eGNMX7\niIbYXlwkPYAArDPv3wT5AkAwp4vym+YKmDqh6gseKfRDuJqRiW9yD5A8VGr/w88k\n5rkuduVGP7tK3uIp00Its3aEyKF8mLGWYszVGeeLxAMH\n-----END RSA PRIVATE KEY-----", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "domains.0", "example.com", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "domains.1", "sub.example.com", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source.0.password", "", - ), - resource.TestCheckResourceAttr( - 
"aws_opsworks_application.tf-acc-app", "app_source.0.revision", "master", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source.0.ssh_key", "", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source.0.type", "git", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source.0.url", "https://github.com/aws/example.git", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "app_source.0.username", "", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.2107898637.key", "key2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.2107898637.value", "value2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.2107898637.secure", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.key", "key1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1", - ), - resource.TestCheckNoResourceAttr( - "aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "document_root", "root", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "auto_bundle_on_deploy", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_application.tf-acc-app", "rails_env", "staging", - ), - ), - }, - }, - }) -} - -func testAccCheckAWSOpsworksApplicationExists( - n string, opsapp *opsworks.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).opsworksconn - - params := 
&opsworks.DescribeAppsInput{ - AppIds: []*string{&rs.Primary.ID}, - } - resp, err := conn.DescribeApps(params) - - if err != nil { - return err - } - - if v := len(resp.Apps); v != 1 { - return fmt.Errorf("Expected 1 response returned, got %d", v) - } - - *opsapp = *resp.Apps[0] - - return nil - } -} - -func testAccCheckAWSOpsworksCreateAppAttributes( - opsapp *opsworks.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *opsapp.EnableSsl { - return fmt.Errorf("Unexpected enable ssl: %t", *opsapp.EnableSsl) - } - - if *opsapp.Attributes["DocumentRoot"] != "foo" { - return fmt.Errorf("Unnexpected document root: %s", *opsapp.Attributes["DocumentRoot"]) - } - - if *opsapp.Type != "other" { - return fmt.Errorf("Unnexpected type: %s", *opsapp.Type) - } - - if *opsapp.AppSource.Type != "other" { - return fmt.Errorf("Unnexpected appsource type: %s", *opsapp.AppSource.Type) - } - - expectedEnv := []*opsworks.EnvironmentVariable{ - { - Key: aws.String("key1"), - Value: aws.String("value1"), - Secure: aws.Bool(false), - }, - } - - if !reflect.DeepEqual(expectedEnv, opsapp.Environment) { - return fmt.Errorf("Unnexpected environment: %s", opsapp.Environment) - } - - if v := len(opsapp.Domains); v != 0 { - return fmt.Errorf("Expected 0 domains returned, got %d", v) - } - - return nil - } -} - -func testAccCheckAWSOpsworksUpdateAppAttributes( - opsapp *opsworks.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *opsapp.Type != "rails" { - return fmt.Errorf("Unnexpected type: %s", *opsapp.Type) - } - - if !*opsapp.EnableSsl { - return fmt.Errorf("Unexpected enable ssl: %t", *opsapp.EnableSsl) - } - - if *opsapp.SslConfiguration.Certificate != "-----BEGIN 
CERTIFICATE-----\nMIIBkDCB+gIJALoScFD0sJq3MA0GCSqGSIb3DQEBBQUAMA0xCzAJBgNVBAYTAkRF\nMB4XDTE1MTIxOTIwMzU1MVoXDTE2MDExODIwMzU1MVowDTELMAkGA1UEBhMCREUw\ngZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAKKQKbTTH/Julz16xY7ArYlzJYCP\nedTCx1bopuryCx/+d1gC94MtRdlPSpQl8mfc9iBdtXbJppp73Qh/DzLzO9Ns25xZ\n+kUQMhbIyLsaCBzuEGLgAaVdGpNvRBw++UoYtd0U7QczFAreTGLH8n8+FIzuI5Mc\n+MJ1TKbbt5gFfRSzAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEALARo96wCDmaHKCaX\nS0IGLGnZCfiIUfCmBxOXBSJxDBwter95QHR0dMGxYIujee5n4vvavpVsqZnfMC3I\nOZWPlwiUJbNIpK+04Bg2vd5m/NMMrvi75RfmyeMtSfq/NrIX2Q3+nyWI7DLq7yZI\nV/YEvOqdAiy5NEWBztHx8HvB9G4=\n-----END CERTIFICATE-----" { - return fmt.Errorf("Unexpected ssl configuration certificate: %s", *opsapp.SslConfiguration.Certificate) - } - - if *opsapp.SslConfiguration.PrivateKey != "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCikCm00x/ybpc9esWOwK2JcyWAj3nUwsdW6Kbq8gsf/ndYAveD\nLUXZT0qUJfJn3PYgXbV2yaaae90Ifw8y8zvTbNucWfpFEDIWyMi7Gggc7hBi4AGl\nXRqTb0QcPvlKGLXdFO0HMxQK3kxix/J/PhSM7iOTHPjCdUym27eYBX0UswIDAQAB\nAoGBAIYcrvuqDboguI8U4TUjCkfSAgds1pLLWk79wu8jXkA329d1IyNKT0y3WIye\nPbyoEzmidZmZROQ/+ZsPz8c12Y0DrX73WSVzKNyJeP7XMk9HSzA1D9RX0U0S+5Kh\nFAMc2NEVVFIfQtVtoVmHdKDpnRYtOCHLW9rRpvqOOjd4mYk5AkEAzeiFr1mtlnsa\n67shMxzDaOTAFMchRz6G7aSovvCztxcB63ulFI/w9OTUMdTQ7ff7pet+lVihLc2W\nefIL0HvsjQJBAMocNTKaR/TnsV5GSk2kPAdR+zFP5sQy8sfMy0lEXTylc7zN4ajX\nMeHVoxp+GZgpfDcZ3ya808H1umyXh+xA1j8CQE9x9ZKQYT98RAjL7KVR5btk9w+N\nPTPF1j1+mHUDXfO4ds8qp6jlWKzEVXLcj7ghRADiebaZuaZ4eiSW1SQdjEkCQQC4\nwDhQ3X9RfEpCp3ZcqvjEqEg6t5N3XitYQPjDLN8eBRBbUsgpEy3iBuxl10eGNMX7\niIbYXlwkPYAArDPv3wT5AkAwp4vym+YKmDqh6gseKfRDuJqRiW9yD5A8VGr/w88k\n5rkuduVGP7tK3uIp00Its3aEyKF8mLGWYszVGeeLxAMH\n-----END RSA PRIVATE KEY-----" { - return fmt.Errorf("Unexpected ssl configuration private key: %s", *opsapp.SslConfiguration.PrivateKey) - } - - expectedAttrs := map[string]*string{ - "DocumentRoot": aws.String("root"), - "RailsEnv": aws.String("staging"), - "AutoBundleOnDeploy": aws.String("true"), - "AwsFlowRubySettings": nil, - } - - if 
!reflect.DeepEqual(expectedAttrs, opsapp.Attributes) { - return fmt.Errorf("Unnexpected Attributes: %v", aws.StringValueMap(opsapp.Attributes)) - } - - expectedAppSource := &opsworks.Source{ - Type: aws.String("git"), - Revision: aws.String("master"), - Url: aws.String("https://github.com/aws/example.git"), - } - - if !reflect.DeepEqual(expectedAppSource, opsapp.AppSource) { - return fmt.Errorf("Unnexpected appsource: %s", opsapp.AppSource) - } - - expectedEnv := []*opsworks.EnvironmentVariable{ - { - Key: aws.String("key2"), - Value: aws.String("*****FILTERED*****"), - Secure: aws.Bool(true), - }, - { - Key: aws.String("key1"), - Value: aws.String("value1"), - Secure: aws.Bool(false), - }, - } - - if !reflect.DeepEqual(expectedEnv, opsapp.Environment) { - return fmt.Errorf("Unnexpected environment: %s", opsapp.Environment) - } - - expectedDomains := []*string{ - aws.String("example.com"), - aws.String("sub.example.com"), - } - - if !reflect.DeepEqual(expectedDomains, opsapp.Domains) { - return fmt.Errorf("Unnexpected Daomins : %v", aws.StringValueSlice(opsapp.Domains)) - } - - return nil - } -} - -func testAccCheckAwsOpsworksApplicationDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AWSClient).opsworksconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_opsworks_application" { - continue - } - - req := &opsworks.DescribeAppsInput{ - AppIds: []*string{ - aws.String(rs.Primary.ID), - }, - } - - resp, err := client.DescribeApps(req) - if err == nil { - if len(resp.Apps) > 0 { - return fmt.Errorf("OpsWorks App still exist.") - } - } - - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() != "ResourceNotFoundException" { - return err - } - } - } - - return nil -} - -func testAccAwsOpsworksApplicationCreate(name string) string { - return testAccAwsOpsworksStackConfigVpcCreate(name) + - fmt.Sprintf(` -resource "aws_opsworks_application" "tf-acc-app" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - name = "%s" - 
type = "other" - enable_ssl = false - app_source ={ - type = "other" - } - environment = { key = "key1" value = "value1" secure = false} - document_root = "foo" -} -`, name) -} - -func testAccAwsOpsworksApplicationUpdate(name string) string { - return testAccAwsOpsworksStackConfigVpcCreate(name) + - fmt.Sprintf(` -resource "aws_opsworks_application" "tf-acc-app" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - name = "%s" - type = "rails" - domains = ["example.com", "sub.example.com"] - enable_ssl = true - ssl_configuration = { - private_key = < 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - Ebs: ebs, - }) - } - } - - if v, ok := d.GetOk("ephemeral_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - VirtualName: aws.String(bd["virtual_name"].(string)), - }) - } - } - - if v, ok := d.GetOk("root_block_device"); ok { - vL := v.(*schema.Set).List() - if len(vL) > 1 { - return fmt.Errorf("Cannot specify more than one root_block_device.") - } - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &opsworks.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ - DeviceName: aws.String("ROOT_DEVICE"), - Ebs: ebs, - }) - } - } - - if len(blockDevices) > 0 { - req.BlockDeviceMappings = blockDevices - } - - log.Printf("[DEBUG] Creating OpsWorks instance") - - var resp 
*opsworks.CreateInstanceOutput - - resp, err = client.CreateInstance(req) - if err != nil { - return err - } - - if resp.InstanceId == nil { - return fmt.Errorf("Error launching instance: no instance returned in response") - } - - instanceId := *resp.InstanceId - d.SetId(instanceId) - d.Set("id", instanceId) - - if v, ok := d.GetOk("state"); ok && v.(string) == "running" { - err := startOpsworksInstance(d, meta, true) - if err != nil { - return err - } - } - - return resourceAwsOpsworksInstanceRead(d, meta) -} - -func resourceAwsOpsworksInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - err := resourceAwsOpsworksInstanceValidate(d) - if err != nil { - return err - } - - req := &opsworks.UpdateInstanceInput{ - AgentVersion: aws.String(d.Get("agent_version").(string)), - Architecture: aws.String(d.Get("architecture").(string)), - InstanceId: aws.String(d.Get("id").(string)), - InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), - } - - if v, ok := d.GetOk("ami_id"); ok { - req.AmiId = aws.String(v.(string)) - req.Os = aws.String("Custom") - } - - if v, ok := d.GetOk("auto_scaling_type"); ok { - req.AutoScalingType = aws.String(v.(string)) - } - - if v, ok := d.GetOk("hostname"); ok { - req.Hostname = aws.String(v.(string)) - } - - if v, ok := d.GetOk("instance_type"); ok { - req.InstanceType = aws.String(v.(string)) - } - - if v, ok := d.GetOk("layer_ids"); ok { - req.LayerIds = expandStringList(v.([]interface{})) - } - - if v, ok := d.GetOk("os"); ok { - req.Os = aws.String(v.(string)) - } - - if v, ok := d.GetOk("ssh_key_name"); ok { - req.SshKeyName = aws.String(v.(string)) - } - - log.Printf("[DEBUG] Updating OpsWorks instance: %s", d.Id()) - - _, err = client.UpdateInstance(req) - if err != nil { - return err - } - - var status string - - if v, ok := d.GetOk("status"); ok { - status = v.(string) - } else { - status = "stopped" - } - - if v, ok := d.GetOk("state"); ok { - state := 
v.(string) - if state == "running" { - if status == "stopped" || status == "stopping" || status == "shutting_down" { - err := startOpsworksInstance(d, meta, false) - if err != nil { - return err - } - } - } else { - if status != "stopped" && status != "stopping" && status != "shutting_down" { - err := stopOpsworksInstance(d, meta, true) - if err != nil { - return err - } - } - } - } - - return resourceAwsOpsworksInstanceRead(d, meta) -} - -func resourceAwsOpsworksInstanceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - if v, ok := d.GetOk("status"); ok && v.(string) != "stopped" { - err := stopOpsworksInstance(d, meta, true) - if err != nil { - return err - } - } - - req := &opsworks.DeleteInstanceInput{ - InstanceId: aws.String(d.Id()), - DeleteElasticIp: aws.Bool(d.Get("delete_eip").(bool)), - DeleteVolumes: aws.Bool(d.Get("delete_ebs").(bool)), - } - - log.Printf("[DEBUG] Deleting OpsWorks instance: %s", d.Id()) - - _, err := client.DeleteInstance(req) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceAwsOpsworksInstanceImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither delete_eip nor delete_ebs can be fetched - // from any API call, so we need to default to the values - // we set in the schema by default - d.Set("delete_ebs", true) - d.Set("delete_eip", true) - return []*schema.ResourceData{d}, nil -} - -func startOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool) error { - client := meta.(*AWSClient).opsworksconn - - instanceId := d.Get("id").(string) - - req := &opsworks.StartInstanceInput{ - InstanceId: aws.String(instanceId), - } - - log.Printf("[DEBUG] Starting OpsWorks instance: %s", instanceId) - - _, err := client.StartInstance(req) - - if err != nil { - return err - } - - if wait { - log.Printf("[DEBUG] Waiting for instance (%s) to become running", instanceId) - - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"requested", "pending", "booting", "running_setup"}, - Target: []string{"online"}, - Refresh: OpsworksInstanceStateRefreshFunc(client, instanceId), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to become stopped: %s", - instanceId, err) - } - } - - return nil -} - -func stopOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool) error { - client := meta.(*AWSClient).opsworksconn - - instanceId := d.Get("id").(string) - - req := &opsworks.StopInstanceInput{ - InstanceId: aws.String(instanceId), - } - - log.Printf("[DEBUG] Stopping OpsWorks instance: %s", instanceId) - - _, err := client.StopInstance(req) - - if err != nil { - return err - } - - if wait { - log.Printf("[DEBUG] Waiting for instance (%s) to become stopped", instanceId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"stopping", "terminating", "shutting_down", "terminated"}, - Target: []string{"stopped"}, - Refresh: OpsworksInstanceStateRefreshFunc(client, instanceId), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to become stopped: %s", - instanceId, err) - } - } - - return nil -} - -func readOpsworksBlockDevices(d *schema.ResourceData, instance *opsworks.Instance, meta interface{}) ( - map[string]interface{}, error) { - - blockDevices := make(map[string]interface{}) - blockDevices["ebs"] = make([]map[string]interface{}, 0) - blockDevices["ephemeral"] = make([]map[string]interface{}, 0) - blockDevices["root"] = nil - - if len(instance.BlockDeviceMappings) == 0 { - return nil, nil - } - - for _, bdm := range instance.BlockDeviceMappings { - bd := make(map[string]interface{}) - if bdm.Ebs != nil && bdm.Ebs.DeleteOnTermination != 
nil { - bd["delete_on_termination"] = *bdm.Ebs.DeleteOnTermination - } - if bdm.Ebs != nil && bdm.Ebs.VolumeSize != nil { - bd["volume_size"] = *bdm.Ebs.VolumeSize - } - if bdm.Ebs != nil && bdm.Ebs.VolumeType != nil { - bd["volume_type"] = *bdm.Ebs.VolumeType - } - if bdm.Ebs != nil && bdm.Ebs.Iops != nil { - bd["iops"] = *bdm.Ebs.Iops - } - if bdm.DeviceName != nil && *bdm.DeviceName == "ROOT_DEVICE" { - blockDevices["root"] = bd - } else { - if bdm.DeviceName != nil { - bd["device_name"] = *bdm.DeviceName - } - if bdm.VirtualName != nil { - bd["virtual_name"] = *bdm.VirtualName - blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) - } else { - if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { - bd["snapshot_id"] = *bdm.Ebs.SnapshotId - } - blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) - } - } - } - return blockDevices, nil -} - -func OpsworksInstanceStateRefreshFunc(conn *opsworks.OpsWorks, instanceID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeInstances(&opsworks.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(instanceID)}, - }) - if err != nil { - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { - // Set this to nil as if we didn't find anything. - resp = nil - } else { - log.Printf("Error on OpsworksInstanceStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil || len(resp.Instances) == 0 { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - i := resp.Instances[0] - return i, *i.Status, nil - } -} diff --git a/builtin/providers/aws/resource_aws_opsworks_instance_test.go b/builtin/providers/aws/resource_aws_opsworks_instance_test.go deleted file mode 100644 index 1a2bbe0f6..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_instance_test.go +++ /dev/null @@ -1,397 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSOpsworksInstance_importBasic(t *testing.T) { - stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - resourceName := "aws_opsworks_instance.tf-acc" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksInstanceConfigCreate(stackName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"state"}, //state is something we pass to the API and get back as status :( - }, - }, - }) -} - -func TestAccAWSOpsworksInstance(t *testing.T) { - stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - var opsinst opsworks.Instance - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksInstanceConfigCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists( - "aws_opsworks_instance.tf-acc", &opsinst), - testAccCheckAWSOpsworksInstanceAttributes(&opsinst), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", 
"hostname", "tf-acc1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "instance_type", "t2.micro", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "state", "stopped", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "layer_ids.#", "1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "install_updates_on_boot", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "architecture", "x86_64", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "tenancy", "default", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2016.09", // inherited from opsworks_stack_test - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "root_device_type", "ebs", // inherited from opsworks_stack_test - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "availability_zone", "us-west-2a", // inherited from opsworks_stack_test - ), - ), - }, - { - Config: testAccAwsOpsworksInstanceConfigUpdate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists( - "aws_opsworks_instance.tf-acc", &opsinst), - testAccCheckAWSOpsworksInstanceAttributes(&opsinst), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "hostname", "tf-acc1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "instance_type", "t2.small", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "layer_ids.#", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2015.09", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "tenancy", "default", - ), - ), - }, - }, - }) -} - -func TestAccAWSOpsworksInstance_UpdateHostNameForceNew(t *testing.T) { - stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - - var before, after opsworks.Instance - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksInstanceConfigCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists("aws_opsworks_instance.tf-acc", &before), - resource.TestCheckResourceAttr("aws_opsworks_instance.tf-acc", "hostname", "tf-acc1"), - ), - }, - { - Config: testAccAwsOpsworksInstanceConfigUpdateHostName(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists("aws_opsworks_instance.tf-acc", &after), - resource.TestCheckResourceAttr("aws_opsworks_instance.tf-acc", "hostname", "tf-acc2"), - testAccCheckAwsOpsworksInstanceRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func testAccCheckAwsOpsworksInstanceRecreated(t *testing.T, - before, after *opsworks.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.InstanceId == *after.InstanceId { - t.Fatalf("Expected change of OpsWorks Instance IDs, but both were %s", *before.InstanceId) - } - return nil - } -} - -func testAccCheckAWSOpsworksInstanceExists( - n string, opsinst *opsworks.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Opsworks Instance is set") - } - - conn := testAccProvider.Meta().(*AWSClient).opsworksconn - - params := &opsworks.DescribeInstancesInput{ - InstanceIds: []*string{&rs.Primary.ID}, - } - resp, err := conn.DescribeInstances(params) - - if err != nil { - return err - } - - if v := len(resp.Instances); v != 1 { - return fmt.Errorf("Expected 1 request returned, got %d", v) - } - - *opsinst = *resp.Instances[0] - - return nil - } -} - -func testAccCheckAWSOpsworksInstanceAttributes( - opsinst *opsworks.Instance) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - // Depending on the timing, the state could be requested or stopped - if *opsinst.Status != "stopped" && *opsinst.Status != "requested" { - return fmt.Errorf("Unexpected request status: %s", *opsinst.Status) - } - if *opsinst.AvailabilityZone != "us-west-2a" { - return fmt.Errorf("Unexpected availability zone: %s", *opsinst.AvailabilityZone) - } - if *opsinst.Architecture != "x86_64" { - return fmt.Errorf("Unexpected architecture: %s", *opsinst.Architecture) - } - if *opsinst.Tenancy != "default" { - return fmt.Errorf("Unexpected tenancy: %s", *opsinst.Tenancy) - } - if *opsinst.InfrastructureClass != "ec2" { - return fmt.Errorf("Unexpected infrastructure class: %s", *opsinst.InfrastructureClass) - } - if *opsinst.RootDeviceType != "ebs" { - return fmt.Errorf("Unexpected root device type: %s", *opsinst.RootDeviceType) - } - if *opsinst.VirtualizationType != "hvm" { - return fmt.Errorf("Unexpected virtualization type: %s", *opsinst.VirtualizationType) - } - return nil - } -} - -func testAccCheckAwsOpsworksInstanceDestroy(s *terraform.State) error { - opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_opsworks_instance" { - continue - } - req := &opsworks.DescribeInstancesInput{ - InstanceIds: []*string{ - aws.String(rs.Primary.ID), - }, - } - - _, err := opsworksconn.DescribeInstances(req) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - // not found, good to go - return nil - } - } - return err - } - } - - return fmt.Errorf("Fall through error on OpsWorks instance test") -} - -func testAccAwsOpsworksInstanceConfigUpdateHostName(name string) string { - return fmt.Sprintf(` -resource "aws_security_group" "tf-ops-acc-web" { - name = "%s-web" - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource 
"aws_security_group" "tf-ops-acc-php" { - name = "%s-php" - ingress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_opsworks_static_web_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-web.id}", - ] -} - -resource "aws_opsworks_php_app_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-php.id}", - ] -} - -resource "aws_opsworks_instance" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - layer_ids = [ - "${aws_opsworks_static_web_layer.tf-acc.id}", - ] - instance_type = "t2.micro" - state = "stopped" - hostname = "tf-acc2" -} - -%s - -`, name, name, testAccAwsOpsworksStackConfigVpcCreate(name)) -} - -func testAccAwsOpsworksInstanceConfigCreate(name string) string { - return fmt.Sprintf(` -resource "aws_security_group" "tf-ops-acc-web" { - name = "%s-web" - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_security_group" "tf-ops-acc-php" { - name = "%s-php" - ingress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_opsworks_static_web_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-web.id}", - ] -} - -resource "aws_opsworks_php_app_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-php.id}", - ] -} - -resource "aws_opsworks_instance" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - layer_ids = [ - "${aws_opsworks_static_web_layer.tf-acc.id}", - ] - instance_type = "t2.micro" - state = "stopped" - hostname = "tf-acc1" -} - -%s - -`, name, name, testAccAwsOpsworksStackConfigVpcCreate(name)) -} - -func 
testAccAwsOpsworksInstanceConfigUpdate(name string) string { - return fmt.Sprintf(` -resource "aws_security_group" "tf-ops-acc-web" { - name = "%s-web" - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_security_group" "tf-ops-acc-php" { - name = "%s-php" - ingress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_opsworks_static_web_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-web.id}", - ] -} - -resource "aws_opsworks_php_app_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-php.id}", - ] -} - -resource "aws_opsworks_instance" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - layer_ids = [ - "${aws_opsworks_static_web_layer.tf-acc.id}", - "${aws_opsworks_php_app_layer.tf-acc.id}", - ] - instance_type = "t2.small" - state = "stopped" - hostname = "tf-acc1" - os = "Amazon Linux 2015.09" -} - -%s - -`, name, name, testAccAwsOpsworksStackConfigVpcCreate(name)) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_java_app_layer.go b/builtin/providers/aws/resource_aws_opsworks_java_app_layer.go deleted file mode 100644 index 14679658f..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_java_app_layer.go +++ /dev/null @@ -1,42 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksJavaAppLayer() *schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "java-app", - DefaultLayerName: "Java App Server", - - Attributes: map[string]*opsworksLayerTypeAttribute{ - "jvm_type": { - AttrName: "Jvm", - Type: schema.TypeString, - Default: "openjdk", - }, - "jvm_version": { - AttrName: "JvmVersion", - Type: schema.TypeString, - Default: "7", - }, - "jvm_options": { - AttrName: "JvmOptions", - 
Type: schema.TypeString, - Default: "", - }, - "app_server": { - AttrName: "JavaAppServer", - Type: schema.TypeString, - Default: "tomcat", - }, - "app_server_version": { - AttrName: "JavaAppServerVersion", - Type: schema.TypeString, - Default: "7", - }, - }, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_memcached_layer.go b/builtin/providers/aws/resource_aws_opsworks_memcached_layer.go deleted file mode 100644 index 301d73924..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_memcached_layer.go +++ /dev/null @@ -1,22 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksMemcachedLayer() *schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "memcached", - DefaultLayerName: "Memcached", - - Attributes: map[string]*opsworksLayerTypeAttribute{ - "allocated_memory": { - AttrName: "MemcachedMemory", - Type: schema.TypeInt, - Default: 512, - }, - }, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_mysql_layer.go b/builtin/providers/aws/resource_aws_opsworks_mysql_layer.go deleted file mode 100644 index 560641a4e..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_mysql_layer.go +++ /dev/null @@ -1,27 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksMysqlLayer() *schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "db-master", - DefaultLayerName: "MySQL", - - Attributes: map[string]*opsworksLayerTypeAttribute{ - "root_password": { - AttrName: "MysqlRootPassword", - Type: schema.TypeString, - WriteOnly: true, - }, - "root_password_on_all_instances": { - AttrName: "MysqlRootPasswordUbiquitous", - Type: schema.TypeBool, - Default: true, - }, - }, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_nodejs_app_layer.go 
b/builtin/providers/aws/resource_aws_opsworks_nodejs_app_layer.go deleted file mode 100644 index d11261b63..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_nodejs_app_layer.go +++ /dev/null @@ -1,22 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksNodejsAppLayer() *schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "nodejs-app", - DefaultLayerName: "Node.js App Server", - - Attributes: map[string]*opsworksLayerTypeAttribute{ - "nodejs_version": { - AttrName: "NodejsVersion", - Type: schema.TypeString, - Default: "0.10.38", - }, - }, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_permission.go b/builtin/providers/aws/resource_aws_opsworks_permission.go deleted file mode 100644 index 6e4d5f2d1..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_permission.go +++ /dev/null @@ -1,156 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksPermission() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsOpsworksSetPermission, - Update: resourceAwsOpsworksSetPermission, - Delete: resourceAwsOpsworksPermissionDelete, - Read: resourceAwsOpsworksPermissionRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "allow_ssh": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - }, - "allow_sudo": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - }, - "user_arn": { - Type: schema.TypeString, - Required: true, - }, - // one of deny, show, deploy, manage, iam_only - "level": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws 
[]string, errors []error) { - value := v.(string) - - expected := [5]string{"deny", "show", "deploy", "manage", "iam_only"} - - found := false - for _, b := range expected { - if b == value { - found = true - } - } - if !found { - errors = append(errors, fmt.Errorf( - "%q has to be one of [deny, show, deploy, manage, iam_only]", k)) - } - return - }, - }, - "stack_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func resourceAwsOpsworksPermissionDelete(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func resourceAwsOpsworksPermissionRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.DescribePermissionsInput{ - IamUserArn: aws.String(d.Get("user_arn").(string)), - StackId: aws.String(d.Get("stack_id").(string)), - } - - log.Printf("[DEBUG] Reading OpsWorks prermissions for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) - - resp, err := client.DescribePermissions(req) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - log.Printf("[INFO] Permission not found") - d.SetId("") - return nil - } - } - return err - } - - found := false - id := "" - for _, permission := range resp.Permissions { - id = *permission.IamUserArn + *permission.StackId - - if d.Get("user_arn").(string)+d.Get("stack_id").(string) == id { - found = true - d.SetId(id) - d.Set("id", id) - d.Set("allow_ssh", permission.AllowSsh) - d.Set("allow_sudo", permission.AllowSudo) - d.Set("user_arn", permission.IamUserArn) - d.Set("stack_id", permission.StackId) - d.Set("level", permission.Level) - } - - } - - if false == found { - d.SetId("") - log.Printf("[INFO] The correct permission could not be found for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) - } - - return nil -} - -func resourceAwsOpsworksSetPermission(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*AWSClient).opsworksconn - - req := &opsworks.SetPermissionInput{ - AllowSudo: aws.Bool(d.Get("allow_sudo").(bool)), - AllowSsh: aws.Bool(d.Get("allow_ssh").(bool)), - Level: aws.String(d.Get("level").(string)), - IamUserArn: aws.String(d.Get("user_arn").(string)), - StackId: aws.String(d.Get("stack_id").(string)), - } - - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.SetPermission(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - // XXX: handle errors - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - return resource.RetryableError(cerr) - } - return resource.NonRetryableError(cerr) - } - return nil - }) - - if err != nil { - return err - } - - return resourceAwsOpsworksPermissionRead(d, meta) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_permission_test.go b/builtin/providers/aws/resource_aws_opsworks_permission_test.go deleted file mode 100644 index 9ff9c7e6e..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_permission_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSOpsworksPermission(t *testing.T) { - sName := fmt.Sprintf("tf-ops-perm-%d", acctest.RandInt()) - var opsperm opsworks.Permission - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksPermissionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksPermissionCreate(sName, "true", "true", "iam_only"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksPermissionExists( - 
"aws_opsworks_permission.tf-acc-perm", &opsperm), - testAccCheckAWSOpsworksCreatePermissionAttributes(&opsperm, true, true, "iam_only"), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_ssh", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_sudo", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "level", "iam_only", - ), - ), - }, - { - Config: testAccAwsOpsworksPermissionCreate(sName, "true", "false", "iam_only"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksPermissionExists( - "aws_opsworks_permission.tf-acc-perm", &opsperm), - testAccCheckAWSOpsworksCreatePermissionAttributes(&opsperm, true, false, "iam_only"), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_ssh", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_sudo", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "level", "iam_only", - ), - ), - }, - { - Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "deny"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksPermissionExists( - "aws_opsworks_permission.tf-acc-perm", &opsperm), - testAccCheckAWSOpsworksCreatePermissionAttributes(&opsperm, false, false, "deny"), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_ssh", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_sudo", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "level", "deny", - ), - ), - }, - { - Config: testAccAwsOpsworksPermissionCreate(sName, "false", "false", "show"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksPermissionExists( - "aws_opsworks_permission.tf-acc-perm", &opsperm), - testAccCheckAWSOpsworksCreatePermissionAttributes(&opsperm, false, false, "show"), - 
resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_ssh", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "allow_sudo", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_permission.tf-acc-perm", "level", "show", - ), - ), - }, - }, - }) -} - -func testAccCheckAWSOpsworksPermissionExists( - n string, opsperm *opsworks.Permission) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).opsworksconn - - params := &opsworks.DescribePermissionsInput{ - StackId: aws.String(rs.Primary.Attributes["stack_id"]), - IamUserArn: aws.String(rs.Primary.Attributes["user_arn"]), - } - resp, err := conn.DescribePermissions(params) - - if err != nil { - return err - } - - if v := len(resp.Permissions); v != 1 { - return fmt.Errorf("Expected 1 response returned, got %d", v) - } - - *opsperm = *resp.Permissions[0] - - return nil - } -} - -func testAccCheckAWSOpsworksCreatePermissionAttributes( - opsperm *opsworks.Permission, allowSsh bool, allowSudo bool, level string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *opsperm.AllowSsh != allowSsh { - return fmt.Errorf("Unnexpected allowSsh: %t", *opsperm.AllowSsh) - } - - if *opsperm.AllowSudo != allowSudo { - return fmt.Errorf("Unnexpected allowSudo: %t", *opsperm.AllowSudo) - } - - if *opsperm.Level != level { - return fmt.Errorf("Unnexpected level: %s", *opsperm.Level) - } - - return nil - } -} - -func testAccCheckAwsOpsworksPermissionDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AWSClient).opsworksconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_opsworks_permission" { - continue - } - - req := &opsworks.DescribePermissionsInput{ - IamUserArn: 
aws.String(rs.Primary.Attributes["user_arn"]), - } - - resp, err := client.DescribePermissions(req) - if err == nil { - if len(resp.Permissions) > 0 { - return fmt.Errorf("OpsWorks Permissions still exist.") - } - } - - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() != "ResourceNotFoundException" { - return err - } - } - } - return nil -} - -func testAccAwsOpsworksPermissionCreate(name, ssh, sudo, level string) string { - return fmt.Sprintf(` -resource "aws_opsworks_permission" "tf-acc-perm" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - allow_ssh = %s - allow_sudo = %s - user_arn = "${aws_opsworks_user_profile.user.user_arn}" - level = "%s" -} - -resource "aws_opsworks_user_profile" "user" { - user_arn = "${aws_iam_user.user.arn}" - ssh_username = "${aws_iam_user.user.name}" -} - -resource "aws_iam_user" "user" { - name = "%s" - path = "/" -} - -%s -`, ssh, sudo, level, name, testAccAwsOpsworksStackConfigVpcCreate(name)) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_php_app_layer.go b/builtin/providers/aws/resource_aws_opsworks_php_app_layer.go deleted file mode 100644 index c3176af5b..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_php_app_layer.go +++ /dev/null @@ -1,16 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksPhpAppLayer() *schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "php-app", - DefaultLayerName: "PHP App Server", - - Attributes: map[string]*opsworksLayerTypeAttribute{}, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_rails_app_layer.go b/builtin/providers/aws/resource_aws_opsworks_rails_app_layer.go deleted file mode 100644 index 55f869c6d..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_rails_app_layer.go +++ /dev/null @@ -1,47 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksRailsAppLayer() 
*schema.Resource { - layerType := &opsworksLayerType{ - TypeName: "rails-app", - DefaultLayerName: "Rails App Server", - - Attributes: map[string]*opsworksLayerTypeAttribute{ - "ruby_version": { - AttrName: "RubyVersion", - Type: schema.TypeString, - Default: "2.0.0", - }, - "app_server": { - AttrName: "RailsStack", - Type: schema.TypeString, - Default: "apache_passenger", - }, - "passenger_version": { - AttrName: "PassengerVersion", - Type: schema.TypeString, - Default: "4.0.46", - }, - "rubygems_version": { - AttrName: "RubygemsVersion", - Type: schema.TypeString, - Default: "2.2.2", - }, - "manage_bundler": { - AttrName: "ManageBundler", - Type: schema.TypeBool, - Default: true, - }, - "bundler_version": { - AttrName: "BundlerVersion", - Type: schema.TypeString, - Default: "1.5.3", - }, - }, - } - - return layerType.SchemaResource() -} diff --git a/builtin/providers/aws/resource_aws_opsworks_rails_app_layer_test.go b/builtin/providers/aws/resource_aws_opsworks_rails_app_layer_test.go deleted file mode 100644 index 710d88312..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_rails_app_layer_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role` -// and `aws-opsworks-service-role`. 
- -func TestAccAWSOpsworksRailsAppLayer(t *testing.T) { - stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksRailsAppLayerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksRailsAppLayerConfigVpcCreate(stackName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_opsworks_rails_app_layer.tf-acc", "name", stackName, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_rails_app_layer.tf-acc", "manage_bundler", "true", - ), - ), - }, - { - Config: testAccAwsOpsworksRailsAppLayerNoManageBundlerConfigVpcCreate(stackName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_opsworks_rails_app_layer.tf-acc", "name", stackName, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_rails_app_layer.tf-acc", "manage_bundler", "false", - ), - ), - }, - }, - }) -} - -func testAccCheckAwsOpsworksRailsAppLayerDestroy(s *terraform.State) error { - opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_opsworks_rails_app_layer" { - continue - } - req := &opsworks.DescribeLayersInput{ - LayerIds: []*string{ - aws.String(rs.Primary.ID), - }, - } - - _, err := opsworksconn.DescribeLayers(req) - if err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - // not found, good to go - return nil - } - } - return err - } - } - - return fmt.Errorf("Fall through error on OpsWorks custom layer test") -} - -func testAccAwsOpsworksRailsAppLayerConfigVpcCreate(name string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_opsworks_rails_app_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - name = "%s" - custom_security_group_ids = [ - 
"${aws_security_group.tf-ops-acc-layer1.id}", - "${aws_security_group.tf-ops-acc-layer2.id}", - ] -} - -%s - -%s - -`, name, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name)) -} - -func testAccAwsOpsworksRailsAppLayerNoManageBundlerConfigVpcCreate(name string) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-west-2" -} - -resource "aws_opsworks_rails_app_layer" "tf-acc" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - name = "%s" - custom_security_group_ids = [ - "${aws_security_group.tf-ops-acc-layer1.id}", - "${aws_security_group.tf-ops-acc-layer2.id}", - ] - manage_bundler = false -} - -%s - -%s - -`, name, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name)) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_rds_db_instance.go b/builtin/providers/aws/resource_aws_opsworks_rds_db_instance.go deleted file mode 100644 index d1aee9030..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_rds_db_instance.go +++ /dev/null @@ -1,202 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsOpsworksRdsDbInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsOpsworksRdsDbInstanceRegister, - Update: resourceAwsOpsworksRdsDbInstanceUpdate, - Delete: resourceAwsOpsworksRdsDbInstanceDeregister, - Read: resourceAwsOpsworksRdsDbInstanceRead, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "stack_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "rds_db_instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "db_password": { - Type: schema.TypeString, - Required: true, - Sensitive: 
true, - }, - "db_user": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAwsOpsworksRdsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - d.Partial(true) - - d.SetPartial("rds_db_instance_arn") - req := &opsworks.UpdateRdsDbInstanceInput{ - RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), - } - - requestUpdate := false - if d.HasChange("db_user") { - d.SetPartial("db_user") - req.DbUser = aws.String(d.Get("db_user").(string)) - requestUpdate = true - } - if d.HasChange("db_password") { - d.SetPartial("db_password") - req.DbPassword = aws.String(d.Get("db_password").(string)) - requestUpdate = true - } - - if true == requestUpdate { - log.Printf("[DEBUG] Opsworks RDS DB Instance Modification request: %s", req) - - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.UpdateRdsDbInstance(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) - } - return nil - }) - - if err != nil { - return err - } - - } - - d.Partial(false) - - return resourceAwsOpsworksRdsDbInstanceRead(d, meta) -} - -func resourceAwsOpsworksRdsDbInstanceDeregister(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.DeregisterRdsDbInstanceInput{ - RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), - } - - log.Printf("[DEBUG] Unregistering rds db instance '%s' from stack: %s", d.Get("rds_db_instance_arn"), d.Get("stack_id")) - - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.DeregisterRdsDbInstance(req) - if cerr != nil { - log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - if opserr.Code() == 
"ResourceNotFoundException" { - log.Printf("[INFO] The db instance could not be found. Remove it from state.") - d.SetId("") - - return nil - } - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) - } - - return nil - }) - - if err != nil { - return err - } - - return nil -} - -func resourceAwsOpsworksRdsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.DescribeRdsDbInstancesInput{ - StackId: aws.String(d.Get("stack_id").(string)), - } - - log.Printf("[DEBUG] Reading OpsWorks registerd rds db instances for stack: %s", d.Get("stack_id")) - - resp, err := client.DescribeRdsDbInstances(req) - if err != nil { - return err - } - - found := false - id := "" - for _, instance := range resp.RdsDbInstances { - id = fmt.Sprintf("%s%s", *instance.RdsDbInstanceArn, *instance.StackId) - - if fmt.Sprintf("%s%s", d.Get("rds_db_instance_arn").(string), d.Get("stack_id").(string)) == id { - found = true - d.SetId(id) - d.Set("id", id) - d.Set("stack_id", instance.StackId) - d.Set("rds_db_instance_arn", instance.RdsDbInstanceArn) - d.Set("db_user", instance.DbUser) - } - - } - - if false == found { - d.SetId("") - log.Printf("[INFO] The rds instance '%s' could not be found for stack: '%s'", d.Get("rds_db_instance_arn"), d.Get("stack_id")) - } - - return nil -} - -func resourceAwsOpsworksRdsDbInstanceRegister(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - req := &opsworks.RegisterRdsDbInstanceInput{ - StackId: aws.String(d.Get("stack_id").(string)), - RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), - DbUser: aws.String(d.Get("db_user").(string)), - DbPassword: aws.String(d.Get("db_password").(string)), - } - - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var cerr error - _, cerr = client.RegisterRdsDbInstance(req) - if cerr != nil { 
- log.Printf("[INFO] client error") - if opserr, ok := cerr.(awserr.Error); ok { - log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) - } - return resource.NonRetryableError(cerr) - } - - return nil - }) - - if err != nil { - return err - } - - return resourceAwsOpsworksRdsDbInstanceRead(d, meta) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_rds_db_instance_test.go b/builtin/providers/aws/resource_aws_opsworks_rds_db_instance_test.go deleted file mode 100644 index 84c0d86b2..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_rds_db_instance_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSOpsworksRdsDbInstance(t *testing.T) { - sName := fmt.Sprintf("test-db-instance-%d", acctest.RandInt()) - var opsdb opsworks.RdsDbInstance - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksRdsDbDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksRdsDbInstance(sName, "foo", "barbarbarbar"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksRdsDbExists( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", &opsdb), - testAccCheckAWSOpsworksCreateRdsDbAttributes(&opsdb, "foo"), - resource.TestCheckResourceAttr( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", "db_user", "foo", - ), - ), - }, - { - Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "barbarbarbar"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksRdsDbExists( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", &opsdb), - testAccCheckAWSOpsworksCreateRdsDbAttributes(&opsdb, "bar"), - 
resource.TestCheckResourceAttr( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", "db_user", "bar", - ), - ), - }, - { - Config: testAccAwsOpsworksRdsDbInstance(sName, "bar", "foofoofoofoofoo"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksRdsDbExists( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", &opsdb), - testAccCheckAWSOpsworksCreateRdsDbAttributes(&opsdb, "bar"), - resource.TestCheckResourceAttr( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", "db_user", "bar", - ), - ), - }, - { - Config: testAccAwsOpsworksRdsDbInstanceForceNew(sName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksRdsDbExists( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", &opsdb), - testAccCheckAWSOpsworksCreateRdsDbAttributes(&opsdb, "foo"), - resource.TestCheckResourceAttr( - "aws_opsworks_rds_db_instance.tf-acc-opsworks-db", "db_user", "foo", - ), - ), - }, - }, - }) -} - -func testAccCheckAWSOpsworksRdsDbExists( - n string, opsdb *opsworks.RdsDbInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if _, ok := rs.Primary.Attributes["stack_id"]; !ok { - return fmt.Errorf("Rds Db stack id is missing, should be set.") - } - - conn := testAccProvider.Meta().(*AWSClient).opsworksconn - - params := &opsworks.DescribeRdsDbInstancesInput{ - StackId: aws.String(rs.Primary.Attributes["stack_id"]), - } - resp, err := conn.DescribeRdsDbInstances(params) - - if err != nil { - return err - } - - if v := len(resp.RdsDbInstances); v != 1 { - return fmt.Errorf("Expected 1 response returned, got %d", v) - } - - *opsdb = *resp.RdsDbInstances[0] - - return nil - } -} - -func testAccCheckAWSOpsworksCreateRdsDbAttributes( - opsdb *opsworks.RdsDbInstance, user string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *opsdb.DbUser != user { - 
return fmt.Errorf("Unnexpected user: %s", *opsdb.DbUser) - } - if *opsdb.Engine != "mysql" { - return fmt.Errorf("Unnexpected engine: %s", *opsdb.Engine) - } - return nil - } -} - -func testAccCheckAwsOpsworksRdsDbDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*AWSClient).opsworksconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_opsworks_rds_db_instance" { - continue - } - - req := &opsworks.DescribeRdsDbInstancesInput{ - StackId: aws.String(rs.Primary.Attributes["stack_id"]), - } - - resp, err := client.DescribeRdsDbInstances(req) - if err == nil { - if len(resp.RdsDbInstances) > 0 { - return fmt.Errorf("OpsWorks Rds db instances still exist.") - } - } - - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() != "ResourceNotFoundException" { - return err - } - } - } - return nil -} - -func testAccAwsOpsworksRdsDbInstance(name, userName, password string) string { - return fmt.Sprintf(` -resource "aws_opsworks_rds_db_instance" "tf-acc-opsworks-db" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - rds_db_instance_arn = "${aws_db_instance.bar.arn}" - db_user = "%s" - db_password = "%s" -} - -%s - -%s -`, userName, password, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAWSDBInstanceConfig) -} - -func testAccAwsOpsworksRdsDbInstanceForceNew(name string) string { - return fmt.Sprintf(` -resource "aws_opsworks_rds_db_instance" "tf-acc-opsworks-db" { - stack_id = "${aws_opsworks_stack.tf-acc.id}" - - rds_db_instance_arn = "${aws_db_instance.foo.arn}" - db_user = "foo" - db_password = "foofoofoofoo" -} - -%s - -resource "aws_db_instance" "foo" { - allocated_storage = 10 - engine = "MySQL" - engine_version = "5.6.21" - instance_class = "db.t1.micro" - name = "baz" - password = "foofoofoofoo" - username = "foo" - parameter_group_name = "default.mysql5.6" - - skip_final_snapshot = true -} -`, testAccAwsOpsworksStackConfigVpcCreate(name)) -} diff --git a/builtin/providers/aws/resource_aws_opsworks_stack.go 
b/builtin/providers/aws/resource_aws_opsworks_stack.go deleted file mode 100644 index 496670506..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_stack.go +++ /dev/null @@ -1,591 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "os" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/opsworks" -) - -func resourceAwsOpsworksStack() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsOpsworksStackCreate, - Read: resourceAwsOpsworksStackRead, - Update: resourceAwsOpsworksStackUpdate, - Delete: resourceAwsOpsworksStackDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "agent_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "id": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "region": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "service_role_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "default_instance_profile_arn": { - Type: schema.TypeString, - Required: true, - }, - - "color": { - Type: schema.TypeString, - Optional: true, - }, - - "configuration_manager_name": { - Type: schema.TypeString, - Optional: true, - Default: "Chef", - }, - - "configuration_manager_version": { - Type: schema.TypeString, - Optional: true, - Default: "11.10", - }, - - "manage_berkshelf": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "berkshelf_version": { - Type: schema.TypeString, - Optional: true, - Default: "3.2.0", - }, - - "custom_cookbooks_source": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: 
&schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - - "url": { - Type: schema.TypeString, - Required: true, - }, - - "username": { - Type: schema.TypeString, - Optional: true, - }, - - "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - - "revision": { - Type: schema.TypeString, - Optional: true, - }, - - "ssh_key": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "custom_json": { - Type: schema.TypeString, - Optional: true, - }, - - "default_availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default_os": { - Type: schema.TypeString, - Optional: true, - Default: "Ubuntu 12.04 LTS", - }, - - "default_root_device_type": { - Type: schema.TypeString, - Optional: true, - Default: "instance-store", - }, - - "default_ssh_key_name": { - Type: schema.TypeString, - Optional: true, - }, - - "default_subnet_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "hostname_theme": { - Type: schema.TypeString, - Optional: true, - Default: "Layer_Dependent", - }, - - "use_custom_cookbooks": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "use_opsworks_security_groups": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "vpc_id": { - Type: schema.TypeString, - ForceNew: true, - Computed: true, - Optional: true, - }, - - "stack_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsOpsworksStackValidate(d *schema.ResourceData) error { - cookbooksSourceCount := d.Get("custom_cookbooks_source.#").(int) - if cookbooksSourceCount > 1 { - return fmt.Errorf("Only one custom_cookbooks_source is permitted") - } - - vpcId := d.Get("vpc_id").(string) - if vpcId != "" { - if d.Get("default_subnet_id").(string) == "" { - return fmt.Errorf("default_subnet_id must be set if vpc_id is set") - } - } else { - if 
d.Get("default_availability_zone").(string) == "" { - return fmt.Errorf("either vpc_id or default_availability_zone must be set") - } - } - - return nil -} - -func resourceAwsOpsworksStackCustomCookbooksSource(d *schema.ResourceData) *opsworks.Source { - count := d.Get("custom_cookbooks_source.#").(int) - if count == 0 { - return nil - } - - return &opsworks.Source{ - Type: aws.String(d.Get("custom_cookbooks_source.0.type").(string)), - Url: aws.String(d.Get("custom_cookbooks_source.0.url").(string)), - Username: aws.String(d.Get("custom_cookbooks_source.0.username").(string)), - Password: aws.String(d.Get("custom_cookbooks_source.0.password").(string)), - Revision: aws.String(d.Get("custom_cookbooks_source.0.revision").(string)), - SshKey: aws.String(d.Get("custom_cookbooks_source.0.ssh_key").(string)), - } -} - -func resourceAwsOpsworksSetStackCustomCookbooksSource(d *schema.ResourceData, v *opsworks.Source) { - nv := make([]interface{}, 0, 1) - if v != nil && v.Type != nil && *v.Type != "" { - m := make(map[string]interface{}) - if v.Type != nil { - m["type"] = *v.Type - } - if v.Url != nil { - m["url"] = *v.Url - } - if v.Username != nil { - m["username"] = *v.Username - } - if v.Revision != nil { - m["revision"] = *v.Revision - } - // v.Password will, on read, contain the placeholder string - // "*****FILTERED*****", so we ignore it on read and let persist - // the value already in the state. 
- nv = append(nv, m) - } - - err := d.Set("custom_cookbooks_source", nv) - if err != nil { - // should never happen - panic(err) - } -} - -func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - var conErr error - if v := d.Get("stack_endpoint").(string); v != "" { - client, conErr = opsworksConnForRegion(v, meta) - if conErr != nil { - return conErr - } - } - - req := &opsworks.DescribeStacksInput{ - StackIds: []*string{ - aws.String(d.Id()), - }, - } - - log.Printf("[DEBUG] Reading OpsWorks stack: %s", d.Id()) - - // notFound represents the number of times we've called DescribeStacks looking - // for this Stack. If it's not found in the the default region we're in, we - // check us-east-1 in the event this stack was created with Terraform before - // version 0.9 - // See https://github.com/hashicorp/terraform/issues/12842 - var notFound int - var resp *opsworks.DescribeStacksOutput - var dErr error - - for { - resp, dErr = client.DescribeStacks(req) - if dErr != nil { - if awserr, ok := dErr.(awserr.Error); ok { - if awserr.Code() == "ResourceNotFoundException" { - if notFound < 1 { - // If we haven't already, try us-east-1, legacy connection - notFound++ - var connErr error - client, connErr = opsworksConnForRegion("us-east-1", meta) - if connErr != nil { - return connErr - } - // start again from the top of the FOR loop, but with a client - // configured to talk to us-east-1 - continue - } - - // We've tried both the original and us-east-1 endpoint, and the stack - // is still not found - log.Printf("[DEBUG] OpsWorks stack (%s) not found", d.Id()) - d.SetId("") - return nil - } - // not ResoureNotFoundException, fall through to returning error - } - return dErr - } - // If the stack was found, set the stack_endpoint - if client.Config.Region != nil && *client.Config.Region != "" { - log.Printf("[DEBUG] Setting stack_endpoint for (%s) to (%s)", d.Id(), *client.Config.Region) - if err := 
d.Set("stack_endpoint", *client.Config.Region); err != nil { - log.Printf("[WARN] Error setting stack_endpoint: %s", err) - } - } - log.Printf("[DEBUG] Breaking stack endpoint search, found stack for (%s)", d.Id()) - // Break the FOR loop - break - } - - stack := resp.Stacks[0] - d.Set("agent_version", stack.AgentVersion) - d.Set("name", stack.Name) - d.Set("region", stack.Region) - d.Set("default_instance_profile_arn", stack.DefaultInstanceProfileArn) - d.Set("service_role_arn", stack.ServiceRoleArn) - d.Set("default_availability_zone", stack.DefaultAvailabilityZone) - d.Set("default_os", stack.DefaultOs) - d.Set("default_root_device_type", stack.DefaultRootDeviceType) - d.Set("default_ssh_key_name", stack.DefaultSshKeyName) - d.Set("default_subnet_id", stack.DefaultSubnetId) - d.Set("hostname_theme", stack.HostnameTheme) - d.Set("use_custom_cookbooks", stack.UseCustomCookbooks) - if stack.CustomJson != nil { - d.Set("custom_json", stack.CustomJson) - } - d.Set("use_opsworks_security_groups", stack.UseOpsworksSecurityGroups) - d.Set("vpc_id", stack.VpcId) - if color, ok := stack.Attributes["Color"]; ok { - d.Set("color", color) - } - if stack.ConfigurationManager != nil { - d.Set("configuration_manager_name", stack.ConfigurationManager.Name) - d.Set("configuration_manager_version", stack.ConfigurationManager.Version) - } - if stack.ChefConfiguration != nil { - d.Set("berkshelf_version", stack.ChefConfiguration.BerkshelfVersion) - d.Set("manage_berkshelf", stack.ChefConfiguration.ManageBerkshelf) - } - resourceAwsOpsworksSetStackCustomCookbooksSource(d, stack.CustomCookbooksSource) - - return nil -} - -// opsworksConn will return a connection for the stack_endpoint in the -// configuration. Stacks can only be accessed or managed within the endpoint -// in which they are created, so we allow users to specify an original endpoint -// for Stacks created before multiple endpoints were offered (Terraform v0.9.0). 
-// See: -// - https://github.com/hashicorp/terraform/pull/12688 -// - https://github.com/hashicorp/terraform/issues/12842 -func opsworksConnForRegion(region string, meta interface{}) (*opsworks.OpsWorks, error) { - originalConn := meta.(*AWSClient).opsworksconn - - // Regions are the same, no need to reconfigure - if originalConn.Config.Region != nil && *originalConn.Config.Region == region { - return originalConn, nil - } - - // Set up base session - sess, err := session.NewSession(&originalConn.Config) - if err != nil { - return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err) - } - - sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent) - - if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" { - sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure) - } - - newSession := sess.Copy(&aws.Config{Region: aws.String(region)}) - newOpsworksconn := opsworks.New(newSession) - - log.Printf("[DEBUG] Returning new OpsWorks client") - return newOpsworksconn, nil -} - -func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - - err := resourceAwsOpsworksStackValidate(d) - if err != nil { - return err - } - - req := &opsworks.CreateStackInput{ - DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)), - Name: aws.String(d.Get("name").(string)), - Region: aws.String(d.Get("region").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), - DefaultOs: aws.String(d.Get("default_os").(string)), - UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)), - } - req.ConfigurationManager = &opsworks.StackConfigurationManager{ - Name: aws.String(d.Get("configuration_manager_name").(string)), - Version: aws.String(d.Get("configuration_manager_version").(string)), - } - inVpc := false - if vpcId, ok := d.GetOk("vpc_id"); ok { - req.VpcId = aws.String(vpcId.(string)) - inVpc = true 
- } - if defaultSubnetId, ok := d.GetOk("default_subnet_id"); ok { - req.DefaultSubnetId = aws.String(defaultSubnetId.(string)) - } - if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok { - req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string)) - } - if defaultRootDeviceType, ok := d.GetOk("default_root_device_type"); ok { - req.DefaultRootDeviceType = aws.String(defaultRootDeviceType.(string)) - } - - log.Printf("[DEBUG] Creating OpsWorks stack: %s", req) - - var resp *opsworks.CreateStackOutput - err = resource.Retry(20*time.Minute, func() *resource.RetryError { - var cerr error - resp, cerr = client.CreateStack(req) - if cerr != nil { - if opserr, ok := cerr.(awserr.Error); ok { - // If Terraform is also managing the service IAM role, - // it may have just been created and not yet be - // propagated. - // AWS doesn't provide a machine-readable code for this - // specific error, so we're forced to do fragile message - // matching. - // The full error we're looking for looks something like - // the following: - // Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes - propErr := "not yet propagated" - trustErr := "not the necessary trust relationship" - validateErr := "validate IAM role permission" - if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr) || strings.Contains(opserr.Message(), validateErr)) { - log.Printf("[INFO] Waiting for service IAM role to propagate") - return resource.RetryableError(cerr) - } - } - return resource.NonRetryableError(cerr) - } - return nil - }) - if err != nil { - return err - } - - stackId := *resp.StackId - d.SetId(stackId) - d.Set("id", stackId) - - if inVpc && *req.UseOpsworksSecurityGroups { - // For VPC-based stacks, OpsWorks asynchronously creates some default - // security groups which must exist before layers can be created. 
- // Unfortunately it doesn't tell us what the ids of these are, so - // we can't actually check for them. Instead, we just wait a nominal - // amount of time for their creation to complete. - log.Print("[INFO] Waiting for OpsWorks built-in security groups to be created") - time.Sleep(30 * time.Second) - } - - return resourceAwsOpsworksStackUpdate(d, meta) -} - -func resourceAwsOpsworksStackUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - var conErr error - if v := d.Get("stack_endpoint").(string); v != "" { - client, conErr = opsworksConnForRegion(v, meta) - if conErr != nil { - return conErr - } - } - - err := resourceAwsOpsworksStackValidate(d) - if err != nil { - return err - } - - req := &opsworks.UpdateStackInput{ - CustomJson: aws.String(d.Get("custom_json").(string)), - DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)), - DefaultRootDeviceType: aws.String(d.Get("default_root_device_type").(string)), - DefaultSshKeyName: aws.String(d.Get("default_ssh_key_name").(string)), - Name: aws.String(d.Get("name").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), - StackId: aws.String(d.Id()), - UseCustomCookbooks: aws.Bool(d.Get("use_custom_cookbooks").(bool)), - UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)), - Attributes: make(map[string]*string), - CustomCookbooksSource: resourceAwsOpsworksStackCustomCookbooksSource(d), - } - if v, ok := d.GetOk("agent_version"); ok { - req.AgentVersion = aws.String(v.(string)) - } - if v, ok := d.GetOk("default_os"); ok { - req.DefaultOs = aws.String(v.(string)) - } - if v, ok := d.GetOk("default_subnet_id"); ok { - req.DefaultSubnetId = aws.String(v.(string)) - } - if v, ok := d.GetOk("default_availability_zone"); ok { - req.DefaultAvailabilityZone = aws.String(v.(string)) - } - if v, ok := d.GetOk("hostname_theme"); ok { - req.HostnameTheme = aws.String(v.(string)) - } - if v, ok 
:= d.GetOk("color"); ok { - req.Attributes["Color"] = aws.String(v.(string)) - } - - req.ChefConfiguration = &opsworks.ChefConfiguration{ - BerkshelfVersion: aws.String(d.Get("berkshelf_version").(string)), - ManageBerkshelf: aws.Bool(d.Get("manage_berkshelf").(bool)), - } - - req.ConfigurationManager = &opsworks.StackConfigurationManager{ - Name: aws.String(d.Get("configuration_manager_name").(string)), - Version: aws.String(d.Get("configuration_manager_version").(string)), - } - - log.Printf("[DEBUG] Updating OpsWorks stack: %s", req) - - _, err = client.UpdateStack(req) - if err != nil { - return err - } - - return resourceAwsOpsworksStackRead(d, meta) -} - -func resourceAwsOpsworksStackDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*AWSClient).opsworksconn - var conErr error - if v := d.Get("stack_endpoint").(string); v != "" { - client, conErr = opsworksConnForRegion(v, meta) - if conErr != nil { - return conErr - } - } - - req := &opsworks.DeleteStackInput{ - StackId: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting OpsWorks stack: %s", d.Id()) - - _, err := client.DeleteStack(req) - if err != nil { - return err - } - - // For a stack in a VPC, OpsWorks has created some default security groups - // in the VPC, which it will now delete. - // Unfortunately, the security groups are deleted asynchronously and there - // is no robust way for us to determine when it is done. The VPC itself - // isn't deletable until the security groups are cleaned up, so this could - // make 'terraform destroy' fail if the VPC is also managed and we don't - // wait for the security groups to be deleted. - // There is no robust way to check for this, so we'll just wait a - // nominal amount of time. 
- _, inVpc := d.GetOk("vpc_id") - _, useOpsworksDefaultSg := d.GetOk("use_opsworks_security_group") - - if inVpc && useOpsworksDefaultSg { - log.Print("[INFO] Waiting for Opsworks built-in security groups to be deleted") - time.Sleep(30 * time.Second) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go deleted file mode 100644 index f04302f9d..000000000 --- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go +++ /dev/null @@ -1,988 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/opsworks" -) - -/////////////////////////////// -//// Tests for the No-VPC case -/////////////////////////////// - -func TestAccAWSOpsworksStackNoVpc(t *testing.T) { - stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) - var opsstack opsworks.Stack - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksStackDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &opsstack), - testAccCheckAWSOpsworksCreateStackAttributes( - &opsstack, "us-east-1a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsCreate( - "us-east-1a", stackName), - ), - }, - }, - }) -} - -func TestAccAWSOpsworksStackNoVpcChangeServiceRoleForceNew(t *testing.T) { - stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) - var before, after opsworks.Stack - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckAwsOpsworksStackDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &before), - ), - }, - { - Config: testAccAwsOpsworksStackConfigNoVpcCreateUpdateServiceRole(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &after), - testAccCheckAWSOpsworksStackRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func TestAccAWSOpsworksStackVpc(t *testing.T) { - stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) - var opsstack opsworks.Stack - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksStackDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksStackConfigVpcCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", true, &opsstack), - testAccCheckAWSOpsworksCreateStackAttributes( - &opsstack, "us-west-2a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsCreate( - "us-west-2a", stackName), - ), - }, - { - Config: testAccAWSOpsworksStackConfigVpcUpdate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", true, &opsstack), - testAccCheckAWSOpsworksUpdateStackAttributes( - &opsstack, "us-west-2a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsUpdate( - "us-west-2a", stackName), - ), - }, - }, - }) -} - -// Tests the addition of regional endpoints and supporting the classic link used -// to create Stack's prior to v0.9.0. 
-// See https://github.com/hashicorp/terraform/issues/12842 -func TestAccAWSOpsWorksStack_classic_endpoints(t *testing.T) { - stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) - rInt := acctest.RandInt() - var opsstack opsworks.Stack - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksStackDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsWorksStack_classic_endpoint(stackName, rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.main", false, &opsstack), - ), - }, - // Ensure that changing to us-west-2 region results in no plan - { - Config: testAccAwsOpsWorksStack_regional_endpoint(stackName, rInt), - PlanOnly: true, - }, - }, - }) - -} - -func testAccCheckAWSOpsworksStackRecreated(t *testing.T, - before, after *opsworks.Stack) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.StackId == *after.StackId { - t.Fatalf("Expected change of Opsworks StackIds, but both were %v", before.StackId) - } - return nil - } -} - -func testAccAwsOpsWorksStack_classic_endpoint(rName string, rInt int) string { - return fmt.Sprintf(` -provider "aws" { - region = "us-east-1" -} - -resource "aws_opsworks_stack" "main" { - name = "%s" - region = "us-west-2" - service_role_arn = "${aws_iam_role.opsworks_service.arn}" - default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}" - - configuration_manager_version = "12" - default_availability_zone = "us-west-2b" -} - -resource "aws_iam_role" "opsworks_service" { - name = "tf_opsworks_service_%d" - - assume_role_policy = < 0 { - return fmt.Errorf("OpsWorks User Profiles still exist.") - } - } - - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() != "ResourceNotFoundException" { - return err - } - } - } - return nil -} - -func testAccAwsOpsworksUserProfileCreate(rn string) string { - return 
fmt.Sprintf(` -resource "aws_opsworks_user_profile" "user" { - user_arn = "${aws_iam_user.user.arn}" - ssh_username = "${aws_iam_user.user.name}" -} - -resource "aws_iam_user" "user" { - name = "%s" - path = "/" -} - `, rn) -} - -func testAccAwsOpsworksUserProfileUpdate(rn, updateRn string) string { - return fmt.Sprintf(` -resource "aws_opsworks_user_profile" "user" { - user_arn = "${aws_iam_user.new-user.arn}" - ssh_username = "${aws_iam_user.new-user.name}" -} - -resource "aws_iam_user" "user" { - name = "%s" - path = "/" -} - -resource "aws_iam_user" "new-user" { - name = "%s" - path = "/" -} - `, rn, updateRn) -} diff --git a/builtin/providers/aws/resource_aws_placement_group.go b/builtin/providers/aws/resource_aws_placement_group.go deleted file mode 100644 index e5da78c9e..000000000 --- a/builtin/providers/aws/resource_aws_placement_group.go +++ /dev/null @@ -1,153 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsPlacementGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsPlacementGroupCreate, - Read: resourceAwsPlacementGroupRead, - Delete: resourceAwsPlacementGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "strategy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsPlacementGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - name := d.Get("name").(string) - input := ec2.CreatePlacementGroupInput{ - GroupName: aws.String(name), - Strategy: aws.String(d.Get("strategy").(string)), - } - 
log.Printf("[DEBUG] Creating EC2 Placement group: %s", input) - _, err := conn.CreatePlacementGroup(&input) - if err != nil { - return err - } - - wait := resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Timeout: 5 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ - GroupNames: []*string{aws.String(name)}, - }) - - if err != nil { - return out, "", err - } - - if len(out.PlacementGroups) == 0 { - return out, "", fmt.Errorf("Placement group not found (%q)", name) - } - pg := out.PlacementGroups[0] - - return out, *pg.State, nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - log.Printf("[DEBUG] EC2 Placement group created: %q", name) - - d.SetId(name) - - return resourceAwsPlacementGroupRead(d, meta) -} - -func resourceAwsPlacementGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - input := ec2.DescribePlacementGroupsInput{ - GroupNames: []*string{aws.String(d.Id())}, - } - out, err := conn.DescribePlacementGroups(&input) - if err != nil { - return err - } - pg := out.PlacementGroups[0] - - log.Printf("[DEBUG] Received EC2 Placement Group: %s", pg) - - d.Set("name", pg.GroupName) - d.Set("strategy", pg.Strategy) - - return nil -} - -func resourceAwsPlacementGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Deleting EC2 Placement Group %q", d.Id()) - _, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{ - GroupName: aws.String(d.Id()), - }) - if err != nil { - return err - } - - wait := resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{"deleted"}, - Timeout: 5 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - out, err := 
conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ - GroupNames: []*string{aws.String(d.Id())}, - }) - - if err != nil { - awsErr := err.(awserr.Error) - if awsErr.Code() == "InvalidPlacementGroup.Unknown" { - return out, "deleted", nil - } - return out, "", awsErr - } - - if len(out.PlacementGroups) == 0 { - return out, "deleted", nil - } - - pg := out.PlacementGroups[0] - - return out, *pg.State, nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_placement_group_test.go b/builtin/providers/aws/resource_aws_placement_group_test.go deleted file mode 100644 index 8743975c2..000000000 --- a/builtin/providers/aws/resource_aws_placement_group_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func TestAccAWSPlacementGroup_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSPlacementGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSPlacementGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSPlacementGroupExists("aws_placement_group.pg"), - ), - }, - }, - }) -} - -func testAccCheckAWSPlacementGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_placement_group" { - continue - } - - _, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ - GroupNames: []*string{aws.String(rs.Primary.Attributes["name"])}, - }) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && 
ae.Code() == "InvalidPlacementGroup.Unknown" { - continue - } - return err - } - - return fmt.Errorf("still exists") - } - return nil -} - -func testAccCheckAWSPlacementGroupExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Placement Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ - GroupNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return fmt.Errorf("Placement Group error: %v", err) - } - return nil - } -} - -func testAccCheckAWSDestroyPlacementGroup(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Placement Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{ - GroupName: aws.String(rs.Primary.ID), - }) - - if err != nil { - return fmt.Errorf("Error destroying Placement Group (%s): %s", rs.Primary.ID, err) - } - return nil - } -} - -var testAccAWSPlacementGroupConfig = ` -resource "aws_placement_group" "pg" { - name = "tf-test-pg" - strategy = "cluster" -} -` diff --git a/builtin/providers/aws/resource_aws_proxy_protocol_policy.go b/builtin/providers/aws/resource_aws_proxy_protocol_policy.go deleted file mode 100644 index ae7d61dc9..000000000 --- a/builtin/providers/aws/resource_aws_proxy_protocol_policy.go +++ /dev/null @@ -1,267 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsProxyProtocolPolicy() *schema.Resource { 
- return &schema.Resource{ - Create: resourceAwsProxyProtocolPolicyCreate, - Read: resourceAwsProxyProtocolPolicyRead, - Update: resourceAwsProxyProtocolPolicyUpdate, - Delete: resourceAwsProxyProtocolPolicyDelete, - - Schema: map[string]*schema.Schema{ - "load_balancer": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "instance_ports": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - }, - }, - } -} - -func resourceAwsProxyProtocolPolicyCreate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbname := aws.String(d.Get("load_balancer").(string)) - - input := &elb.CreateLoadBalancerPolicyInput{ - LoadBalancerName: elbname, - PolicyAttributes: []*elb.PolicyAttribute{ - &elb.PolicyAttribute{ - AttributeName: aws.String("ProxyProtocol"), - AttributeValue: aws.String("True"), - }, - }, - PolicyName: aws.String("TFEnableProxyProtocol"), - PolicyTypeName: aws.String("ProxyProtocolPolicyType"), - } - - // Create a policy - log.Printf("[DEBUG] ELB create a policy %s from policy type %s", - *input.PolicyName, *input.PolicyTypeName) - - if _, err := elbconn.CreateLoadBalancerPolicy(input); err != nil { - return fmt.Errorf("Error creating a policy %s: %s", - *input.PolicyName, err) - } - - // Assign the policy name for use later - d.Partial(true) - d.SetId(fmt.Sprintf("%s:%s", *elbname, *input.PolicyName)) - d.SetPartial("load_balancer") - log.Printf("[INFO] ELB PolicyName: %s", *input.PolicyName) - - return resourceAwsProxyProtocolPolicyUpdate(d, meta) -} - -func resourceAwsProxyProtocolPolicyRead(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbname := aws.String(d.Get("load_balancer").(string)) - - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{elbname}, - } - resp, err := 
elbconn.DescribeLoadBalancers(req) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving ELB attributes: %s", err) - } - - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) - - ports := []*string{} - for ip := range backends { - ipstr := strconv.Itoa(int(ip)) - ports = append(ports, &ipstr) - } - d.Set("instance_ports", ports) - d.Set("load_balancer", *elbname) - return nil -} - -func resourceAwsProxyProtocolPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbname := aws.String(d.Get("load_balancer").(string)) - - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{elbname}, - } - resp, err := elbconn.DescribeLoadBalancers(req) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving ELB attributes: %s", err) - } - - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) - _, policyName := resourceAwsProxyProtocolPolicyParseId(d.Id()) - - d.Partial(true) - if d.HasChange("instance_ports") { - o, n := d.GetChange("instance_ports") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - inputs := []*elb.SetLoadBalancerPoliciesForBackendServerInput{} - - i, err := resourceAwsProxyProtocolPolicyRemove(policyName, remove, backends) - if err != nil { - return err - } - inputs = append(inputs, i...) - - i, err = resourceAwsProxyProtocolPolicyAdd(policyName, add, backends) - if err != nil { - return err - } - inputs = append(inputs, i...) 
- - for _, input := range inputs { - input.LoadBalancerName = elbname - if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(input); err != nil { - return fmt.Errorf("Error setting policy for backend: %s", err) - } - } - - d.SetPartial("instance_ports") - } - - return resourceAwsProxyProtocolPolicyRead(d, meta) -} - -func resourceAwsProxyProtocolPolicyDelete(d *schema.ResourceData, meta interface{}) error { - elbconn := meta.(*AWSClient).elbconn - elbname := aws.String(d.Get("load_balancer").(string)) - - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{elbname}, - } - var err error - resp, err := elbconn.DescribeLoadBalancers(req) - if err != nil { - if isLoadBalancerNotFound(err) { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving ELB attributes: %s", err) - } - - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) - ports := d.Get("instance_ports").(*schema.Set).List() - _, policyName := resourceAwsProxyProtocolPolicyParseId(d.Id()) - - inputs, err := resourceAwsProxyProtocolPolicyRemove(policyName, ports, backends) - if err != nil { - return fmt.Errorf("Error detaching a policy from backend: %s", err) - } - for _, input := range inputs { - input.LoadBalancerName = elbname - if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(input); err != nil { - return fmt.Errorf("Error setting policy for backend: %s", err) - } - } - - pOpt := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: elbname, - PolicyName: aws.String(policyName), - } - if _, err := elbconn.DeleteLoadBalancerPolicy(pOpt); err != nil { - return fmt.Errorf("Error removing a policy from load balancer: %s", err) - } - - return nil -} - -func resourceAwsProxyProtocolPolicyRemove(policyName string, ports []interface{}, backends map[int64][]string) 
([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { - inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) - for _, p := range ports { - ip, err := strconv.ParseInt(p.(string), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error detaching the policy: %s", err) - } - - newPolicies := []*string{} - curPolicies, found := backends[ip] - if !found { - // No policy for this instance port found, just skip it. - continue - } - - for _, policy := range curPolicies { - if policy == policyName { - // remove the policy - continue - } - newPolicies = append(newPolicies, &policy) - } - - inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: &ip, - PolicyNames: newPolicies, - }) - } - return inputs, nil -} - -func resourceAwsProxyProtocolPolicyAdd(policyName string, ports []interface{}, backends map[int64][]string) ([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { - inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) - for _, p := range ports { - ip, err := strconv.ParseInt(p.(string), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error attaching the policy: %s", err) - } - - newPolicies := []*string{} - curPolicies := backends[ip] - for _, p := range curPolicies { - if p == policyName { - // Just remove it for now. It will be back later. - continue - } else { - newPolicies = append(newPolicies, &p) - } - } - newPolicies = append(newPolicies, aws.String(policyName)) - - inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: &ip, - PolicyNames: newPolicies, - }) - } - return inputs, nil -} - -// resourceAwsProxyProtocolPolicyParseId takes an ID and parses it into -// it's constituent parts. You need two axes (LB name, policy name) -// to create or identify a proxy protocol policy in AWS's API. 
-func resourceAwsProxyProtocolPolicyParseId(id string) (string, string) { - parts := strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} diff --git a/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go b/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go deleted file mode 100644 index a1e36f7b7..000000000 --- a/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSProxyProtocolPolicy_basic(t *testing.T) { - lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckProxyProtocolPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccProxyProtocolPolicyConfig(lbName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "load_balancer", lbName), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.#", "1"), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.4196041389", "25"), - ), - }, - resource.TestStep{ - Config: testAccProxyProtocolPolicyConfigUpdate(lbName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "load_balancer", lbName), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.#", "2"), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.4196041389", "25"), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.1925441437", "587"), - ), - }, - }, - 
}) -} - -func testAccCheckProxyProtocolPolicyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elbconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_placement_group" { - continue - } - - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{ - aws.String(rs.Primary.Attributes["load_balancer"])}, - } - _, err := conn.DescribeLoadBalancers(req) - if err != nil { - // Verify the error is what we want - if isLoadBalancerNotFound(err) { - continue - } - return err - } - - return fmt.Errorf("still exists") - } - return nil -} - -func testAccProxyProtocolPolicyConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 25 - instance_protocol = "tcp" - lb_port = 25 - lb_protocol = "tcp" - } - - listener { - instance_port = 587 - instance_protocol = "tcp" - lb_port = 587 - lb_protocol = "tcp" - } -} - -resource "aws_proxy_protocol_policy" "smtp" { - load_balancer = "${aws_elb.lb.name}" - instance_ports = ["25"] -}`, rName) -} - -func testAccProxyProtocolPolicyConfigUpdate(rName string) string { - return fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 25 - instance_protocol = "tcp" - lb_port = 25 - lb_protocol = "tcp" - } - - listener { - instance_port = 587 - instance_protocol = "tcp" - lb_port = 587 - lb_protocol = "tcp" - } -} - -resource "aws_proxy_protocol_policy" "smtp" { - load_balancer = "${aws_elb.lb.name}" - instance_ports = ["25", "587"] -}`, rName) -} diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go deleted file mode 100644 index 8fc72ce5b..000000000 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ /dev/null @@ -1,740 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRDSCluster() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRDSClusterCreate, - Read: resourceAwsRDSClusterRead, - Update: resourceAwsRDSClusterUpdate, - Delete: resourceAwsRDSClusterDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsRdsClusterImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(120 * time.Minute), - Update: schema.DefaultTimeout(120 * time.Minute), - Delete: schema.DefaultTimeout(120 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - - "availability_zones": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ForceNew: true, - Computed: true, - Set: schema.HashString, - }, - - "cluster_identifier": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"cluster_identifier_prefix"}, - ValidateFunc: validateRdsIdentifier, - }, - "cluster_identifier_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateRdsIdentifierPrefix, - }, - - "cluster_members": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, - - "database_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "db_subnet_group_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "db_cluster_parameter_group_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "reader_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "engine": { - Type: schema.TypeString, - Computed: true, - 
}, - - "storage_encrypted": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "final_snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) - } - return - }, - }, - - "skip_final_snapshot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "master_username": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - - "master_password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - - "snapshot_identifier": { - Type: schema.TypeString, - Computed: false, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "port": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - // apply_immediately is used to determine when the update modifications - // take place. 
- // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "vpc_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "preferred_backup_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateOnceADayWindowFormat, - }, - - "preferred_maintenance_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: func(val interface{}) string { - if val == nil { - return "" - } - return strings.ToLower(val.(string)) - }, - ValidateFunc: validateOnceAWeekWindowFormat, - }, - - "backup_retention_period": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value > 35 { - es = append(es, fmt.Errorf( - "backup retention period cannot be more than 35 days")) - } - return - }, - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - - "replication_source_identifier": { - Type: schema.TypeString, - Optional: true, - }, - - "iam_database_authentication_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - - "cluster_resource_id": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsRdsClusterImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required - d.Set("skip_final_snapshot", true) - return []*schema.ResourceData{d}, nil -} - -func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta 
interface{}) error { - conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - var identifier string - if v, ok := d.GetOk("cluster_identifier"); ok { - identifier = v.(string) - } else { - if v, ok := d.GetOk("cluster_identifier_prefix"); ok { - identifier = resource.PrefixedUniqueId(v.(string)) - } else { - identifier = resource.PrefixedUniqueId("tf-") - } - - d.Set("cluster_identifier", identifier) - } - - if _, ok := d.GetOk("snapshot_identifier"); ok { - opts := rds.RestoreDBClusterFromSnapshotInput{ - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), - Engine: aws.String("aurora"), - Tags: tags, - } - - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - opts.AvailabilityZones = expandStringList(attr.List()) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("database_name"); ok { - opts.DatabaseName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("option_group_name"); ok { - opts.OptionGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) - } - - var sgUpdate bool - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - sgUpdate = true - opts.VpcSecurityGroupIds = expandStringList(attr.List()) - } - - log.Printf("[DEBUG] RDS Cluster restore from snapshot configuration: %s", opts) - _, err := conn.RestoreDBClusterFromSnapshot(&opts) - if err != nil { - return fmt.Errorf("Error creating RDS Cluster: %s", err) - } - - if sgUpdate { - log.Printf("[INFO] RDS Cluster is restoring from snapshot with default security, but custom security should be set, will now update after snapshot is restored!") - - d.SetId(d.Get("cluster_identifier").(string)) - - log.Printf("[INFO] RDS Cluster Instance ID: %s", d.Id()) - - 
log.Println("[INFO] Waiting for RDS Cluster to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying", "preparing-data-migration", "migrating"}, - Target: []string{"available"}, - Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - err = resourceAwsRDSClusterInstanceUpdate(d, meta) - if err != nil { - return err - } - } - } else if _, ok := d.GetOk("replication_source_identifier"); ok { - createOpts := &rds.CreateDBClusterInput{ - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Engine: aws.String("aurora"), - StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), - ReplicationSourceIdentifier: aws.String(d.Get("replication_source_identifier").(string)), - Tags: tags, - } - - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) - } - - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) - } - - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) - } - - if v, ok := d.GetOk("backup_retention_period"); ok { - createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = 
aws.String(v.(string)) - } - - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] Create RDS Cluster as read replica: %s", createOpts) - resp, err := conn.CreateDBCluster(createOpts) - if err != nil { - log.Printf("[ERROR] Error creating RDS Cluster: %s", err) - return err - } - - log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) - - } else { - if _, ok := d.GetOk("master_password"); !ok { - return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: "master_password": required field is not set`, d.Get("database_name").(string)) - } - - if _, ok := d.GetOk("master_username"); !ok { - return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: "master_username": required field is not set`, d.Get("database_name").(string)) - } - - createOpts := &rds.CreateDBClusterInput{ - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Engine: aws.String("aurora"), - MasterUserPassword: aws.String(d.Get("master_password").(string)), - MasterUsername: aws.String(d.Get("master_username").(string)), - StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), - Tags: tags, - } - - if v := d.Get("database_name"); v.(string) != "" { - createOpts.DatabaseName = aws.String(v.(string)) - } - - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) - } - - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) - } - - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) - } - - if v, ok := d.GetOk("backup_retention_period"); ok { - 
createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) - } - - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { - createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) - } - - log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) - resp, err := conn.CreateDBCluster(createOpts) - if err != nil { - log.Printf("[ERROR] Error creating RDS Cluster: %s", err) - return err - } - - log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) - } - - d.SetId(d.Get("cluster_identifier").(string)) - - log.Printf("[INFO] RDS Cluster ID: %s", d.Id()) - - log.Println( - "[INFO] Waiting for RDS Cluster to be available") - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying"}, - Target: []string{"available"}, - Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err) - } - - return resourceAwsRDSClusterRead(d, meta) -} - -func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(d.Id()), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "DBClusterNotFoundFault" == awsErr.Code() { - d.SetId("") - log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id()) - return nil - } - } 
- log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id()) - return err - } - - var dbc *rds.DBCluster - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == d.Id() { - dbc = c - } - } - - if dbc == nil { - log.Printf("[WARN] RDS Cluster (%s) not found", d.Id()) - d.SetId("") - return nil - } - - if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err) - } - - // Only set the DatabaseName if it is not nil. There is a known API bug where - // RDS accepts a DatabaseName but does not return it, causing a perpetual - // diff. - // See https://github.com/hashicorp/terraform/issues/4671 for backstory - if dbc.DatabaseName != nil { - d.Set("database_name", dbc.DatabaseName) - } - - d.Set("cluster_identifier", dbc.DBClusterIdentifier) - d.Set("cluster_resource_id", dbc.DbClusterResourceId) - d.Set("db_subnet_group_name", dbc.DBSubnetGroup) - d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) - d.Set("endpoint", dbc.Endpoint) - d.Set("engine", dbc.Engine) - d.Set("master_username", dbc.MasterUsername) - d.Set("port", dbc.Port) - d.Set("storage_encrypted", dbc.StorageEncrypted) - d.Set("backup_retention_period", dbc.BackupRetentionPeriod) - d.Set("preferred_backup_window", dbc.PreferredBackupWindow) - d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) - d.Set("kms_key_id", dbc.KmsKeyId) - d.Set("reader_endpoint", dbc.ReaderEndpoint) - d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier) - d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled) - - var vpcg []string - for _, g := range dbc.VpcSecurityGroups { - vpcg = append(vpcg, *g.VpcSecurityGroupId) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), 
err) - } - - var cm []string - for _, m := range dbc.DBClusterMembers { - cm = append(cm, *m.DBInstanceIdentifier) - } - if err := d.Set("cluster_members", cm); err != nil { - return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) - } - - // Fetch and save tags - arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for RDS Cluster (%s), not setting Tags", *dbc.DBClusterIdentifier) - } else { - if err := saveTagsRDS(conn, d, arn); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster (%s): %s", *dbc.DBClusterIdentifier, err) - } - } - - return nil -} - -func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - requestUpdate := false - - req := &rds.ModifyDBClusterInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBClusterIdentifier: aws.String(d.Id()), - } - - if d.HasChange("master_password") { - req.MasterUserPassword = aws.String(d.Get("master_password").(string)) - requestUpdate = true - } - - if d.HasChange("vpc_security_group_ids") { - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = expandStringList(attr.List()) - } else { - req.VpcSecurityGroupIds = []*string{} - } - requestUpdate = true - } - - if d.HasChange("preferred_backup_window") { - req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) - requestUpdate = true - } - - if d.HasChange("preferred_maintenance_window") { - req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) - requestUpdate = true - } - - if d.HasChange("backup_retention_period") { - req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) - requestUpdate = true - } - - if 
d.HasChange("db_cluster_parameter_group_name") { - d.SetPartial("db_cluster_parameter_group_name") - req.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("iam_database_authentication_enabled") { - req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) - requestUpdate = true - } - - if requestUpdate { - _, err := conn.ModifyDBCluster(req) - if err != nil { - return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) - } - } - - if arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - return resourceAwsRDSClusterRead(d, meta) -} - -func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) - - deleteOpts := rds.DeleteDBClusterInput{ - DBClusterIdentifier: aws.String(d.Id()), - } - - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - deleteOpts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) - - if skipFinalSnapshot == false { - if name, present := d.GetOk("final_snapshot_identifier"); present { - deleteOpts.FinalDBSnapshotIdentifier = aws.String(name.(string)) - } else { - return fmt.Errorf("RDS Cluster FinalSnapshotIdentifier is required when a final snapshot is required") - } - } - - log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts) - _, err := conn.DeleteDBCluster(&deleteOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "InvalidDBClusterStateFault" == awsErr.Code() { - return fmt.Errorf("RDS Cluster cannot be deleted: %s", awsErr.Message()) - } - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "deleting", "backing-up", 
"modifying"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err) - } - - return nil -} - -func resourceAwsRDSClusterStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).rdsconn - - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(d.Id()), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "DBClusterNotFoundFault" == awsErr.Code() { - return 42, "destroyed", nil - } - } - log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err) - return nil, "", err - } - - var dbc *rds.DBCluster - - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == d.Id() { - dbc = c - } - } - - if dbc == nil { - return 42, "destroyed", nil - } - - if dbc.Status != nil { - log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status) - } - - return dbc, *dbc.Status, nil - } -} - -func buildRDSClusterARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") - } - - arn := fmt.Sprintf("arn:%s:rds:%s:%s:cluster:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go deleted file mode 100644 index 41bf2d03e..000000000 --- 
a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ /dev/null @@ -1,438 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRDSClusterInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRDSClusterInstanceCreate, - Read: resourceAwsRDSClusterInstanceRead, - Update: resourceAwsRDSClusterInstanceUpdate, - Delete: resourceAwsRDSClusterInstanceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Update: schema.DefaultTimeout(90 * time.Minute), - Delete: schema.DefaultTimeout(90 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "identifier": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"identifier_prefix"}, - ValidateFunc: validateRdsIdentifier, - }, - "identifier_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateRdsIdentifierPrefix, - }, - - "db_subnet_group_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "writer": { - Type: schema.TypeBool, - Computed: true, - }, - - "cluster_identifier": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "port": { - Type: schema.TypeInt, - Computed: true, - }, - - "publicly_accessible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "instance_class": { - Type: schema.TypeString, - Required: true, - }, - - "db_parameter_group_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - // apply_immediately is used to determine when 
the update modifications - // take place. - // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "kms_key_id": { - Type: schema.TypeString, - Computed: true, - }, - - "storage_encrypted": { - Type: schema.TypeBool, - Computed: true, - }, - - "auto_minor_version_upgrade": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "monitoring_role_arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "preferred_maintenance_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: func(v interface{}) string { - if v != nil { - value := v.(string) - return strings.ToLower(value) - } - return "" - }, - ValidateFunc: validateOnceAWeekWindowFormat, - }, - - "preferred_backup_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateOnceADayWindowFormat, - }, - - "monitoring_interval": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - - "promotion_tier": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - - createOpts := &rds.CreateDBInstanceInput{ - DBInstanceClass: aws.String(d.Get("instance_class").(string)), - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Engine: aws.String("aurora"), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - PromotionTier: aws.Int64(int64(d.Get("promotion_tier").(int))), - AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), - Tags: tags, - } - - if attr, ok := d.GetOk("db_parameter_group_name"); ok { - createOpts.DBParameterGroupName = aws.String(attr.(string)) - } - - if 
v, ok := d.GetOk("identifier"); ok { - createOpts.DBInstanceIdentifier = aws.String(v.(string)) - } else { - if v, ok := d.GetOk("identifier_prefix"); ok { - createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId(v.(string))) - } else { - createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId("tf-")) - } - } - - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_role_arn"); ok { - createOpts.MonitoringRoleArn = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(attr.(string)) - } - - if attr, ok := d.GetOk("monitoring_interval"); ok { - createOpts.MonitoringInterval = aws.Int64(int64(attr.(int))) - } - - log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) - resp, err := conn.CreateDBInstance(createOpts) - if err != nil { - return err - } - - d.SetId(*resp.DBInstance.DBInstanceIdentifier) - - // reuse db_instance refresh func - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying"}, - Target: []string{"available"}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsRDSClusterInstanceRead(d, meta) -} - -func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error { - db, err := resourceAwsDbInstanceRetrieve(d, meta) - // Errors from this helper are always reportable - if err != nil { - return fmt.Errorf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err) - } - // A nil 
response means "not found" - if db == nil { - log.Printf("[WARN] RDS Cluster Instance (%s): not found, removing from state.", d.Id()) - d.SetId("") - return nil - } - - // Retrieve DB Cluster information, to determine if this Instance is a writer - conn := meta.(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: db.DBClusterIdentifier, - }) - - var dbc *rds.DBCluster - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == *db.DBClusterIdentifier { - dbc = c - } - } - - if dbc == nil { - return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s", - *db.DBClusterIdentifier, *db.DBInstanceIdentifier, err) - } - - for _, m := range dbc.DBClusterMembers { - if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier { - if *m.IsClusterWriter == true { - d.Set("writer", true) - } else { - d.Set("writer", false) - } - } - } - - if db.Endpoint != nil { - d.Set("endpoint", db.Endpoint.Address) - d.Set("port", db.Endpoint.Port) - } - - d.Set("publicly_accessible", db.PubliclyAccessible) - d.Set("cluster_identifier", db.DBClusterIdentifier) - d.Set("instance_class", db.DBInstanceClass) - d.Set("identifier", db.DBInstanceIdentifier) - d.Set("storage_encrypted", db.StorageEncrypted) - d.Set("kms_key_id", db.KmsKeyId) - d.Set("auto_minor_version_upgrade", db.AutoMinorVersionUpgrade) - d.Set("promotion_tier", db.PromotionTier) - d.Set("preferred_backup_window", db.PreferredBackupWindow) - d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow) - - if db.MonitoringInterval != nil { - d.Set("monitoring_interval", db.MonitoringInterval) - } - - if db.MonitoringRoleArn != nil { - d.Set("monitoring_role_arn", db.MonitoringRoleArn) - } - - if len(db.DBParameterGroups) > 0 { - d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName) - } - - // Fetch and save tags - arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, 
meta.(*AWSClient).region) - if err != nil { - log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier) - } else { - if err := saveTagsRDS(conn, d, arn); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err) - } - } - - return nil -} - -func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - requestUpdate := false - - req := &rds.ModifyDBInstanceInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBInstanceIdentifier: aws.String(d.Id()), - } - - if d.HasChange("db_parameter_group_name") { - req.DBParameterGroupName = aws.String(d.Get("db_parameter_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("instance_class") { - req.DBInstanceClass = aws.String(d.Get("instance_class").(string)) - requestUpdate = true - } - - if d.HasChange("monitoring_role_arn") { - d.SetPartial("monitoring_role_arn") - req.MonitoringRoleArn = aws.String(d.Get("monitoring_role_arn").(string)) - requestUpdate = true - } - - if d.HasChange("preferred_backup_window") { - d.SetPartial("preferred_backup_window") - req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) - requestUpdate = true - } - - if d.HasChange("preferred_maintenance_window") { - d.SetPartial("preferred_maintenance_window") - req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) - requestUpdate = true - } - - if d.HasChange("monitoring_interval") { - d.SetPartial("monitoring_interval") - req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int))) - requestUpdate = true - } - - if d.HasChange("auto_minor_version_upgrade") { - d.SetPartial("auto_minor_version_upgrade") - req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) - requestUpdate = true - } - - if d.HasChange("promotion_tier") { 
- d.SetPartial("promotion_tier") - req.PromotionTier = aws.Int64(int64(d.Get("promotion_tier").(int))) - requestUpdate = true - } - - log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate) - if requestUpdate { - log.Printf("[DEBUG] DB Instance Modification request: %#v", req) - _, err := conn.ModifyDBInstance(req) - if err != nil { - return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) - } - - // reuse db_instance refresh func - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying"}, - Target: []string{"available"}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - } - - if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } - } - - return resourceAwsRDSClusterInstanceRead(d, meta) -} - -func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - - log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id()) - - opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} - - log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts) - if _, err := conn.DeleteDBInstance(&opts); err != nil { - return err - } - - // re-uses db_instance refresh func - log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed") - stateConf := &resource.StateChangeConf{ - Pending: []string{"modifying", "deleting"}, - Target: []string{}, - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // 
Wait 30 secs before starting - } - - if _, err := stateConf.WaitForState(); err != nil { - return err - } - - return nil - -} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go deleted file mode 100644 index e3f6987f5..000000000 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" -) - -func TestAccAWSRDSClusterInstance_basic(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceConfig(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - resource.TestCheckResourceAttr("aws_rds_cluster_instance.cluster_instances", "auto_minor_version_upgrade", "true"), - resource.TestCheckResourceAttrSet("aws_rds_cluster_instance.cluster_instances", "preferred_maintenance_window"), - resource.TestCheckResourceAttrSet("aws_rds_cluster_instance.cluster_instances", "preferred_backup_window"), - ), - }, - { - Config: testAccAWSClusterInstanceConfigModified(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - resource.TestCheckResourceAttr("aws_rds_cluster_instance.cluster_instances", 
"auto_minor_version_upgrade", "false"), - ), - }, - }, - }) -} - -func TestAccAWSRDSClusterInstance_namePrefix(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceConfig_namePrefix(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.test", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - resource.TestMatchResourceAttr( - "aws_rds_cluster_instance.test", "identifier", regexp.MustCompile("^tf-cluster-instance-")), - ), - }, - }, - }) -} - -func TestAccAWSRDSClusterInstance_generatedName(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceConfig_generatedName(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.test", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - resource.TestMatchResourceAttr( - "aws_rds_cluster_instance.test", "identifier", regexp.MustCompile("^tf-")), - ), - }, - }, - }) -} - -func TestAccAWSRDSClusterInstance_kmsKey(t *testing.T) { - var v rds.DBInstance - keyRegex := regexp.MustCompile("^arn:aws:kms:") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceConfigKmsKey(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - resource.TestMatchResourceAttr( - 
"aws_rds_cluster_instance.cluster_instances", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -// https://github.com/hashicorp/terraform/issues/5350 -func TestAccAWSRDSClusterInstance_disappears(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceConfig(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - testAccAWSClusterInstanceDisappears(&v), - ), - // A non-empty plan is what we want. A crash is what we don't want. :) - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.Engine != "aurora" { - return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine) - } - - if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") { - return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier) - } - - return nil - } -} - -func testAccAWSClusterInstanceDisappears(v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - opts := &rds.DeleteDBInstanceInput{ - DBInstanceIdentifier: v.DBInstanceIdentifier, - } - if _, err := conn.DeleteDBInstance(opts); err != nil { - return err - } - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: v.DBInstanceIdentifier, - } - _, err := conn.DescribeDBInstances(opts) - if err != nil { - dbinstanceerr, ok := err.(awserr.Error) - if ok && dbinstanceerr.Code() == "DBInstanceNotFound" { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("Error 
retrieving DB Instances: %s", err)) - } - return resource.RetryableError(fmt.Errorf( - "Waiting for instance to be deleted: %v", v.DBInstanceIdentifier)) - }) - } -} - -func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - for _, d := range resp.DBInstances { - if *d.DBInstanceIdentifier == rs.Primary.ID { - *v = *d - return nil - } - } - - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) - } -} - -func TestAccAWSRDSClusterInstance_withInstanceEnhancedMonitor(t *testing.T) { - var v rds.DBInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterInstanceEnhancedMonitor(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - ), - }, - }, - }) -} - -// Add some random to the name, to avoid collision -func testAccAWSClusterInstanceConfig(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-test-%d" - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true -} - -resource "aws_rds_cluster_instance" "cluster_instances" { - identifier = "tf-cluster-instance-%d" - 
cluster_identifier = "${aws_rds_cluster.default.id}" - instance_class = "db.r3.large" - db_parameter_group_name = "${aws_db_parameter_group.bar.name}" - promotion_tier = "3" -} - -resource "aws_db_parameter_group" "bar" { - name = "tfcluster-test-group-%d" - family = "aurora5.6" - - parameter { - name = "back_log" - value = "32767" - apply_method = "pending-reboot" - } - - tags { - foo = "bar" - } -} -`, n, n, n) -} - -func testAccAWSClusterInstanceConfigModified(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-test-%d" - availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true -} - -resource "aws_rds_cluster_instance" "cluster_instances" { - identifier = "tf-cluster-instance-%d" - cluster_identifier = "${aws_rds_cluster.default.id}" - instance_class = "db.r3.large" - db_parameter_group_name = "${aws_db_parameter_group.bar.name}" - auto_minor_version_upgrade = false - promotion_tier = "3" -} - -resource "aws_db_parameter_group" "bar" { - name = "tfcluster-test-group-%d" - family = "aurora5.6" - - parameter { - name = "back_log" - value = "32767" - apply_method = "pending-reboot" - } - - tags { - foo = "bar" - } -} -`, n, n, n) -} - -func testAccAWSClusterInstanceConfig_namePrefix(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster_instance" "test" { - identifier_prefix = "tf-cluster-instance-" - cluster_identifier = "${aws_rds_cluster.test.id}" - instance_class = "db.r3.large" -} - -resource "aws_rds_cluster" "test" { - cluster_identifier = "tf-aurora-cluster-%d" - master_username = "root" - master_password = "password" - db_subnet_group_name = "${aws_db_subnet_group.test.name}" - skip_final_snapshot = true -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSClusterInstanceConfig_namePrefix" - } -} - -resource 
"aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.0.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - name = "tf-test-%d" - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] -} -`, n, n) -} - -func testAccAWSClusterInstanceConfig_generatedName(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster_instance" "test" { - cluster_identifier = "${aws_rds_cluster.test.id}" - instance_class = "db.r3.large" -} - -resource "aws_rds_cluster" "test" { - cluster_identifier = "tf-aurora-cluster-%d" - master_username = "root" - master_password = "password" - db_subnet_group_name = "${aws_db_subnet_group.test.name}" - skip_final_snapshot = true -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSClusterInstanceConfig_generatedName" - } -} - -resource "aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.0.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - name = "tf-test-%d" - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] -} -`, n, n) -} - -func testAccAWSClusterInstanceConfigKmsKey(n int) string { - return fmt.Sprintf(` - -resource "aws_kms_key" "foo" { - description = "Terraform acc test %d" - policy = < 0 { - dt = resp.TagList - } - d.Set("tags", tagsToMapRDS(dt)) - } - - return nil -} - -func resourceAwsRDSClusterParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { - rdsconn := meta.(*AWSClient).rdsconn - - d.Partial(true) - - if d.HasChange("parameter") { - o, n := d.GetChange("parameter") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := 
n.(*schema.Set) - - // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter - parameters, err := expandParameters(ns.Difference(os).List()) - if err != nil { - return err - } - - if len(parameters) > 0 { - modifyOpts := rds.ModifyDBClusterParameterGroupInput{ - DBClusterParameterGroupName: aws.String(d.Get("name").(string)), - Parameters: parameters, - } - - log.Printf("[DEBUG] Modify DB Cluster Parameter Group: %s", modifyOpts) - _, err = rdsconn.ModifyDBClusterParameterGroup(&modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying DB Cluster Parameter Group: %s", err) - } - } - d.SetPartial("parameter") - } - - if arn, err := buildRDSCPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { - if err := setTagsRDS(rdsconn, d, arn); err != nil { - return err - } else { - d.SetPartial("tags") - } - } - - d.Partial(false) - - return resourceAwsRDSClusterParameterGroupRead(d, meta) -} - -func resourceAwsRDSClusterParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsRDSClusterParameterGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - -func resourceAwsRDSClusterParameterGroupDeleteRefreshFunc( - d *schema.ResourceData, - meta interface{}) resource.StateRefreshFunc { - rdsconn := meta.(*AWSClient).rdsconn - - return func() (interface{}, string, error) { - - deleteOpts := rds.DeleteDBClusterParameterGroupInput{ - DBClusterParameterGroupName: aws.String(d.Id()), - } - - if _, err := rdsconn.DeleteDBClusterParameterGroup(&deleteOpts); err != nil { - rdserr, ok := err.(awserr.Error) - if !ok { - return d, "error", err - } - - if rdserr.Code() != "DBParameterGroupNotFound" { - return d, "error", err - } - } - - return d, "destroyed", nil - } -} - -func 
buildRDSCPGARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:rds:%s:%s:cluster-pg:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_parameter_group_test.go b/builtin/providers/aws/resource_aws_rds_cluster_parameter_group_test.go deleted file mode 100644 index 231fdf44c..000000000 --- a/builtin/providers/aws/resource_aws_rds_cluster_parameter_group_test.go +++ /dev/null @@ -1,418 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "regexp" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDBClusterParameterGroup_basic(t *testing.T) { - var v rds.DBClusterParameterGroup - - parameterGroupName := fmt.Sprintf("cluster-parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBClusterParameterGroupConfig(parameterGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.bar", &v), - testAccCheckAWSDBClusterParameterGroupAttributes(&v, parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "name", parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "family", 
"aurora5.6"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "description", "Test cluster parameter group for terraform"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "tags.%", "1"), - ), - }, - { - Config: testAccAWSDBClusterParameterGroupAddParametersConfig(parameterGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.bar", &v), - testAccCheckAWSDBClusterParameterGroupAttributes(&v, parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "name", parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "family", "aurora5.6"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "description", "Test cluster parameter group for terraform"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1706463059.name", "collation_connection"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1706463059.value", "utf8_unicode_ci"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1708034931.name", 
"character_set_results"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2475805061.name", "collation_server"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2475805061.value", "utf8_unicode_ci"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "tags.%", "2"), - ), - }, - }, - }) -} - -func TestAccAWSDBClusterParameterGroup_namePrefix(t *testing.T) { - var v rds.DBClusterParameterGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBClusterParameterGroupConfig_namePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.test", &v), - resource.TestMatchResourceAttr( - "aws_rds_cluster_parameter_group.test", "name", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSDBClusterParameterGroup_generatedName(t *testing.T) { - var v rds.DBClusterParameterGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSDBClusterParameterGroupConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.test", &v), - ), - }, - }, - }) -} - -func TestAccAWSDBClusterParameterGroup_disappears(t *testing.T) { - var v rds.DBClusterParameterGroup - - parameterGroupName := fmt.Sprintf("cluster-parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBClusterParameterGroupConfig(parameterGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.bar", &v), - testAccAWSDBClusterParameterGroupDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSDBClusterParameterGroupOnly(t *testing.T) { - var v rds.DBClusterParameterGroup - - parameterGroupName := fmt.Sprintf("cluster-parameter-group-test-tf-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBClusterParameterGroupOnlyConfig(parameterGroupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.bar", &v), - testAccCheckAWSDBClusterParameterGroupAttributes(&v, parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "name", parameterGroupName), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "family", "aurora5.6"), - resource.TestCheckResourceAttr( - "aws_rds_cluster_parameter_group.bar", "description", "Managed by Terraform"), - ), - }, - }, - }) -} - -func 
TestResourceAWSDBClusterParameterGroupName_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting123", - ErrCount: 1, - }, - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "1testing123", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbParamGroupName(tc.Value, "aws_rds_cluster_parameter_group_name") - - if len(errors) != tc.ErrCount { - t.Fatal("Expected the DB Cluster Parameter Group Name to trigger a validation error") - } - } -} - -func testAccCheckAWSDBClusterParameterGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster_parameter_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeDBClusterParameterGroups( - &rds.DescribeDBClusterParameterGroupsInput{ - DBClusterParameterGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBClusterParameterGroups) != 0 && - *resp.DBClusterParameterGroups[0].DBClusterParameterGroupName == rs.Primary.ID { - return errors.New("DB Cluster Parameter Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "DBParameterGroupNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSDBClusterParameterGroupAttributes(v *rds.DBClusterParameterGroup, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *v.DBClusterParameterGroupName != name { - return fmt.Errorf("bad name: %#v expected: %v", *v.DBClusterParameterGroupName, name) - } - - if *v.DBParameterGroupFamily != "aurora5.6" { - return fmt.Errorf("bad family: %#v", *v.DBParameterGroupFamily) - } - - return nil - } -} - -func 
testAccAWSDBClusterParameterGroupDisappears(v *rds.DBClusterParameterGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).rdsconn - opts := &rds.DeleteDBClusterParameterGroupInput{ - DBClusterParameterGroupName: v.DBClusterParameterGroupName, - } - if _, err := conn.DeleteDBClusterParameterGroup(opts); err != nil { - return err - } - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &rds.DescribeDBClusterParameterGroupsInput{ - DBClusterParameterGroupName: v.DBClusterParameterGroupName, - } - _, err := conn.DescribeDBClusterParameterGroups(opts) - if err != nil { - dbparamgrouperr, ok := err.(awserr.Error) - if ok && dbparamgrouperr.Code() == "DBParameterGroupNotFound" { - return nil - } - return resource.NonRetryableError( - fmt.Errorf("Error retrieving DB Cluster Parameter Groups: %s", err)) - } - return resource.RetryableError(fmt.Errorf( - "Waiting for cluster parameter group to be deleted: %v", v.DBClusterParameterGroupName)) - }) - } -} - -func testAccCheckAWSDBClusterParameterGroupExists(n string, v *rds.DBClusterParameterGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No DB Cluster Parameter Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - - opts := rds.DescribeDBClusterParameterGroupsInput{ - DBClusterParameterGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeDBClusterParameterGroups(&opts) - - if err != nil { - return err - } - - if len(resp.DBClusterParameterGroups) != 1 || - *resp.DBClusterParameterGroups[0].DBClusterParameterGroupName != rs.Primary.ID { - return errors.New("DB Cluster Parameter Group not found") - } - - *v = *resp.DBClusterParameterGroups[0] - - return nil - } -} - -func testAccAWSDBClusterParameterGroupConfig(name string) 
string { - return fmt.Sprintf(` -resource "aws_rds_cluster_parameter_group" "bar" { - name = "%s" - family = "aurora5.6" - description = "Test cluster parameter group for terraform" - - parameter { - name = "character_set_server" - value = "utf8" - } - - parameter { - name = "character_set_client" - value = "utf8" - } - - parameter { - name = "character_set_results" - value = "utf8" - } - - tags { - foo = "bar" - } -} -`, name) -} - -func testAccAWSDBClusterParameterGroupAddParametersConfig(name string) string { - return fmt.Sprintf(` -resource "aws_rds_cluster_parameter_group" "bar" { - name = "%s" - family = "aurora5.6" - description = "Test cluster parameter group for terraform" - - parameter { - name = "character_set_server" - value = "utf8" - } - - parameter { - name = "character_set_client" - value = "utf8" - } - - parameter { - name = "character_set_results" - value = "utf8" - } - - parameter { - name = "collation_server" - value = "utf8_unicode_ci" - } - - parameter { - name = "collation_connection" - value = "utf8_unicode_ci" - } - - tags { - foo = "bar" - baz = "foo" - } -} -`, name) -} - -func testAccAWSDBClusterParameterGroupOnlyConfig(name string) string { - return fmt.Sprintf(`resource "aws_rds_cluster_parameter_group" "bar" { - name = "%s" - family = "aurora5.6" -}`, name) -} - -const testAccAWSDBClusterParameterGroupConfig_namePrefix = ` -resource "aws_rds_cluster_parameter_group" "test" { - name_prefix = "tf-test-" - family = "aurora5.6" -} -` -const testAccAWSDBClusterParameterGroupConfig_generatedName = ` -resource "aws_rds_cluster_parameter_group" "test" { - family = "aurora5.6" -} -` diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go deleted file mode 100644 index 5023bd84a..000000000 --- a/builtin/providers/aws/resource_aws_rds_cluster_test.go +++ /dev/null @@ -1,593 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" -) - -func TestAccAWSRDSCluster_basic(t *testing.T) { - var v rds.DBCluster - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "storage_encrypted", "false"), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "db_cluster_parameter_group_name", "default.aurora5.6"), - resource.TestCheckResourceAttrSet( - "aws_rds_cluster.default", "reader_endpoint"), - resource.TestCheckResourceAttrSet( - "aws_rds_cluster.default", "cluster_resource_id"), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_namePrefix(t *testing.T) { - var v rds.DBCluster - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig_namePrefix(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.test", &v), - resource.TestMatchResourceAttr( - "aws_rds_cluster.test", "cluster_identifier", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_generatedName(t *testing.T) { - var v rds.DBCluster - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSClusterConfig_generatedName(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.test", &v), - resource.TestMatchResourceAttr( - "aws_rds_cluster.test", "cluster_identifier", regexp.MustCompile("^tf-")), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_takeFinalSnapshot(t *testing.T) { - var v rds.DBCluster - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterSnapshot(rInt), - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfigWithFinalSnapshot(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - ), - }, - }, - }) -} - -/// This is a regression test to make sure that we always cover the scenario as hightlighted in -/// https://github.com/hashicorp/terraform/issues/11568 -func TestAccAWSRDSCluster_missingUserNameCausesError(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfigWithoutUserNameAndPassword(acctest.RandInt()), - ExpectError: regexp.MustCompile(`required field is not set`), - }, - }, - }) -} - -func TestAccAWSRDSCluster_updateTags(t *testing.T) { - var v rds.DBCluster - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig(ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "tags.%", "1"), - ), - }, - { - Config: testAccAWSClusterConfigUpdatedTags(ri), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "tags.%", "2"), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_kmsKey(t *testing.T) { - var v rds.DBCluster - keyRegex := regexp.MustCompile("^arn:aws:kms:") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig_kmsKey(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestMatchResourceAttr( - "aws_rds_cluster.default", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_encrypted(t *testing.T) { - var v rds.DBCluster - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig_encrypted(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "storage_encrypted", "true"), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "db_cluster_parameter_group_name", "default.aurora5.6"), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_backupsUpdate(t *testing.T) { - var v rds.DBCluster - - ri := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig_backups(ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "preferred_backup_window", "07:00-09:00"), - 
resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "backup_retention_period", "5"), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "preferred_maintenance_window", "tue:04:00-tue:04:30"), - ), - }, - - resource.TestStep{ - Config: testAccAWSClusterConfig_backupsUpdate(ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "preferred_backup_window", "03:00-09:00"), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "backup_retention_period", "10"), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "preferred_maintenance_window", "wed:01:00-wed:01:30"), - ), - }, - }, - }) -} - -func TestAccAWSRDSCluster_iamAuth(t *testing.T) { - var v rds.DBCluster - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSClusterConfig_iamAuth(acctest.RandInt()), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_rds_cluster.default", "iam_database_authentication_enabled", "true"), - ), - }, - }, - }) -} - -func testAccCheckAWSClusterDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster" { - continue - } - - // Try to find the Group - conn := testAccProvider.Meta().(*AWSClient).rdsconn - var err error - resp, err := conn.DescribeDBClusters( - &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } - } - - // Return nil if the cluster is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if 
awsErr.Code() == "DBClusterNotFoundFault" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSClusterSnapshot(rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster" { - continue - } - - // Try and delete the snapshot before we check for the cluster not found - snapshot_identifier := fmt.Sprintf("tf-acctest-rdscluster-snapshot-%d", rInt) - - awsClient := testAccProvider.Meta().(*AWSClient) - conn := awsClient.rdsconn - - arn, arnErr := buildRDSClusterARN(snapshot_identifier, awsClient.partition, awsClient.accountid, awsClient.region) - tagsARN := strings.Replace(arn, ":cluster:", ":snapshot:", 1) - if arnErr != nil { - return fmt.Errorf("Error building ARN for tags check with ARN (%s): %s", tagsARN, arnErr) - } - - log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier) - _, snapDeleteErr := conn.DeleteDBClusterSnapshot( - &rds.DeleteDBClusterSnapshotInput{ - DBClusterSnapshotIdentifier: aws.String(snapshot_identifier), - }) - if snapDeleteErr != nil { - return snapDeleteErr - } - - // Try to find the Group - var err error - resp, err := conn.DescribeDBClusters( - &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } - } - - // Return nil if the cluster is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DBClusterNotFoundFault" { - return nil - } - } - - return err - } - - return nil - } -} - -func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance 
ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == rs.Primary.ID { - *v = *c - return nil - } - } - - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) - } -} - -func testAccAWSClusterConfig(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-%d" - availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - db_cluster_parameter_group_name = "default.aurora5.6" - skip_final_snapshot = true - tags { - Environment = "production" - } -}`, n) -} - -func testAccAWSClusterConfig_namePrefix(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - cluster_identifier_prefix = "tf-test-" - master_username = "root" - master_password = "password" - db_subnet_group_name = "${aws_db_subnet_group.test.name}" - skip_final_snapshot = true -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSClusterConfig_namePrefix" - } -} - -resource "aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.0.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - name = "tf-test-%d" - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] -} -`, n) -} - -func testAccAWSClusterConfig_generatedName(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - master_username = "root" - master_password = "password" - db_subnet_group_name = "${aws_db_subnet_group.test.name}" - skip_final_snapshot = true -} - -resource 
"aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "testAccAWSClusterConfig_generatedName" - } -} - -resource "aws_subnet" "a" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.0.0/24" - availability_zone = "us-west-2a" -} - -resource "aws_subnet" "b" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.0.1.0/24" - availability_zone = "us-west-2b" -} - -resource "aws_db_subnet_group" "test" { - name = "tf-test-%d" - subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"] -} -`, n) -} - -func testAccAWSClusterConfigWithFinalSnapshot(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-%d" - availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - db_cluster_parameter_group_name = "default.aurora5.6" - final_snapshot_identifier = "tf-acctest-rdscluster-snapshot-%d" - tags { - Environment = "production" - } -}`, n, n) -} - -func testAccAWSClusterConfigWithoutUserNameAndPassword(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-%d" - availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] - database_name = "mydb" - skip_final_snapshot = true -}`, n) -} - -func testAccAWSClusterConfigUpdatedTags(n int) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-%d" - availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - db_cluster_parameter_group_name = "default.aurora5.6" - skip_final_snapshot = true - tags { - Environment = "production" - AnotherTag = "test" - } -}`, n) -} - -func testAccAWSClusterConfig_kmsKey(n int) string { - return fmt.Sprintf(` - - resource "aws_kms_key" "foo" { - description = "Terraform acc test %d" - policy = < 35 { - es = 
append(es, fmt.Errorf( - "backup retention period cannot be more than 35 days")) - } - return - }, - }, - - "port": { - Type: schema.TypeInt, - Optional: true, - Default: 5439, - }, - - "cluster_version": { - Type: schema.TypeString, - Optional: true, - Default: "1.0", - }, - - "allow_version_upgrade": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "number_of_nodes": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - - "publicly_accessible": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "encrypted": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "enhanced_vpc_routing": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateArn, - }, - - "elastic_ip": { - Type: schema.TypeString, - Optional: true, - }, - - "final_snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRedshiftClusterFinalSnapshotIdentifier, - }, - - "skip_final_snapshot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "endpoint": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cluster_public_key": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cluster_revision_number": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "iam_roles": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "bucket_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "s3_key_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - }, - - 
"snapshot_cluster_identifier": { - Type: schema.TypeString, - Optional: true, - }, - - "owner_account": { - Type: schema.TypeString, - Optional: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsRedshiftClusterImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required - d.Set("skip_final_snapshot", true) - return []*schema.ResourceData{d}, nil -} - -func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{})) - - if v, ok := d.GetOk("snapshot_identifier"); ok { - restoreOpts := &redshift.RestoreFromClusterSnapshotInput{ - ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - SnapshotIdentifier: aws.String(v.(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), - NodeType: aws.String(d.Get("node_type").(string)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), - } - - if v, ok := d.GetOk("owner_account"); ok { - restoreOpts.OwnerAccount = aws.String(v.(string)) - } - - if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { - restoreOpts.SnapshotClusterIdentifier = aws.String(v.(string)) - } - - if v, ok := d.GetOk("availability_zone"); ok { - restoreOpts.AvailabilityZone = aws.String(v.(string)) - } - - if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - restoreOpts.ClusterSubnetGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("cluster_parameter_group_name"); ok { - restoreOpts.ClusterParameterGroupName = aws.String(v.(string)) - } - - if v := 
d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - restoreOpts.ClusterSecurityGroups = expandStringList(v.List()) - } - - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - restoreOpts.VpcSecurityGroupIds = expandStringList(v.List()) - } - - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - restoreOpts.PreferredMaintenanceWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("kms_key_id"); ok { - restoreOpts.KmsKeyId = aws.String(v.(string)) - } - - if v, ok := d.GetOk("elastic_ip"); ok { - restoreOpts.ElasticIp = aws.String(v.(string)) - } - - if v, ok := d.GetOk("enhanced_vpc_routing"); ok { - restoreOpts.EnhancedVpcRouting = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("iam_roles"); ok { - restoreOpts.IamRoles = expandStringList(v.(*schema.Set).List()) - } - - log.Printf("[DEBUG] Redshift Cluster restore cluster options: %s", restoreOpts) - - resp, err := conn.RestoreFromClusterSnapshot(restoreOpts) - if err != nil { - log.Printf("[ERROR] Error Restoring Redshift Cluster from Snapshot: %s", err) - return err - } - - d.SetId(*resp.Cluster.ClusterIdentifier) - - } else { - if _, ok := d.GetOk("master_password"); !ok { - return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_password": required field is not set`, d.Get("cluster_identifier").(string)) - } - - if _, ok := d.GetOk("master_username"); !ok { - return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_username": required field is not set`, d.Get("cluster_identifier").(string)) - } - - createOpts := &redshift.CreateClusterInput{ - ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - MasterUserPassword: aws.String(d.Get("master_password").(string)), - MasterUsername: aws.String(d.Get("master_username").(string)), - ClusterVersion: aws.String(d.Get("cluster_version").(string)), - NodeType: aws.String(d.Get("node_type").(string)), - DBName: 
aws.String(d.Get("database_name").(string)), - AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), - Tags: tags, - } - - if v := d.Get("number_of_nodes").(int); v > 1 { - createOpts.ClusterType = aws.String("multi-node") - createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) - } else { - createOpts.ClusterType = aws.String("single-node") - } - - if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - createOpts.ClusterSecurityGroups = expandStringList(v.List()) - } - - if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(v.List()) - } - - if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - createOpts.ClusterSubnetGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("availability_zone"); ok { - createOpts.AvailabilityZone = aws.String(v.(string)) - } - - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) - } - - if v, ok := d.GetOk("cluster_parameter_group_name"); ok { - createOpts.ClusterParameterGroupName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("encrypted"); ok { - createOpts.Encrypted = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("enhanced_vpc_routing"); ok { - createOpts.EnhancedVpcRouting = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(v.(string)) - } - - if v, ok := d.GetOk("elastic_ip"); ok { - createOpts.ElasticIp = aws.String(v.(string)) - } - - if v, ok := d.GetOk("iam_roles"); ok { - createOpts.IamRoles = expandStringList(v.(*schema.Set).List()) - } - - log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts) - resp, err := conn.CreateCluster(createOpts) - if err != nil { - 
log.Printf("[ERROR] Error creating Redshift Cluster: %s", err) - return err - } - - log.Printf("[DEBUG]: Cluster create response: %s", resp) - d.SetId(*resp.Cluster.ClusterIdentifier) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying", "restoring"}, - Target: []string{"available"}, - Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta), - Timeout: 75 * time.Minute, - MinTimeout: 10 * time.Second, - } - - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err) - } - - if _, ok := d.GetOk("enable_logging"); ok { - - loggingErr := enableRedshiftClusterLogging(d, conn) - if loggingErr != nil { - log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", err) - return loggingErr - } - - } - - return resourceAwsRedshiftClusterRead(d, meta) -} - -func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id()) - resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(d.Id()), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "ClusterNotFound" == awsErr.Code() { - d.SetId("") - log.Printf("[DEBUG] Redshift Cluster (%s) not found", d.Id()) - return nil - } - } - log.Printf("[DEBUG] Error describing Redshift Cluster (%s)", d.Id()) - return err - } - - var rsc *redshift.Cluster - for _, c := range resp.Clusters { - if *c.ClusterIdentifier == d.Id() { - rsc = c - } - } - - if rsc == nil { - log.Printf("[WARN] Redshift Cluster (%s) not found", d.Id()) - d.SetId("") - return nil - } - - log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", d.Id()) - loggingStatus, loggingErr := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{ - ClusterIdentifier: aws.String(d.Id()), - }) - - if 
loggingErr != nil { - return loggingErr - } - - d.Set("master_username", rsc.MasterUsername) - d.Set("node_type", rsc.NodeType) - d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) - d.Set("database_name", rsc.DBName) - d.Set("cluster_identifier", rsc.ClusterIdentifier) - d.Set("cluster_version", rsc.ClusterVersion) - - d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName) - d.Set("availability_zone", rsc.AvailabilityZone) - d.Set("encrypted", rsc.Encrypted) - d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting) - d.Set("kms_key_id", rsc.KmsKeyId) - d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) - d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) - if rsc.Endpoint != nil && rsc.Endpoint.Address != nil { - endpoint := *rsc.Endpoint.Address - if rsc.Endpoint.Port != nil { - endpoint = fmt.Sprintf("%s:%d", endpoint, *rsc.Endpoint.Port) - } - d.Set("port", rsc.Endpoint.Port) - d.Set("endpoint", endpoint) - } - d.Set("cluster_parameter_group_name", rsc.ClusterParameterGroups[0].ParameterGroupName) - if len(rsc.ClusterNodes) > 1 { - d.Set("cluster_type", "multi-node") - } else { - d.Set("cluster_type", "single-node") - } - d.Set("number_of_nodes", rsc.NumberOfNodes) - d.Set("publicly_accessible", rsc.PubliclyAccessible) - - var vpcg []string - for _, g := range rsc.VpcSecurityGroups { - vpcg = append(vpcg, *g.VpcSecurityGroupId) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %s", d.Id(), err) - } - - var csg []string - for _, g := range rsc.ClusterSecurityGroups { - csg = append(csg, *g.ClusterSecurityGroupName) - } - if err := d.Set("cluster_security_groups", csg); err != nil { - return fmt.Errorf("[DEBUG] Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %s", d.Id(), err) - } - - var iamRoles []string - for _, i := range rsc.IamRoles { - iamRoles = 
append(iamRoles, *i.IamRoleArn) - } - if err := d.Set("iam_roles", iamRoles); err != nil { - return fmt.Errorf("[DEBUG] Error saving IAM Roles to state for Redshift Cluster (%s): %s", d.Id(), err) - } - - d.Set("cluster_public_key", rsc.ClusterPublicKey) - d.Set("cluster_revision_number", rsc.ClusterRevisionNumber) - d.Set("tags", tagsToMapRedshift(rsc.Tags)) - - d.Set("bucket_name", loggingStatus.BucketName) - d.Set("enable_logging", loggingStatus.LoggingEnabled) - d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) - - return nil -} - -func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - d.Partial(true) - - arn, tagErr := buildRedshiftARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if tagErr != nil { - return fmt.Errorf("Error building ARN for Redshift Cluster, not updating Tags for cluster %s", d.Id()) - } else { - if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { - return tagErr - } else { - d.SetPartial("tags") - } - } - - requestUpdate := false - log.Printf("[INFO] Building Redshift Modify Cluster Options") - req := &redshift.ModifyClusterInput{ - ClusterIdentifier: aws.String(d.Id()), - } - - if d.HasChange("cluster_type") { - req.ClusterType = aws.String(d.Get("cluster_type").(string)) - requestUpdate = true - } - - if d.HasChange("node_type") { - req.NodeType = aws.String(d.Get("node_type").(string)) - requestUpdate = true - } - - if d.HasChange("number_of_nodes") { - if v := d.Get("number_of_nodes").(int); v > 1 { - req.ClusterType = aws.String("multi-node") - req.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) - } else { - req.ClusterType = aws.String("single-node") - } - - req.NodeType = aws.String(d.Get("node_type").(string)) - requestUpdate = true - } - - if d.HasChange("cluster_security_groups") { - req.ClusterSecurityGroups = expandStringList(d.Get("cluster_security_groups").(*schema.Set).List()) - 
requestUpdate = true - } - - if d.HasChange("vpc_security_group_ids") { - req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ids").(*schema.Set).List()) - requestUpdate = true - } - - if d.HasChange("master_password") { - req.MasterUserPassword = aws.String(d.Get("master_password").(string)) - requestUpdate = true - } - - if d.HasChange("cluster_parameter_group_name") { - req.ClusterParameterGroupName = aws.String(d.Get("cluster_parameter_group_name").(string)) - requestUpdate = true - } - - if d.HasChange("automated_snapshot_retention_period") { - req.AutomatedSnapshotRetentionPeriod = aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))) - requestUpdate = true - } - - if d.HasChange("preferred_maintenance_window") { - req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) - requestUpdate = true - } - - if d.HasChange("cluster_version") { - req.ClusterVersion = aws.String(d.Get("cluster_version").(string)) - requestUpdate = true - } - - if d.HasChange("allow_version_upgrade") { - req.AllowVersionUpgrade = aws.Bool(d.Get("allow_version_upgrade").(bool)) - requestUpdate = true - } - - if d.HasChange("publicly_accessible") { - req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) - requestUpdate = true - } - - if d.HasChange("enhanced_vpc_routing") { - req.EnhancedVpcRouting = aws.Bool(d.Get("enhanced_vpc_routing").(bool)) - requestUpdate = true - } - - if requestUpdate { - log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id()) - log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req) - _, err := conn.ModifyCluster(req) - if err != nil { - return fmt.Errorf("[WARN] Error modifying Redshift Cluster (%s): %s", d.Id(), err) - } - } - - if d.HasChange("iam_roles") { - o, n := d.GetChange("iam_roles") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - removeIams := os.Difference(ns).List() 
- addIams := ns.Difference(os).List() - - log.Printf("[INFO] Building Redshift Modify Cluster IAM Role Options") - req := &redshift.ModifyClusterIamRolesInput{ - ClusterIdentifier: aws.String(d.Id()), - AddIamRoles: expandStringList(addIams), - RemoveIamRoles: expandStringList(removeIams), - } - - log.Printf("[INFO] Modifying Redshift Cluster IAM Roles: %s", d.Id()) - log.Printf("[DEBUG] Redshift Cluster Modify IAM Role options: %s", req) - _, err := conn.ModifyClusterIamRoles(req) - if err != nil { - return fmt.Errorf("[WARN] Error modifying Redshift Cluster IAM Roles (%s): %s", d.Id(), err) - } - - d.SetPartial("iam_roles") - } - - if requestUpdate || d.HasChange("iam_roles") { - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"}, - Target: []string{"available"}, - Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error Modifying Redshift Cluster (%s): %s", d.Id(), err) - } - } - - if d.HasChange("enable_logging") || d.HasChange("bucket_name") || d.HasChange("s3_key_prefix") { - var loggingErr error - if _, ok := d.GetOk("enable_logging"); ok { - - log.Printf("[INFO] Enabling Logging for Redshift Cluster %q", d.Id()) - loggingErr = enableRedshiftClusterLogging(d, conn) - if loggingErr != nil { - return loggingErr - } - } else { - - log.Printf("[INFO] Disabling Logging for Redshift Cluster %q", d.Id()) - _, loggingErr = conn.DisableLogging(&redshift.DisableLoggingInput{ - ClusterIdentifier: aws.String(d.Id()), - }) - if loggingErr != nil { - return loggingErr - } - } - - d.SetPartial("enable_logging") - } - - d.Partial(false) - - return resourceAwsRedshiftClusterRead(d, meta) -} - -func enableRedshiftClusterLogging(d *schema.ResourceData, conn *redshift.Redshift) error { - if _, ok := 
d.GetOk("bucket_name"); !ok { - return fmt.Errorf("bucket_name must be set when enabling logging for Redshift Clusters") - } - - params := &redshift.EnableLoggingInput{ - ClusterIdentifier: aws.String(d.Id()), - BucketName: aws.String(d.Get("bucket_name").(string)), - } - - if v, ok := d.GetOk("s3_key_prefix"); ok { - params.S3KeyPrefix = aws.String(v.(string)) - } - - _, loggingErr := conn.EnableLogging(params) - if loggingErr != nil { - log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr) - return loggingErr - } - return nil -} - -func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - log.Printf("[DEBUG] Destroying Redshift Cluster (%s)", d.Id()) - - deleteOpts := redshift.DeleteClusterInput{ - ClusterIdentifier: aws.String(d.Id()), - } - - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot) - - if skipFinalSnapshot == false { - if name, present := d.GetOk("final_snapshot_identifier"); present { - deleteOpts.FinalClusterSnapshotIdentifier = aws.String(name.(string)) - } else { - return fmt.Errorf("Redshift Cluster Instance FinalSnapshotIdentifier is required when a final snapshot is required") - } - } - - log.Printf("[DEBUG] Redshift Cluster delete options: %s", deleteOpts) - err := resource.Retry(15*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteCluster(&deleteOpts) - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "InvalidClusterState" { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - }) - - if err != nil { - return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "creating", "deleting", "rebooting", "resizing", "renaming", "final-snapshot"}, - Target: []string{"destroyed"}, - Refresh: 
resourceAwsRedshiftClusterStateRefreshFunc(d, meta), - Timeout: 40 * time.Minute, - MinTimeout: 5 * time.Second, - } - - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err) - } - - log.Printf("[INFO] Redshift Cluster %s successfully deleted", d.Id()) - - return nil -} - -func resourceAwsRedshiftClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).redshiftconn - - log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id()) - resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(d.Id()), - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "ClusterNotFound" == awsErr.Code() { - return 42, "destroyed", nil - } - } - log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) when waiting: %s", d.Id(), err) - return nil, "", err - } - - var rsc *redshift.Cluster - - for _, c := range resp.Clusters { - if *c.ClusterIdentifier == d.Id() { - rsc = c - } - } - - if rsc == nil { - return 42, "destroyed", nil - } - - if rsc.ClusterStatus != nil { - log.Printf("[DEBUG] Redshift Cluster status (%s): %s", d.Id(), *rsc.ClusterStatus) - } - - return rsc, *rsc.ClusterStatus, nil - } -} - -func validateRedshiftClusterIdentifier(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if 
regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return -} - -func validateRedshiftClusterDbName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z_$]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters, underscores, and dollar signs are allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-zA-Z_]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter or underscore", k)) - } - if len(value) > 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters: %q", k, value)) - } - if value == "" { - errors = append(errors, fmt.Errorf( - "%q cannot be an empty string", k)) - } - - return -} - -func validateRedshiftClusterFinalSnapshotIdentifier(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q cannot end in a hyphen", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf("%q cannot be more than 255 characters", k)) - } - return -} - -func validateRedshiftClusterMasterUsername(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^\w+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters in %q", k)) - } - if !regexp.MustCompile(`^[A-Za-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if len(value) > 128 { - errors = 
append(errors, fmt.Errorf("%q cannot be more than 128 characters", k)) - } - return -} - -func validateRedshiftClusterMasterPassword(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^.*[a-z].*`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain at least one lowercase letter", k)) - } - if !regexp.MustCompile(`^.*[A-Z].*`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain at least one uppercase letter", k)) - } - if !regexp.MustCompile(`^.*[0-9].*`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain at least one number", k)) - } - if !regexp.MustCompile(`^[^\@\/'" ]*$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain [/@\"' ]", k)) - } - if len(value) < 8 { - errors = append(errors, fmt.Errorf("%q must be at least 8 characters", k)) - } - return -} - -func buildRedshiftARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct cluster ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct cluster ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:redshift:%s:%s:cluster:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_redshift_cluster_test.go b/builtin/providers/aws/resource_aws_redshift_cluster_test.go deleted file mode 100644 index 8093c0d49..000000000 --- a/builtin/providers/aws/resource_aws_redshift_cluster_test.go +++ /dev/null @@ -1,962 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "math/rand" - "regexp" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestValidateRedshiftClusterDbName(t *testing.T) { - validNames := []string{ - "testdbname", - "test_dbname", - "testdbname123", - "testdbname$hashicorp", - "_dbname", - } - for _, v := range validNames { - _, errors := validateRedshiftClusterDbName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Redshift DBName: %q", v, errors) - } - } - - invalidNames := []string{ - "!", - "/", - " ", - ":", - ";", - "test name", - "/slash-at-the-beginning", - "slash-at-the-end/", - "", - randomString(100), - "TestDBname", - } - for _, v := range invalidNames { - _, errors := validateRedshiftClusterDbName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Redshift DBName", v) - } - } -} - -func TestAccAWSRedshiftCluster_basic(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - config := fmt.Sprintf(testAccAWSRedshiftClusterConfig_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "cluster_type", "single-node"), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "publicly_accessible", "true"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_withFinalSnapshot(t *testing.T) { - var v redshift.Cluster - - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterSnapshot(rInt), - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftClusterConfigWithFinalSnapshot(rInt), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_kmsKey(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - config := fmt.Sprintf(testAccAWSRedshiftClusterConfig_kmsKey, ri, ri) - keyRegex := regexp.MustCompile("^arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)$") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "cluster_type", "single-node"), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "publicly_accessible", "true"), - resource.TestMatchResourceAttr("aws_redshift_cluster.default", "kms_key_id", keyRegex), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_enhancedVpcRoutingEnabled(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_enhancedVpcRoutingEnabled, ri) - postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_enhancedVpcRoutingDisabled, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "enhanced_vpc_routing", "true"), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "enhanced_vpc_routing", "false"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_loggingEnabled(t *testing.T) { - var v redshift.Cluster - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftClusterConfig_loggingEnabled(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "enable_logging", "true"), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "bucket_name", fmt.Sprintf("tf-redshift-logging-%d", rInt)), - ), - }, - - { - Config: testAccAWSRedshiftClusterConfig_loggingDisabled(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "enable_logging", "false"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_iamRoles(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_iamRoles, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_updateIamRoles, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "iam_roles.#", "2"), - ), - }, - - 
{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "iam_roles.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_publiclyAccessible(t *testing.T) { - var v redshift.Cluster - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftClusterConfig_notPubliclyAccessible(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "publicly_accessible", "false"), - ), - }, - - { - Config: testAccAWSRedshiftClusterConfig_updatePubliclyAccessible(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "publicly_accessible", "true"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_updateNodeCount(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_basic, ri) - postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_updateNodeCount, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "number_of_nodes", "1"), - ), - }, - - { - Config: postConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "number_of_nodes", "2"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftCluster_tags(t *testing.T) { - var v redshift.Cluster - - ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() - preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_tags, ri) - postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_updatedTags, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_redshift_cluster.default", "tags.environment", "Production"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), - resource.TestCheckResourceAttr( - "aws_redshift_cluster.default", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_redshift_cluster.default", "tags.environment", "Production"), - ), - }, - }, - }) -} - -func testAccCheckAWSRedshiftClusterDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_redshift_cluster" { - continue - } - - // Try to find the Group - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - var err error - resp, err := conn.DescribeClusters( - &redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.Clusters) != 0 && - *resp.Clusters[0].ClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("Redshift Cluster %s still exists", rs.Primary.ID) - } - } - - 
// Return nil if the cluster is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ClusterNotFound" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSRedshiftClusterSnapshot(rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_redshift_cluster" { - continue - } - - var err error - - // Try and delete the snapshot before we check for the cluster not found - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - snapshot_identifier := fmt.Sprintf("tf-acctest-snapshot-%d", rInt) - arn, err := buildRedshiftARN(snapshot_identifier, testAccProvider.Meta().(*AWSClient).partition, testAccProvider.Meta().(*AWSClient).accountid, testAccProvider.Meta().(*AWSClient).region) - tagsARN := strings.Replace(arn, ":cluster:", ":snapshot:", 1) - if err != nil { - return fmt.Errorf("Error building ARN for tags check with ARN (%s): %s", tagsARN, err) - } - - log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier) - _, snapDeleteErr := conn.DeleteClusterSnapshot( - &redshift.DeleteClusterSnapshotInput{ - SnapshotIdentifier: aws.String(snapshot_identifier), - }) - if snapDeleteErr != nil { - return err - } - - //lastly check that the Cluster is missing - resp, err := conn.DescribeClusters( - &redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.Clusters) != 0 && - *resp.Clusters[0].ClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("Redshift Cluster %s still exists", rs.Primary.ID) - } - } - - // Return nil if the cluster is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ClusterNotFound" { - return nil - } - - return err - } - - } - - return nil - } -} - -func testAccCheckAWSRedshiftClusterExists(n string, v *redshift.Cluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Redshift Cluster Instance ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - for _, c := range resp.Clusters { - if *c.ClusterIdentifier == rs.Primary.ID { - *v = *c - return nil - } - } - - return fmt.Errorf("Redshift Cluster (%s) not found", rs.Primary.ID) - } -} - -func TestResourceAWSRedshiftClusterIdentifierValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting", - ErrCount: 1, - }, - { - Value: "1testing", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing!", - ErrCount: 1, - }, - { - Value: "testing-", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftClusterIdentifier(tc.Value, "aws_redshift_cluster_identifier") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Cluster cluster_identifier to trigger a validation error") - } - } -} - -func TestResourceAWSRedshiftClusterFinalSnapshotIdentifierValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing-", - ErrCount: 1, - }, - { - Value: "Testingq123!", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftClusterFinalSnapshotIdentifier(tc.Value, "aws_redshift_cluster_final_snapshot_identifier") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Cluster final_snapshot_identifier to trigger a validation error") - } - } -} - -func TestResourceAWSRedshiftClusterMasterUsernameValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - 
}{ - { - Value: "1Testing", - ErrCount: 1, - }, - { - Value: "Testing!!", - ErrCount: 1, - }, - { - Value: randomString(129), - ErrCount: 1, - }, - { - Value: "testing_testing123", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftClusterMasterUsername(tc.Value, "aws_redshift_cluster_master_username") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Cluster master_username to trigger a validation error") - } - } -} - -func TestResourceAWSRedshiftClusterMasterPasswordValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "1TESTING", - ErrCount: 1, - }, - { - Value: "1testing", - ErrCount: 1, - }, - { - Value: "TestTest", - ErrCount: 1, - }, - { - Value: "T3st", - ErrCount: 1, - }, - { - Value: "1Testing", - ErrCount: 0, - }, - { - Value: "1Testing@", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftClusterMasterPassword(tc.Value, "aws_redshift_cluster_master_password") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Cluster master_password to trigger a validation error") - } - } -} - -var testAccAWSRedshiftClusterConfig_updateNodeCount = ` -resource "aws_redshift_cluster" "default" { - cluster_identifier = "tf-redshift-cluster-%d" - availability_zone = "us-west-2a" - database_name = "mydb" - master_username = "foo_test" - master_password = "Mustbe8characters" - node_type = "dc1.large" - automated_snapshot_retention_period = 0 - allow_version_upgrade = false - number_of_nodes = 2 - skip_final_snapshot = true -} -` - -var testAccAWSRedshiftClusterConfig_basic = ` -resource "aws_redshift_cluster" "default" { - cluster_identifier = "tf-redshift-cluster-%d" - availability_zone = "us-west-2a" - database_name = "mydb" - master_username = "foo_test" - master_password = "Mustbe8characters" - node_type = "dc1.large" - automated_snapshot_retention_period = 0 - allow_version_upgrade = false - skip_final_snapshot = true -}` 
- -func testAccAWSRedshiftClusterConfigWithFinalSnapshot(rInt int) string { - return fmt.Sprintf(` -resource "aws_redshift_cluster" "default" { - cluster_identifier = "tf-redshift-cluster-%d" - availability_zone = "us-west-2a" - database_name = "mydb" - master_username = "foo_test" - master_password = "Mustbe8characters" - node_type = "dc1.large" - automated_snapshot_retention_period = 0 - allow_version_upgrade = false - skip_final_snapshot = false - final_snapshot_identifier = "tf-acctest-snapshot-%d" -}`, rInt, rInt) -} - -var testAccAWSRedshiftClusterConfig_kmsKey = ` -resource "aws_kms_key" "foo" { - description = "Terraform acc test %d" - policy = < 0 { - modifyOpts := redshift.ModifyClusterParameterGroupInput{ - ParameterGroupName: aws.String(d.Get("name").(string)), - Parameters: parameters, - } - - log.Printf("[DEBUG] Modify Redshift Parameter Group: %s", modifyOpts) - _, err = conn.ModifyClusterParameterGroup(&modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying Redshift Parameter Group: %s", err) - } - } - d.SetPartial("parameter") - } - - d.Partial(false) - return resourceAwsRedshiftParameterGroupRead(d, meta) -} - -func resourceAwsRedshiftParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsRedshiftParameterGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - -func resourceAwsRedshiftParameterGroupDeleteRefreshFunc( - d *schema.ResourceData, - meta interface{}) resource.StateRefreshFunc { - conn := meta.(*AWSClient).redshiftconn - - return func() (interface{}, string, error) { - - deleteOpts := redshift.DeleteClusterParameterGroupInput{ - ParameterGroupName: aws.String(d.Id()), - } - - if _, err := conn.DeleteClusterParameterGroup(&deleteOpts); err != nil { - redshiftErr, ok := err.(awserr.Error) - 
if !ok { - return d, "error", err - } - - if redshiftErr.Code() != "RedshiftParameterGroupNotFoundFault" { - return d, "error", err - } - } - - return d, "destroyed", nil - } -} - -func resourceAwsRedshiftParameterHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - // Store the value as a lower case string, to match how we store them in flattenParameters - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string)))) - - return hashcode.String(buf.String()) -} - -func validateRedshiftParamGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 255 characters", k)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go b/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go deleted file mode 100644 index edd293b82..000000000 --- a/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRedshiftParameterGroup_withParameters(t *testing.T) { - var v redshift.ClusterParameterGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftParameterGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftParameterGroupExists("aws_redshift_parameter_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "name", fmt.Sprintf("test-terraform-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "family", "redshift-1.0"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.490804664.name", "require_ssl"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.490804664.value", "true"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.2036118857.name", "query_group"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.2036118857.value", "example"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.484080973.name", "enable_user_activity_logging"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "parameter.484080973.value", "true"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftParameterGroup_withoutParameters(t *testing.T) { - var v redshift.ClusterParameterGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSRedshiftParameterGroupOnlyConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftParameterGroupExists("aws_redshift_parameter_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "name", fmt.Sprintf("test-terraform-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "family", "redshift-1.0"), - resource.TestCheckResourceAttr( - "aws_redshift_parameter_group.bar", "description", "Test parameter group for terraform"), - ), - }, - }, - }) -} - -func TestResourceAWSRedshiftParameterGroupNameValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting123", - ErrCount: 1, - }, - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "1testing123", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftParamGroupName(tc.Value, "aws_redshift_parameter_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Parameter Group Name to trigger a validation error") - } - } -} - -func testAccCheckAWSRedshiftParameterGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_redshift_parameter_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeClusterParameterGroups( - &redshift.DescribeClusterParameterGroupsInput{ - ParameterGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.ParameterGroups) != 0 && - *resp.ParameterGroups[0].ParameterGroupName == rs.Primary.ID { - return fmt.Errorf("Redshift Parameter Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != 
"ClusterParameterGroupNotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSRedshiftParameterGroupExists(n string, v *redshift.ClusterParameterGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Redshift Parameter Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - opts := redshift.DescribeClusterParameterGroupsInput{ - ParameterGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeClusterParameterGroups(&opts) - - if err != nil { - return err - } - - if len(resp.ParameterGroups) != 1 || - *resp.ParameterGroups[0].ParameterGroupName != rs.Primary.ID { - return fmt.Errorf("Redshift Parameter Group not found") - } - - *v = *resp.ParameterGroups[0] - - return nil - } -} - -func testAccAWSRedshiftParameterGroupOnlyConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_redshift_parameter_group" "bar" { - name = "test-terraform-%d" - family = "redshift-1.0" - description = "Test parameter group for terraform" - }`, rInt) -} - -func testAccAWSRedshiftParameterGroupConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_redshift_parameter_group" "bar" { - name = "test-terraform-%d" - family = "redshift-1.0" - parameter { - name = "require_ssl" - value = "true" - } - parameter { - name = "query_group" - value = "example" - } - parameter{ - name = "enable_user_activity_logging" - value = "true" - } - }`, rInt) -} diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go deleted file mode 100644 index 24a45bfde..000000000 --- a/builtin/providers/aws/resource_aws_redshift_security_group.go +++ /dev/null @@ -1,400 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRedshiftSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRedshiftSecurityGroupCreate, - Read: resourceAwsRedshiftSecurityGroupRead, - Update: resourceAwsRedshiftSecurityGroupUpdate, - Delete: resourceAwsRedshiftSecurityGroupDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsRedshiftClusterImport, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRedshiftSecurityGroupName, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - - "ingress": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "security_group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "security_group_owner_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: resourceAwsRedshiftSecurityGroupIngressHash, - }, - }, - } -} - -func resourceAwsRedshiftSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - var err error - var errs []error - - name := d.Get("name").(string) - desc := d.Get("description").(string) - sgInput := &redshift.CreateClusterSecurityGroupInput{ - ClusterSecurityGroupName: aws.String(name), - Description: aws.String(desc), - } - log.Printf("[DEBUG] Redshift security group create: name: %s, description: %s", name, desc) - _, err = 
conn.CreateClusterSecurityGroup(sgInput) - if err != nil { - return fmt.Errorf("Error creating RedshiftSecurityGroup: %s", err) - } - - d.SetId(d.Get("name").(string)) - - log.Printf("[INFO] Redshift Security Group ID: %s", d.Id()) - sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) - if err != nil { - return err - } - - ingresses := d.Get("ingress").(*schema.Set) - for _, ing := range ingresses.List() { - err := resourceAwsRedshiftSecurityGroupAuthorizeRule(ing, *sg.ClusterSecurityGroupName, conn) - if err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - log.Println("[INFO] Waiting for Redshift Security Group Ingress Authorizations to be authorized") - stateConf := &resource.StateChangeConf{ - Pending: []string{"authorizing"}, - Target: []string{"authorized"}, - Refresh: resourceAwsRedshiftSecurityGroupStateRefreshFunc(d, meta), - Timeout: 10 * time.Minute, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsRedshiftSecurityGroupRead(d, meta) -} - -func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) - if err != nil { - return err - } - - rules := &schema.Set{ - F: resourceAwsRedshiftSecurityGroupIngressHash, - } - - for _, v := range sg.IPRanges { - rule := map[string]interface{}{"cidr": *v.CIDRIP} - rules.Add(rule) - } - - for _, g := range sg.EC2SecurityGroups { - rule := map[string]interface{}{ - "security_group_name": *g.EC2SecurityGroupName, - "security_group_owner_id": *g.EC2SecurityGroupOwnerId, - } - rules.Add(rule) - } - - d.Set("ingress", rules) - d.Set("name", *sg.ClusterSecurityGroupName) - d.Set("description", *sg.Description) - - return nil -} - -func resourceAwsRedshiftSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - if d.HasChange("ingress") { - o, n := 
d.GetChange("ingress") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - removeIngressRules, err := expandRedshiftSGRevokeIngress(os.Difference(ns).List()) - if err != nil { - return err - } - if len(removeIngressRules) > 0 { - for _, r := range removeIngressRules { - r.ClusterSecurityGroupName = aws.String(d.Id()) - - _, err := conn.RevokeClusterSecurityGroupIngress(&r) - if err != nil { - return err - } - } - } - - addIngressRules, err := expandRedshiftSGAuthorizeIngress(ns.Difference(os).List()) - if err != nil { - return err - } - if len(addIngressRules) > 0 { - for _, r := range addIngressRules { - r.ClusterSecurityGroupName = aws.String(d.Id()) - - _, err := conn.AuthorizeClusterSecurityGroupIngress(&r) - if err != nil { - return err - } - } - } - - } - return resourceAwsRedshiftSecurityGroupRead(d, meta) -} - -func resourceAwsRedshiftSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - log.Printf("[DEBUG] Redshift Security Group destroy: %v", d.Id()) - opts := redshift.DeleteClusterSecurityGroupInput{ - ClusterSecurityGroupName: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Redshift Security Group destroy configuration: %v", opts) - _, err := conn.DeleteClusterSecurityGroup(&opts) - - if err != nil { - newerr, ok := err.(awserr.Error) - if ok && newerr.Code() == "InvalidRedshiftSecurityGroup.NotFound" { - return nil - } - return err - } - - return nil -} - -func resourceAwsRedshiftSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*redshift.ClusterSecurityGroup, error) { - conn := meta.(*AWSClient).redshiftconn - - opts := redshift.DescribeClusterSecurityGroupsInput{ - ClusterSecurityGroupName: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Redshift Security Group describe configuration: %#v", opts) - - resp, err := conn.DescribeClusterSecurityGroups(&opts) - - if err != nil { - return nil, 
fmt.Errorf("Error retrieving Redshift Security Groups: %s", err) - } - - if len(resp.ClusterSecurityGroups) != 1 || - *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != d.Id() { - return nil, fmt.Errorf("Unable to find Redshift Security Group: %#v", resp.ClusterSecurityGroups) - } - - return resp.ClusterSecurityGroups[0], nil -} - -func validateRedshiftSecurityGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "default" { - errors = append(errors, fmt.Errorf("the Redshift Security Group name cannot be %q", value)) - } - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q: %q", - k, value)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 32 characters: %q", k, value)) - } - return - -} - -func resourceAwsRedshiftSecurityGroupIngressHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["cidr"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["security_group_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["security_group_owner_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAwsRedshiftSecurityGroupAuthorizeRule(ingress interface{}, redshiftSecurityGroupName string, conn *redshift.Redshift) error { - ing := ingress.(map[string]interface{}) - - opts := redshift.AuthorizeClusterSecurityGroupIngressInput{ - ClusterSecurityGroupName: aws.String(redshiftSecurityGroupName), - } - - if attr, ok := ing["cidr"]; ok && attr != "" { - opts.CIDRIP = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_name"]; ok && attr != "" { - opts.EC2SecurityGroupName = aws.String(attr.(string)) - } - - if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { - 
opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) - } - - log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts) - _, err := conn.AuthorizeClusterSecurityGroupIngress(&opts) - - if err != nil { - return fmt.Errorf("Error authorizing security group ingress: %s", err) - } - - return nil -} - -func resourceAwsRedshiftSecurityGroupStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) - - if err != nil { - log.Printf("Error on retrieving Redshift Security Group when waiting: %s", err) - return nil, "", err - } - - statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges)) - for _, ec2g := range v.EC2SecurityGroups { - statuses = append(statuses, *ec2g.Status) - } - for _, ips := range v.IPRanges { - statuses = append(statuses, *ips.Status) - } - - for _, stat := range statuses { - // Not done - if stat != "authorized" { - return nil, "authorizing", nil - } - } - - return v, "authorized", nil - } -} - -func expandRedshiftSGAuthorizeIngress(configured []interface{}) ([]redshift.AuthorizeClusterSecurityGroupIngressInput, error) { - var ingress []redshift.AuthorizeClusterSecurityGroupIngressInput - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - i := redshift.AuthorizeClusterSecurityGroupIngressInput{} - - if v, ok := data["cidr"]; ok { - i.CIDRIP = aws.String(v.(string)) - } - - if v, ok := data["security_group_name"]; ok { - i.EC2SecurityGroupName = aws.String(v.(string)) - } - - if v, ok := data["security_group_owner_id"]; ok { - i.EC2SecurityGroupOwnerId = aws.String(v.(string)) - } - - ingress = append(ingress, i) - } - - return ingress, nil -} - -func expandRedshiftSGRevokeIngress(configured []interface{}) ([]redshift.RevokeClusterSecurityGroupIngressInput, 
error) { - var ingress []redshift.RevokeClusterSecurityGroupIngressInput - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - i := redshift.RevokeClusterSecurityGroupIngressInput{} - - if v, ok := data["cidr"]; ok { - i.CIDRIP = aws.String(v.(string)) - } - - if v, ok := data["security_group_name"]; ok { - i.EC2SecurityGroupName = aws.String(v.(string)) - } - - if v, ok := data["security_group_owner_id"]; ok { - i.EC2SecurityGroupOwnerId = aws.String(v.(string)) - } - - ingress = append(ingress, i) - } - - return ingress, nil -} diff --git a/builtin/providers/aws/resource_aws_redshift_security_group_test.go b/builtin/providers/aws/resource_aws_redshift_security_group_test.go deleted file mode 100644 index aa30a5145..000000000 --- a/builtin/providers/aws/resource_aws_redshift_security_group_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRedshiftSecurityGroup_ingressCidr(t *testing.T) { - var v redshift.ClusterSecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "name", fmt.Sprintf("redshift-sg-terraform-%d", rInt)), - resource.TestCheckResourceAttr( - 
"aws_redshift_security_group.bar", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.2735652665.cidr", "10.0.0.1/24"), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSecurityGroup_updateIngressCidr(t *testing.T) { - var v redshift.ClusterSecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "1"), - ), - }, - - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "3"), - ), - }, - - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup(t *testing.T) { - var v redshift.ClusterSecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId(rInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "name", fmt.Sprintf("redshift-sg-terraform-%d", rInt)), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "description", "this is a description"), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSecurityGroup_updateIngressSecurityGroup(t *testing.T) { - var v redshift.ClusterSecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "1"), - ), - }, - - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "3"), - ), - }, - - { - Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_redshift_security_group.bar", "ingress.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckAWSRedshiftSecurityGroupExists(n string, v *redshift.ClusterSecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if 
rs.Primary.ID == "" { - return fmt.Errorf("No Redshift Security Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - opts := redshift.DescribeClusterSecurityGroupsInput{ - ClusterSecurityGroupName: aws.String(rs.Primary.ID), - } - - resp, err := conn.DescribeClusterSecurityGroups(&opts) - - if err != nil { - return err - } - - if len(resp.ClusterSecurityGroups) != 1 || - *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != rs.Primary.ID { - return fmt.Errorf("Redshift Security Group not found") - } - - *v = *resp.ClusterSecurityGroups[0] - - return nil - } -} - -func testAccCheckAWSRedshiftSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_redshift_security_group" { - continue - } - - // Try to find the Group - resp, err := conn.DescribeClusterSecurityGroups( - &redshift.DescribeClusterSecurityGroupsInput{ - ClusterSecurityGroupName: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.ClusterSecurityGroups) != 0 && - *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName == rs.Primary.ID { - return fmt.Errorf("Redshift Security Group still exists") - } - } - - // Verify the error - newerr, ok := err.(awserr.Error) - if !ok { - return err - } - if newerr.Code() != "ClusterSecurityGroupNotFound" { - return err - } - } - - return nil -} - -func TestResourceAWSRedshiftSecurityGroupNameValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "default", - ErrCount: 1, - }, - { - Value: "testing123%%", - ErrCount: 1, - }, - { - Value: "TestingSG", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftSecurityGroupName(tc.Value, "aws_redshift_security_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Security Group Name to trigger a 
validation error") - } - } -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressCidr(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - - ingress { - cidr = "10.0.0.1/24" - } - }`, rInt) -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - description = "this is a description" - - ingress { - cidr = "10.0.0.1/24" - } - - ingress { - cidr = "10.0.10.1/24" - } - - ingress { - cidr = "10.0.20.1/24" - } - }`, rInt) -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - description = "this is a description" - - ingress { - cidr = "10.0.0.1/24" - } - - ingress { - cidr = "10.0.10.1/24" - } - }`, rInt) -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressSgId(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_security_group" "redshift" { - name = "terraform_redshift_test_%d" - description = "Used in the redshift acceptance tests" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.0.0.0/8"] - } - } - - resource "aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - description = "this is a description" - - ingress { - security_group_name = "${aws_security_group.redshift.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - }`, rInt, rInt) -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_security_group" "redshift" { - 
name = "terraform_redshift_test_%d" - description = "Used in the redshift acceptance tests" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.0.0.0/16"] - } - } - - resource "aws_security_group" "redshift2" { - name = "terraform_redshift_test_2_%d" - description = "Used in the redshift acceptance tests #2" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.1.0.0/16"] - } - } - - resource "aws_security_group" "redshift3" { - name = "terraform_redshift_test_3_%d" - description = "Used in the redshift acceptance tests #3" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.2.0.0/16"] - } - } - - resource "aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - description = "this is a description" - - ingress { - security_group_name = "${aws_security_group.redshift.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - - ingress { - security_group_name = "${aws_security_group.redshift2.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - - ingress { - security_group_name = "${aws_security_group.redshift3.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - }`, rInt, rInt, rInt, rInt) -} - -func testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_security_group" "redshift" { - name = "terraform_redshift_test_%d" - description = "Used in the redshift acceptance tests" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.0.0.0/16"] - } - } - - resource "aws_security_group" "redshift2" { - name = "terraform_redshift_test_2_%d" - description = "Used in the redshift acceptance tests #2" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.1.0.0/16"] - } - } - - resource 
"aws_redshift_security_group" "bar" { - name = "redshift-sg-terraform-%d" - description = "this is a description" - - ingress { - security_group_name = "${aws_security_group.redshift.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - - ingress { - security_group_name = "${aws_security_group.redshift2.name}" - security_group_owner_id = "${aws_security_group.redshift.owner_id}" - } - }`, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_redshift_subnet_group.go b/builtin/providers/aws/resource_aws_redshift_subnet_group.go deleted file mode 100644 index 118abffe4..000000000 --- a/builtin/providers/aws/resource_aws_redshift_subnet_group.go +++ /dev/null @@ -1,220 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRedshiftSubnetGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRedshiftSubnetGroupCreate, - Read: resourceAwsRedshiftSubnetGroupRead, - Update: resourceAwsRedshiftSubnetGroupUpdate, - Delete: resourceAwsRedshiftSubnetGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: validateRedshiftSubnetGroupName, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - - "subnet_ids": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsRedshiftSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - subnetIdsSet := 
d.Get("subnet_ids").(*schema.Set) - subnetIds := make([]*string, subnetIdsSet.Len()) - for i, subnetId := range subnetIdsSet.List() { - subnetIds[i] = aws.String(subnetId.(string)) - } - tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{})) - - createOpts := redshift.CreateClusterSubnetGroupInput{ - ClusterSubnetGroupName: aws.String(d.Get("name").(string)), - Description: aws.String(d.Get("description").(string)), - SubnetIds: subnetIds, - Tags: tags, - } - - log.Printf("[DEBUG] Create Redshift Subnet Group: %#v", createOpts) - _, err := conn.CreateClusterSubnetGroup(&createOpts) - if err != nil { - return fmt.Errorf("Error creating Redshift Subnet Group: %s", err) - } - - d.SetId(*createOpts.ClusterSubnetGroupName) - log.Printf("[INFO] Redshift Subnet Group ID: %s", d.Id()) - return resourceAwsRedshiftSubnetGroupRead(d, meta) -} - -func resourceAwsRedshiftSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - describeOpts := redshift.DescribeClusterSubnetGroupsInput{ - ClusterSubnetGroupName: aws.String(d.Id()), - } - - describeResp, err := conn.DescribeClusterSubnetGroups(&describeOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ClusterSubnetGroupNotFoundFault" { - log.Printf("[INFO] Redshift Subnet Group: %s was not found", d.Id()) - d.SetId("") - return nil - } - return err - } - - if len(describeResp.ClusterSubnetGroups) == 0 { - return fmt.Errorf("Unable to find Redshift Subnet Group: %#v", describeResp.ClusterSubnetGroups) - } - - d.Set("name", d.Id()) - d.Set("description", describeResp.ClusterSubnetGroups[0].Description) - d.Set("subnet_ids", subnetIdsToSlice(describeResp.ClusterSubnetGroups[0].Subnets)) - if err := d.Set("tags", tagsToMapRedshift(describeResp.ClusterSubnetGroups[0].Tags)); err != nil { - return fmt.Errorf("[DEBUG] Error setting Redshift Subnet Group Tags: %#v", err) - } - - return nil -} - -func 
resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).redshiftconn - - arn, tagErr := buildRedshiftSubnetGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) - if tagErr != nil { - return fmt.Errorf("Error building ARN for Redshift Subnet Group, not updating Tags for Subnet Group %s", d.Id()) - } else { - if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { - return tagErr - } - } - - if d.HasChange("subnet_ids") || d.HasChange("description") { - _, n := d.GetChange("subnet_ids") - if n == nil { - n = new(schema.Set) - } - ns := n.(*schema.Set) - - var sIds []*string - for _, s := range ns.List() { - sIds = append(sIds, aws.String(s.(string))) - } - - _, err := conn.ModifyClusterSubnetGroup(&redshift.ModifyClusterSubnetGroupInput{ - ClusterSubnetGroupName: aws.String(d.Id()), - Description: aws.String(d.Get("description").(string)), - SubnetIds: sIds, - }) - - if err != nil { - return err - } - } - - return nil -} - -func resourceAwsRedshiftSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Refresh: resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - -func resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - conn := meta.(*AWSClient).redshiftconn - - return func() (interface{}, string, error) { - - deleteOpts := redshift.DeleteClusterSubnetGroupInput{ - ClusterSubnetGroupName: aws.String(d.Id()), - } - - if _, err := conn.DeleteClusterSubnetGroup(&deleteOpts); err != nil { - redshiftErr, ok := err.(awserr.Error) - if !ok { - return d, "error", err - } - - if redshiftErr.Code() != "ClusterSubnetGroupNotFoundFault" { - return d, "error", err 
- } - } - - return d, "destroyed", nil - } -} - -func subnetIdsToSlice(subnetIds []*redshift.Subnet) []string { - subnetsSlice := make([]string, 0, len(subnetIds)) - for _, s := range subnetIds { - subnetsSlice = append(subnetsSlice, *s.SubnetIdentifier) - } - return subnetsSlice -} - -func validateRedshiftSubnetGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - if regexp.MustCompile(`(?i)^default$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q is not allowed as %q", "Default", k)) - } - return -} - -func buildRedshiftSubnetGroupARN(identifier, partition, accountid, region string) (string, error) { - if partition == "" { - return "", fmt.Errorf("Unable to construct Subnet Group ARN because of missing AWS partition") - } - if accountid == "" { - return "", fmt.Errorf("Unable to construct Subnet Group ARN because of missing AWS Account ID") - } - arn := fmt.Sprintf("arn:%s:redshift:%s:%s:subnetgroup:%s", partition, region, accountid, identifier) - return arn, nil - -} diff --git a/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go b/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go deleted file mode 100644 index b71e7bee3..000000000 --- a/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go +++ /dev/null @@ -1,416 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRedshiftSubnetGroup_basic(t 
*testing.T) { - var v redshift.ClusterSubnetGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRedshiftSubnetGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "subnet_ids.#", "2"), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "description", "foo description"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSubnetGroup_updateDescription(t *testing.T) { - var v redshift.ClusterSubnetGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRedshiftSubnetGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "description", "foo description"), - ), - }, - - resource.TestStep{ - Config: testAccRedshiftSubnetGroup_updateDescription(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "description", "foo description updated"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSubnetGroup_updateSubnetIds(t *testing.T) { - var v redshift.ClusterSubnetGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccRedshiftSubnetGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "subnet_ids.#", "2"), - ), - }, - - resource.TestStep{ - Config: testAccRedshiftSubnetGroupConfig_updateSubnetIds(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "subnet_ids.#", "3"), - ), - }, - }, - }) -} - -func TestAccAWSRedshiftSubnetGroup_tags(t *testing.T) { - var v redshift.ClusterSubnetGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRedshiftSubnetGroupConfigWithTags(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_redshift_subnet_group.foo", "tags.Name", "tf-redshift-subnetgroup"), - ), - }, - { - Config: testAccRedshiftSubnetGroupConfigWithTagsUpdated(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v), - resource.TestCheckResourceAttr( - "aws_redshift_subnet_group.foo", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_redshift_subnet_group.foo", "tags.environment", "production"), - resource.TestCheckResourceAttr("aws_redshift_subnet_group.foo", "tags.Name", "tf-redshift-subnetgroup"), - resource.TestCheckResourceAttr("aws_redshift_subnet_group.foo", "tags.foo", "bar"), - ), - }, - }, - }) -} - -func TestResourceAWSRedshiftSubnetGroupNameValidation(t *testing.T) { - cases := []struct { - 
Value string - ErrCount int - }{ - { - Value: "default", - ErrCount: 1, - }, - { - Value: "testing123%%", - ErrCount: 1, - }, - { - Value: "TestingSG", - ErrCount: 1, - }, - { - Value: "testing_123", - ErrCount: 1, - }, - { - Value: "testing.123", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedshiftSubnetGroupName(tc.Value, "aws_redshift_subnet_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Redshift Subnet Group Name to trigger a validation error") - } - } -} - -func testAccCheckRedshiftSubnetGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_redshift_subnet_group" { - continue - } - - resp, err := conn.DescribeClusterSubnetGroups( - &redshift.DescribeClusterSubnetGroupsInput{ - ClusterSubnetGroupName: aws.String(rs.Primary.ID)}) - if err == nil { - if len(resp.ClusterSubnetGroups) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - redshiftErr, ok := err.(awserr.Error) - if !ok { - return err - } - if redshiftErr.Code() != "ClusterSubnetGroupNotFoundFault" { - return err - } - } - - return nil -} - -func testAccCheckRedshiftSubnetGroupExists(n string, v *redshift.ClusterSubnetGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).redshiftconn - resp, err := conn.DescribeClusterSubnetGroups( - &redshift.DescribeClusterSubnetGroupsInput{ClusterSubnetGroupName: aws.String(rs.Primary.ID)}) - if err != nil { - return err - } - if len(resp.ClusterSubnetGroups) == 0 { - return fmt.Errorf("ClusterSubnetGroup not found") - } - - *v = *resp.ClusterSubnetGroups[0] - - return nil - } -} - -func 
testAccRedshiftSubnetGroupConfig(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccRedshiftSubnetGroupConfig" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_redshift_subnet_group" "foo" { - name = "foo-%d" - description = "foo description" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] -} - `, rInt) -} - -func testAccRedshiftSubnetGroup_updateDescription(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccRedshiftSubnetGroup_updateDescription" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_redshift_subnet_group" "foo" { - name = "foo-%d" - description = "foo description updated" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] -} -`, rInt) -} - -func testAccRedshiftSubnetGroupConfigWithTags(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccRedshiftSubnetGroupConfigWithTags" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" 
- tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_redshift_subnet_group" "foo" { - name = "foo-%d" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - tags { - Name = "tf-redshift-subnetgroup" - } -} -`, rInt) -} - -func testAccRedshiftSubnetGroupConfigWithTagsUpdated(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccRedshiftSubnetGroupConfigWithTags" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_redshift_subnet_group" "foo" { - name = "foo-%d" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] - tags { - Name = "tf-redshift-subnetgroup" - environment = "production" - foo = "bar" - } -} -`, rInt) -} - -func testAccRedshiftSubnetGroupConfig_updateSubnetIds(rInt int) string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - tags { - Name = "testAccRedshiftSubnetGroupConfig_updateSubnetIds" - } -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - availability_zone = "us-west-2a" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-1" - } -} - -resource "aws_subnet" "bar" { - cidr_block = "10.1.2.0/24" - availability_zone = "us-west-2b" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-2" - } -} - -resource "aws_subnet" "foobar" { - cidr_block = "10.1.3.0/24" - availability_zone = "us-west-2c" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-dbsubnet-test-3" - } -} - -resource "aws_redshift_subnet_group" "foo" { - name = "foo-%d" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}", "${aws_subnet.foobar.id}"] -} -`, rInt) -} diff --git 
a/builtin/providers/aws/resource_aws_route.go b/builtin/providers/aws/resource_aws_route.go deleted file mode 100644 index 85151b089..000000000 --- a/builtin/providers/aws/resource_aws_route.go +++ /dev/null @@ -1,498 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -// How long to sleep if a limit-exceeded event happens -var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id, " + - "egress_only_gateway_id, nat_gateway_id, instance_id, network_interface_id or " + - "vpc_peering_connection_id is allowed.") - -// AWS Route resource Schema declaration -func resourceAwsRoute() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRouteCreate, - Read: resourceAwsRouteRead, - Update: resourceAwsRouteUpdate, - Delete: resourceAwsRouteDelete, - Exists: resourceAwsRouteExists, - - Schema: map[string]*schema.Schema{ - "destination_cidr_block": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "destination_ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "destination_prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - - "gateway_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "egress_only_gateway_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "nat_gateway_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "instance_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "instance_owner_id": { - Type: schema.TypeString, - Computed: true, - }, - - "network_interface_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, 
- }, - - "origin": { - Type: schema.TypeString, - Computed: true, - }, - - "state": { - Type: schema.TypeString, - Computed: true, - }, - - "route_table_id": { - Type: schema.TypeString, - Required: true, - }, - - "vpc_peering_connection_id": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - var numTargets int - var setTarget string - allowedTargets := []string{ - "egress_only_gateway_id", - "gateway_id", - "nat_gateway_id", - "instance_id", - "network_interface_id", - "vpc_peering_connection_id", - } - - // Check if more than 1 target is specified - for _, target := range allowedTargets { - if len(d.Get(target).(string)) > 0 { - numTargets++ - setTarget = target - } - } - - if numTargets > 1 { - return routeTargetValidationError - } - - createOpts := &ec2.CreateRouteInput{} - // Formulate CreateRouteInput based on the target type - switch setTarget { - case "gateway_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - GatewayId: aws.String(d.Get("gateway_id").(string)), - } - - if v, ok := d.GetOk("destination_cidr_block"); ok { - createOpts.DestinationCidrBlock = aws.String(v.(string)) - } - - if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { - createOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) - } - - case "egress_only_gateway_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)), - EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)), - } - case "nat_gateway_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)), 
- } - case "instance_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - InstanceId: aws.String(d.Get("instance_id").(string)), - } - case "network_interface_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), - } - case "vpc_peering_connection_id": - createOpts = &ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - VpcPeeringConnectionId: aws.String(d.Get("vpc_peering_connection_id").(string)), - } - default: - return fmt.Errorf("An invalid target type specified: %s", setTarget) - } - log.Printf("[DEBUG] Route create config: %s", createOpts) - - // Create the route - var err error - - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - _, err = conn.CreateRoute(createOpts) - - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if ec2err.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Trying to create route again: %q", ec2err.Message()) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - - return nil - }) - if err != nil { - return fmt.Errorf("Error creating route: %s", err) - } - - var route *ec2.Route - - if v, ok := d.GetOk("destination_cidr_block"); ok { - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - route, err = findResourceRoute(conn, d.Get("route_table_id").(string), v.(string), "") - return resource.RetryableError(err) - }) - if err != nil { - return fmt.Errorf("Error finding route after creating it: %s", err) - } - } - - if v, ok := 
d.GetOk("destination_ipv6_cidr_block"); ok { - err = resource.Retry(2*time.Minute, func() *resource.RetryError { - route, err = findResourceRoute(conn, d.Get("route_table_id").(string), "", v.(string)) - return resource.RetryableError(err) - }) - if err != nil { - return fmt.Errorf("Error finding route after creating it: %s", err) - } - } - - d.SetId(routeIDHash(d, route)) - resourceAwsRouteSetResourceData(d, route) - return nil -} - -func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - routeTableId := d.Get("route_table_id").(string) - - destinationCidrBlock := d.Get("destination_cidr_block").(string) - destinationIpv6CidrBlock := d.Get("destination_ipv6_cidr_block").(string) - - route, err := findResourceRoute(conn, routeTableId, destinationCidrBlock, destinationIpv6CidrBlock) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { - log.Printf("[WARN] Route Table %q could not be found. 
Removing Route from state.", - routeTableId) - d.SetId("") - return nil - } - return err - } - resourceAwsRouteSetResourceData(d, route) - return nil -} - -func resourceAwsRouteSetResourceData(d *schema.ResourceData, route *ec2.Route) { - d.Set("destination_prefix_list_id", route.DestinationPrefixListId) - d.Set("gateway_id", route.GatewayId) - d.Set("egress_only_gateway_id", route.EgressOnlyInternetGatewayId) - d.Set("nat_gateway_id", route.NatGatewayId) - d.Set("instance_id", route.InstanceId) - d.Set("instance_owner_id", route.InstanceOwnerId) - d.Set("network_interface_id", route.NetworkInterfaceId) - d.Set("origin", route.Origin) - d.Set("state", route.State) - d.Set("vpc_peering_connection_id", route.VpcPeeringConnectionId) -} - -func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - var numTargets int - var setTarget string - - allowedTargets := []string{ - "egress_only_gateway_id", - "gateway_id", - "nat_gateway_id", - "network_interface_id", - "instance_id", - "vpc_peering_connection_id", - } - replaceOpts := &ec2.ReplaceRouteInput{} - - // Check if more than 1 target is specified - for _, target := range allowedTargets { - if len(d.Get(target).(string)) > 0 { - numTargets++ - setTarget = target - } - } - - switch setTarget { - //instance_id is a special case due to the fact that AWS will "discover" the network_interace_id - //when it creates the route and return that data. 
In the case of an update, we should ignore the - //existing network_interface_id - case "instance_id": - if numTargets > 2 || (numTargets == 2 && len(d.Get("network_interface_id").(string)) == 0) { - return routeTargetValidationError - } - default: - if numTargets > 1 { - return routeTargetValidationError - } - } - - // Formulate ReplaceRouteInput based on the target type - switch setTarget { - case "gateway_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - GatewayId: aws.String(d.Get("gateway_id").(string)), - } - case "egress_only_gateway_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)), - EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)), - } - case "nat_gateway_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)), - } - case "instance_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - InstanceId: aws.String(d.Get("instance_id").(string)), - } - case "network_interface_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), - } - case "vpc_peering_connection_id": - replaceOpts = &ec2.ReplaceRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - DestinationCidrBlock: 
aws.String(d.Get("destination_cidr_block").(string)), - VpcPeeringConnectionId: aws.String(d.Get("vpc_peering_connection_id").(string)), - } - default: - return fmt.Errorf("An invalid target type specified: %s", setTarget) - } - log.Printf("[DEBUG] Route replace config: %s", replaceOpts) - - // Replace the route - _, err := conn.ReplaceRoute(replaceOpts) - if err != nil { - return err - } - - return nil -} - -func resourceAwsRouteDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - deleteOpts := &ec2.DeleteRouteInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - } - if v, ok := d.GetOk("destination_cidr_block"); ok { - deleteOpts.DestinationCidrBlock = aws.String(v.(string)) - } - if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { - deleteOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) - } - log.Printf("[DEBUG] Route delete opts: %s", deleteOpts) - - var err error - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Trying to delete route with opts %s", deleteOpts) - resp, err := conn.DeleteRoute(deleteOpts) - log.Printf("[DEBUG] Route delete result: %s", resp) - - if err == nil { - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - if ec2err.Code() == "InvalidParameterException" { - log.Printf("[DEBUG] Trying to delete route again: %q", - ec2err.Message()) - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - }) - - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*AWSClient).ec2conn - routeTableId := d.Get("route_table_id").(string) - - findOpts := &ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{&routeTableId}, - } - - res, err := conn.DescribeRouteTables(findOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && 
ec2err.Code() == "InvalidRouteTableID.NotFound" { - log.Printf("[WARN] Route Table %q could not be found.", routeTableId) - return false, nil - } - return false, fmt.Errorf("Error while checking if route exists: %s", err) - } - - if len(res.RouteTables) < 1 || res.RouteTables[0] == nil { - log.Printf("[WARN] Route Table %q is gone, or route does not exist.", - routeTableId) - return false, nil - } - - if v, ok := d.GetOk("destination_cidr_block"); ok { - for _, route := range (*res.RouteTables[0]).Routes { - if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == v.(string) { - return true, nil - } - } - } - - if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { - for _, route := range (*res.RouteTables[0]).Routes { - if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == v.(string) { - return true, nil - } - } - } - - return false, nil -} - -// Create an ID for a route -func routeIDHash(d *schema.ResourceData, r *ec2.Route) string { - - if r.DestinationIpv6CidrBlock != nil && *r.DestinationIpv6CidrBlock != "" { - return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationIpv6CidrBlock)) - } - - return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationCidrBlock)) -} - -// Helper: retrieve a route -func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string, ipv6cidr string) (*ec2.Route, error) { - routeTableID := rtbid - - findOpts := &ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{&routeTableID}, - } - - resp, err := conn.DescribeRouteTables(findOpts) - if err != nil { - return nil, err - } - - if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { - return nil, fmt.Errorf("Route Table %q is gone, or route does not exist.", - routeTableID) - } - - if cidr != "" { - for _, route := range (*resp.RouteTables[0]).Routes { - if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr { - return route, nil - } - } - - 
return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+ - "and destination CIDR block (%s).", rtbid, cidr) - } - - if ipv6cidr != "" { - for _, route := range (*resp.RouteTables[0]).Routes { - if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == ipv6cidr { - return route, nil - } - } - - return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+ - "and destination IPv6 CIDR block (%s).", rtbid, ipv6cidr) - } - - return nil, fmt.Errorf("When trying to find a matching route for Route Table %q "+ - "you need to specify a CIDR block of IPv6 CIDR Block", rtbid) - -} diff --git a/builtin/providers/aws/resource_aws_route53_delegation_set.go b/builtin/providers/aws/resource_aws_route53_delegation_set.go deleted file mode 100644 index 34f96ddf5..000000000 --- a/builtin/providers/aws/resource_aws_route53_delegation_set.go +++ /dev/null @@ -1,111 +0,0 @@ -package aws - -import ( - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" -) - -func resourceAwsRoute53DelegationSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRoute53DelegationSetCreate, - Read: resourceAwsRoute53DelegationSetRead, - Delete: resourceAwsRoute53DelegationSetDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "reference_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "name_servers": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - }, - } -} - -func resourceAwsRoute53DelegationSetCreate(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - callerRef := resource.UniqueId() - if v, ok := d.GetOk("reference_name"); ok { - callerRef = 
strings.Join([]string{ - v.(string), "-", callerRef, - }, "") - } - input := &route53.CreateReusableDelegationSetInput{ - CallerReference: aws.String(callerRef), - } - - log.Printf("[DEBUG] Creating Route53 reusable delegation set: %#v", input) - out, err := r53.CreateReusableDelegationSet(input) - if err != nil { - return err - } - log.Printf("[DEBUG] Route53 reusable delegation set created: %#v", out) - - set := out.DelegationSet - d.SetId(cleanDelegationSetId(*set.Id)) - d.Set("name_servers", expandNameServers(set.NameServers)) - return nil -} - -func resourceAwsRoute53DelegationSetRead(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - input := &route53.GetReusableDelegationSetInput{ - Id: aws.String(cleanDelegationSetId(d.Id())), - } - log.Printf("[DEBUG] Reading Route53 reusable delegation set: %#v", input) - out, err := r53.GetReusableDelegationSet(input) - if err != nil { - return err - } - log.Printf("[DEBUG] Route53 reusable delegation set received: %#v", out) - - set := out.DelegationSet - - d.SetId(cleanDelegationSetId(*set.Id)) - d.Set("name_servers", expandNameServers(set.NameServers)) - - return nil -} - -func resourceAwsRoute53DelegationSetDelete(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - input := &route53.DeleteReusableDelegationSetInput{ - Id: aws.String(cleanDelegationSetId(d.Id())), - } - log.Printf("[DEBUG] Deleting Route53 reusable delegation set: %#v", input) - _, err := r53.DeleteReusableDelegationSet(input) - return err -} - -func expandNameServers(name_servers []*string) []string { - log.Printf("[DEBUG] Processing %d name servers: %#v...", len(name_servers), name_servers) - ns := make([]string, len(name_servers)) - for i, server := range name_servers { - ns[i] = *server - } - sort.Strings(ns) - log.Printf("[DEBUG] Returning processed name servers: %#v", ns) - return ns -} - -func cleanDelegationSetId(id string) string { - return strings.TrimPrefix(id, 
"/delegationset/") -} diff --git a/builtin/providers/aws/resource_aws_route53_delegation_set_test.go b/builtin/providers/aws/resource_aws_route53_delegation_set_test.go deleted file mode 100644 index 26e88f60d..000000000 --- a/builtin/providers/aws/resource_aws_route53_delegation_set_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestAccAWSRoute53DelegationSet_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_delegation_set.test", - IDRefreshIgnore: []string{"reference_name"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53DelegationSetConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53DelegationSetExists("aws_route53_delegation_set.test"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53DelegationSet_withZones(t *testing.T) { - var zone route53.GetHostedZoneOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_delegation_set.main", - IDRefreshIgnore: []string{"reference_name"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53DelegationSetWithZonesConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53DelegationSetExists("aws_route53_delegation_set.main"), - testAccCheckRoute53ZoneExists("aws_route53_zone.primary", &zone), - testAccCheckRoute53ZoneExists("aws_route53_zone.secondary", &zone), - testAccCheckRoute53NameServersMatch("aws_route53_delegation_set.main", "aws_route53_zone.primary"), - 
testAccCheckRoute53NameServersMatch("aws_route53_delegation_set.main", "aws_route53_zone.secondary"), - ), - }, - }, - }) -} - -func testAccCheckRoute53DelegationSetDestroy(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*AWSClient).r53conn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route53_delegation_set" { - continue - } - - _, err := conn.GetReusableDelegationSet(&route53.GetReusableDelegationSetInput{Id: aws.String(rs.Primary.ID)}) - if err == nil { - return fmt.Errorf("Delegation set still exists") - } - } - return nil -} - -func testAccCheckRoute53DelegationSetExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No delegation set ID is set") - } - - out, err := conn.GetReusableDelegationSet(&route53.GetReusableDelegationSetInput{ - Id: aws.String(rs.Primary.ID), - }) - - if err != nil { - return fmt.Errorf("Delegation set does not exist: %#v", rs.Primary.ID) - } - - setID := cleanDelegationSetId(*out.DelegationSet.Id) - if setID != rs.Primary.ID { - return fmt.Errorf("Delegation set ID does not match:\nExpected: %#v\nReturned: %#v", rs.Primary.ID, setID) - } - - return nil - } -} - -func testAccCheckRoute53NameServersMatch(delegationSetName, zoneName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - - delegationSetLocal, ok := s.RootModule().Resources[delegationSetName] - if !ok { - return fmt.Errorf("Not found: %s", delegationSetName) - } - delegationSet, err := conn.GetReusableDelegationSet(&route53.GetReusableDelegationSetInput{ - Id: aws.String(delegationSetLocal.Primary.ID), - }) - if err != nil { - return fmt.Errorf("Delegation set does not exist: %#v", delegationSetLocal.Primary.ID) - } - 
- hostedZoneLocal, ok := s.RootModule().Resources[zoneName] - if !ok { - return fmt.Errorf("Not found: %s", zoneName) - } - hostedZone, err := conn.GetHostedZone(&route53.GetHostedZoneInput{ - Id: aws.String(hostedZoneLocal.Primary.ID), - }) - if err != nil { - return fmt.Errorf("Delegation set does not exist: %#v", hostedZoneLocal.Primary.ID) - } - - if !reflect.DeepEqual(delegationSet.DelegationSet.NameServers, hostedZone.DelegationSet.NameServers) { - return fmt.Errorf("Name servers do not match:\nDelegation Set: %#v\nHosted Zone:%#v", - delegationSet.DelegationSet.NameServers, hostedZone.DelegationSet.NameServers) - } - - return nil - } -} - -const testAccRoute53DelegationSetConfig = ` -resource "aws_route53_delegation_set" "test" { - reference_name = "test" -} -` - -const testAccRoute53DelegationSetWithZonesConfig = ` -resource "aws_route53_delegation_set" "main" { - reference_name = "main" -} - -resource "aws_route53_zone" "primary" { - name = "hashicorp.com" - delegation_set_id = "${aws_route53_delegation_set.main.id}" -} - -resource "aws_route53_zone" "secondary" { - name = "terraform.io" - delegation_set_id = "${aws_route53_delegation_set.main.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go deleted file mode 100644 index 6cf4ee205..000000000 --- a/builtin/providers/aws/resource_aws_route53_health_check.go +++ /dev/null @@ -1,379 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/route53" -) - -func resourceAwsRoute53HealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRoute53HealthCheckCreate, - Read: resourceAwsRoute53HealthCheckRead, - Update: resourceAwsRoute53HealthCheckUpdate, - Delete: 
resourceAwsRoute53HealthCheckDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(val interface{}) string { - return strings.ToUpper(val.(string)) - }, - }, - "failure_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "request_interval": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, // todo this should be updateable but the awslabs route53 service doesnt have the ability - }, - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "fqdn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "invert_healthcheck": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "resource_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "search_string": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "measure_latency": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "child_healthchecks": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Set: schema.HashString, - }, - "child_health_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value > 256 { - es = append(es, fmt.Errorf( - "Child HealthThreshold cannot be more than 256")) - } - return - }, - }, - - "cloudwatch_alarm_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "cloudwatch_alarm_region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "insufficient_data_health_status": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - }, - "reference_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "enable_sni": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - - updateHealthCheck := &route53.UpdateHealthCheckInput{ - HealthCheckId: aws.String(d.Id()), - } - - if d.HasChange("failure_threshold") { - updateHealthCheck.FailureThreshold = aws.Int64(int64(d.Get("failure_threshold").(int))) - } - - if d.HasChange("fqdn") { - updateHealthCheck.FullyQualifiedDomainName = aws.String(d.Get("fqdn").(string)) - } - - if d.HasChange("port") { - updateHealthCheck.Port = aws.Int64(int64(d.Get("port").(int))) - } - - if d.HasChange("resource_path") { - updateHealthCheck.ResourcePath = aws.String(d.Get("resource_path").(string)) - } - - if d.HasChange("invert_healthcheck") { - updateHealthCheck.Inverted = aws.Bool(d.Get("invert_healthcheck").(bool)) - } - - if d.HasChange("child_healthchecks") { - updateHealthCheck.ChildHealthChecks = expandStringList(d.Get("child_healthchecks").(*schema.Set).List()) - - } - if d.HasChange("child_health_threshold") { - updateHealthCheck.HealthThreshold = aws.Int64(int64(d.Get("child_health_threshold").(int))) - } - - if d.HasChange("search_string") { - updateHealthCheck.SearchString = aws.String(d.Get("search_string").(string)) - } - - if d.HasChange("cloudwatch_alarm_name") || d.HasChange("cloudwatch_alarm_region") { - cloudwatchAlarm := &route53.AlarmIdentifier{ - Name: aws.String(d.Get("cloudwatch_alarm_name").(string)), - Region: aws.String(d.Get("cloudwatch_alarm_region").(string)), - } - - updateHealthCheck.AlarmIdentifier = cloudwatchAlarm - } - - if d.HasChange("insufficient_data_health_status") { - updateHealthCheck.InsufficientDataHealthStatus = aws.String(d.Get("insufficient_data_health_status").(string)) - 
} - - if d.HasChange("enable_sni") { - updateHealthCheck.EnableSNI = aws.Bool(d.Get("enable_sni").(bool)) - } - - _, err := conn.UpdateHealthCheck(updateHealthCheck) - if err != nil { - return err - } - - if err := setTagsR53(conn, d, "healthcheck"); err != nil { - return err - } - - return resourceAwsRoute53HealthCheckRead(d, meta) -} - -func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - - healthConfig := &route53.HealthCheckConfig{ - Type: aws.String(d.Get("type").(string)), - } - - if v, ok := d.GetOk("request_interval"); ok { - healthConfig.RequestInterval = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("failure_threshold"); ok { - healthConfig.FailureThreshold = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("fqdn"); ok { - healthConfig.FullyQualifiedDomainName = aws.String(v.(string)) - } - - if v, ok := d.GetOk("search_string"); ok { - healthConfig.SearchString = aws.String(v.(string)) - } - - if v, ok := d.GetOk("ip_address"); ok { - healthConfig.IPAddress = aws.String(v.(string)) - } - - if v, ok := d.GetOk("port"); ok { - healthConfig.Port = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("resource_path"); ok { - healthConfig.ResourcePath = aws.String(v.(string)) - } - - if *healthConfig.Type != route53.HealthCheckTypeCalculated && *healthConfig.Type != route53.HealthCheckTypeCloudwatchMetric { - if v, ok := d.GetOk("measure_latency"); ok { - healthConfig.MeasureLatency = aws.Bool(v.(bool)) - } - } - - if v, ok := d.GetOk("invert_healthcheck"); ok { - healthConfig.Inverted = aws.Bool(v.(bool)) - } - - if v, ok := d.GetOk("enable_sni"); ok { - healthConfig.EnableSNI = aws.Bool(v.(bool)) - } - - if *healthConfig.Type == route53.HealthCheckTypeCalculated { - if v, ok := d.GetOk("child_healthchecks"); ok { - healthConfig.ChildHealthChecks = expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("child_health_threshold"); ok { - 
healthConfig.HealthThreshold = aws.Int64(int64(v.(int))) - } - } - - if *healthConfig.Type == route53.HealthCheckTypeCloudwatchMetric { - cloudwatchAlarmIdentifier := &route53.AlarmIdentifier{} - - if v, ok := d.GetOk("cloudwatch_alarm_name"); ok { - cloudwatchAlarmIdentifier.Name = aws.String(v.(string)) - } - - if v, ok := d.GetOk("cloudwatch_alarm_region"); ok { - cloudwatchAlarmIdentifier.Region = aws.String(v.(string)) - } - - healthConfig.AlarmIdentifier = cloudwatchAlarmIdentifier - - if v, ok := d.GetOk("insufficient_data_health_status"); ok { - healthConfig.InsufficientDataHealthStatus = aws.String(v.(string)) - } - } - - callerRef := resource.UniqueId() - if v, ok := d.GetOk("reference_name"); ok { - callerRef = fmt.Sprintf("%s-%s", v.(string), callerRef) - } - - input := &route53.CreateHealthCheckInput{ - CallerReference: aws.String(callerRef), - HealthCheckConfig: healthConfig, - } - - resp, err := conn.CreateHealthCheck(input) - - if err != nil { - return err - } - - d.SetId(*resp.HealthCheck.Id) - - if err := setTagsR53(conn, d, "healthcheck"); err != nil { - return err - } - - return resourceAwsRoute53HealthCheckRead(d, meta) -} - -func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - - read, err := conn.GetHealthCheck(&route53.GetHealthCheckInput{HealthCheckId: aws.String(d.Id())}) - if err != nil { - if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHealthCheck" { - d.SetId("") - return nil - - } - return err - } - - if read == nil { - return nil - } - - updated := read.HealthCheck.HealthCheckConfig - d.Set("type", updated.Type) - d.Set("failure_threshold", updated.FailureThreshold) - d.Set("request_interval", updated.RequestInterval) - d.Set("fqdn", updated.FullyQualifiedDomainName) - d.Set("search_string", updated.SearchString) - d.Set("ip_address", updated.IPAddress) - d.Set("port", updated.Port) - d.Set("resource_path", updated.ResourcePath) - 
d.Set("measure_latency", updated.MeasureLatency) - d.Set("invert_healthcheck", updated.Inverted) - d.Set("child_healthchecks", updated.ChildHealthChecks) - d.Set("child_health_threshold", updated.HealthThreshold) - d.Set("insufficient_data_health_status", updated.InsufficientDataHealthStatus) - d.Set("enable_sni", updated.EnableSNI) - - if updated.AlarmIdentifier != nil { - d.Set("cloudwatch_alarm_name", updated.AlarmIdentifier.Name) - d.Set("cloudwatch_alarm_region", updated.AlarmIdentifier.Region) - } - - // read the tags - req := &route53.ListTagsForResourceInput{ - ResourceId: aws.String(d.Id()), - ResourceType: aws.String("healthcheck"), - } - - resp, err := conn.ListTagsForResource(req) - if err != nil { - return err - } - - var tags []*route53.Tag - if resp.ResourceTagSet != nil { - tags = resp.ResourceTagSet.Tags - } - - if err := d.Set("tags", tagsToMapR53(tags)); err != nil { - return err - } - - return nil -} - -func resourceAwsRoute53HealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - - log.Printf("[DEBUG] Deleteing Route53 health check: %s", d.Id()) - _, err := conn.DeleteHealthCheck(&route53.DeleteHealthCheckInput{HealthCheckId: aws.String(d.Id())}) - if err != nil { - return err - } - - return nil -} - -func createChildHealthCheckList(s *schema.Set) (nl []*string) { - l := s.List() - for _, n := range l { - nl = append(nl, aws.String(n.(string))) - } - - return nl -} diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go deleted file mode 100644 index 75e53af91..000000000 --- a/builtin/providers/aws/resource_aws_route53_health_check_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestAccAWSRoute53HealthCheck_basic(t 
*testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_health_check.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "measure_latency", "true"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "invert_healthcheck", "true"), - ), - }, - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "failure_threshold", "5"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "invert_healthcheck", "false"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53HealthCheck_withSearchString(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_health_check.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigWithSearchString, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "invert_healthcheck", "false"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "search_string", "OK"), - ), - }, - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigWithSearchStringUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - 
"aws_route53_health_check.foo", "invert_healthcheck", "true"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "search_string", "FAILED"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53HealthCheck_withChildHealthChecks(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckConfig_withChildHealthChecks, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53HealthCheck_IpConfig(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckIpConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.bar"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53HealthCheck_CloudWatchAlarmCheck(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckCloudWatchAlarm, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "cloudwatch_alarm_name", "cloudwatch-healthcheck-alarm"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53HealthCheck_withSNI(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_health_check.foo", - Providers: testAccProviders, - CheckDestroy: 
testAccCheckRoute53HealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigWithoutSNI, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "enable_sni", "true"), - ), - }, - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigWithSNIDisabled, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "enable_sni", "false"), - ), - }, - resource.TestStep{ - Config: testAccRoute53HealthCheckConfigWithSNI, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"), - resource.TestCheckResourceAttr( - "aws_route53_health_check.foo", "enable_sni", "true"), - ), - }, - }, - }) -} - -func testAccCheckRoute53HealthCheckDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route53_health_check" { - continue - } - - lopts := &route53.ListHealthChecksInput{} - resp, err := conn.ListHealthChecks(lopts) - if err != nil { - return err - } - if len(resp.HealthChecks) == 0 { - return nil - } - - for _, check := range resp.HealthChecks { - if *check.Id == rs.Primary.ID { - return fmt.Errorf("Record still exists: %#v", check) - } - - } - - } - return nil -} - -func testAccCheckRoute53HealthCheckExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - fmt.Print(rs.Primary.ID) - - if rs.Primary.ID == "" { - return fmt.Errorf("No health check ID is set") - } - - lopts := &route53.ListHealthChecksInput{} - resp, err := 
conn.ListHealthChecks(lopts) - if err != nil { - return err - } - if len(resp.HealthChecks) == 0 { - return fmt.Errorf("Health Check does not exist") - } - - for _, check := range resp.HealthChecks { - if *check.Id == rs.Primary.ID { - return nil - } - - } - return fmt.Errorf("Health Check does not exist") - } -} - -func testUpdateHappened(n string) resource.TestCheckFunc { - return nil -} - -const testAccRoute53HealthCheckConfig = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 80 - type = "HTTP" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - measure_latency = true - invert_healthcheck = true - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckConfigUpdate = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 80 - type = "HTTP" - resource_path = "/" - failure_threshold = "5" - request_interval = "30" - measure_latency = true - invert_healthcheck = false - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckIpConfig = ` -resource "aws_route53_health_check" "bar" { - ip_address = "1.2.3.4" - port = 80 - type = "HTTP" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckConfig_withChildHealthChecks = ` -resource "aws_route53_health_check" "child1" { - fqdn = "child1.notexample.com" - port = 80 - type = "HTTP" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" -} - -resource "aws_route53_health_check" "foo" { - type = "CALCULATED" - child_health_threshold = 1 - child_healthchecks = ["${aws_route53_health_check.child1.id}"] - - tags = { - Name = "tf-test-calculated-health-check" - } -} -` - -const testAccRoute53HealthCheckCloudWatchAlarm = ` -resource "aws_cloudwatch_metric_alarm" "foobar" { - alarm_name = "cloudwatch-healthcheck-alarm" - comparison_operator = 
"GreaterThanOrEqualToThreshold" - evaluation_periods = "2" - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = "120" - statistic = "Average" - threshold = "80" - alarm_description = "This metric monitors ec2 cpu utilization" -} - -resource "aws_route53_health_check" "foo" { - type = "CLOUDWATCH_METRIC" - cloudwatch_alarm_name = "${aws_cloudwatch_metric_alarm.foobar.alarm_name}" - cloudwatch_alarm_region = "us-west-2" - insufficient_data_health_status = "Healthy" -} -` - -const testAccRoute53HealthCheckConfigWithSearchString = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 80 - type = "HTTP_STR_MATCH" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - measure_latency = true - invert_healthcheck = false - search_string = "OK" - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckConfigWithSearchStringUpdate = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 80 - type = "HTTP_STR_MATCH" - resource_path = "/" - failure_threshold = "5" - request_interval = "30" - measure_latency = true - invert_healthcheck = true - search_string = "FAILED" - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckConfigWithoutSNI = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 443 - type = "HTTPS" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - measure_latency = true - invert_healthcheck = true - - tags = { - Name = "tf-test-health-check" - } -} -` - -const testAccRoute53HealthCheckConfigWithSNI = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 443 - type = "HTTPS" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - measure_latency = true - invert_healthcheck = true - enable_sni = true - - tags = { - Name = "tf-test-health-check" - } -} -` - -const 
testAccRoute53HealthCheckConfigWithSNIDisabled = ` -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 443 - type = "HTTPS" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - measure_latency = true - invert_healthcheck = true - enable_sni = false - - tags = { - Name = "tf-test-health-check" - } -} -` diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go deleted file mode 100644 index 42c02c917..000000000 --- a/builtin/providers/aws/resource_aws_route53_record.go +++ /dev/null @@ -1,877 +0,0 @@ -package aws - -import ( - "bytes" - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/route53" -) - -var r53NoRecordsFound = errors.New("No matching Hosted Zone found") -var r53NoHostedZoneFound = errors.New("No matching records found") - -func resourceAwsRoute53Record() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRoute53RecordCreate, - Read: resourceAwsRoute53RecordRead, - Update: resourceAwsRoute53RecordUpdate, - Delete: resourceAwsRoute53RecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - SchemaVersion: 2, - MigrateState: resourceAwsRoute53RecordMigrateState, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - value := strings.TrimSuffix(v.(string), ".") - return strings.ToLower(value) - }, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRoute53RecordType, - }, - - "zone_id": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if value == "" { - es = append(es, fmt.Errorf("Cannot have empty zone_id")) - } - return - }, - }, - - "ttl": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"alias"}, - }, - - "weight": { - Type: schema.TypeInt, - Optional: true, - Removed: "Now implemented as weighted_routing_policy; Please see https://www.terraform.io/docs/providers/aws/r/route53_record.html", - }, - - "set_identifier": { - Type: schema.TypeString, - Optional: true, - }, - - "alias": { - Type: schema.TypeSet, - Optional: true, - ConflictsWith: []string{"records", "ttl"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "zone_id": { - Type: schema.TypeString, - Required: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - StateFunc: normalizeAwsAliasName, - }, - - "evaluate_target_health": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Set: resourceAwsRoute53AliasRecordHash, - }, - - "failover": { // PRIMARY | SECONDARY - Type: schema.TypeString, - Optional: true, - Removed: "Now implemented as failover_routing_policy; see docs", - }, - - "failover_routing_policy": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{ - "geolocation_routing_policy", - "latency_routing_policy", - "weighted_routing_policy", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if value != "PRIMARY" && value != "SECONDARY" { - es = append(es, fmt.Errorf("Failover policy type must be PRIMARY or SECONDARY")) - } - return - }, - }, - }, - }, - }, - - "latency_routing_policy": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{ - "failover_routing_policy", - "geolocation_routing_policy", - 
"weighted_routing_policy", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "geolocation_routing_policy": { // AWS Geolocation - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{ - "failover_routing_policy", - "latency_routing_policy", - "weighted_routing_policy", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "continent": { - Type: schema.TypeString, - Optional: true, - }, - "country": { - Type: schema.TypeString, - Optional: true, - }, - "subdivision": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "weighted_routing_policy": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{ - "failover_routing_policy", - "geolocation_routing_policy", - "latency_routing_policy", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "weight": { - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - - "health_check_id": { // ID of health check - Type: schema.TypeString, - Optional: true, - }, - - "records": { - Type: schema.TypeSet, - ConflictsWith: []string{"alias"}, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Set: schema.HashString, - }, - }, - } -} - -func resourceAwsRoute53RecordUpdate(d *schema.ResourceData, meta interface{}) error { - // Route 53 supports CREATE, DELETE, and UPSERT actions. We use UPSERT, and - // AWS dynamically determines if a record should be created or updated. - // Amazon Route 53 can update an existing resource record set only when all - // of the following values match: Name, Type and SetIdentifier - // See http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html - - if !d.HasChange("type") && !d.HasChange("set_identifier") { - // If neither type nor set_identifier changed we use UPSERT, - // for resouce update here we simply fall through to - // our resource create function. 
- return resourceAwsRoute53RecordCreate(d, meta) - } - - // Otherwise we delete the existing record and create a new record within - // a transactional change - conn := meta.(*AWSClient).r53conn - zone := cleanZoneID(d.Get("zone_id").(string)) - - var err error - zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)}) - if err != nil { - return err - } - if zoneRecord.HostedZone == nil { - return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone) - } - - // Build the to be deleted record - en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name) - typeo, _ := d.GetChange("type") - - oldRec := &route53.ResourceRecordSet{ - Name: aws.String(en), - Type: aws.String(typeo.(string)), - } - - if v, _ := d.GetChange("ttl"); v.(int) != 0 { - oldRec.TTL = aws.Int64(int64(v.(int))) - } - - // Resource records - if v, _ := d.GetChange("records"); v != nil { - recs := v.(*schema.Set).List() - if len(recs) > 0 { - oldRec.ResourceRecords = expandResourceRecords(recs, typeo.(string)) - } - } - - // Alias record - if v, _ := d.GetChange("alias"); v != nil { - aliases := v.(*schema.Set).List() - if len(aliases) == 1 { - alias := aliases[0].(map[string]interface{}) - oldRec.AliasTarget = &route53.AliasTarget{ - DNSName: aws.String(alias["name"].(string)), - EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)), - HostedZoneId: aws.String(alias["zone_id"].(string)), - } - } - } - - if v, _ := d.GetChange("set_identifier"); v.(string) != "" { - oldRec.SetIdentifier = aws.String(v.(string)) - } - - // Build the to be created record - rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name) - if err != nil { - return err - } - - // Delete the old and create the new records in a single batch. We abuse - // StateChangeConf for this to retry for us since Route53 sometimes returns - // errors about another operation happening at the same time. 
- changeBatch := &route53.ChangeBatch{ - Comment: aws.String("Managed by Terraform"), - Changes: []*route53.Change{ - { - Action: aws.String("DELETE"), - ResourceRecordSet: oldRec, - }, - { - Action: aws.String("CREATE"), - ResourceRecordSet: rec, - }, - }, - } - - req := &route53.ChangeResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)), - ChangeBatch: changeBatch, - } - - log.Printf("[DEBUG] Updating resource records for zone: %s, name: %s\n\n%s", - zone, *rec.Name, req) - - respRaw, err := changeRoute53RecordSet(conn, req) - if err != nil { - return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err) - } - - changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo - - // Generate an ID - vars := []string{ - zone, - strings.ToLower(d.Get("name").(string)), - d.Get("type").(string), - } - if v, ok := d.GetOk("set_identifier"); ok { - vars = append(vars, v.(string)) - } - - d.SetId(strings.Join(vars, "_")) - - err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id)) - if err != nil { - return err - } - - return resourceAwsRoute53RecordRead(d, meta) -} - -func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - zone := cleanZoneID(d.Get("zone_id").(string)) - - var err error - zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)}) - if err != nil { - return err - } - if zoneRecord.HostedZone == nil { - return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone) - } - - // Build the record - rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name) - if err != nil { - return err - } - - // Create the new records. We abuse StateChangeConf for this to - // retry for us since Route53 sometimes returns errors about another - // operation happening at the same time. 
- changeBatch := &route53.ChangeBatch{ - Comment: aws.String("Managed by Terraform"), - Changes: []*route53.Change{ - { - Action: aws.String("UPSERT"), - ResourceRecordSet: rec, - }, - }, - } - - req := &route53.ChangeResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)), - ChangeBatch: changeBatch, - } - - log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s\n\n%s", - zone, *rec.Name, req) - - respRaw, err := changeRoute53RecordSet(conn, req) - if err != nil { - return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err) - } - - changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo - - // Generate an ID - vars := []string{ - zone, - strings.ToLower(d.Get("name").(string)), - d.Get("type").(string), - } - if v, ok := d.GetOk("set_identifier"); ok { - vars = append(vars, v.(string)) - } - - d.SetId(strings.Join(vars, "_")) - - err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id)) - if err != nil { - return err - } - - return resourceAwsRoute53RecordRead(d, meta) -} - -func changeRoute53RecordSet(conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (interface{}, error) { - wait := resource.StateChangeConf{ - Pending: []string{"rejected"}, - Target: []string{"accepted"}, - Timeout: 5 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := conn.ChangeResourceRecordSets(input) - if err != nil { - if r53err, ok := err.(awserr.Error); ok { - if r53err.Code() == "PriorRequestNotComplete" { - // There is some pending operation, so just retry - // in a bit. 
- return nil, "rejected", nil - } - } - - return nil, "failure", err - } - - return resp, "accepted", nil - }, - } - - return wait.WaitForState() -} - -func waitForRoute53RecordSetToSync(conn *route53.Route53, requestId string) error { - wait := resource.StateChangeConf{ - Delay: 30 * time.Second, - Pending: []string{"PENDING"}, - Target: []string{"INSYNC"}, - Timeout: 30 * time.Minute, - MinTimeout: 5 * time.Second, - Refresh: func() (result interface{}, state string, err error) { - changeRequest := &route53.GetChangeInput{ - Id: aws.String(requestId), - } - return resourceAwsGoRoute53Wait(conn, changeRequest) - }, - } - _, err := wait.WaitForState() - return err -} - -func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) error { - // If we don't have a zone ID we're doing an import. Parse it from the ID. - if _, ok := d.GetOk("zone_id"); !ok { - parts := strings.Split(d.Id(), "_") - - //we check that we have parsed the id into the correct number of segments - //we need at least 3 segments! - if len(parts) == 1 || len(parts) < 3 { - return fmt.Errorf("Error Importing aws_route_53 record. Please make sure the record ID is in the form ZONEID_RECORDNAME_TYPE (i.e. 
Z4KAPRWWNC7JR_dev_A") - } - - d.Set("zone_id", parts[0]) - d.Set("name", parts[1]) - d.Set("type", parts[2]) - if len(parts) > 3 { - d.Set("set_identifier", parts[3]) - } - } - - record, err := findRecord(d, meta) - if err != nil { - switch err { - case r53NoHostedZoneFound, r53NoRecordsFound: - log.Printf("[DEBUG] %s for: %s, removing from state file", err, d.Id()) - d.SetId("") - return nil - default: - return err - } - } - - err = d.Set("records", flattenResourceRecords(record.ResourceRecords, *record.Type)) - if err != nil { - return fmt.Errorf("[DEBUG] Error setting records for: %s, error: %#v", d.Id(), err) - } - - if alias := record.AliasTarget; alias != nil { - name := normalizeAwsAliasName(*alias.DNSName) - d.Set("alias", []interface{}{ - map[string]interface{}{ - "zone_id": *alias.HostedZoneId, - "name": name, - "evaluate_target_health": *alias.EvaluateTargetHealth, - }, - }) - } - - d.Set("ttl", record.TTL) - - if record.Failover != nil { - v := []map[string]interface{}{{ - "type": aws.StringValue(record.Failover), - }} - if err := d.Set("failover_routing_policy", v); err != nil { - return fmt.Errorf("[DEBUG] Error setting failover records for: %s, error: %#v", d.Id(), err) - } - } - - if record.GeoLocation != nil { - v := []map[string]interface{}{{ - "continent": aws.StringValue(record.GeoLocation.ContinentCode), - "country": aws.StringValue(record.GeoLocation.CountryCode), - "subdivision": aws.StringValue(record.GeoLocation.SubdivisionCode), - }} - if err := d.Set("geolocation_routing_policy", v); err != nil { - return fmt.Errorf("[DEBUG] Error setting gelocation records for: %s, error: %#v", d.Id(), err) - } - } - - if record.Region != nil { - v := []map[string]interface{}{{ - "region": aws.StringValue(record.Region), - }} - if err := d.Set("latency_routing_policy", v); err != nil { - return fmt.Errorf("[DEBUG] Error setting latency records for: %s, error: %#v", d.Id(), err) - } - } - - if record.Weight != nil { - v := []map[string]interface{}{{ - 
"weight": aws.Int64Value((record.Weight)), - }} - if err := d.Set("weighted_routing_policy", v); err != nil { - return fmt.Errorf("[DEBUG] Error setting weighted records for: %s, error: %#v", d.Id(), err) - } - } - - d.Set("set_identifier", record.SetIdentifier) - d.Set("health_check_id", record.HealthCheckId) - - return nil -} - -// findRecord takes a ResourceData struct for aws_resource_route53_record. It -// uses the referenced zone_id to query Route53 and find information on it's -// records. -// -// If records are found, it returns the matching -// route53.ResourceRecordSet and nil for the error. -// -// If no hosted zone is found, it returns a nil recordset and r53NoHostedZoneFound -// error. -// -// If no matching recordset is found, it returns nil and a r53NoRecordsFound -// error -// -// If there are other errors, it returns nil a nil recordset and passes on the -// error. -func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) { - conn := meta.(*AWSClient).r53conn - // Scan for a - zone := cleanZoneID(d.Get("zone_id").(string)) - - // get expanded name - zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)}) - if err != nil { - if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { - return nil, r53NoHostedZoneFound - } - return nil, err - } - - en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name) - log.Printf("[DEBUG] Expanded record name: %s", en) - d.Set("fqdn", en) - - lopts := &route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(zone)), - StartRecordName: aws.String(en), - StartRecordType: aws.String(d.Get("type").(string)), - } - - log.Printf("[DEBUG] List resource records sets for zone: %s, opts: %s", - zone, lopts) - resp, err := conn.ListResourceRecordSets(lopts) - if err != nil { - return nil, err - } - - for _, record := range resp.ResourceRecordSets { - name := cleanRecordName(*record.Name) - if 
FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) { - continue - } - if strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) { - continue - } - - if record.SetIdentifier != nil && *record.SetIdentifier != d.Get("set_identifier") { - continue - } - // The only safe return where a record is found - return record, nil - } - return nil, r53NoRecordsFound -} - -func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - // Get the records - rec, err := findRecord(d, meta) - if err != nil { - switch err { - case r53NoHostedZoneFound, r53NoRecordsFound: - log.Printf("[DEBUG] %s for: %s, removing from state file", err, d.Id()) - d.SetId("") - return nil - default: - return err - } - } - - // Change batch for deleting - changeBatch := &route53.ChangeBatch{ - Comment: aws.String("Deleted by Terraform"), - Changes: []*route53.Change{ - { - Action: aws.String("DELETE"), - ResourceRecordSet: rec, - }, - }, - } - - zone := cleanZoneID(d.Get("zone_id").(string)) - - req := &route53.ChangeResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(zone)), - ChangeBatch: changeBatch, - } - - respRaw, err := deleteRoute53RecordSet(conn, req) - if err != nil { - return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err) - } - - changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo - if changeInfo == nil { - log.Printf("[INFO] No ChangeInfo Found. 
Waiting for Sync not required") - return nil - } - - err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id)) - if err != nil { - return err - } - - return err -} - -func deleteRoute53RecordSet(conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (interface{}, error) { - wait := resource.StateChangeConf{ - Pending: []string{"rejected"}, - Target: []string{"accepted"}, - Timeout: 5 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := conn.ChangeResourceRecordSets(input) - if err != nil { - if r53err, ok := err.(awserr.Error); ok { - if r53err.Code() == "PriorRequestNotComplete" { - // There is some pending operation, so just retry - // in a bit. - return 42, "rejected", nil - } - - if r53err.Code() == "InvalidChangeBatch" { - // This means that the record is already gone. - return resp, "accepted", nil - } - } - - return 42, "failure", err - } - - return resp, "accepted", nil - }, - } - - return wait.WaitForState() -} - -func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (*route53.ResourceRecordSet, error) { - // get expanded name - en := expandRecordName(d.Get("name").(string), zoneName) - - // Create the RecordSet request with the fully expanded name, e.g. - // sub.domain.com. Route 53 requires a fully qualified domain name, but does - // not require the trailing ".", which it will itself, so we don't call FQDN - // here. 
- rec := &route53.ResourceRecordSet{ - Name: aws.String(en), - Type: aws.String(d.Get("type").(string)), - } - - if v, ok := d.GetOk("ttl"); ok { - rec.TTL = aws.Int64(int64(v.(int))) - } - - // Resource records - if v, ok := d.GetOk("records"); ok { - recs := v.(*schema.Set).List() - rec.ResourceRecords = expandResourceRecords(recs, d.Get("type").(string)) - } - - // Alias record - if v, ok := d.GetOk("alias"); ok { - aliases := v.(*schema.Set).List() - if len(aliases) > 1 { - return nil, fmt.Errorf("You can only define a single alias target per record") - } - alias := aliases[0].(map[string]interface{}) - rec.AliasTarget = &route53.AliasTarget{ - DNSName: aws.String(alias["name"].(string)), - EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)), - HostedZoneId: aws.String(alias["zone_id"].(string)), - } - log.Printf("[DEBUG] Creating alias: %#v", alias) - } else { - if _, ok := d.GetOk("ttl"); !ok { - return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "ttl": required field is not set`, d.Get("name").(string)) - } - - if _, ok := d.GetOk("records"); !ok { - return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "records": required field is not set`, d.Get("name").(string)) - } - } - - if v, ok := d.GetOk("failover_routing_policy"); ok { - if _, ok := d.GetOk("set_identifier"); !ok { - return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "failover_routing_policy" is set`, d.Get("name").(string)) - } - records := v.([]interface{}) - if len(records) > 1 { - return nil, fmt.Errorf("You can only define a single failover_routing_policy per record") - } - failover := records[0].(map[string]interface{}) - - rec.Failover = aws.String(failover["type"].(string)) - } - - if v, ok := d.GetOk("health_check_id"); ok { - rec.HealthCheckId = aws.String(v.(string)) - } - - if v, ok := d.GetOk("weighted_routing_policy"); ok { - if _, ok := d.GetOk("set_identifier"); !ok { - return nil, 
fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "weight_routing_policy" is set`, d.Get("name").(string)) - } - records := v.([]interface{}) - if len(records) > 1 { - return nil, fmt.Errorf("You can only define a single weighed_routing_policy per record") - } - weight := records[0].(map[string]interface{}) - - rec.Weight = aws.Int64(int64(weight["weight"].(int))) - } - - if v, ok := d.GetOk("set_identifier"); ok { - rec.SetIdentifier = aws.String(v.(string)) - } - - if v, ok := d.GetOk("latency_routing_policy"); ok { - if _, ok := d.GetOk("set_identifier"); !ok { - return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "latency_routing_policy" is set`, d.Get("name").(string)) - } - records := v.([]interface{}) - if len(records) > 1 { - return nil, fmt.Errorf("You can only define a single latency_routing_policy per record") - } - latency := records[0].(map[string]interface{}) - - rec.Region = aws.String(latency["region"].(string)) - } - - if v, ok := d.GetOk("geolocation_routing_policy"); ok { - if _, ok := d.GetOk("set_identifier"); !ok { - return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "geolocation_routing_policy" is set`, d.Get("name").(string)) - } - geolocations := v.([]interface{}) - if len(geolocations) > 1 { - return nil, fmt.Errorf("You can only define a single geolocation_routing_policy per record") - } - geolocation := geolocations[0].(map[string]interface{}) - - rec.GeoLocation = &route53.GeoLocation{ - ContinentCode: nilString(geolocation["continent"].(string)), - CountryCode: nilString(geolocation["country"].(string)), - SubdivisionCode: nilString(geolocation["subdivision"].(string)), - } - log.Printf("[DEBUG] Creating geolocation: %#v", geolocation) - } - - return rec, nil -} - -func FQDN(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' 
{ - return name - } else { - return name + "." - } -} - -// Route 53 stores the "*" wildcard indicator as ASCII 42 and returns the -// octal equivalent, "\\052". Here we look for that, and convert back to "*" -// as needed. -func cleanRecordName(name string) string { - str := name - if strings.HasPrefix(name, "\\052") { - str = strings.Replace(name, "\\052", "*", 1) - log.Printf("[DEBUG] Replacing octal \\052 for * in: %s", name) - } - return str -} - -// Check if the current record name contains the zone suffix. -// If it does not, add the zone name to form a fully qualified name -// and keep AWS happy. -func expandRecordName(name, zone string) string { - rn := strings.ToLower(strings.TrimSuffix(name, ".")) - zone = strings.TrimSuffix(zone, ".") - if !strings.HasSuffix(rn, zone) { - if len(name) == 0 { - rn = zone - } else { - rn = strings.Join([]string{name, zone}, ".") - } - } - return rn -} - -func resourceAwsRoute53AliasRecordHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", normalizeAwsAliasName(m["name"].(string)))) - buf.WriteString(fmt.Sprintf("%s-", m["zone_id"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["evaluate_target_health"].(bool))) - - return hashcode.String(buf.String()) -} - -// nilString takes a string as an argument and returns a string -// pointer. The returned pointer is nil if the string argument is -// empty, otherwise it is a pointer to a copy of the string. 
-func nilString(s string) *string { - if s == "" { - return nil - } - return aws.String(s) -} - -func normalizeAwsAliasName(alias interface{}) string { - input := alias.(string) - if strings.HasPrefix(input, "dualstack.") { - return strings.Replace(input, "dualstack.", "", -1) - } - - return strings.TrimRight(input, ".") -} diff --git a/builtin/providers/aws/resource_aws_route53_record_migrate.go b/builtin/providers/aws/resource_aws_route53_record_migrate.go deleted file mode 100644 index ad6cda9d3..000000000 --- a/builtin/providers/aws/resource_aws_route53_record_migrate.go +++ /dev/null @@ -1,62 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsRoute53RecordMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Route53 Record State v0; migrating to v1 then v2") - v1InstanceState, err := migrateRoute53RecordStateV0toV1(is) - if err != nil { - return v1InstanceState, err - } - return migrateRoute53RecordStateV1toV2(v1InstanceState) - case 1: - log.Println("[INFO] Found AWS Route53 Record State v1; migrating to v2") - return migrateRoute53RecordStateV1toV2(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateRoute53RecordStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newName := strings.TrimSuffix(is.Attributes["name"], ".") - is.Attributes["name"] = newName - log.Printf("[DEBUG] Attributes after migration: %#v, new name: %s", is.Attributes, newName) - return is, nil -} - -func migrateRoute53RecordStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; 
nothing to migrate.") - return is, nil - } - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - if is.Attributes["weight"] != "" && is.Attributes["weight"] != "-1" { - is.Attributes["weighted_routing_policy.#"] = "1" - key := fmt.Sprintf("weighted_routing_policy.0.weight") - is.Attributes[key] = is.Attributes["weight"] - } - if is.Attributes["failover"] != "" { - is.Attributes["failover_routing_policy.#"] = "1" - key := fmt.Sprintf("failover_routing_policy.0.type") - is.Attributes[key] = is.Attributes["failover"] - } - delete(is.Attributes, "weight") - delete(is.Attributes, "failover") - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_route53_record_migrate_test.go b/builtin/providers/aws/resource_aws_route53_record_migrate_test.go deleted file mode 100644 index 6efe0a4fa..000000000 --- a/builtin/providers/aws/resource_aws_route53_record_migrate_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSRoute53RecordMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta interface{} - }{ - "v0_0": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{ - "name": "www", - }, - Expected: "www", - }, - "v0_1": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{ - "name": "www.notdomain.com.", - }, - Expected: "www.notdomain.com", - }, - "v0_2": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{ - "name": "www.notdomain.com", - }, - Expected: "www.notdomain.com", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsRoute53RecordMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if 
is.Attributes["name"] != tc.Expected { - t.Fatalf("bad Route 53 Migrate: %s\n\n expected: %s", is.Attributes["name"], tc.Expected) - } - } -} - -func TestAWSRoute53RecordMigrateStateV1toV2(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0_1": { - StateVersion: 1, - Attributes: map[string]string{ - "weight": "0", - "failover": "PRIMARY", - }, - Expected: map[string]string{ - "weighted_routing_policy.#": "1", - "weighted_routing_policy.0.weight": "0", - "failover_routing_policy.#": "1", - "failover_routing_policy.0.type": "PRIMARY", - }, - }, - "v0_2": { - StateVersion: 0, - Attributes: map[string]string{ - "weight": "-1", - }, - Expected: map[string]string{}, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "route53_record", - Attributes: tc.Attributes, - } - is, err := resourceAwsRoute53Record().MigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} diff --git a/builtin/providers/aws/resource_aws_route53_record_test.go b/builtin/providers/aws/resource_aws_route53_record_test.go deleted file mode 100644 index 567f36a13..000000000 --- a/builtin/providers/aws/resource_aws_route53_record_test.go +++ /dev/null @@ -1,1197 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestCleanRecordName(t *testing.T) { - cases := []struct { - Input, Output string - }{ - {"www.nonexample.com", 
"www.nonexample.com"}, - {"\\052.nonexample.com", "*.nonexample.com"}, - {"nonexample.com", "nonexample.com"}, - } - - for _, tc := range cases { - actual := cleanRecordName(tc.Input) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestExpandRecordName(t *testing.T) { - cases := []struct { - Input, Output string - }{ - {"www", "www.nonexample.com"}, - {"dev.www", "dev.www.nonexample.com"}, - {"*", "*.nonexample.com"}, - {"nonexample.com", "nonexample.com"}, - {"test.nonexample.com", "test.nonexample.com"}, - {"test.nonexample.com.", "test.nonexample.com"}, - } - - zone_name := "nonexample.com" - for _, tc := range cases { - actual := expandRecordName(tc.Input, zone_name) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestAccAWSRoute53Record_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.default", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_basic_fqdn(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.default", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfig_fqdn, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - ), - }, - - // Ensure that changing the name to include a trailing "dot" results in - // nothing happening, because the name is stripped of trailing dots on - // save. 
Otherwise, an update would occur and due to the - // create_before_destroy, the record would actually be destroyed, and a - // non-empty plan would appear, and the record will fail to exist in - // testAccCheckRoute53RecordExists - resource.TestStep{ - Config: testAccRoute53RecordConfig_fqdn_no_op, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_txtSupport(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.default", - IDRefreshIgnore: []string{"zone_id"}, // just for this test - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfigTXT, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_spfSupport(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.default", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfigSPF, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - resource.TestCheckResourceAttr( - "aws_route53_record.default", "records.2930149397", "include:notexample.com"), - ), - }, - }, - }) -} -func TestAccAWSRoute53Record_generatesSuffix(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.default", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfigSuffix, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckRoute53RecordExists("aws_route53_record.default"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_wildcard(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.wildcard", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53WildCardRecordConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.wildcard"), - ), - }, - - // Cause a change, which will trigger a refresh - resource.TestStep{ - Config: testAccRoute53WildCardRecordConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.wildcard"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_failover(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.www-primary", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53FailoverCNAMERecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.www-primary"), - testAccCheckRoute53RecordExists("aws_route53_record.www-secondary"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_weighted_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.www-live", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53WeightedCNAMERecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.www-dev"), - testAccCheckRoute53RecordExists("aws_route53_record.www-live"), - testAccCheckRoute53RecordExists("aws_route53_record.www-off"), - ), 
- }, - }, - }) -} - -func TestAccAWSRoute53Record_alias(t *testing.T) { - rs := acctest.RandString(10) - config := fmt.Sprintf(testAccRoute53ElbAliasRecord, rs) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.alias", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.alias"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_s3_alias(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53S3AliasRecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.alias"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_weighted_alias(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.elb_weighted_alias_live", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53WeightedElbAliasRecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.elb_weighted_alias_live"), - testAccCheckRoute53RecordExists("aws_route53_record.elb_weighted_alias_dev"), - ), - }, - - resource.TestStep{ - Config: testAccRoute53WeightedR53AliasRecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.green_origin"), - testAccCheckRoute53RecordExists("aws_route53_record.r53_weighted_alias_live"), - testAccCheckRoute53RecordExists("aws_route53_record.blue_origin"), - 
testAccCheckRoute53RecordExists("aws_route53_record.r53_weighted_alias_dev"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_geolocation_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53GeolocationCNAMERecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.default"), - testAccCheckRoute53RecordExists("aws_route53_record.california"), - testAccCheckRoute53RecordExists("aws_route53_record.oceania"), - testAccCheckRoute53RecordExists("aws_route53_record.denmark"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_latency_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53LatencyCNAMERecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.us-east-1"), - testAccCheckRoute53RecordExists("aws_route53_record.eu-west-1"), - testAccCheckRoute53RecordExists("aws_route53_record.ap-northeast-1"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_TypeChange(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.sample", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordTypeChangePre, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.sample"), - ), - }, - - // Cause a change, which will trigger a refresh - resource.TestStep{ - Config: testAccRoute53RecordTypeChangePost, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckRoute53RecordExists("aws_route53_record.sample"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_SetIdentiferChange(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.basic_to_weighted", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordSetIdentifierChangePre, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.basic_to_weighted"), - ), - }, - - // Cause a change, which will trigger a refresh - resource.TestStep{ - Config: testAccRoute53RecordSetIdentifierChangePost, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.basic_to_weighted"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_AliasChange(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.elb_alias_change", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordAliasChangePre, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.elb_alias_change"), - ), - }, - - // Cause a change, which will trigger a refresh - resource.TestStep{ - Config: testAccRoute53RecordAliasChangePost, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.elb_alias_change"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Record_empty(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.empty", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfigEmptyName, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.empty"), - ), - }, - }, - }) -} - -// Regression test for https://github.com/hashicorp/terraform/issues/8423 -func TestAccAWSRoute53Record_longTXTrecord(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_record.long_txt", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53RecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53RecordConfigLongTxtRecord, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53RecordExists("aws_route53_record.long_txt"), - ), - }, - }, - }) -} - -func testAccCheckRoute53RecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route53_record" { - continue - } - - parts := strings.Split(rs.Primary.ID, "_") - zone := parts[0] - name := parts[1] - rType := parts[2] - - en := expandRecordName(name, "notexample.com") - - lopts := &route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(zone)), - StartRecordName: aws.String(en), - StartRecordType: aws.String(rType), - } - - resp, err := conn.ListResourceRecordSets(lopts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // if NoSuchHostedZone, then all the things are destroyed - if awsErr.Code() == "NoSuchHostedZone" { - return nil - } - } - return err - } - if len(resp.ResourceRecordSets) == 0 { - return nil - } - rec := resp.ResourceRecordSets[0] - if FQDN(*rec.Name) == FQDN(name) && *rec.Type == rType { - return fmt.Errorf("Record still exists: %#v", rec) - } - } - return nil -} - -func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - 
} - - if rs.Primary.ID == "" { - return fmt.Errorf("No hosted zone ID is set") - } - - parts := strings.Split(rs.Primary.ID, "_") - zone := parts[0] - name := parts[1] - rType := parts[2] - - en := expandRecordName(name, "notexample.com") - - lopts := &route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(cleanZoneID(zone)), - StartRecordName: aws.String(en), - StartRecordType: aws.String(rType), - } - - resp, err := conn.ListResourceRecordSets(lopts) - if err != nil { - return err - } - if len(resp.ResourceRecordSets) == 0 { - return fmt.Errorf("Record does not exist") - } - - // rec := resp.ResourceRecordSets[0] - for _, rec := range resp.ResourceRecordSets { - recName := cleanRecordName(*rec.Name) - if FQDN(strings.ToLower(recName)) == FQDN(strings.ToLower(en)) && *rec.Type == rType { - return nil - } - } - return fmt.Errorf("Record does not exist: %#v", rs.Primary.ID) - } -} - -const testAccRoute53RecordConfig = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www.NOTexamplE.com" - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] -} -` - -const testAccRoute53RecordConfigCNAMERecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "host123.domain" - type = "CNAME" - ttl = "30" - records = ["1.2.3.4"] -} -` - -const testAccRoute53RecordConfigCNAMERecordUpdateToCNAME = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "host123.domain" - type = "A" - ttl = "30" - records = ["1.2.3.4"] -} -` - -const testAccRoute53RecordConfig_fqdn = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = 
"${aws_route53_zone.main.zone_id}" - name = "www.NOTexamplE.com" - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] - - lifecycle { - create_before_destroy = true - } -} -` - -const testAccRoute53RecordConfig_fqdn_no_op = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www.NOTexamplE.com." - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] - - lifecycle { - create_before_destroy = true - } -} -` - -const testAccRoute53RecordNoConfig = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} -` - -const testAccRoute53RecordConfigSuffix = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "subdomain" - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] -} -` - -const testAccRoute53WildCardRecordConfig = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "subdomain" - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] -} - -resource "aws_route53_record" "wildcard" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "*.notexample.com" - type = "A" - ttl = "30" - records = ["127.0.0.1"] -} -` - -const testAccRoute53WildCardRecordConfigUpdate = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "subdomain" - type = "A" - ttl = "30" - records = ["127.0.0.1", "127.0.0.27"] -} - -resource "aws_route53_record" "wildcard" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "*.notexample.com" - type = "A" - ttl = "60" - records = ["127.0.0.1"] -} -` -const testAccRoute53RecordConfigTXT = ` -resource "aws_route53_zone" "main" { 
- name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "/hostedzone/${aws_route53_zone.main.zone_id}" - name = "subdomain" - type = "TXT" - ttl = "30" - records = ["lalalala"] -} -` -const testAccRoute53RecordConfigSPF = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "test" - type = "SPF" - ttl = "30" - records = ["include:notexample.com"] -} -` - -const testAccRoute53FailoverCNAMERecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_health_check" "foo" { - fqdn = "dev.notexample.com" - port = 80 - type = "HTTP" - resource_path = "/" - failure_threshold = "2" - request_interval = "30" - - tags = { - Name = "tf-test-health-check" - } -} - -resource "aws_route53_record" "www-primary" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - failover_routing_policy { - type = "PRIMARY" - } - health_check_id = "${aws_route53_health_check.foo.id}" - set_identifier = "www-primary" - records = ["primary.notexample.com"] -} - -resource "aws_route53_record" "www-secondary" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - failover_routing_policy { - type = "SECONDARY" - } - set_identifier = "www-secondary" - records = ["secondary.notexample.com"] -} -` - -const testAccRoute53WeightedCNAMERecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "www-dev" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - weighted_routing_policy { - weight = 10 - } - set_identifier = "dev" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "www-live" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - weighted_routing_policy { - weight = 90 - } - 
set_identifier = "live" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "www-off" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - weighted_routing_policy = { - weight = 0 - } - set_identifier = "off" - records = ["dev.notexample.com"] -} -` - -const testAccRoute53GeolocationCNAMERecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "default" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - geolocation_routing_policy { - country = "*" - } - set_identifier = "Default" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "california" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - geolocation_routing_policy { - country = "US" - subdivision = "CA" - } - set_identifier = "California" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "oceania" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - geolocation_routing_policy { - continent = "OC" - } - set_identifier = "Oceania" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "denmark" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - geolocation_routing_policy { - country = "DK" - } - set_identifier = "Denmark" - records = ["dev.notexample.com"] -} -` - -const testAccRoute53LatencyCNAMERecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "us-east-1" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - latency_routing_policy { - region = "us-east-1" - } - set_identifier = "us-east-1" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "eu-west-1" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - 
latency_routing_policy { - region = "eu-west-1" - } - set_identifier = "eu-west-1" - records = ["dev.notexample.com"] -} - -resource "aws_route53_record" "ap-northeast-1" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - ttl = "5" - latency_routing_policy { - region = "ap-northeast-1" - } - set_identifier = "ap-northeast-1" - records = ["dev.notexample.com"] -} -` - -const testAccRoute53ElbAliasRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "alias" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "A" - - alias { - zone_id = "${aws_elb.main.zone_id}" - name = "${aws_elb.main.dns_name}" - evaluate_target_health = true - } -} - -resource "aws_elb" "main" { - name = "foobar-terraform-elb-%s" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} -` - -const testAccRoute53AliasRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "origin" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "origin" - type = "A" - ttl = 5 - records = ["127.0.0.1"] -} - -resource "aws_route53_record" "alias" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "A" - - alias { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "${aws_route53_record.origin.name}.${aws_route53_zone.main.name}" - evaluate_target_health = true - } -} -` - -const testAccRoute53S3AliasRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_s3_bucket" "website" { - bucket = "website.notexample.com" - acl = "public-read" - website { - index_document = "index.html" - } -} - -resource "aws_route53_record" "alias" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "A" - - alias { - zone_id = "${aws_s3_bucket.website.hosted_zone_id}" - name = 
"${aws_s3_bucket.website.website_domain}" - evaluate_target_health = true - } -} -` - -const testAccRoute53WeightedElbAliasRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_elb" "live" { - name = "foobar-terraform-elb-live" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_route53_record" "elb_weighted_alias_live" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "A" - - weighted_routing_policy { - weight = 90 - } - set_identifier = "live" - - alias { - zone_id = "${aws_elb.live.zone_id}" - name = "${aws_elb.live.dns_name}" - evaluate_target_health = true - } -} - -resource "aws_elb" "dev" { - name = "foobar-terraform-elb-dev" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_route53_record" "elb_weighted_alias_dev" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "A" - - weighted_routing_policy { - weight = 10 - } - set_identifier = "dev" - - alias { - zone_id = "${aws_elb.dev.zone_id}" - name = "${aws_elb.dev.dns_name}" - evaluate_target_health = true - } -} -` - -const testAccRoute53WeightedR53AliasRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "blue_origin" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "blue-origin" - type = "CNAME" - ttl = 5 - records = ["v1.terraform.io"] -} - -resource "aws_route53_record" "r53_weighted_alias_live" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - - weighted_routing_policy { - weight = 90 - } - set_identifier = "blue" - - alias { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "${aws_route53_record.blue_origin.name}.${aws_route53_zone.main.name}" - evaluate_target_health = false - } -} - 
-resource "aws_route53_record" "green_origin" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "green-origin" - type = "CNAME" - ttl = 5 - records = ["v2.terraform.io"] -} - -resource "aws_route53_record" "r53_weighted_alias_dev" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "www" - type = "CNAME" - - weighted_routing_policy { - weight = 10 - } - set_identifier = "green" - - alias { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "${aws_route53_record.green_origin.name}.${aws_route53_zone.main.name}" - evaluate_target_health = false - } -} -` - -const testAccRoute53RecordTypeChangePre = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "sample" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "sample" - type = "CNAME" - ttl = "30" - records = ["www.terraform.io"] -} -` - -const testAccRoute53RecordTypeChangePost = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "sample" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "sample" - type = "A" - ttl = "30" - records = ["127.0.0.1", "8.8.8.8"] -} -` - -const testAccRoute53RecordSetIdentifierChangePre = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "basic_to_weighted" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "sample" - type = "A" - ttl = "30" - records = ["127.0.0.1", "8.8.8.8"] -} -` - -const testAccRoute53RecordSetIdentifierChangePost = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "basic_to_weighted" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "sample" - type = "A" - ttl = "30" - records = ["127.0.0.1", "8.8.8.8"] - set_identifier = "cluster-a" - weighted_routing_policy { - weight = 100 - } -} -` - -const testAccRoute53RecordAliasChangePre = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource 
"aws_elb" "alias_change" { - name = "foobar-tf-elb-alias-change" - availability_zones = ["us-west-2a"] - - listener { - instance_port = 80 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_route53_record" "elb_alias_change" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "alias-change" - type = "A" - - alias { - zone_id = "${aws_elb.alias_change.zone_id}" - name = "${aws_elb.alias_change.dns_name}" - evaluate_target_health = true - } -} -` - -const testAccRoute53RecordAliasChangePost = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "elb_alias_change" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "alias-change" - type = "CNAME" - ttl = "30" - records = ["www.terraform.io"] -} -` - -const testAccRoute53RecordConfigEmptyName = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "empty" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "" - type = "A" - ttl = "30" - records = ["127.0.0.1"] -} -` - -const testAccRoute53RecordConfigLongTxtRecord = ` -resource "aws_route53_zone" "main" { - name = "notexample.com" -} - -resource "aws_route53_record" "long_txt" { - zone_id = "${aws_route53_zone.main.zone_id}" - name = "google.notexample.com" - type = "TXT" - ttl = "30" - records = [ - "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiajKNMp\" \"/A12roF4p3MBm9QxQu6GDsBlWUWFx8EaS8TCo3Qe8Cj0kTag1JMjzCC1s6oM0a43JhO6mp6z/" - ] -} -` diff --git a/builtin/providers/aws/resource_aws_route53_zone.go b/builtin/providers/aws/resource_aws_route53_zone.go deleted file mode 100644 index b30d38829..000000000 --- a/builtin/providers/aws/resource_aws_route53_zone.go +++ /dev/null @@ -1,391 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "sort" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/route53" -) - -func resourceAwsRoute53Zone() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRoute53ZoneCreate, - Read: resourceAwsRoute53ZoneRead, - Update: resourceAwsRoute53ZoneUpdate, - Delete: resourceAwsRoute53ZoneDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "comment": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"delegation_set_id"}, - }, - - "vpc_region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "zone_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "delegation_set_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"vpc_id"}, - }, - - "name_servers": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - - "tags": tagsSchema(), - - "force_destroy": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - } -} - -func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - req := &route53.CreateHostedZoneInput{ - Name: aws.String(d.Get("name").(string)), - HostedZoneConfig: &route53.HostedZoneConfig{Comment: aws.String(d.Get("comment").(string))}, - CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)), - } - if v := d.Get("vpc_id"); v != "" { - req.VPC = &route53.VPC{ - VPCId: aws.String(v.(string)), - VPCRegion: aws.String(meta.(*AWSClient).region), - } - if w := 
d.Get("vpc_region"); w != "" { - req.VPC.VPCRegion = aws.String(w.(string)) - } - d.Set("vpc_region", req.VPC.VPCRegion) - } - - if v, ok := d.GetOk("delegation_set_id"); ok { - req.DelegationSetId = aws.String(v.(string)) - } - - log.Printf("[DEBUG] Creating Route53 hosted zone: %s", *req.Name) - var err error - resp, err := r53.CreateHostedZone(req) - if err != nil { - return err - } - - // Store the zone_id - zone := cleanZoneID(*resp.HostedZone.Id) - d.Set("zone_id", zone) - d.SetId(zone) - - // Wait until we are done initializing - wait := resource.StateChangeConf{ - Delay: 30 * time.Second, - Pending: []string{"PENDING"}, - Target: []string{"INSYNC"}, - Timeout: 10 * time.Minute, - MinTimeout: 2 * time.Second, - Refresh: func() (result interface{}, state string, err error) { - changeRequest := &route53.GetChangeInput{ - Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)), - } - return resourceAwsGoRoute53Wait(r53, changeRequest) - }, - } - _, err = wait.WaitForState() - if err != nil { - return err - } - return resourceAwsRoute53ZoneUpdate(d, meta) -} - -func resourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - zone, err := r53.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(d.Id())}) - if err != nil { - // Handle a deleted zone - if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { - d.SetId("") - return nil - } - return err - } - - // In the import case this will be empty - if _, ok := d.GetOk("zone_id"); !ok { - d.Set("zone_id", d.Id()) - } - if _, ok := d.GetOk("name"); !ok { - d.Set("name", zone.HostedZone.Name) - } - - if !*zone.HostedZone.Config.PrivateZone { - ns := make([]string, len(zone.DelegationSet.NameServers)) - for i := range zone.DelegationSet.NameServers { - ns[i] = *zone.DelegationSet.NameServers[i] - } - sort.Strings(ns) - if err := d.Set("name_servers", ns); err != nil { - return fmt.Errorf("[DEBUG] Error setting name servers for: %s, error: 
%#v", d.Id(), err) - } - } else { - ns, err := getNameServers(d.Id(), d.Get("name").(string), r53) - if err != nil { - return err - } - if err := d.Set("name_servers", ns); err != nil { - return fmt.Errorf("[DEBUG] Error setting name servers for: %s, error: %#v", d.Id(), err) - } - - // In the import case we just associate it with the first VPC - if _, ok := d.GetOk("vpc_id"); !ok { - if len(zone.VPCs) > 1 { - return fmt.Errorf( - "Can't import a route53_zone with more than one VPC attachment") - } - - if len(zone.VPCs) > 0 { - d.Set("vpc_id", zone.VPCs[0].VPCId) - d.Set("vpc_region", zone.VPCs[0].VPCRegion) - } - } - - var associatedVPC *route53.VPC - for _, vpc := range zone.VPCs { - if *vpc.VPCId == d.Get("vpc_id") { - associatedVPC = vpc - break - } - } - if associatedVPC == nil { - return fmt.Errorf("[DEBUG] VPC: %v is not associated with Zone: %v", d.Get("vpc_id"), d.Id()) - } - } - - if zone.DelegationSet != nil && zone.DelegationSet.Id != nil { - d.Set("delegation_set_id", cleanDelegationSetId(*zone.DelegationSet.Id)) - } - - if zone.HostedZone != nil && zone.HostedZone.Config != nil && zone.HostedZone.Config.Comment != nil { - d.Set("comment", zone.HostedZone.Config.Comment) - } - - // get tags - req := &route53.ListTagsForResourceInput{ - ResourceId: aws.String(d.Id()), - ResourceType: aws.String("hostedzone"), - } - - resp, err := r53.ListTagsForResource(req) - if err != nil { - return err - } - - var tags []*route53.Tag - if resp.ResourceTagSet != nil { - tags = resp.ResourceTagSet.Tags - } - - if err := d.Set("tags", tagsToMapR53(tags)); err != nil { - return err - } - - return nil -} - -func resourceAwsRoute53ZoneUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).r53conn - - d.Partial(true) - - if d.HasChange("comment") { - zoneInput := route53.UpdateHostedZoneCommentInput{ - Id: aws.String(d.Id()), - Comment: aws.String(d.Get("comment").(string)), - } - - _, err := conn.UpdateHostedZoneComment(&zoneInput) - if err != 
nil { - return err - } else { - d.SetPartial("comment") - } - } - - if err := setTagsR53(conn, d, "hostedzone"); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - - return resourceAwsRoute53ZoneRead(d, meta) -} - -func resourceAwsRoute53ZoneDelete(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - if d.Get("force_destroy").(bool) { - if err := deleteAllRecordsInHostedZoneId(d.Id(), d.Get("name").(string), r53); err != nil { - return errwrap.Wrapf("{{err}}", err) - } - } - - log.Printf("[DEBUG] Deleting Route53 hosted zone: %s (ID: %s)", - d.Get("name").(string), d.Id()) - _, err := r53.DeleteHostedZone(&route53.DeleteHostedZoneInput{Id: aws.String(d.Id())}) - if err != nil { - if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { - log.Printf("[DEBUG] No matching Route 53 Zone found for: %s, removing from state file", d.Id()) - d.SetId("") - return nil - } - return err - } - - return nil -} - -func deleteAllRecordsInHostedZoneId(hostedZoneId, hostedZoneName string, conn *route53.Route53) error { - input := &route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(hostedZoneId), - } - - var lastDeleteErr, lastErrorFromWaiter error - var pageNum = 0 - err := conn.ListResourceRecordSetsPages(input, func(page *route53.ListResourceRecordSetsOutput, isLastPage bool) bool { - sets := page.ResourceRecordSets - pageNum += 1 - - changes := make([]*route53.Change, 0) - // 100 items per page returned by default - for _, set := range sets { - if strings.TrimSuffix(*set.Name, ".") == strings.TrimSuffix(hostedZoneName, ".") && (*set.Type == "NS" || *set.Type == "SOA") { - // Zone NS & SOA records cannot be deleted - continue - } - changes = append(changes, &route53.Change{ - Action: aws.String("DELETE"), - ResourceRecordSet: set, - }) - } - log.Printf("[DEBUG] Deleting %d records (page %d) from %s", - len(changes), pageNum, hostedZoneId) - - req := 
&route53.ChangeResourceRecordSetsInput{ - HostedZoneId: aws.String(hostedZoneId), - ChangeBatch: &route53.ChangeBatch{ - Comment: aws.String("Deleted by Terraform"), - Changes: changes, - }, - } - - var resp interface{} - resp, lastDeleteErr = deleteRoute53RecordSet(conn, req) - if out, ok := resp.(*route53.ChangeResourceRecordSetsOutput); ok { - log.Printf("[DEBUG] Waiting for change batch to become INSYNC: %#v", out) - if out.ChangeInfo != nil && out.ChangeInfo.Id != nil { - lastErrorFromWaiter = waitForRoute53RecordSetToSync(conn, cleanChangeID(*out.ChangeInfo.Id)) - } else { - log.Printf("[DEBUG] Change info was empty") - } - } else { - log.Printf("[DEBUG] Unable to wait for change batch because of an error: %s", lastDeleteErr) - } - - return !isLastPage - }) - if err != nil { - return fmt.Errorf("Failed listing/deleting record sets: %s\nLast error from deletion: %s\nLast error from waiter: %s", - err, lastDeleteErr, lastErrorFromWaiter) - } - - return nil -} - -func resourceAwsGoRoute53Wait(r53 *route53.Route53, ref *route53.GetChangeInput) (result interface{}, state string, err error) { - - status, err := r53.GetChange(ref) - if err != nil { - return nil, "UNKNOWN", err - } - return true, *status.ChangeInfo.Status, nil -} - -// cleanChangeID is used to remove the leading /change/ -func cleanChangeID(ID string) string { - return cleanPrefix(ID, "/change/") -} - -// cleanZoneID is used to remove the leading /hostedzone/ -func cleanZoneID(ID string) string { - return cleanPrefix(ID, "/hostedzone/") -} - -// cleanPrefix removes a string prefix from an ID -func cleanPrefix(ID, prefix string) string { - if strings.HasPrefix(ID, prefix) { - ID = strings.TrimPrefix(ID, prefix) - } - return ID -} - -func getNameServers(zoneId string, zoneName string, r53 *route53.Route53) ([]string, error) { - resp, err := r53.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{ - HostedZoneId: aws.String(zoneId), - StartRecordName: aws.String(zoneName), - StartRecordType: 
aws.String("NS"), - }) - if err != nil { - return nil, err - } - if len(resp.ResourceRecordSets) == 0 { - return nil, nil - } - ns := make([]string, len(resp.ResourceRecordSets[0].ResourceRecords)) - for i := range resp.ResourceRecordSets[0].ResourceRecords { - ns[i] = *resp.ResourceRecordSets[0].ResourceRecords[i].Value - } - sort.Strings(ns) - return ns, nil -} diff --git a/builtin/providers/aws/resource_aws_route53_zone_association.go b/builtin/providers/aws/resource_aws_route53_zone_association.go deleted file mode 100644 index c416095ec..000000000 --- a/builtin/providers/aws/resource_aws_route53_zone_association.go +++ /dev/null @@ -1,149 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/route53" -) - -func resourceAwsRoute53ZoneAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRoute53ZoneAssociationCreate, - Read: resourceAwsRoute53ZoneAssociationRead, - Update: resourceAwsRoute53ZoneAssociationUpdate, - Delete: resourceAwsRoute53ZoneAssociationDelete, - - Schema: map[string]*schema.Schema{ - "zone_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "vpc_region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsRoute53ZoneAssociationCreate(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - - req := &route53.AssociateVPCWithHostedZoneInput{ - HostedZoneId: aws.String(d.Get("zone_id").(string)), - VPC: &route53.VPC{ - VPCId: aws.String(d.Get("vpc_id").(string)), - VPCRegion: aws.String(meta.(*AWSClient).region), - }, - Comment: aws.String("Managed by Terraform"), - } - if w := d.Get("vpc_region"); 
w != "" { - req.VPC.VPCRegion = aws.String(w.(string)) - } - - log.Printf("[DEBUG] Associating Route53 Private Zone %s with VPC %s with region %s", *req.HostedZoneId, *req.VPC.VPCId, *req.VPC.VPCRegion) - var err error - resp, err := r53.AssociateVPCWithHostedZone(req) - if err != nil { - return err - } - - // Store association id - d.SetId(fmt.Sprintf("%s:%s", *req.HostedZoneId, *req.VPC.VPCId)) - d.Set("vpc_region", req.VPC.VPCRegion) - - // Wait until we are done initializing - wait := resource.StateChangeConf{ - Delay: 30 * time.Second, - Pending: []string{"PENDING"}, - Target: []string{"INSYNC"}, - Timeout: 10 * time.Minute, - MinTimeout: 2 * time.Second, - Refresh: func() (result interface{}, state string, err error) { - changeRequest := &route53.GetChangeInput{ - Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)), - } - return resourceAwsGoRoute53Wait(r53, changeRequest) - }, - } - _, err = wait.WaitForState() - if err != nil { - return err - } - - return resourceAwsRoute53ZoneAssociationUpdate(d, meta) -} - -func resourceAwsRoute53ZoneAssociationRead(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(d.Id()) - zone, err := r53.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) - if err != nil { - // Handle a deleted zone - if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { - d.SetId("") - return nil - } - return err - } - - for _, vpc := range zone.VPCs { - if vpc_id == *vpc.VPCId { - // association is there, return - return nil - } - } - - // no association found - d.SetId("") - return nil -} - -func resourceAwsRoute53ZoneAssociationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsRoute53ZoneAssociationRead(d, meta) -} - -func resourceAwsRoute53ZoneAssociationDelete(d *schema.ResourceData, meta interface{}) error { - r53 := meta.(*AWSClient).r53conn - zone_id, vpc_id := 
resourceAwsRoute53ZoneAssociationParseId(d.Id()) - log.Printf("[DEBUG] Deleting Route53 Private Zone (%s) association (VPC: %s)", - zone_id, vpc_id) - - req := &route53.DisassociateVPCFromHostedZoneInput{ - HostedZoneId: aws.String(zone_id), - VPC: &route53.VPC{ - VPCId: aws.String(vpc_id), - VPCRegion: aws.String(d.Get("vpc_region").(string)), - }, - Comment: aws.String("Managed by Terraform"), - } - - _, err := r53.DisassociateVPCFromHostedZone(req) - if err != nil { - return err - } - - return nil -} - -func resourceAwsRoute53ZoneAssociationParseId(id string) (zone_id, vpc_id string) { - parts := strings.SplitN(id, ":", 2) - zone_id = parts[0] - vpc_id = parts[1] - return -} diff --git a/builtin/providers/aws/resource_aws_route53_zone_association_test.go b/builtin/providers/aws/resource_aws_route53_zone_association_test.go deleted file mode 100644 index 7817a113c..000000000 --- a/builtin/providers/aws/resource_aws_route53_zone_association_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestAccAWSRoute53ZoneAssociation_basic(t *testing.T) { - var zone route53.HostedZone - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ZoneAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53ZoneAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneAssociationExists("aws_route53_zone_association.foobar", &zone), - ), - }, - }, - }) -} - -func TestAccAWSRoute53ZoneAssociation_region(t *testing.T) { - var zone route53.HostedZone - - // record the initialized providers so that we can use them to - // check for the instances in each region - 
var providers []*schema.Provider - providerFactories := map[string]terraform.ResourceProviderFactory{ - "aws": func() (terraform.ResourceProvider, error) { - p := Provider() - providers = append(providers, p.(*schema.Provider)) - return p, nil - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: providerFactories, - CheckDestroy: testAccCheckRoute53ZoneAssociationDestroyWithProviders(&providers), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53ZoneAssociationRegionConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneAssociationExistsWithProviders("aws_route53_zone_association.foobar", &zone, &providers), - ), - }, - }, - }) -} - -func testAccCheckRoute53ZoneAssociationDestroy(s *terraform.State) error { - return testAccCheckRoute53ZoneAssociationDestroyWithProvider(s, testAccProvider) -} - -func testAccCheckRoute53ZoneAssociationDestroyWithProviders(providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckRoute53ZoneAssociationDestroyWithProvider(s, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckRoute53ZoneAssociationDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*AWSClient).r53conn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route53_zone_association" { - continue - } - - zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(rs.Primary.ID) - - resp, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) - if err != nil { - exists := false - for _, vpc := range resp.VPCs { - if vpc_id == *vpc.VPCId { - exists = true - } - } - if exists { - return fmt.Errorf("VPC: %v is still associated to HostedZone: %v", vpc_id, zone_id) - } - } - } - return nil -} - -func 
testAccCheckRoute53ZoneAssociationExists(n string, zone *route53.HostedZone) resource.TestCheckFunc { - return func(s *terraform.State) error { - return testAccCheckRoute53ZoneAssociationExistsWithProvider(s, n, zone, testAccProvider) - } -} - -func testAccCheckRoute53ZoneAssociationExistsWithProviders(n string, zone *route53.HostedZone, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckRoute53ZoneAssociationExistsWithProvider(s, n, zone, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckRoute53ZoneAssociationExistsWithProvider(s *terraform.State, n string, zone *route53.HostedZone, provider *schema.Provider) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No zone association ID is set") - } - - zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(rs.Primary.ID) - - conn := provider.Meta().(*AWSClient).r53conn - resp, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) - if err != nil { - return fmt.Errorf("Hosted zone err: %v", err) - } - - exists := false - for _, vpc := range resp.VPCs { - if vpc_id == *vpc.VPCId { - exists = true - } - } - if !exists { - return fmt.Errorf("Hosted zone association not found") - } - - *zone = *resp.HostedZone - return nil -} - -const testAccRoute53ZoneAssociationConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.6.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true -} - -resource "aws_vpc" "bar" { - cidr_block = "10.7.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true -} - -resource "aws_route53_zone" "foo" { - name = "foo.com" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route53_zone_association" "foobar" { - zone_id = "${aws_route53_zone.foo.id}" - vpc_id = 
"${aws_vpc.bar.id}" -} -` - -const testAccRoute53ZoneAssociationRegionConfig = ` -provider "aws" { - alias = "west" - region = "us-west-2" -} - -provider "aws" { - alias = "east" - region = "us-east-1" -} - -resource "aws_vpc" "foo" { - provider = "aws.west" - cidr_block = "10.6.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true -} - -resource "aws_vpc" "bar" { - provider = "aws.east" - cidr_block = "10.7.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true -} - -resource "aws_route53_zone" "foo" { - provider = "aws.west" - name = "foo.com" - vpc_id = "${aws_vpc.foo.id}" - vpc_region = "us-west-2" -} - -resource "aws_route53_zone_association" "foobar" { - provider = "aws.west" - zone_id = "${aws_route53_zone.foo.id}" - vpc_id = "${aws_vpc.bar.id}" - vpc_region = "us-east-1" -} -` diff --git a/builtin/providers/aws/resource_aws_route53_zone_test.go b/builtin/providers/aws/resource_aws_route53_zone_test.go deleted file mode 100644 index ee1b5d6d6..000000000 --- a/builtin/providers/aws/resource_aws_route53_zone_test.go +++ /dev/null @@ -1,483 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "sort" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" -) - -func TestCleanPrefix(t *testing.T) { - cases := []struct { - Input, Prefix, Output string - }{ - {"/hostedzone/foo", "/hostedzone/", "foo"}, - {"/change/foo", "/change/", "foo"}, - {"/bar", "/test", "/bar"}, - } - - for _, tc := range cases { - actual := cleanPrefix(tc.Input, tc.Prefix) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestCleanZoneID(t *testing.T) { - cases := []struct { - Input, Output string - }{ - {"/hostedzone/foo", "foo"}, - {"/change/foo", "/change/foo"}, - {"/bar", "/bar"}, - } - 
- for _, tc := range cases { - actual := cleanZoneID(tc.Input) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestCleanChangeID(t *testing.T) { - cases := []struct { - Input, Output string - }{ - {"/hostedzone/foo", "/hostedzone/foo"}, - {"/change/foo", "foo"}, - {"/bar", "/bar"}, - } - - for _, tc := range cases { - actual := cleanChangeID(tc.Input) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestAccAWSRoute53Zone_basic(t *testing.T) { - var zone route53.GetHostedZoneOutput - var td route53.ResourceTagSet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_zone.main", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53ZoneConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone), - testAccLoadTagsR53(&zone, &td), - testAccCheckTagsR53(&td.Tags, "foo", "bar"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Zone_forceDestroy(t *testing.T) { - var zone, zoneWithDot route53.GetHostedZoneOutput - - // record the initialized providers so that we can use them to - // check for the instances in each region - var providers []*schema.Provider - providerFactories := map[string]terraform.ResourceProviderFactory{ - "aws": func() (terraform.ResourceProvider, error) { - p := Provider() - providers = append(providers, p.(*schema.Provider)) - return p, nil - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_zone.destroyable", - ProviderFactories: providerFactories, - CheckDestroy: testAccCheckRoute53ZoneDestroyWithProviders(&providers), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53ZoneConfig_forceDestroy, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckRoute53ZoneExistsWithProviders("aws_route53_zone.destroyable", &zone, &providers), - // Add >100 records to verify pagination works ok - testAccCreateRandomRoute53RecordsInZoneIdWithProviders(&providers, &zone, 100), - testAccCreateRandomRoute53RecordsInZoneIdWithProviders(&providers, &zone, 5), - - testAccCheckRoute53ZoneExistsWithProviders("aws_route53_zone.with_trailing_dot", &zoneWithDot, &providers), - // Add >100 records to verify pagination works ok - testAccCreateRandomRoute53RecordsInZoneIdWithProviders(&providers, &zoneWithDot, 100), - testAccCreateRandomRoute53RecordsInZoneIdWithProviders(&providers, &zoneWithDot, 5), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Zone_updateComment(t *testing.T) { - var zone route53.GetHostedZoneOutput - var td route53.ResourceTagSet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_zone.main", - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53ZoneConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone), - testAccLoadTagsR53(&zone, &td), - testAccCheckTagsR53(&td.Tags, "foo", "bar"), - resource.TestCheckResourceAttr( - "aws_route53_zone.main", "comment", "Custom comment"), - ), - }, - - resource.TestStep{ - Config: testAccRoute53ZoneConfigUpdateComment, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone), - testAccLoadTagsR53(&zone, &td), - resource.TestCheckResourceAttr( - "aws_route53_zone.main", "comment", "Change Custom Comment"), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Zone_private_basic(t *testing.T) { - var zone route53.GetHostedZoneOutput - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_zone.main", - Providers: testAccProviders, - CheckDestroy: 
testAccCheckRoute53ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53PrivateZoneConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone), - testAccCheckRoute53ZoneAssociatesWithVpc("aws_vpc.main", &zone), - ), - }, - }, - }) -} - -func TestAccAWSRoute53Zone_private_region(t *testing.T) { - var zone route53.GetHostedZoneOutput - - // record the initialized providers so that we can use them to - // check for the instances in each region - var providers []*schema.Provider - providerFactories := map[string]terraform.ResourceProviderFactory{ - "aws": func() (terraform.ResourceProvider, error) { - p := Provider() - providers = append(providers, p.(*schema.Provider)) - return p, nil - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route53_zone.main", - ProviderFactories: providerFactories, - CheckDestroy: testAccCheckRoute53ZoneDestroyWithProviders(&providers), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRoute53PrivateZoneRegionConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExistsWithProviders("aws_route53_zone.main", &zone, &providers), - testAccCheckRoute53ZoneAssociatesWithVpc("aws_vpc.main", &zone), - ), - }, - }, - }) -} - -func testAccCheckRoute53ZoneDestroy(s *terraform.State) error { - return testAccCheckRoute53ZoneDestroyWithProvider(s, testAccProvider) -} - -func testAccCheckRoute53ZoneDestroyWithProviders(providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckRoute53ZoneDestroyWithProvider(s, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckRoute53ZoneDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*AWSClient).r53conn - 
for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route53_zone" { - continue - } - - _, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(rs.Primary.ID)}) - if err == nil { - return fmt.Errorf("Hosted zone still exists") - } - } - return nil -} - -func testAccCreateRandomRoute53RecordsInZoneIdWithProviders(providers *[]*schema.Provider, - zone *route53.GetHostedZoneOutput, recordsCount int) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCreateRandomRoute53RecordsInZoneId(provider, zone, recordsCount); err != nil { - return err - } - } - return nil - } -} - -func testAccCreateRandomRoute53RecordsInZoneId(provider *schema.Provider, zone *route53.GetHostedZoneOutput, recordsCount int) error { - conn := provider.Meta().(*AWSClient).r53conn - - var changes []*route53.Change - if recordsCount > 100 { - return fmt.Errorf("Route53 API only allows 100 record sets in a single batch") - } - for i := 0; i < recordsCount; i++ { - changes = append(changes, &route53.Change{ - Action: aws.String("UPSERT"), - ResourceRecordSet: &route53.ResourceRecordSet{ - Name: aws.String(fmt.Sprintf("%d-tf-acc-random.%s", acctest.RandInt(), *zone.HostedZone.Name)), - Type: aws.String("CNAME"), - ResourceRecords: []*route53.ResourceRecord{ - &route53.ResourceRecord{Value: aws.String(fmt.Sprintf("random.%s", *zone.HostedZone.Name))}, - }, - TTL: aws.Int64(int64(30)), - }, - }) - } - - req := &route53.ChangeResourceRecordSetsInput{ - HostedZoneId: zone.HostedZone.Id, - ChangeBatch: &route53.ChangeBatch{ - Comment: aws.String("Generated by Terraform"), - Changes: changes, - }, - } - log.Printf("[DEBUG] Change set: %s\n", *req) - resp, err := changeRoute53RecordSet(conn, req) - if err != nil { - return err - } - changeInfo := resp.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo - err = waitForRoute53RecordSetToSync(conn, 
cleanChangeID(*changeInfo.Id)) - return err -} - -func testAccCheckRoute53ZoneExists(n string, zone *route53.GetHostedZoneOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - return testAccCheckRoute53ZoneExistsWithProvider(s, n, zone, testAccProvider) - } -} - -func testAccCheckRoute53ZoneExistsWithProviders(n string, zone *route53.GetHostedZoneOutput, providers *[]*schema.Provider) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, provider := range *providers { - if provider.Meta() == nil { - continue - } - if err := testAccCheckRoute53ZoneExistsWithProvider(s, n, zone, provider); err != nil { - return err - } - } - return nil - } -} - -func testAccCheckRoute53ZoneExistsWithProvider(s *terraform.State, n string, zone *route53.GetHostedZoneOutput, provider *schema.Provider) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No hosted zone ID is set") - } - - conn := provider.Meta().(*AWSClient).r53conn - resp, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(rs.Primary.ID)}) - if err != nil { - return fmt.Errorf("Hosted zone err: %v", err) - } - - aws_comment := *resp.HostedZone.Config.Comment - rs_comment := rs.Primary.Attributes["comment"] - if rs_comment != "" && rs_comment != aws_comment { - return fmt.Errorf("Hosted zone with comment '%s' found but does not match '%s'", aws_comment, rs_comment) - } - - if !*resp.HostedZone.Config.PrivateZone { - sorted_ns := make([]string, len(resp.DelegationSet.NameServers)) - for i, ns := range resp.DelegationSet.NameServers { - sorted_ns[i] = *ns - } - sort.Strings(sorted_ns) - for idx, ns := range sorted_ns { - attribute := fmt.Sprintf("name_servers.%d", idx) - dsns := rs.Primary.Attributes[attribute] - if dsns != ns { - return fmt.Errorf("Got: %v for %v, Expected: %v", dsns, attribute, ns) - } - } - } - - *zone = *resp - return nil -} - -func 
testAccCheckRoute53ZoneAssociatesWithVpc(n string, zone *route53.GetHostedZoneOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC ID is set") - } - - var associatedVPC *route53.VPC - for _, vpc := range zone.VPCs { - if *vpc.VPCId == rs.Primary.ID { - associatedVPC = vpc - } - } - if associatedVPC == nil { - return fmt.Errorf("VPC: %v is not associated to Zone: %v", n, cleanZoneID(*zone.HostedZone.Id)) - } - return nil - } -} - -func testAccLoadTagsR53(zone *route53.GetHostedZoneOutput, td *route53.ResourceTagSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).r53conn - - zone := cleanZoneID(*zone.HostedZone.Id) - req := &route53.ListTagsForResourceInput{ - ResourceId: aws.String(zone), - ResourceType: aws.String("hostedzone"), - } - - resp, err := conn.ListTagsForResource(req) - if err != nil { - return err - } - - if resp.ResourceTagSet != nil { - *td = *resp.ResourceTagSet - } - - return nil - } -} - -const testAccRoute53ZoneConfig = ` -resource "aws_route53_zone" "main" { - name = "hashicorp.com." - comment = "Custom comment" - - tags { - foo = "bar" - Name = "tf-route53-tag-test" - } -} -` - -const testAccRoute53ZoneConfig_forceDestroy = ` -resource "aws_route53_zone" "destroyable" { - name = "terraform.io" - force_destroy = true -} - -resource "aws_route53_zone" "with_trailing_dot" { - name = "hashicorptest.io." - force_destroy = true -} -` - -const testAccRoute53ZoneConfigUpdateComment = ` -resource "aws_route53_zone" "main" { - name = "hashicorp.com." 
- comment = "Change Custom Comment" - - tags { - foo = "bar" - Name = "tf-route53-tag-test" - } -} -` - -const testAccRoute53PrivateZoneConfig = ` -resource "aws_vpc" "main" { - cidr_block = "172.29.0.0/24" - instance_tenancy = "default" - enable_dns_support = true - enable_dns_hostnames = true -} - -resource "aws_route53_zone" "main" { - name = "hashicorp.com." - vpc_id = "${aws_vpc.main.id}" -} -` - -const testAccRoute53PrivateZoneRegionConfig = ` -provider "aws" { - alias = "west" - region = "us-west-2" -} - -provider "aws" { - alias = "east" - region = "us-east-1" -} - -resource "aws_vpc" "main" { - provider = "aws.east" - cidr_block = "172.29.0.0/24" - instance_tenancy = "default" - enable_dns_support = true - enable_dns_hostnames = true -} - -resource "aws_route53_zone" "main" { - provider = "aws.west" - name = "hashicorp.com." - vpc_id = "${aws_vpc.main.id}" - vpc_region = "us-east-1" -} -` diff --git a/builtin/providers/aws/resource_aws_route_table.go b/builtin/providers/aws/resource_aws_route_table.go deleted file mode 100644 index f5c72e2d5..000000000 --- a/builtin/providers/aws/resource_aws_route_table.go +++ /dev/null @@ -1,525 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRouteTable() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRouteTableCreate, - Read: resourceAwsRouteTableRead, - Update: resourceAwsRouteTableUpdate, - Delete: resourceAwsRouteTableDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsRouteTableImportState, - }, - - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - - "propagating_vgws": { - Type: 
schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "route": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - }, - - "egress_only_gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "instance_id": { - Type: schema.TypeString, - Optional: true, - }, - - "nat_gateway_id": { - Type: schema.TypeString, - Optional: true, - }, - - "vpc_peering_connection_id": { - Type: schema.TypeString, - Optional: true, - }, - - "network_interface_id": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAwsRouteTableHash, - }, - }, - } -} - -func resourceAwsRouteTableCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Create the routing table - createOpts := &ec2.CreateRouteTableInput{ - VpcId: aws.String(d.Get("vpc_id").(string)), - } - log.Printf("[DEBUG] RouteTable create config: %#v", createOpts) - - resp, err := conn.CreateRouteTable(createOpts) - if err != nil { - return fmt.Errorf("Error creating route table: %s", err) - } - - // Get the ID and store it - rt := resp.RouteTable - d.SetId(*rt.RouteTableId) - log.Printf("[INFO] Route Table ID: %s", d.Id()) - - // Wait for the route table to become available - log.Printf( - "[DEBUG] Waiting for route table (%s) to become available", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"ready"}, - Refresh: resourceAwsRouteTableStateRefreshFunc(conn, d.Id()), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for route table (%s) to become available: %s", - 
d.Id(), err) - } - - return resourceAwsRouteTableUpdate(d, meta) -} - -func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if rtRaw == nil { - d.SetId("") - return nil - } - - rt := rtRaw.(*ec2.RouteTable) - d.Set("vpc_id", rt.VpcId) - - propagatingVGWs := make([]string, 0, len(rt.PropagatingVgws)) - for _, vgw := range rt.PropagatingVgws { - propagatingVGWs = append(propagatingVGWs, *vgw.GatewayId) - } - d.Set("propagating_vgws", propagatingVGWs) - - // Create an empty schema.Set to hold all routes - route := &schema.Set{F: resourceAwsRouteTableHash} - - // Loop through the routes and add them to the set - for _, r := range rt.Routes { - if r.GatewayId != nil && *r.GatewayId == "local" { - continue - } - - if r.Origin != nil && *r.Origin == "EnableVgwRoutePropagation" { - continue - } - - if r.DestinationPrefixListId != nil { - // Skipping because VPC endpoint routes are handled separately - // See aws_vpc_endpoint - continue - } - - m := make(map[string]interface{}) - - if r.DestinationCidrBlock != nil { - m["cidr_block"] = *r.DestinationCidrBlock - } - if r.DestinationIpv6CidrBlock != nil { - m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock - } - if r.EgressOnlyInternetGatewayId != nil { - m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId - } - if r.GatewayId != nil { - m["gateway_id"] = *r.GatewayId - } - if r.NatGatewayId != nil { - m["nat_gateway_id"] = *r.NatGatewayId - } - if r.InstanceId != nil { - m["instance_id"] = *r.InstanceId - } - if r.VpcPeeringConnectionId != nil { - m["vpc_peering_connection_id"] = *r.VpcPeeringConnectionId - } - if r.NetworkInterfaceId != nil { - m["network_interface_id"] = *r.NetworkInterfaceId - } - - route.Add(m) - } - d.Set("route", route) - - // Tags - d.Set("tags", tagsToMap(rt.Tags)) - - return nil -} - -func 
resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if d.HasChange("propagating_vgws") { - o, n := d.GetChange("propagating_vgws") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - // Now first loop through all the old propagations and disable any obsolete ones - for _, vgw := range remove { - id := vgw.(string) - - // Disable the propagation as it no longer exists in the config - log.Printf( - "[INFO] Deleting VGW propagation from %s: %s", - d.Id(), id) - _, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{ - RouteTableId: aws.String(d.Id()), - GatewayId: aws.String(id), - }) - if err != nil { - return err - } - } - - // Make sure we save the state of the currently configured rules - propagatingVGWs := os.Intersection(ns) - d.Set("propagating_vgws", propagatingVGWs) - - // Then loop through all the newly configured propagations and enable them - for _, vgw := range add { - id := vgw.(string) - - var err error - for i := 0; i < 5; i++ { - log.Printf("[INFO] Enabling VGW propagation for %s: %s", d.Id(), id) - _, err = conn.EnableVgwRoutePropagation(&ec2.EnableVgwRoutePropagationInput{ - RouteTableId: aws.String(d.Id()), - GatewayId: aws.String(id), - }) - if err == nil { - break - } - - // If we get a Gateway.NotAttached, it is usually some - // eventually consistency stuff. So we have to just wait a - // bit... 
- ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "Gateway.NotAttached" { - time.Sleep(20 * time.Second) - continue - } - } - if err != nil { - return err - } - - propagatingVGWs.Add(vgw) - d.Set("propagating_vgws", propagatingVGWs) - } - } - - // Check if the route set as a whole has changed - if d.HasChange("route") { - o, n := d.GetChange("route") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // Now first loop through all the old routes and delete any obsolete ones - for _, route := range ors.List() { - m := route.(map[string]interface{}) - - deleteOpts := &ec2.DeleteRouteInput{ - RouteTableId: aws.String(d.Id()), - } - - if s := m["ipv6_cidr_block"].(string); s != "" { - deleteOpts.DestinationIpv6CidrBlock = aws.String(s) - - log.Printf( - "[INFO] Deleting route from %s: %s", - d.Id(), m["ipv6_cidr_block"].(string)) - } - - if s := m["cidr_block"].(string); s != "" { - deleteOpts.DestinationCidrBlock = aws.String(s) - - log.Printf( - "[INFO] Deleting route from %s: %s", - d.Id(), m["cidr_block"].(string)) - } - - _, err := conn.DeleteRoute(deleteOpts) - if err != nil { - return err - } - } - - // Make sure we save the state of the currently configured rules - routes := o.(*schema.Set).Intersection(n.(*schema.Set)) - d.Set("route", routes) - - // Then loop through all the newly configured routes and create them - for _, route := range nrs.List() { - m := route.(map[string]interface{}) - - opts := ec2.CreateRouteInput{ - RouteTableId: aws.String(d.Id()), - } - - if s := m["vpc_peering_connection_id"].(string); s != "" { - opts.VpcPeeringConnectionId = aws.String(s) - } - - if s := m["network_interface_id"].(string); s != "" { - opts.NetworkInterfaceId = aws.String(s) - } - - if s := m["instance_id"].(string); s != "" { - opts.InstanceId = aws.String(s) - } - - if s := m["ipv6_cidr_block"].(string); s != "" { - opts.DestinationIpv6CidrBlock = aws.String(s) - } - - if s := 
m["cidr_block"].(string); s != "" { - opts.DestinationCidrBlock = aws.String(s) - } - - if s := m["gateway_id"].(string); s != "" { - opts.GatewayId = aws.String(s) - } - - if s := m["egress_only_gateway_id"].(string); s != "" { - opts.EgressOnlyInternetGatewayId = aws.String(s) - } - - if s := m["nat_gateway_id"].(string); s != "" { - opts.NatGatewayId = aws.String(s) - } - - log.Printf("[INFO] Creating route for %s: %#v", d.Id(), opts) - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.CreateRoute(&opts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidRouteTableID.NotFound" { - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - - routes.Add(route) - d.Set("route", routes) - } - } - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - return resourceAwsRouteTableRead(d, meta) -} - -func resourceAwsRouteTableDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // First request the routing table since we'll have to disassociate - // all the subnets first. - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if rtRaw == nil { - return nil - } - rt := rtRaw.(*ec2.RouteTable) - - // Do all the disassociations - for _, a := range rt.Associations { - log.Printf("[INFO] Disassociating association: %s", *a.RouteTableAssociationId) - _, err := conn.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ - AssociationId: a.RouteTableAssociationId, - }) - if err != nil { - // First check if the association ID is not found. If this - // is the case, then it was already disassociated somehow, - // and that is okay. 
- if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAssociationID.NotFound" { - err = nil - } - } - if err != nil { - return err - } - } - - // Delete the route table - log.Printf("[INFO] Deleting Route Table: %s", d.Id()) - _, err = conn.DeleteRouteTable(&ec2.DeleteRouteTableInput{ - RouteTableId: aws.String(d.Id()), - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { - return nil - } - - return fmt.Errorf("Error deleting route table: %s", err) - } - - // Wait for the route table to really destroy - log.Printf( - "[DEBUG] Waiting for route table (%s) to become destroyed", - d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ready"}, - Target: []string{}, - Refresh: resourceAwsRouteTableStateRefreshFunc(conn, d.Id()), - Timeout: 5 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for route table (%s) to become destroyed: %s", - d.Id(), err) - } - - return nil -} - -func resourceAwsRouteTableHash(v interface{}) int { - var buf bytes.Buffer - m, castOk := v.(map[string]interface{}) - if !castOk { - return 0 - } - - if v, ok := m["ipv6_cidr_block"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["cidr_block"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["gateway_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["egress_only_gateway_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - natGatewaySet := false - if v, ok := m["nat_gateway_id"]; ok { - natGatewaySet = v.(string) != "" - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - instanceSet := false - if v, ok := m["instance_id"]; ok { - instanceSet = v.(string) != "" - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, ok := m["vpc_peering_connection_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - if v, 
ok := m["network_interface_id"]; ok && !(instanceSet || natGatewaySet) { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -// resourceAwsRouteTableStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a RouteTable. -func resourceAwsRouteTableStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(id)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { - resp = nil - } else { - log.Printf("Error on RouteTableStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - rt := resp.RouteTables[0] - return rt, "ready", nil - } -} diff --git a/builtin/providers/aws/resource_aws_route_table_association.go b/builtin/providers/aws/resource_aws_route_table_association.go deleted file mode 100644 index eb2c19409..000000000 --- a/builtin/providers/aws/resource_aws_route_table_association.go +++ /dev/null @@ -1,155 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsRouteTableAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRouteTableAssociationCreate, - Read: resourceAwsRouteTableAssociationRead, - Update: resourceAwsRouteTableAssociationUpdate, - Delete: resourceAwsRouteTableAssociationDelete, - - Schema: map[string]*schema.Schema{ - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"route_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAwsRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf( - "[INFO] Creating route table association: %s => %s", - d.Get("subnet_id").(string), - d.Get("route_table_id").(string)) - - associationOpts := ec2.AssociateRouteTableInput{ - RouteTableId: aws.String(d.Get("route_table_id").(string)), - SubnetId: aws.String(d.Get("subnet_id").(string)), - } - - var resp *ec2.AssociateRouteTableOutput - var err error - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err = conn.AssociateRouteTable(&associationOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidRouteTableID.NotFound" { - return resource.RetryableError(awsErr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - - // Set the ID and return - d.SetId(*resp.AssociationId) - log.Printf("[INFO] Association ID: %s", d.Id()) - - return nil -} - -func resourceAwsRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Get the routing table that this association belongs to - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc( - conn, d.Get("route_table_id").(string))() - if err != nil { - return err - } - if rtRaw == nil { - return nil - } - rt := rtRaw.(*ec2.RouteTable) - - // Inspect that the association exists - found := false - for _, a := range rt.Associations { - if *a.RouteTableAssociationId == d.Id() { - found = true - d.Set("subnet_id", *a.SubnetId) - break - } - } - - if !found { - // It seems it doesn't exist anymore, so clear the ID - d.SetId("") - } - - return nil -} - -func resourceAwsRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf( - "[INFO] Creating 
route table association: %s => %s", - d.Get("subnet_id").(string), - d.Get("route_table_id").(string)) - - req := &ec2.ReplaceRouteTableAssociationInput{ - AssociationId: aws.String(d.Id()), - RouteTableId: aws.String(d.Get("route_table_id").(string)), - } - resp, err := conn.ReplaceRouteTableAssociation(req) - - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidAssociationID.NotFound" { - // Not found, so just create a new one - return resourceAwsRouteTableAssociationCreate(d, meta) - } - - return err - } - - // Update the ID - d.SetId(*resp.NewAssociationId) - log.Printf("[INFO] Association ID: %s", d.Id()) - - return nil -} - -func resourceAwsRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Deleting route table association: %s", d.Id()) - _, err := conn.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ - AssociationId: aws.String(d.Id()), - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidAssociationID.NotFound" { - return nil - } - - return fmt.Errorf("Error deleting route table association: %s", err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_route_table_association_test.go b/builtin/providers/aws/resource_aws_route_table_association_test.go deleted file mode 100644 index 138fda4ff..000000000 --- a/builtin/providers/aws/resource_aws_route_table_association_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRouteTableAssociation_basic(t *testing.T) { - var v, v2 ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckRouteTableAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRouteTableAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableAssociationExists( - "aws_route_table_association.foo", &v), - ), - }, - - resource.TestStep{ - Config: testAccRouteTableAssociationConfigChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableAssociationExists( - "aws_route_table_association.foo", &v2), - ), - }, - }, - }) -} - -func testAccCheckRouteTableAssociationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route_table_association" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.Attributes["route_table_id"])}, - }) - if err != nil { - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidRouteTableID.NotFound" { - return err - } - return nil - } - - rt := resp.RouteTables[0] - if len(rt.Associations) > 0 { - return fmt.Errorf( - "route table %s has associations", *rt.RouteTableId) - - } - } - - return nil -} - -func testAccCheckRouteTableAssociationExists(n string, v *ec2.RouteTable) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.Attributes["route_table_id"])}, - }) - if err != nil { - return err - } - if len(resp.RouteTables) == 0 { - return fmt.Errorf("RouteTable not found") - } - - *v = *resp.RouteTables[0] - - if 
len(v.Associations) == 0 { - return fmt.Errorf("no associations") - } - - return nil - } -} - -const testAccRouteTableAssociationConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.1.1.0/24" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - route { - cidr_block = "10.0.0.0/8" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_route_table_association" "foo" { - route_table_id = "${aws_route_table.foo.id}" - subnet_id = "${aws_subnet.foo.id}" -} -` - -const testAccRouteTableAssociationConfigChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.1.1.0/24" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "bar" { - vpc_id = "${aws_vpc.foo.id}" - route { - cidr_block = "10.0.0.0/8" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_route_table_association" "foo" { - route_table_id = "${aws_route_table.bar.id}" - subnet_id = "${aws_subnet.foo.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_route_table_test.go b/builtin/providers/aws/resource_aws_route_table_test.go deleted file mode 100644 index b4b764d37..000000000 --- a/builtin/providers/aws/resource_aws_route_table_test.go +++ /dev/null @@ -1,530 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRouteTable_basic(t *testing.T) { - var v ec2.RouteTable - - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", 
v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } - - testCheckChange := func(*terraform.State) error { - if len(v.Routes) != 3 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.3.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.4.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_route_table.foo", &v), - testCheck, - ), - }, - - { - Config: testAccRouteTableConfigChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_route_table.foo", &v), - testCheckChange, - ), - }, - }, - }) -} - -func TestAccAWSRouteTable_instance(t *testing.T) { - var v ec2.RouteTable - - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - 
- return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableConfigInstance, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_route_table.foo", &v), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSRouteTable_ipv6(t *testing.T) { - var v ec2.RouteTable - - testCheck := func(*terraform.State) error { - // Expect 3: 2 IPv6 (local + all outbound) + 1 IPv4 - if len(v.Routes) != 3 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableConfigIpv6, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists("aws_route_table.foo", &v), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSRouteTable_tags(t *testing.T) { - var route_table ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists("aws_route_table.foo", &route_table), - testAccCheckTags(&route_table.Tags, "foo", "bar"), - ), - }, - - { - Config: testAccRouteTableConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists("aws_route_table.foo", &route_table), - testAccCheckTags(&route_table.Tags, "foo", ""), - testAccCheckTags(&route_table.Tags, "bar", "baz"), - ), - }, - }, - }) -} - -// For GH-13545, Fixes panic on an empty route config block -func 
TestAccAWSRouteTable_panicEmptyRoute(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_route_table.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableConfigPanicEmptyRoute, - ExpectError: regexp.MustCompile("The request must contain the parameter destinationCidrBlock or destinationIpv6CidrBlock"), - }, - }, - }) -} - -func testAccCheckRouteTableDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route_table" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.RouteTables) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidRouteTableID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.RouteTables) == 0 { - return fmt.Errorf("RouteTable not found") - } - - *v = *resp.RouteTables[0] - - return nil - } -} - -// VPC Peering connections are prefixed with pcx -// Right now there is no VPC Peering resource -func TestAccAWSRouteTable_vpcPeering(t *testing.T) { - var v ec2.RouteTable 
- - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRouteTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRouteTableVpcPeeringConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_route_table.foo", &v), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSRouteTable_vgwRoutePropagation(t *testing.T) { - var v ec2.RouteTable - var vgw ec2.VpnGateway - - testCheck := func(*terraform.State) error { - if len(v.PropagatingVgws) != 1 { - return fmt.Errorf("bad propagating vgws: %#v", v.PropagatingVgws) - } - - propagatingVGWs := make(map[string]*ec2.PropagatingVgw) - for _, gw := range v.PropagatingVgws { - propagatingVGWs[*gw.GatewayId] = gw - } - - if _, ok := propagatingVGWs[*vgw.VpnGatewayId]; !ok { - return fmt.Errorf("bad propagating vgws: %#v", v.PropagatingVgws) - } - - return nil - - } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayDestroy, - testAccCheckRouteTableDestroy, - ), - Steps: []resource.TestStep{ - { - Config: testAccRouteTableVgwRoutePropagationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - "aws_route_table.foo", &v), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &vgw), - testCheck, - ), - }, - }, - }) -} - -const testAccRouteTableConfig = ` -resource "aws_vpc" "foo" { - cidr_block = 
"10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.2.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} -` - -const testAccRouteTableConfigChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.3.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" - } - - route { - cidr_block = "10.4.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} -` - -const testAccRouteTableConfigIpv6 = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_egress_only_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - ipv6_cidr_block = "::/0" - egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}" - } -} -` - -const testAccRouteTableConfigInstance = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_instance" "foo" { - # us-west-2 - ami = "ami-4fccb37f" - instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.2.0.0/16" - instance_id = "${aws_instance.foo.id}" - } -} -` - -const testAccRouteTableConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - foo = "bar" - } -} -` - -const testAccRouteTableConfigTagsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_route_table" "foo" { - vpc_id = 
"${aws_vpc.foo.id}" - - tags { - bar = "baz" - } -} -` - -// VPC Peering connections are prefixed with pcx -const testAccRouteTableVpcPeeringConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpc" "bar" { - cidr_block = "10.3.0.0/16" -} - -resource "aws_internet_gateway" "bar" { - vpc_id = "${aws_vpc.bar.id}" -} - -resource "aws_vpc_peering_connection" "foo" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" - tags { - foo = "bar" - } -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.2.0.0/16" - vpc_peering_connection_id = "${aws_vpc_peering_connection.foo.id}" - } -} -` - -const testAccRouteTableVgwRoutePropagationConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - propagating_vgws = ["${aws_vpn_gateway.foo.id}"] -} -` - -// For GH-13545 -const testAccRouteTableConfigPanicEmptyRoute = ` -resource "aws_vpc" "foo" { - cidr_block = "10.2.0.0/16" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - } -} -` diff --git a/builtin/providers/aws/resource_aws_route_test.go b/builtin/providers/aws/resource_aws_route_test.go deleted file mode 100644 index 24459689b..000000000 --- a/builtin/providers/aws/resource_aws_route_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSRoute_basic(t *testing.T) { - var route ec2.Route - - //aws creates a default route - testCheck := func(s *terraform.State) error { - if *route.DestinationCidrBlock != "10.3.0.0/16" { - return fmt.Errorf("Destination Cidr (Expected=%s, 
Actual=%s)\n", "10.3.0.0/16", *route.DestinationCidrBlock) - } - - name := "aws_internet_gateway.foo" - gwres, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s\n", name) - } - - if *route.GatewayId != gwres.Primary.ID { - return fmt.Errorf("Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.GatewayId) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.bar", &route), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSRoute_ipv6Support(t *testing.T) { - var route ec2.Route - - //aws creates a default route - testCheck := func(s *terraform.State) error { - - name := "aws_egress_only_internet_gateway.foo" - gwres, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s\n", name) - } - - if *route.EgressOnlyInternetGatewayId != gwres.Primary.ID { - return fmt.Errorf("Egress Only Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.EgressOnlyInternetGatewayId) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteConfigIpv6, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.bar", &route), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSRoute_ipv6ToInternetGateway(t *testing.T) { - var route ec2.Route - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteConfigIpv6InternetGateway, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.igw", &route), - ), - }, - }, - }) -} - -func TestAccAWSRoute_changeCidr(t *testing.T) { - var route ec2.Route - var routeTable ec2.RouteTable - - //aws creates a default route - testCheck := func(s *terraform.State) error { - if *route.DestinationCidrBlock != "10.3.0.0/16" { - return fmt.Errorf("Destination Cidr (Expected=%s, Actual=%s)\n", "10.3.0.0/16", *route.DestinationCidrBlock) - } - - name := "aws_internet_gateway.foo" - gwres, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s\n", name) - } - - if *route.GatewayId != gwres.Primary.ID { - return fmt.Errorf("Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.GatewayId) - } - - return nil - } - - testCheckChange := func(s *terraform.State) error { - if *route.DestinationCidrBlock != "10.2.0.0/16" { - return fmt.Errorf("Destination Cidr (Expected=%s, Actual=%s)\n", "10.2.0.0/16", *route.DestinationCidrBlock) - } - - name := "aws_internet_gateway.foo" - gwres, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s\n", name) - } - - if *route.GatewayId != gwres.Primary.ID { - return fmt.Errorf("Internet Gateway Id (Expected=%s, Actual=%s)\n", gwres.Primary.ID, *route.GatewayId) - } - - if rtlen := len(routeTable.Routes); rtlen != 2 { - return fmt.Errorf("Route Table has too many routes (Expected=%d, Actual=%d)\n", rtlen, 2) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.bar", &route), - testCheck, - ), - }, - { - Config: testAccAWSRouteBasicConfigChangeCidr, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.bar", &route), - 
testAccCheckRouteTableExists("aws_route_table.foo", &routeTable), - testCheckChange, - ), - }, - }, - }) -} - -func TestAccAWSRoute_noopdiff(t *testing.T) { - var route ec2.Route - var routeTable ec2.RouteTable - - testCheck := func(s *terraform.State) error { - return nil - } - - testCheckChange := func(s *terraform.State) error { - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteNoopChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.test", &route), - testCheck, - ), - }, - { - Config: testAccAWSRouteNoopChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.test", &route), - testAccCheckRouteTableExists("aws_route_table.test", &routeTable), - testCheckChange, - ), - }, - }, - }) -} - -func TestAccAWSRoute_doesNotCrashWithVPCEndpoint(t *testing.T) { - var route ec2.Route - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSRouteWithVPCEndpoint, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSRouteExists("aws_route.bar", &route), - ), - }, - }, - }) -} - -func testAccCheckAWSRouteExists(n string, res *ec2.Route) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s\n", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - r, err := findResourceRoute( - conn, - rs.Primary.Attributes["route_table_id"], - rs.Primary.Attributes["destination_cidr_block"], - rs.Primary.Attributes["destination_ipv6_cidr_block"], - ) - - if err != nil { - return err - } - - if r == nil { - 
return fmt.Errorf("Route not found") - } - - *res = *r - - return nil - } -} - -func testAccCheckAWSRouteDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - route, err := findResourceRoute( - conn, - rs.Primary.Attributes["route_table_id"], - rs.Primary.Attributes["destination_cidr_block"], - rs.Primary.Attributes["destination_ipv6_cidr_block"], - ) - - if route == nil && err == nil { - return nil - } - } - - return nil -} - -var testAccAWSRouteBasicConfig = fmt.Sprint(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route" "bar" { - route_table_id = "${aws_route_table.foo.id}" - destination_cidr_block = "10.3.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" -} -`) - -var testAccAWSRouteConfigIpv6InternetGateway = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_egress_only_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "external" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route" "igw" { - route_table_id = "${aws_route_table.external.id}" - destination_ipv6_cidr_block = "::/0" - gateway_id = "${aws_internet_gateway.foo.id}" -} - -`) - -var testAccAWSRouteConfigIpv6 = fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_egress_only_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route" "bar" { - route_table_id = "${aws_route_table.foo.id}" - 
destination_ipv6_cidr_block = "::/0" - egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}" -} - - -`) - -var testAccAWSRouteBasicConfigChangeCidr = fmt.Sprint(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route" "bar" { - route_table_id = "${aws_route_table.foo.id}" - destination_cidr_block = "10.2.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" -} -`) - -// Acceptance test if mixed inline and external routes are implemented -var testAccAWSRouteMixConfig = fmt.Sprint(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" - - route { - cidr_block = "10.2.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" - } -} - -resource "aws_route" "bar" { - route_table_id = "${aws_route_table.foo.id}" - destination_cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.foo.id}" -} -`) - -var testAccAWSRouteNoopChange = fmt.Sprint(` -resource "aws_vpc" "test" { - cidr_block = "10.10.0.0/16" -} - -resource "aws_route_table" "test" { - vpc_id = "${aws_vpc.test.id}" -} - -resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "10.10.10.0/24" -} - -resource "aws_route" "test" { - route_table_id = "${aws_route_table.test.id}" - destination_cidr_block = "0.0.0.0/0" - instance_id = "${aws_instance.nat.id}" -} - -resource "aws_instance" "nat" { - ami = "ami-9abea4fb" - instance_type = "t2.nano" - subnet_id = "${aws_subnet.test.id}" -} -`) - -var testAccAWSRouteWithVPCEndpoint = fmt.Sprint(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_internet_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - 
-resource "aws_route" "bar" { - route_table_id = "${aws_route_table.foo.id}" - destination_cidr_block = "10.3.0.0/16" - gateway_id = "${aws_internet_gateway.foo.id}" - - # Forcing endpoint to create before route - without this the crash is a race. - depends_on = ["aws_vpc_endpoint.baz"] -} - -resource "aws_vpc_endpoint" "baz" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = ["${aws_route_table.foo.id}"] -} -`) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go deleted file mode 100644 index 7da1ac18f..000000000 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ /dev/null @@ -1,1858 +0,0 @@ -package aws - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "net/url" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsS3Bucket() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsS3BucketCreate, - Read: resourceAwsS3BucketRead, - Update: resourceAwsS3BucketUpdate, - Delete: resourceAwsS3BucketDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsS3BucketImportState, - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"bucket_prefix"}, - }, - "bucket_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "bucket_domain_name": { - Type: schema.TypeString, - Computed: true, - }, - - "arn": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "acl": { - Type: schema.TypeString, - Default: "private", - Optional: true, - }, - - "policy": { - Type: schema.TypeString, - 
Optional: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - - "cors_rule": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allowed_headers": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "allowed_methods": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "allowed_origins": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "expose_headers": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "max_age_seconds": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - - "website": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "index_document": { - Type: schema.TypeString, - Optional: true, - }, - - "error_document": { - Type: schema.TypeString, - Optional: true, - }, - - "redirect_all_requests_to": { - Type: schema.TypeString, - ConflictsWith: []string{ - "website.0.index_document", - "website.0.error_document", - "website.0.routing_rules", - }, - Optional: true, - }, - - "routing_rules": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateJsonString, - StateFunc: func(v interface{}) string { - json, _ := normalizeJsonString(v) - return json - }, - }, - }, - }, - }, - - "hosted_zone_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "website_endpoint": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "website_domain": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "versioning": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "mfa_delete": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - - "logging": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_bucket": { - Type: schema.TypeString, - Required: true, - }, - "target_prefix": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"])) - buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"])) - return hashcode.String(buf.String()) - }, - }, - - "lifecycle_rule": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateS3BucketLifecycleRuleId, - }, - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - "abort_incomplete_multipart_upload_days": { - Type: schema.TypeInt, - Optional: true, - }, - "expiration": { - Type: schema.TypeSet, - Optional: true, - Set: expirationHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateS3BucketLifecycleTimestamp, - }, - "days": { - Type: schema.TypeInt, - Optional: true, - }, - "expired_object_delete_marker": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "noncurrent_version_expiration": { - Type: schema.TypeSet, - Optional: true, - Set: expirationHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "days": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - "transition": { - Type: schema.TypeSet, - Optional: true, - Set: transitionHash, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateS3BucketLifecycleTimestamp, - }, - "days": { - Type: schema.TypeInt, - Optional: true, - }, - "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateS3BucketLifecycleStorageClass, - }, - }, - }, - }, - "noncurrent_version_transition": { - Type: schema.TypeSet, - Optional: true, - Set: transitionHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "days": { - Type: schema.TypeInt, - Optional: true, - }, - "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateS3BucketLifecycleStorageClass, - }, - }, - }, - }, - }, - }, - }, - - "force_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "acceleration_status": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateS3BucketAccelerationStatus, - }, - - "request_payer": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateS3BucketRequestPayerType, - }, - - "replication_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role": { - Type: schema.TypeString, - Required: true, - }, - "rules": { - Type: schema.TypeSet, - Required: true, - Set: rulesHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateS3BucketReplicationRuleId, - }, - "destination": { - Type: schema.TypeSet, - MaxItems: 1, - MinItems: 1, - Required: true, - Set: destinationHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateArn, - }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateS3BucketReplicationDestinationStorageClass, - }, - }, - }, - }, - 
"prefix": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateS3BucketReplicationRulePrefix, - }, - "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateS3BucketReplicationRuleStatus, - }, - }, - }, - }, - }, - }, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - - // Get the bucket and acl - var bucket string - if v, ok := d.GetOk("bucket"); ok { - bucket = v.(string) - } else if v, ok := d.GetOk("bucket_prefix"); ok { - bucket = resource.PrefixedUniqueId(v.(string)) - } else { - bucket = resource.UniqueId() - } - d.Set("bucket", bucket) - acl := d.Get("acl").(string) - - log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) - - req := &s3.CreateBucketInput{ - Bucket: aws.String(bucket), - ACL: aws.String(acl), - } - - var awsRegion string - if region, ok := d.GetOk("region"); ok { - awsRegion = region.(string) - } else { - awsRegion = meta.(*AWSClient).region - } - log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion) - - // Special case us-east-1 region and do not set the LocationConstraint. 
- // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html - if awsRegion != "us-east-1" { - req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String(awsRegion), - } - } - - if err := validateS3BucketName(bucket, awsRegion); err != nil { - return fmt.Errorf("Error validating S3 bucket name: %s", err) - } - - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket) - _, err := s3conn.CreateBucket(req) - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "OperationAborted" { - log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) - return resource.RetryableError( - fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s", - bucket, err)) - } - } - if err != nil { - return resource.NonRetryableError(err) - } - - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating S3 bucket: %s", err) - } - - // Assign the bucket name as the resource ID - d.SetId(bucket) - - return resourceAwsS3BucketUpdate(d, meta) -} - -func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - if err := setTagsS3(s3conn, d); err != nil { - return fmt.Errorf("%q: %s", d.Get("bucket").(string), err) - } - - if d.HasChange("policy") { - if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("cors_rule") { - if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("website") { - if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("versioning") { - if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil { - return err - } - } - if d.HasChange("acl") { - if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil { - return err - } - } - - if 
d.HasChange("logging") { - if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("lifecycle_rule") { - if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("acceleration_status") { - if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("request_payer") { - if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil { - return err - } - } - - if d.HasChange("replication_configuration") { - if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil { - return err - } - } - - return resourceAwsS3BucketRead(d, meta) -} - -func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - - var err error - _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { - log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } else { - // some of the AWS SDK's errors can be empty strings, so let's add - // some additional context. 
- return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) - } - } - - // In the import case, we won't have this - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } - - d.Set("bucket_domain_name", bucketDomainName(d.Get("bucket").(string))) - - // Read the policy - if _, ok := d.GetOk("policy"); ok { - pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ - Bucket: aws.String(d.Id()), - }) - log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) - if err != nil { - if err := d.Set("policy", ""); err != nil { - return err - } - } else { - if v := pol.Policy; v == nil { - if err := d.Set("policy", ""); err != nil { - return err - } - } else { - policy, err := normalizeJsonString(*v) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - d.Set("policy", policy) - } - } - } - - // Read the CORS - cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - // An S3 Bucket might not have CORS configuration set. - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { - return err - } - log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id()) - } - log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) - if cors.CORSRules != nil { - rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) - for _, ruleObject := range cors.CORSRules { - rule := make(map[string]interface{}) - rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders) - rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods) - rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins) - // Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set. 
- if ruleObject.AllowedOrigins != nil { - rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders) - } - if ruleObject.MaxAgeSeconds != nil { - rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds) - } - rules = append(rules, rule) - } - if err := d.Set("cors_rule", rules); err != nil { - return err - } - } - - // Read the website configuration - ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ - Bucket: aws.String(d.Id()), - }) - var websites []map[string]interface{} - if err == nil { - w := make(map[string]interface{}) - - if v := ws.IndexDocument; v != nil { - w["index_document"] = *v.Suffix - } - - if v := ws.ErrorDocument; v != nil { - w["error_document"] = *v.Key - } - - if v := ws.RedirectAllRequestsTo; v != nil { - if v.Protocol == nil { - w["redirect_all_requests_to"] = *v.HostName - } else { - var host string - var path string - parsedHostName, err := url.Parse(*v.HostName) - if err == nil { - host = parsedHostName.Host - path = parsedHostName.Path - } else { - host = *v.HostName - path = "" - } - - w["redirect_all_requests_to"] = (&url.URL{ - Host: host, - Path: path, - Scheme: *v.Protocol, - }).String() - } - } - - if v := ws.RoutingRules; v != nil { - rr, err := normalizeRoutingRules(v) - if err != nil { - return fmt.Errorf("Error while marshaling routing rules: %s", err) - } - w["routing_rules"] = rr - } - - websites = append(websites, w) - } - if err := d.Set("website", websites); err != nil { - return err - } - - // Read the versioning configuration - versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning) - if versioning != nil { - vcl := make([]map[string]interface{}, 0, 1) - vc := make(map[string]interface{}) - if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled { - vc["enabled"] = true - } else { - vc["enabled"] = false - } 
- - if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled { - vc["mfa_delete"] = true - } else { - vc["mfa_delete"] = false - } - vcl = append(vcl, vc) - if err := d.Set("versioning", vcl); err != nil { - return err - } - } - - // Read the acceleration status - accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - // Amazon S3 Transfer Acceleration might not be supported in the - // given region, for example, China (Beijing) and the Government - // Cloud does not support this feature at the moment. - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" { - return err - } - - var awsRegion string - if region, ok := d.GetOk("region"); ok { - awsRegion = region.(string) - } else { - awsRegion = meta.(*AWSClient).region - } - - log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion) - } else { - log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) - d.Set("acceleration_status", accelerate.Status) - } - - // Read the request payer configuration. 
- payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer) - if payer.Payer != nil { - if err := d.Set("request_payer", *payer.Payer); err != nil { - return err - } - } - - // Read the logging configuration - logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - return err - } - - log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) - lcl := make([]map[string]interface{}, 0, 1) - if v := logging.LoggingEnabled; v != nil { - lc := make(map[string]interface{}) - if *v.TargetBucket != "" { - lc["target_bucket"] = *v.TargetBucket - } - if *v.TargetPrefix != "" { - lc["target_prefix"] = *v.TargetPrefix - } - lcl = append(lcl, lc) - } - if err := d.Set("logging", lcl); err != nil { - return err - } - - // Read the lifecycle configuration - lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { - return err - } - } - log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) - if len(lifecycle.Rules) > 0 { - rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) - - for _, lifecycleRule := range lifecycle.Rules { - rule := make(map[string]interface{}) - - // ID - if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { - rule["id"] = *lifecycleRule.ID - } - // Prefix - if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { - rule["prefix"] = *lifecycleRule.Prefix - } - // Enabled - if lifecycleRule.Status != nil { - if *lifecycleRule.Status == s3.ExpirationStatusEnabled { - rule["enabled"] = true - } else { - rule["enabled"] = false - } - } - - // AbortIncompleteMultipartUploadDays - if 
lifecycleRule.AbortIncompleteMultipartUpload != nil { - if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { - rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) - } - } - - // expiration - if lifecycleRule.Expiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.Expiration.Date != nil { - e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") - } - if lifecycleRule.Expiration.Days != nil { - e["days"] = int(*lifecycleRule.Expiration.Days) - } - if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { - e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker - } - rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) - } - // noncurrent_version_expiration - if lifecycleRule.NoncurrentVersionExpiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { - e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) - } - rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) - } - //// transition - if len(lifecycleRule.Transitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) - for _, v := range lifecycleRule.Transitions { - t := make(map[string]interface{}) - if v.Date != nil { - t["date"] = (*v.Date).Format("2006-01-02") - } - if v.Days != nil { - t["days"] = int(*v.Days) - } - if v.StorageClass != nil { - t["storage_class"] = *v.StorageClass - } - transitions = append(transitions, t) - } - rule["transition"] = schema.NewSet(transitionHash, transitions) - } - // noncurrent_version_transition - if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) - for _, v := range lifecycleRule.NoncurrentVersionTransitions { - t := make(map[string]interface{}) - if 
v.NoncurrentDays != nil { - t["days"] = int(*v.NoncurrentDays) - } - if v.StorageClass != nil { - t["storage_class"] = *v.StorageClass - } - transitions = append(transitions, t) - } - rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) - } - - rules = append(rules, rule) - } - - if err := d.Set("lifecycle_rule", rules); err != nil { - return err - } - } - - // Read the bucket replication configuration - replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { - return err - } - } - - log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication) - if r := replication.ReplicationConfiguration; r != nil { - if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil { - log.Printf("[DEBUG] Error setting replication configuration: %s", err) - return err - } - } - - // Add the region as an attribute - location, err := s3conn.GetBucketLocation( - &s3.GetBucketLocationInput{ - Bucket: aws.String(d.Id()), - }, - ) - if err != nil { - return err - } - var region string - if location.LocationConstraint != nil { - region = *location.LocationConstraint - } - region = normalizeRegion(region) - if err := d.Set("region", region); err != nil { - return err - } - - // Add the hosted zone ID for this bucket's region as an attribute - hostedZoneID := HostedZoneIDForRegion(region) - if err := d.Set("hosted_zone_id", hostedZoneID); err != nil { - return err - } - - // Add website_endpoint as an attribute - websiteEndpoint, err := websiteEndpoint(s3conn, d) - if err != nil { - return err - } - if websiteEndpoint != nil { - if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil { - return err - } - if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil { - 
return err - } - } - - tagSet, err := getTagSetS3(s3conn, d.Id()) - if err != nil { - return err - } - - if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { - return err - } - - d.Set("arn", fmt.Sprintf("arn:%s:s3:::%s", meta.(*AWSClient).partition, d.Id())) - - return nil -} - -func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - - log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id()) - _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "BucketNotEmpty" { - if d.Get("force_destroy").(bool) { - // bucket may have things delete them - log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) - - bucket := d.Get("bucket").(string) - resp, err := s3conn.ListObjectVersions( - &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - }, - ) - - if err != nil { - return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) - } - - objectsToDelete := make([]*s3.ObjectIdentifier, 0) - - if len(resp.DeleteMarkers) != 0 { - - for _, v := range resp.DeleteMarkers { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - if len(resp.Versions) != 0 { - for _, v := range resp.Versions { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - params := &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objectsToDelete, - }, - } - - _, err = s3conn.DeleteObjects(params) - - if err != nil { - return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) - } - - // this line recurses until all objects are deleted or an error is returned - return resourceAwsS3BucketDelete(d, meta) - } - } - return fmt.Errorf("Error deleting S3 Bucket: %s %q", err, d.Get("bucket").(string)) - } - return nil -} - 
-func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - policy := d.Get("policy").(string) - - if policy != "" { - log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) - - params := &s3.PutBucketPolicyInput{ - Bucket: aws.String(bucket), - Policy: aws.String(policy), - } - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketPolicy(params); err != nil { - if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "MalformedPolicy" { - return resource.RetryableError(awserr) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error putting S3 policy: %s", err) - } - } else { - log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy) - _, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ - Bucket: aws.String(bucket), - }) - - if err != nil { - return fmt.Errorf("Error deleting S3 policy: %s", err) - } - } - - return nil -} - -func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - rawCors := d.Get("cors_rule").([]interface{}) - - if len(rawCors) == 0 { - // Delete CORS - log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket) - _, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ - Bucket: aws.String(bucket), - }) - if err != nil { - return fmt.Errorf("Error deleting S3 CORS: %s", err) - } - } else { - // Put CORS - rules := make([]*s3.CORSRule, 0, len(rawCors)) - for _, cors := range rawCors { - corsMap := cors.(map[string]interface{}) - r := &s3.CORSRule{} - for k, v := range corsMap { - log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v) - if k == "max_age_seconds" { - r.MaxAgeSeconds = aws.Int64(int64(v.(int))) - } else { - vMap := make([]*string, len(v.([]interface{}))) - for i, vv := range v.([]interface{}) { - str := vv.(string) - vMap[i] = 
aws.String(str) - } - switch k { - case "allowed_headers": - r.AllowedHeaders = vMap - case "allowed_methods": - r.AllowedMethods = vMap - case "allowed_origins": - r.AllowedOrigins = vMap - case "expose_headers": - r.ExposeHeaders = vMap - } - } - } - rules = append(rules, r) - } - corsInput := &s3.PutBucketCorsInput{ - Bucket: aws.String(bucket), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: rules, - }, - } - log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput) - _, err := s3conn.PutBucketCors(corsInput) - if err != nil { - return fmt.Errorf("Error putting S3 CORS: %s", err) - } - } - - return nil -} - -func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - ws := d.Get("website").([]interface{}) - - if len(ws) == 1 { - var w map[string]interface{} - if ws[0] != nil { - w = ws[0].(map[string]interface{}) - } else { - w = make(map[string]interface{}) - } - return resourceAwsS3BucketWebsitePut(s3conn, d, w) - } else if len(ws) == 0 { - return resourceAwsS3BucketWebsiteDelete(s3conn, d) - } else { - return fmt.Errorf("Cannot specify more than one website.") - } -} - -func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error { - bucket := d.Get("bucket").(string) - - var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string - if v, ok := website["index_document"]; ok { - indexDocument = v.(string) - } - if v, ok := website["error_document"]; ok { - errorDocument = v.(string) - } - if v, ok := website["redirect_all_requests_to"]; ok { - redirectAllRequestsTo = v.(string) - } - if v, ok := website["routing_rules"]; ok { - routingRules = v.(string) - } - - if indexDocument == "" && redirectAllRequestsTo == "" { - return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.") - } - - websiteConfiguration := &s3.WebsiteConfiguration{} - - if indexDocument != "" { - websiteConfiguration.IndexDocument = 
&s3.IndexDocument{Suffix: aws.String(indexDocument)} - } - - if errorDocument != "" { - websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)} - } - - if redirectAllRequestsTo != "" { - redirect, err := url.Parse(redirectAllRequestsTo) - if err == nil && redirect.Scheme != "" { - var redirectHostBuf bytes.Buffer - redirectHostBuf.WriteString(redirect.Host) - if redirect.Path != "" { - redirectHostBuf.WriteString(redirect.Path) - } - websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)} - } else { - websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} - } - } - - if routingRules != "" { - var unmarshaledRules []*s3.RoutingRule - if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil { - return err - } - websiteConfiguration.RoutingRules = unmarshaledRules - } - - putInput := &s3.PutBucketWebsiteInput{ - Bucket: aws.String(bucket), - WebsiteConfiguration: websiteConfiguration, - } - - log.Printf("[DEBUG] S3 put bucket website: %#v", putInput) - - _, err := s3conn.PutBucketWebsite(putInput) - if err != nil { - return fmt.Errorf("Error putting S3 website: %s", err) - } - - return nil -} - -func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)} - - log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput) - - _, err := s3conn.DeleteBucketWebsite(deleteInput) - if err != nil { - return fmt.Errorf("Error deleting S3 website: %s", err) - } - - d.Set("website_endpoint", "") - d.Set("website_domain", "") - - return nil -} - -func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) { - // If the bucket doesn't have a website configuration, return an empty - // endpoint - if _, ok := 
d.GetOk("website"); !ok { - return nil, nil - } - - bucket := d.Get("bucket").(string) - - // Lookup the region for this bucket - location, err := s3conn.GetBucketLocation( - &s3.GetBucketLocationInput{ - Bucket: aws.String(bucket), - }, - ) - if err != nil { - return nil, err - } - var region string - if location.LocationConstraint != nil { - region = *location.LocationConstraint - } - - return WebsiteEndpoint(bucket, region), nil -} - -func bucketDomainName(bucket string) string { - return fmt.Sprintf("%s.s3.amazonaws.com", bucket) -} - -func WebsiteEndpoint(bucket string, region string) *S3Website { - domain := WebsiteDomainUrl(region) - return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} -} - -func WebsiteDomainUrl(region string) string { - region = normalizeRegion(region) - - // New regions uses different syntax for website endpoints - // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - if isOldRegion(region) { - return fmt.Sprintf("s3-website-%s.amazonaws.com", region) - } - return fmt.Sprintf("s3-website.%s.amazonaws.com", region) -} - -func isOldRegion(region string) bool { - oldRegions := []string{ - "ap-northeast-1", - "ap-southeast-1", - "ap-southeast-2", - "eu-west-1", - "sa-east-1", - "us-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", - } - for _, r := range oldRegions { - if region == r { - return true - } - } - return false -} - -func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - acl := d.Get("acl").(string) - bucket := d.Get("bucket").(string) - - i := &s3.PutBucketAclInput{ - Bucket: aws.String(bucket), - ACL: aws.String(acl), - } - log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) - - _, err := s3conn.PutBucketAcl(i) - if err != nil { - return fmt.Errorf("Error putting S3 ACL: %s", err) - } - - return nil -} - -func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - v := d.Get("versioning").([]interface{}) - bucket := 
d.Get("bucket").(string) - vc := &s3.VersioningConfiguration{} - - if len(v) > 0 { - c := v[0].(map[string]interface{}) - - if c["enabled"].(bool) { - vc.Status = aws.String(s3.BucketVersioningStatusEnabled) - } else { - vc.Status = aws.String(s3.BucketVersioningStatusSuspended) - } - - if c["mfa_delete"].(bool) { - vc.MFADelete = aws.String(s3.MFADeleteEnabled) - } else { - vc.MFADelete = aws.String(s3.MFADeleteDisabled) - } - - } else { - vc.Status = aws.String(s3.BucketVersioningStatusSuspended) - } - - i := &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucket), - VersioningConfiguration: vc, - } - log.Printf("[DEBUG] S3 put bucket versioning: %#v", i) - - _, err := s3conn.PutBucketVersioning(i) - if err != nil { - return fmt.Errorf("Error putting S3 versioning: %s", err) - } - - return nil -} - -func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - logging := d.Get("logging").(*schema.Set).List() - bucket := d.Get("bucket").(string) - loggingStatus := &s3.BucketLoggingStatus{} - - if len(logging) > 0 { - c := logging[0].(map[string]interface{}) - - loggingEnabled := &s3.LoggingEnabled{} - if val, ok := c["target_bucket"]; ok { - loggingEnabled.TargetBucket = aws.String(val.(string)) - } - if val, ok := c["target_prefix"]; ok { - loggingEnabled.TargetPrefix = aws.String(val.(string)) - } - - loggingStatus.LoggingEnabled = loggingEnabled - } - - i := &s3.PutBucketLoggingInput{ - Bucket: aws.String(bucket), - BucketLoggingStatus: loggingStatus, - } - log.Printf("[DEBUG] S3 put bucket logging: %#v", i) - - _, err := s3conn.PutBucketLogging(i) - if err != nil { - return fmt.Errorf("Error putting S3 logging: %s", err) - } - - return nil -} - -func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - enableAcceleration := d.Get("acceleration_status").(string) - - i := &s3.PutBucketAccelerateConfigurationInput{ - Bucket: aws.String(bucket), - 
AccelerateConfiguration: &s3.AccelerateConfiguration{ - Status: aws.String(enableAcceleration), - }, - } - log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i) - - _, err := s3conn.PutBucketAccelerateConfiguration(i) - if err != nil { - return fmt.Errorf("Error putting S3 acceleration: %s", err) - } - - return nil -} - -func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - payer := d.Get("request_payer").(string) - - i := &s3.PutBucketRequestPaymentInput{ - Bucket: aws.String(bucket), - RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ - Payer: aws.String(payer), - }, - } - log.Printf("[DEBUG] S3 put bucket request payer: %#v", i) - - _, err := s3conn.PutBucketRequestPayment(i) - if err != nil { - return fmt.Errorf("Error putting S3 request payer: %s", err) - } - - return nil -} - -func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - replicationConfiguration := d.Get("replication_configuration").([]interface{}) - - if len(replicationConfiguration) == 0 { - i := &s3.DeleteBucketReplicationInput{ - Bucket: aws.String(bucket), - } - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.DeleteBucketReplication(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Error removing S3 bucket replication: %s", err) - } - return nil - } - - hasVersioning := false - // Validate that bucket versioning is enabled - if versioning, ok := d.GetOk("versioning"); ok { - v := versioning.([]interface{}) - - if v[0].(map[string]interface{})["enabled"].(bool) { - hasVersioning = true - } - } - - if !hasVersioning { - return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") - } - - c := replicationConfiguration[0].(map[string]interface{}) - - rc := &s3.ReplicationConfiguration{} - if val, 
ok := c["role"]; ok { - rc.Role = aws.String(val.(string)) - } - - rcRules := c["rules"].(*schema.Set).List() - rules := []*s3.ReplicationRule{} - for _, v := range rcRules { - rr := v.(map[string]interface{}) - rcRule := &s3.ReplicationRule{ - Prefix: aws.String(rr["prefix"].(string)), - Status: aws.String(rr["status"].(string)), - } - - if rrid, ok := rr["id"]; ok { - rcRule.ID = aws.String(rrid.(string)) - } - - ruleDestination := &s3.Destination{} - if destination, ok := rr["destination"]; ok { - dest := destination.(*schema.Set).List() - - bd := dest[0].(map[string]interface{}) - ruleDestination.Bucket = aws.String(bd["bucket"].(string)) - - if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { - ruleDestination.StorageClass = aws.String(storageClass.(string)) - } - } - rcRule.Destination = ruleDestination - rules = append(rules, rcRule) - } - - rc.Rules = rules - i := &s3.PutBucketReplicationInput{ - Bucket: aws.String(bucket), - ReplicationConfiguration: rc, - } - log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) - - _, err := s3conn.PutBucketReplication(i) - if err != nil { - return fmt.Errorf("Error putting S3 replication configuration: %s", err) - } - - return nil -} - -func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) - - lifecycleRules := d.Get("lifecycle_rule").([]interface{}) - - if len(lifecycleRules) == 0 { - i := &s3.DeleteBucketLifecycleInput{ - Bucket: aws.String(bucket), - } - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.DeleteBucketLifecycle(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Error removing S3 lifecycle: %s", err) - } - return nil - } - - rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) - - for i, lifecycleRule := range lifecycleRules { - r := lifecycleRule.(map[string]interface{}) - - rule := 
&s3.LifecycleRule{ - Prefix: aws.String(r["prefix"].(string)), - } - - // ID - if val, ok := r["id"].(string); ok && val != "" { - rule.ID = aws.String(val) - } else { - rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) - } - - // Enabled - if val, ok := r["enabled"].(bool); ok && val { - rule.Status = aws.String(s3.ExpirationStatusEnabled) - } else { - rule.Status = aws.String(s3.ExpirationStatusDisabled) - } - - // AbortIncompleteMultipartUpload - if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { - rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: aws.Int64(int64(val)), - } - } - - // Expiration - expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List() - if len(expiration) > 0 { - e := expiration[0].(map[string]interface{}) - i := &s3.LifecycleExpiration{} - - if val, ok := e["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := e["days"].(int); ok && val > 0 { - i.Days = aws.Int64(int64(val)) - } else if val, ok := e["expired_object_delete_marker"].(bool); ok { - i.ExpiredObjectDeleteMarker = aws.Bool(val) - } - rule.Expiration = i - } - - // NoncurrentVersionExpiration - nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List() - if len(nc_expiration) > 0 { - e := nc_expiration[0].(map[string]interface{}) - - if val, ok := e["days"].(int); ok && val > 0 { - rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ - NoncurrentDays: aws.Int64(int64(val)), - } - } - } - - // Transitions - transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() - if len(transitions) > 0 { - rule.Transitions = make([]*s3.Transition, 0, 
len(transitions)) - for _, transition := range transitions { - transition := transition.(map[string]interface{}) - i := &s3.Transition{} - if val, ok := transition["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := transition["days"].(int); ok && val > 0 { - i.Days = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - - rule.Transitions = append(rule.Transitions, i) - } - } - // NoncurrentVersionTransitions - nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() - if len(nc_transitions) > 0 { - rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) - for _, transition := range nc_transitions { - transition := transition.(map[string]interface{}) - i := &s3.NoncurrentVersionTransition{} - if val, ok := transition["days"].(int); ok && val > 0 { - i.NoncurrentDays = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - - rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) - } - } - - rules = append(rules, rule) - } - - i := &s3.PutBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ - Rules: rules, - }, - } - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil { - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Error putting S3 lifecycle: %s", err) - } - - return nil -} - -func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) 
[]map[string]interface{} { - replication_configuration := make([]map[string]interface{}, 0, 1) - m := make(map[string]interface{}) - - if r.Role != nil && *r.Role != "" { - m["role"] = *r.Role - } - - rules := make([]interface{}, 0, len(r.Rules)) - for _, v := range r.Rules { - t := make(map[string]interface{}) - if v.Destination != nil { - rd := make(map[string]interface{}) - if v.Destination.Bucket != nil { - rd["bucket"] = *v.Destination.Bucket - } - if v.Destination.StorageClass != nil { - rd["storage_class"] = *v.Destination.StorageClass - } - t["destination"] = schema.NewSet(destinationHash, []interface{}{rd}) - } - - if v.ID != nil { - t["id"] = *v.ID - } - if v.Prefix != nil { - t["prefix"] = *v.Prefix - } - if v.Status != nil { - t["status"] = *v.Status - } - rules = append(rules, t) - } - m["rules"] = schema.NewSet(rulesHash, rules) - - replication_configuration = append(replication_configuration, m) - - return replication_configuration -} - -func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) { - withNulls, err := json.Marshal(w) - if err != nil { - return "", err - } - - var rules []map[string]interface{} - if err := json.Unmarshal(withNulls, &rules); err != nil { - return "", err - } - - var cleanRules []map[string]interface{} - for _, rule := range rules { - cleanRules = append(cleanRules, removeNil(rule)) - } - - withoutNulls, err := json.Marshal(cleanRules) - if err != nil { - return "", err - } - - return string(withoutNulls), nil -} - -func removeNil(data map[string]interface{}) map[string]interface{} { - withoutNil := make(map[string]interface{}) - - for k, v := range data { - if v == nil { - continue - } - - switch v.(type) { - case map[string]interface{}: - withoutNil[k] = removeNil(v.(map[string]interface{})) - default: - withoutNil[k] = v - } - } - - return withoutNil -} - -// DEPRECATED. Please consider using `normalizeJsonString` function instead. 
-func normalizeJson(jsonString interface{}) string { - if jsonString == nil || jsonString == "" { - return "" - } - var j interface{} - err := json.Unmarshal([]byte(jsonString.(string)), &j) - if err != nil { - return fmt.Sprintf("Error parsing JSON: %s", err) - } - b, _ := json.Marshal(j) - return string(b[:]) -} - -func normalizeRegion(region string) string { - // Default to us-east-1 if the bucket doesn't have a region: - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html - if region == "" { - region = "us-east-1" - } - - return region -} - -func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) { - validTypes := map[string]struct{}{ - "Enabled": struct{}{}, - "Suspended": struct{}{}, - } - - if _, ok := validTypes[v.(string)]; !ok { - errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended")) - } - return -} - -func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != s3.PayerRequester && value != s3.PayerBucketOwner { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Request Payer type %q. Valid types are either %q or %q", - k, value, s3.PayerRequester, s3.PayerBucketOwner)) - } - return -} - -// validateS3BucketName validates any S3 bucket name that is not inside the us-east-1 region. -// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are -// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc -func validateS3BucketName(value string, region string) error { - if region != "us-east-1" { - if (len(value) < 3) || (len(value) > 63) { - return fmt.Errorf("%q must contain from 3 to 63 characters", value) - } - if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { - return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) - } - if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { - return fmt.Errorf("%q must not be formatted as an IP address", value) - } - if strings.HasPrefix(value, `.`) { - return fmt.Errorf("%q cannot start with a period", value) - } - if strings.HasSuffix(value, `.`) { - return fmt.Errorf("%q cannot end with a period", value) - } - if strings.Contains(value, `..`) { - return fmt.Errorf("%q can be only one period between labels", value) - } - } else { - if len(value) > 255 { - return fmt.Errorf("%q must contain less than 256 characters", value) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { - return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) - } - } - return nil -} - -func expirationHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["date"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["days"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["expired_object_delete_marker"]; ok { - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } - return hashcode.String(buf.String()) -} - -func transitionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if v, ok := m["date"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["days"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := 
m["storage_class"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} - -func rulesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["prefix"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["status"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} - -func destinationHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["bucket"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["storage_class"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return hashcode.String(buf.String()) -} - -type S3Website struct { - Endpoint, Domain string -} diff --git a/builtin/providers/aws/resource_aws_s3_bucket_notification.go b/builtin/providers/aws/resource_aws_s3_bucket_notification.go deleted file mode 100644 index f3e19b484..000000000 --- a/builtin/providers/aws/resource_aws_s3_bucket_notification.go +++ /dev/null @@ -1,467 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" -) - -func resourceAwsS3BucketNotification() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsS3BucketNotificationPut, - Read: resourceAwsS3BucketNotificationRead, - Update: resourceAwsS3BucketNotificationPut, - Delete: resourceAwsS3BucketNotificationDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "topic": &schema.Schema{ - Type: 
schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "filter_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "filter_suffix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "topic_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - - "queue": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "filter_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "filter_suffix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "queue_arn": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - - "lambda_function": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "filter_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "filter_suffix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "lambda_function_arn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - }, - } -} - -func resourceAwsS3BucketNotificationPut(d *schema.ResourceData, meta 
interface{}) error { - s3conn := meta.(*AWSClient).s3conn - bucket := d.Get("bucket").(string) - - // TopicNotifications - topicNotifications := d.Get("topic").([]interface{}) - topicConfigs := make([]*s3.TopicConfiguration, 0, len(topicNotifications)) - for i, c := range topicNotifications { - tc := &s3.TopicConfiguration{} - - c := c.(map[string]interface{}) - - // Id - if val, ok := c["id"].(string); ok && val != "" { - tc.Id = aws.String(val) - } else { - tc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-topic-")) - } - - // TopicArn - if val, ok := c["topic_arn"].(string); ok { - tc.TopicArn = aws.String(val) - } - - // Events - events := d.Get(fmt.Sprintf("topic.%d.events", i)).(*schema.Set).List() - tc.Events = make([]*string, 0, len(events)) - for _, e := range events { - tc.Events = append(tc.Events, aws.String(e.(string))) - } - - // Filter - filterRules := make([]*s3.FilterRule, 0, 2) - if val, ok := c["filter_prefix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("prefix"), - Value: aws.String(val), - } - filterRules = append(filterRules, filterRule) - } - if val, ok := c["filter_suffix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("suffix"), - Value: aws.String(val), - } - filterRules = append(filterRules, filterRule) - } - if len(filterRules) > 0 { - tc.Filter = &s3.NotificationConfigurationFilter{ - Key: &s3.KeyFilter{ - FilterRules: filterRules, - }, - } - } - topicConfigs = append(topicConfigs, tc) - } - - // SQS - queueNotifications := d.Get("queue").([]interface{}) - queueConfigs := make([]*s3.QueueConfiguration, 0, len(queueNotifications)) - for i, c := range queueNotifications { - qc := &s3.QueueConfiguration{} - - c := c.(map[string]interface{}) - - // Id - if val, ok := c["id"].(string); ok && val != "" { - qc.Id = aws.String(val) - } else { - qc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-queue-")) - } - - // QueueArn - if val, ok := c["queue_arn"].(string); ok { - 
qc.QueueArn = aws.String(val) - } - - // Events - events := d.Get(fmt.Sprintf("queue.%d.events", i)).(*schema.Set).List() - qc.Events = make([]*string, 0, len(events)) - for _, e := range events { - qc.Events = append(qc.Events, aws.String(e.(string))) - } - - // Filter - filterRules := make([]*s3.FilterRule, 0, 2) - if val, ok := c["filter_prefix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("prefix"), - Value: aws.String(val), - } - filterRules = append(filterRules, filterRule) - } - if val, ok := c["filter_suffix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("suffix"), - Value: aws.String(val), - } - filterRules = append(filterRules, filterRule) - } - if len(filterRules) > 0 { - qc.Filter = &s3.NotificationConfigurationFilter{ - Key: &s3.KeyFilter{ - FilterRules: filterRules, - }, - } - } - queueConfigs = append(queueConfigs, qc) - } - - // Lambda - lambdaFunctionNotifications := d.Get("lambda_function").([]interface{}) - lambdaConfigs := make([]*s3.LambdaFunctionConfiguration, 0, len(lambdaFunctionNotifications)) - for i, c := range lambdaFunctionNotifications { - lc := &s3.LambdaFunctionConfiguration{} - - c := c.(map[string]interface{}) - - // Id - if val, ok := c["id"].(string); ok && val != "" { - lc.Id = aws.String(val) - } else { - lc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-lambda-")) - } - - // LambdaFunctionArn - if val, ok := c["lambda_function_arn"].(string); ok { - lc.LambdaFunctionArn = aws.String(val) - } - - // Events - events := d.Get(fmt.Sprintf("lambda_function.%d.events", i)).(*schema.Set).List() - lc.Events = make([]*string, 0, len(events)) - for _, e := range events { - lc.Events = append(lc.Events, aws.String(e.(string))) - } - - // Filter - filterRules := make([]*s3.FilterRule, 0, 2) - if val, ok := c["filter_prefix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("prefix"), - Value: aws.String(val), - } - filterRules = 
append(filterRules, filterRule) - } - if val, ok := c["filter_suffix"].(string); ok && val != "" { - filterRule := &s3.FilterRule{ - Name: aws.String("suffix"), - Value: aws.String(val), - } - filterRules = append(filterRules, filterRule) - } - if len(filterRules) > 0 { - lc.Filter = &s3.NotificationConfigurationFilter{ - Key: &s3.KeyFilter{ - FilterRules: filterRules, - }, - } - } - lambdaConfigs = append(lambdaConfigs, lc) - } - - notificationConfiguration := &s3.NotificationConfiguration{} - if len(lambdaConfigs) > 0 { - notificationConfiguration.LambdaFunctionConfigurations = lambdaConfigs - } - if len(queueConfigs) > 0 { - notificationConfiguration.QueueConfigurations = queueConfigs - } - if len(topicConfigs) > 0 { - notificationConfiguration.TopicConfigurations = topicConfigs - } - i := &s3.PutBucketNotificationConfigurationInput{ - Bucket: aws.String(bucket), - NotificationConfiguration: notificationConfiguration, - } - - log.Printf("[DEBUG] S3 bucket: %s, Putting notification: %v", bucket, i) - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - if _, err := s3conn.PutBucketNotificationConfiguration(i); err != nil { - if awserr, ok := err.(awserr.Error); ok { - switch awserr.Message() { - case "Unable to validate the following destination configurations": - return resource.RetryableError(awserr) - } - } - // Didn't recognize the error, so shouldn't retry. 
- return resource.NonRetryableError(err) - } - // Successful put configuration - return nil - }) - if err != nil { - return fmt.Errorf("Error putting S3 notification configuration: %s", err) - } - - d.SetId(bucket) - - return resourceAwsS3BucketNotificationRead(d, meta) -} - -func resourceAwsS3BucketNotificationDelete(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - - i := &s3.PutBucketNotificationConfigurationInput{ - Bucket: aws.String(d.Id()), - NotificationConfiguration: &s3.NotificationConfiguration{}, - } - - log.Printf("[DEBUG] S3 bucket: %s, Deleting notification: %v", d.Id(), i) - _, err := s3conn.PutBucketNotificationConfiguration(i) - if err != nil { - return fmt.Errorf("Error deleting S3 notification configuration: %s", err) - } - - d.SetId("") - - return nil -} - -func resourceAwsS3BucketNotificationRead(d *schema.ResourceData, meta interface{}) error { - s3conn := meta.(*AWSClient).s3conn - - var err error - _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { - log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } else { - // some of the AWS SDK's errors can be empty strings, so let's add - // some additional context. 
- return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) - } - } - - // Read the notification configuration - notificationConfigs, err := s3conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String(d.Id()), - }) - if err != nil { - return err - } - log.Printf("[DEBUG] S3 Bucket: %s, get notification: %v", d.Id(), notificationConfigs) - // Topic Notification - if err := d.Set("topic", flattenTopicConfigurations(notificationConfigs.TopicConfigurations)); err != nil { - return fmt.Errorf("error reading S3 bucket \"%s\" topic notification: %s", d.Id(), err) - } - - // SQS Notification - if err := d.Set("queue", flattenQueueConfigurations(notificationConfigs.QueueConfigurations)); err != nil { - return fmt.Errorf("error reading S3 bucket \"%s\" queue notification: %s", d.Id(), err) - } - - // Lambda Notification - if err := d.Set("lambda_function", flattenLambdaFunctionConfigurations(notificationConfigs.LambdaFunctionConfigurations)); err != nil { - return fmt.Errorf("error reading S3 bucket \"%s\" lambda function notification: %s", d.Id(), err) - } - - return nil -} - -func flattenNotificationConfigurationFilter(filter *s3.NotificationConfigurationFilter) map[string]interface{} { - filterRules := map[string]interface{}{} - for _, f := range filter.Key.FilterRules { - if strings.ToLower(*f.Name) == s3.FilterRuleNamePrefix { - filterRules["filter_prefix"] = *f.Value - } - if strings.ToLower(*f.Name) == s3.FilterRuleNameSuffix { - filterRules["filter_suffix"] = *f.Value - } - } - return filterRules -} - -func flattenTopicConfigurations(configs []*s3.TopicConfiguration) []map[string]interface{} { - topicNotifications := make([]map[string]interface{}, 0, len(configs)) - for _, notification := range configs { - var conf map[string]interface{} - if filter := notification.Filter; filter != nil { - conf = flattenNotificationConfigurationFilter(filter) - } else { - conf = map[string]interface{}{} - } - - conf["id"] 
= *notification.Id - conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) - conf["topic_arn"] = *notification.TopicArn - topicNotifications = append(topicNotifications, conf) - } - - return topicNotifications -} - -func flattenQueueConfigurations(configs []*s3.QueueConfiguration) []map[string]interface{} { - queueNotifications := make([]map[string]interface{}, 0, len(configs)) - for _, notification := range configs { - var conf map[string]interface{} - if filter := notification.Filter; filter != nil { - conf = flattenNotificationConfigurationFilter(filter) - } else { - conf = map[string]interface{}{} - } - - conf["id"] = *notification.Id - conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) - conf["queue_arn"] = *notification.QueueArn - queueNotifications = append(queueNotifications, conf) - } - - return queueNotifications -} - -func flattenLambdaFunctionConfigurations(configs []*s3.LambdaFunctionConfiguration) []map[string]interface{} { - lambdaFunctionNotifications := make([]map[string]interface{}, 0, len(configs)) - for _, notification := range configs { - var conf map[string]interface{} - if filter := notification.Filter; filter != nil { - conf = flattenNotificationConfigurationFilter(filter) - } else { - conf = map[string]interface{}{} - } - - conf["id"] = *notification.Id - conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) - conf["lambda_function_arn"] = *notification.LambdaFunctionArn - lambdaFunctionNotifications = append(lambdaFunctionNotifications, conf) - } - - return lambdaFunctionNotifications -} diff --git a/builtin/providers/aws/resource_aws_s3_bucket_notification_test.go b/builtin/providers/aws/resource_aws_s3_bucket_notification_test.go deleted file mode 100644 index 848604626..000000000 --- a/builtin/providers/aws/resource_aws_s3_bucket_notification_test.go +++ /dev/null @@ -1,520 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - 
"sort" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" -) - -func TestAccAWSS3Bucket_Notification(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSS3BucketConfigWithTopicNotification(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketTopicNotification( - "aws_s3_bucket.bucket", - "notification-sns1", - "aws_sns_topic.topic", - []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:Delete"}, - &s3.KeyFilter{ - FilterRules: []*s3.FilterRule{ - &s3.FilterRule{ - Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), - }, - &s3.FilterRule{ - Name: aws.String("Suffix"), - Value: aws.String(".txt"), - }, - }, - }, - ), - testAccCheckAWSS3BucketTopicNotification( - "aws_s3_bucket.bucket", - "notification-sns2", - "aws_sns_topic.topic", - []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:Delete"}, - &s3.KeyFilter{ - FilterRules: []*s3.FilterRule{ - &s3.FilterRule{ - Name: aws.String("Suffix"), - Value: aws.String(".log"), - }, - }, - }, - ), - ), - }, - resource.TestStep{ - Config: testAccAWSS3BucketConfigWithQueueNotification(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketQueueNotification( - "aws_s3_bucket.bucket", - "notification-sqs", - "aws_sqs_queue.queue", - []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:Delete"}, - &s3.KeyFilter{ - FilterRules: []*s3.FilterRule{ - &s3.FilterRule{ - Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), - }, - &s3.FilterRule{ - Name: aws.String("Suffix"), - Value: aws.String(".mp4"), - 
}, - }, - }, - ), - ), - }, - resource.TestStep{ - Config: testAccAWSS3BucketConfigWithLambdaNotification(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketLambdaFunctionConfiguration( - "aws_s3_bucket.bucket", - "notification-lambda", - "aws_lambda_function.func", - []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:Delete"}, - &s3.KeyFilter{ - FilterRules: []*s3.FilterRule{ - &s3.FilterRule{ - Name: aws.String("Prefix"), - Value: aws.String(fmt.Sprintf("%d/", rInt)), - }, - &s3.FilterRule{ - Name: aws.String("Suffix"), - Value: aws.String(".png"), - }, - }, - }, - ), - ), - }, - }, - }) -} - -func TestAccAWSS3Bucket_NotificationWithoutFilter(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSS3BucketNotificationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSS3BucketConfigWithTopicNotificationWithoutFilter(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSS3BucketTopicNotification( - "aws_s3_bucket.bucket", - "notification-sns1", - "aws_sns_topic.topic", - []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:Delete"}, - nil, - ), - ), - }, - }, - }) -} - -func testAccCheckAWSS3BucketNotificationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).s3conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_bucket_notification" { - continue - } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String(rs.Primary.ID), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucket" { - return nil - } - return resource.NonRetryableError(err) - } - if len(out.TopicConfigurations) > 0 { - return resource.RetryableError(fmt.Errorf("TopicConfigurations is exists: 
%v", out)) - } - if len(out.LambdaFunctionConfigurations) > 0 { - return resource.RetryableError(fmt.Errorf("LambdaFunctionConfigurations is exists: %v", out)) - } - if len(out.QueueConfigurations) > 0 { - return resource.RetryableError(fmt.Errorf("QueueConfigurations is exists: %v", out)) - } - - return nil - }) - - if err != nil { - return err - } - } - return nil -} - -func testAccCheckAWSS3BucketTopicNotification(n, i, t string, events []string, filters *s3.KeyFilter) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[n] - topicArn := s.RootModule().Resources[t].Primary.ID - conn := testAccProvider.Meta().(*AWSClient).s3conn - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String(rs.Primary.ID), - }) - - if err != nil { - return resource.NonRetryableError(fmt.Errorf("GetBucketNotification error: %v", err)) - } - - eventSlice := sort.StringSlice(events) - eventSlice.Sort() - - outputTopics := out.TopicConfigurations - matched := false - for _, outputTopic := range outputTopics { - if *outputTopic.Id == i { - matched = true - - if *outputTopic.TopicArn != topicArn { - return resource.RetryableError(fmt.Errorf("bad topic arn, expected: %s, got %#v", topicArn, *outputTopic.TopicArn)) - } - - if filters != nil { - if !reflect.DeepEqual(filters, outputTopic.Filter.Key) { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: %#v, got %#v", filters, outputTopic.Filter.Key)) - } - } else { - if outputTopic.Filter != nil { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: nil, got %#v", outputTopic.Filter)) - } - } - - outputEventSlice := sort.StringSlice(aws.StringValueSlice(outputTopic.Events)) - outputEventSlice.Sort() - if !reflect.DeepEqual(eventSlice, outputEventSlice) { - return resource.RetryableError(fmt.Errorf("bad 
notification events, expected: %#v, got %#v", events, outputEventSlice)) - } - } - } - - if !matched { - return resource.RetryableError(fmt.Errorf("No match topic configurations: %#v", out)) - } - - return nil - }) - - return err - } -} - -func testAccCheckAWSS3BucketQueueNotification(n, i, t string, events []string, filters *s3.KeyFilter) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[n] - queueArn := s.RootModule().Resources[t].Primary.Attributes["arn"] - conn := testAccProvider.Meta().(*AWSClient).s3conn - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String(rs.Primary.ID), - }) - - if err != nil { - return resource.NonRetryableError(fmt.Errorf("GetBucketNotification error: %v", err)) - } - - eventSlice := sort.StringSlice(events) - eventSlice.Sort() - - outputQueues := out.QueueConfigurations - matched := false - for _, outputQueue := range outputQueues { - if *outputQueue.Id == i { - matched = true - - if *outputQueue.QueueArn != queueArn { - return resource.RetryableError(fmt.Errorf("bad queue arn, expected: %s, got %#v", queueArn, *outputQueue.QueueArn)) - } - - if filters != nil { - if !reflect.DeepEqual(filters, outputQueue.Filter.Key) { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: %#v, got %#v", filters, outputQueue.Filter.Key)) - } - } else { - if outputQueue.Filter != nil { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: nil, got %#v", outputQueue.Filter)) - } - } - - outputEventSlice := sort.StringSlice(aws.StringValueSlice(outputQueue.Events)) - outputEventSlice.Sort() - if !reflect.DeepEqual(eventSlice, outputEventSlice) { - return resource.RetryableError(fmt.Errorf("bad notification events, expected: %#v, got %#v", events, outputEventSlice)) - } - } - } - - if !matched { - return 
resource.RetryableError(fmt.Errorf("No match queue configurations: %#v", out)) - } - - return nil - }) - - return err - } -} - -func testAccCheckAWSS3BucketLambdaFunctionConfiguration(n, i, t string, events []string, filters *s3.KeyFilter) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, _ := s.RootModule().Resources[n] - funcArn := s.RootModule().Resources[t].Primary.Attributes["arn"] - conn := testAccProvider.Meta().(*AWSClient).s3conn - - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String(rs.Primary.ID), - }) - - if err != nil { - return resource.NonRetryableError(fmt.Errorf("GetBucketNotification error: %v", err)) - } - - eventSlice := sort.StringSlice(events) - eventSlice.Sort() - - outputFunctions := out.LambdaFunctionConfigurations - matched := false - for _, outputFunc := range outputFunctions { - if *outputFunc.Id == i { - matched = true - - if *outputFunc.LambdaFunctionArn != funcArn { - return resource.RetryableError(fmt.Errorf("bad lambda function arn, expected: %s, got %#v", funcArn, *outputFunc.LambdaFunctionArn)) - } - - if filters != nil { - if !reflect.DeepEqual(filters, outputFunc.Filter.Key) { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: %#v, got %#v", filters, outputFunc.Filter.Key)) - } - } else { - if outputFunc.Filter != nil { - return resource.RetryableError(fmt.Errorf("bad notification filters, expected: nil, got %#v", outputFunc.Filter)) - } - } - - outputEventSlice := sort.StringSlice(aws.StringValueSlice(outputFunc.Events)) - outputEventSlice.Sort() - if !reflect.DeepEqual(eventSlice, outputEventSlice) { - return resource.RetryableError(fmt.Errorf("bad notification events, expected: %#v, got %#v", events, outputEventSlice)) - } - } - } - - if !matched { - return resource.RetryableError(fmt.Errorf("No match lambda function configurations: %#v", 
out)) - } - - return nil - }) - - return err - } -} - -func testAccAWSS3BucketConfigWithTopicNotification(randInt int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "topic" { - name = "terraform-test-topic-%d" - policy = < 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, - }, - - "name_prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters, name is limited to 255", k)) - } - return - }, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, - }, - - "vpc_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "ingress": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - - "to_port": { - Type: schema.TypeInt, - Required: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - StateFunc: protocolStateFunc, - }, - - "cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "ipv6_cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "security_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: 
schema.HashString, - }, - - "self": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - Set: resourceAwsSecurityGroupRuleHash, - }, - - "egress": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - - "to_port": { - Type: schema.TypeInt, - Required: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - StateFunc: protocolStateFunc, - }, - - "cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "ipv6_cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "prefix_list_ids": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "security_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "self": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - Set: resourceAwsSecurityGroupRuleHash, - }, - - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - securityGroupOpts := &ec2.CreateSecurityGroupInput{} - - if v, ok := d.GetOk("vpc_id"); ok { - securityGroupOpts.VpcId = aws.String(v.(string)) - } - - if v := d.Get("description"); v != nil { - securityGroupOpts.Description = aws.String(v.(string)) - } - - var groupName string - if v, ok := d.GetOk("name"); ok { - groupName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - groupName = resource.PrefixedUniqueId(v.(string)) - } else { - groupName = resource.UniqueId() - 
} - securityGroupOpts.GroupName = aws.String(groupName) - - var err error - log.Printf( - "[DEBUG] Security Group create configuration: %#v", securityGroupOpts) - createResp, err := conn.CreateSecurityGroup(securityGroupOpts) - if err != nil { - return fmt.Errorf("Error creating Security Group: %s", err) - } - - d.SetId(*createResp.GroupId) - - log.Printf("[INFO] Security Group ID: %s", d.Id()) - - // Wait for the security group to truly exist - log.Printf( - "[DEBUG] Waiting for Security Group (%s) to exist", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{""}, - Target: []string{"exists"}, - Refresh: SGStateRefreshFunc(conn, d.Id()), - Timeout: 5 * time.Minute, - } - - resp, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Security Group (%s) to become available: %s", - d.Id(), err) - } - - if err := setTags(conn, d); err != nil { - return err - } - - // AWS defaults all Security Groups to have an ALLOW ALL egress rule. Here we - // revoke that rule, so users don't unknowingly have/use it. 
- group := resp.(*ec2.SecurityGroup) - if group.VpcId != nil && *group.VpcId != "" { - log.Printf("[DEBUG] Revoking default egress rule for Security Group for %s", d.Id()) - - req := &ec2.RevokeSecurityGroupEgressInput{ - GroupId: createResp.GroupId, - IpPermissions: []*ec2.IpPermission{ - { - FromPort: aws.Int64(int64(0)), - ToPort: aws.Int64(int64(0)), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String("0.0.0.0/0"), - }, - }, - IpProtocol: aws.String("-1"), - }, - }, - } - - if _, err = conn.RevokeSecurityGroupEgress(req); err != nil { - return fmt.Errorf( - "Error revoking default egress rule for Security Group (%s): %s", - d.Id(), err) - } - - log.Printf("[DEBUG] Revoking default IPv6 egress rule for Security Group for %s", d.Id()) - req = &ec2.RevokeSecurityGroupEgressInput{ - GroupId: createResp.GroupId, - IpPermissions: []*ec2.IpPermission{ - { - FromPort: aws.Int64(int64(0)), - ToPort: aws.Int64(int64(0)), - Ipv6Ranges: []*ec2.Ipv6Range{ - { - CidrIpv6: aws.String("::/0"), - }, - }, - IpProtocol: aws.String("-1"), - }, - }, - } - - _, err = conn.RevokeSecurityGroupEgress(req) - if err != nil { - //If we have a NotFound, then we are trying to remove the default IPv6 egress of a non-IPv6 - //enabled SG - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() != "InvalidPermission.NotFound" { - return fmt.Errorf( - "Error revoking default IPv6 egress rule for Security Group (%s): %s", - d.Id(), err) - } - } - - } - - return resourceAwsSecurityGroupUpdate(d, meta) -} - -func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if sgRaw == nil { - d.SetId("") - return nil - } - - sg := sgRaw.(*ec2.SecurityGroup) - - remoteIngressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissions, sg.OwnerId) - remoteEgressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissionsEgress, 
sg.OwnerId) - - localIngressRules := d.Get("ingress").(*schema.Set).List() - localEgressRules := d.Get("egress").(*schema.Set).List() - - // Loop through the local state of rules, doing a match against the remote - // ruleSet we built above. - ingressRules := matchRules("ingress", localIngressRules, remoteIngressRules) - egressRules := matchRules("egress", localEgressRules, remoteEgressRules) - - d.Set("description", sg.Description) - d.Set("name", sg.GroupName) - d.Set("vpc_id", sg.VpcId) - d.Set("owner_id", sg.OwnerId) - - if err := d.Set("ingress", ingressRules); err != nil { - log.Printf("[WARN] Error setting Ingress rule set for (%s): %s", d.Id(), err) - } - - if err := d.Set("egress", egressRules); err != nil { - log.Printf("[WARN] Error setting Egress rule set for (%s): %s", d.Id(), err) - } - - d.Set("tags", tagsToMap(sg.Tags)) - return nil -} - -func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if sgRaw == nil { - d.SetId("") - return nil - } - - group := sgRaw.(*ec2.SecurityGroup) - - err = resourceAwsSecurityGroupUpdateRules(d, "ingress", meta, group) - if err != nil { - return err - } - - if d.Get("vpc_id") != nil { - err = resourceAwsSecurityGroupUpdateRules(d, "egress", meta, group) - if err != nil { - return err - } - } - - if !d.IsNewResource() { - if err := setTags(conn, d); err != nil { - return err - } - d.SetPartial("tags") - } - - return resourceAwsSecurityGroupRead(d, meta) -} - -func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[DEBUG] Security Group destroy: %v", d.Id()) - - if err := deleteLingeringLambdaENIs(conn, d); err != nil { - return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := 
conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{ - GroupId: aws.String(d.Id()), - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidGroup.NotFound": - return nil - case "DependencyViolation": - // If it is a dependency violation, we want to retry - return resource.RetryableError(err) - default: - // Any other error, we want to quit the retry loop immediately - return resource.NonRetryableError(err) - } - } - - return nil - }) -} - -func resourceAwsSecurityGroupRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) - p := protocolForValue(m["protocol"].(string)) - buf.WriteString(fmt.Sprintf("%s-", p)) - buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) - - // We need to make sure to sort the strings below so that we always - // generate the same hash code no matter what is in the set. 
- if v, ok := m["cidr_blocks"]; ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - if v, ok := m["ipv6_cidr_blocks"]; ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - if v, ok := m["prefix_list_ids"]; ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - if v, ok := m["security_groups"]; ok { - vs := v.(*schema.Set).List() - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - return hashcode.String(buf.String()) -} - -func resourceAwsSecurityGroupIPPermGather(groupId string, permissions []*ec2.IpPermission, ownerId *string) []map[string]interface{} { - ruleMap := make(map[string]map[string]interface{}) - for _, perm := range permissions { - var fromPort, toPort int64 - if v := perm.FromPort; v != nil { - fromPort = *v - } - if v := perm.ToPort; v != nil { - toPort = *v - } - - k := fmt.Sprintf("%s-%d-%d", *perm.IpProtocol, fromPort, toPort) - m, ok := ruleMap[k] - if !ok { - m = make(map[string]interface{}) - ruleMap[k] = m - } - - m["from_port"] = fromPort - m["to_port"] = toPort - m["protocol"] = *perm.IpProtocol - - if len(perm.IpRanges) > 0 { - raw, ok := m["cidr_blocks"] - if !ok { - raw = make([]string, 0, len(perm.IpRanges)) - } - list := raw.([]string) - - for _, ip := range perm.IpRanges { - list = append(list, *ip.CidrIp) - } - - m["cidr_blocks"] = list - } - - if len(perm.Ipv6Ranges) > 0 { - raw, ok := m["ipv6_cidr_blocks"] - if !ok { - raw = make([]string, 0, 
len(perm.Ipv6Ranges)) - } - list := raw.([]string) - - for _, ip := range perm.Ipv6Ranges { - list = append(list, *ip.CidrIpv6) - } - - m["ipv6_cidr_blocks"] = list - } - - if len(perm.PrefixListIds) > 0 { - raw, ok := m["prefix_list_ids"] - if !ok { - raw = make([]string, 0, len(perm.PrefixListIds)) - } - list := raw.([]string) - - for _, pl := range perm.PrefixListIds { - list = append(list, *pl.PrefixListId) - } - - m["prefix_list_ids"] = list - } - - groups := flattenSecurityGroups(perm.UserIdGroupPairs, ownerId) - for i, g := range groups { - if *g.GroupId == groupId { - groups[i], groups = groups[len(groups)-1], groups[:len(groups)-1] - m["self"] = true - } - } - - if len(groups) > 0 { - raw, ok := m["security_groups"] - if !ok { - raw = schema.NewSet(schema.HashString, nil) - } - list := raw.(*schema.Set) - - for _, g := range groups { - if g.GroupName != nil { - list.Add(*g.GroupName) - } else { - list.Add(*g.GroupId) - } - } - - m["security_groups"] = list - } - } - rules := make([]map[string]interface{}, 0, len(ruleMap)) - for _, m := range ruleMap { - rules = append(rules, m) - } - - return rules -} - -func resourceAwsSecurityGroupUpdateRules( - d *schema.ResourceData, ruleset string, - meta interface{}, group *ec2.SecurityGroup) error { - - if d.HasChange(ruleset) { - o, n := d.GetChange(ruleset) - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove, err := expandIPPerms(group, os.Difference(ns).List()) - if err != nil { - return err - } - add, err := expandIPPerms(group, ns.Difference(os).List()) - if err != nil { - return err - } - - // TODO: We need to handle partial state better in the in-between - // in this update. - - // TODO: It'd be nicer to authorize before removing, but then we have - // to deal with complicated unrolling to get individual CIDR blocks - // to avoid authorizing already authorized sources. 
Removing before - // adding is easier here, and Terraform should be fast enough to - // not have service issues. - - if len(remove) > 0 || len(add) > 0 { - conn := meta.(*AWSClient).ec2conn - - var err error - if len(remove) > 0 { - log.Printf("[DEBUG] Revoking security group %#v %s rule: %#v", - group, ruleset, remove) - - if ruleset == "egress" { - req := &ec2.RevokeSecurityGroupEgressInput{ - GroupId: group.GroupId, - IpPermissions: remove, - } - _, err = conn.RevokeSecurityGroupEgress(req) - } else { - req := &ec2.RevokeSecurityGroupIngressInput{ - GroupId: group.GroupId, - IpPermissions: remove, - } - if group.VpcId == nil || *group.VpcId == "" { - req.GroupId = nil - req.GroupName = group.GroupName - } - _, err = conn.RevokeSecurityGroupIngress(req) - } - - if err != nil { - return fmt.Errorf( - "Error revoking security group %s rules: %s", - ruleset, err) - } - } - - if len(add) > 0 { - log.Printf("[DEBUG] Authorizing security group %#v %s rule: %#v", - group, ruleset, add) - // Authorize the new rules - if ruleset == "egress" { - req := &ec2.AuthorizeSecurityGroupEgressInput{ - GroupId: group.GroupId, - IpPermissions: add, - } - _, err = conn.AuthorizeSecurityGroupEgress(req) - } else { - req := &ec2.AuthorizeSecurityGroupIngressInput{ - GroupId: group.GroupId, - IpPermissions: add, - } - if group.VpcId == nil || *group.VpcId == "" { - req.GroupId = nil - req.GroupName = group.GroupName - } - - _, err = conn.AuthorizeSecurityGroupIngress(req) - } - - if err != nil { - return fmt.Errorf( - "Error authorizing security group %s rules: %s", - ruleset, err) - } - } - } - } - return nil -} - -// SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a security group. 
-func SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok { - if ec2err.Code() == "InvalidSecurityGroupID.NotFound" || - ec2err.Code() == "InvalidGroup.NotFound" { - resp = nil - err = nil - } - } - - if err != nil { - log.Printf("Error on SGStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - return nil, "", nil - } - - group := resp.SecurityGroups[0] - return group, "exists", nil - } -} - -// matchRules receives the group id, type of rules, and the local / remote maps -// of rules. We iterate through the local set of rules trying to find a matching -// remote rule, which may be structured differently because of how AWS -// aggregates the rules under the to, from, and type. -// -// -// Matching rules are written to state, with their elements removed from the -// remote set -// -// If no match is found, we'll write the remote rule to state and let the graph -// sort things out -func matchRules(rType string, local []interface{}, remote []map[string]interface{}) []map[string]interface{} { - // For each local ip or security_group, we need to match against the remote - // ruleSet until all ips or security_groups are found - - // saves represents the rules that have been identified to be saved to state, - // in the appropriate d.Set("{ingress,egress}") call. - var saves []map[string]interface{} - for _, raw := range local { - l := raw.(map[string]interface{}) - - var selfVal bool - if v, ok := l["self"]; ok { - selfVal = v.(bool) - } - - // matching against self is required to detect rules that only include self - // as the rule. 
resourceAwsSecurityGroupIPPermGather parses the group out - // and replaces it with self if it's ID is found - localHash := idHash(rType, l["protocol"].(string), int64(l["to_port"].(int)), int64(l["from_port"].(int)), selfVal) - - // loop remote rules, looking for a matching hash - for _, r := range remote { - var remoteSelfVal bool - if v, ok := r["self"]; ok { - remoteSelfVal = v.(bool) - } - - // hash this remote rule and compare it for a match consideration with the - // local rule we're examining - rHash := idHash(rType, r["protocol"].(string), r["to_port"].(int64), r["from_port"].(int64), remoteSelfVal) - if rHash == localHash { - var numExpectedCidrs, numExpectedIpv6Cidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemoteIpv6Cidrs, numRemotePrefixLists, numRemoteSGs int - var matchingCidrs []string - var matchingIpv6Cidrs []string - var matchingSGs []string - var matchingPrefixLists []string - - // grab the local/remote cidr and sg groups, capturing the expected and - // actual counts - lcRaw, ok := l["cidr_blocks"] - if ok { - numExpectedCidrs = len(l["cidr_blocks"].([]interface{})) - } - liRaw, ok := l["ipv6_cidr_blocks"] - if ok { - numExpectedIpv6Cidrs = len(l["ipv6_cidr_blocks"].([]interface{})) - } - lpRaw, ok := l["prefix_list_ids"] - if ok { - numExpectedPrefixLists = len(l["prefix_list_ids"].([]interface{})) - } - lsRaw, ok := l["security_groups"] - if ok { - numExpectedSGs = len(l["security_groups"].(*schema.Set).List()) - } - - rcRaw, ok := r["cidr_blocks"] - if ok { - numRemoteCidrs = len(r["cidr_blocks"].([]string)) - } - riRaw, ok := r["ipv6_cidr_blocks"] - if ok { - numRemoteIpv6Cidrs = len(r["ipv6_cidr_blocks"].([]string)) - } - rpRaw, ok := r["prefix_list_ids"] - if ok { - numRemotePrefixLists = len(r["prefix_list_ids"].([]string)) - } - - rsRaw, ok := r["security_groups"] - if ok { - numRemoteSGs = len(r["security_groups"].(*schema.Set).List()) - } - - // check some early failures - if numExpectedCidrs > numRemoteCidrs { - 
log.Printf("[DEBUG] Local rule has more CIDR blocks, continuing (%d/%d)", numExpectedCidrs, numRemoteCidrs) - continue - } - if numExpectedIpv6Cidrs > numRemoteIpv6Cidrs { - log.Printf("[DEBUG] Local rule has more IPV6 CIDR blocks, continuing (%d/%d)", numExpectedIpv6Cidrs, numRemoteIpv6Cidrs) - continue - } - if numExpectedPrefixLists > numRemotePrefixLists { - log.Printf("[DEBUG] Local rule has more prefix lists, continuing (%d/%d)", numExpectedPrefixLists, numRemotePrefixLists) - continue - } - if numExpectedSGs > numRemoteSGs { - log.Printf("[DEBUG] Local rule has more Security Groups, continuing (%d/%d)", numExpectedSGs, numRemoteSGs) - continue - } - - // match CIDRs by converting both to sets, and using Set methods - var localCidrs []interface{} - if lcRaw != nil { - localCidrs = lcRaw.([]interface{}) - } - localCidrSet := schema.NewSet(schema.HashString, localCidrs) - - // remote cidrs are presented as a slice of strings, so we need to - // reformat them into a slice of interfaces to be used in creating the - // remote cidr set - var remoteCidrs []string - if rcRaw != nil { - remoteCidrs = rcRaw.([]string) - } - // convert remote cidrs to a set, for easy comparisons - var list []interface{} - for _, s := range remoteCidrs { - list = append(list, s) - } - remoteCidrSet := schema.NewSet(schema.HashString, list) - - // Build up a list of local cidrs that are found in the remote set - for _, s := range localCidrSet.List() { - if remoteCidrSet.Contains(s) { - matchingCidrs = append(matchingCidrs, s.(string)) - } - } - - //IPV6 CIDRs - var localIpv6Cidrs []interface{} - if liRaw != nil { - localIpv6Cidrs = liRaw.([]interface{}) - } - localIpv6CidrSet := schema.NewSet(schema.HashString, localIpv6Cidrs) - - var remoteIpv6Cidrs []string - if riRaw != nil { - remoteIpv6Cidrs = riRaw.([]string) - } - var listIpv6 []interface{} - for _, s := range remoteIpv6Cidrs { - listIpv6 = append(listIpv6, s) - } - remoteIpv6CidrSet := schema.NewSet(schema.HashString, listIpv6) - 
- for _, s := range localIpv6CidrSet.List() { - if remoteIpv6CidrSet.Contains(s) { - matchingIpv6Cidrs = append(matchingIpv6Cidrs, s.(string)) - } - } - - // match prefix lists by converting both to sets, and using Set methods - var localPrefixLists []interface{} - if lpRaw != nil { - localPrefixLists = lpRaw.([]interface{}) - } - localPrefixListsSet := schema.NewSet(schema.HashString, localPrefixLists) - - // remote prefix lists are presented as a slice of strings, so we need to - // reformat them into a slice of interfaces to be used in creating the - // remote prefix list set - var remotePrefixLists []string - if rpRaw != nil { - remotePrefixLists = rpRaw.([]string) - } - // convert remote prefix lists to a set, for easy comparison - list = nil - for _, s := range remotePrefixLists { - list = append(list, s) - } - remotePrefixListsSet := schema.NewSet(schema.HashString, list) - - // Build up a list of local prefix lists that are found in the remote set - for _, s := range localPrefixListsSet.List() { - if remotePrefixListsSet.Contains(s) { - matchingPrefixLists = append(matchingPrefixLists, s.(string)) - } - } - - // match SGs. Both local and remote are already sets - var localSGSet *schema.Set - if lsRaw == nil { - localSGSet = schema.NewSet(schema.HashString, nil) - } else { - localSGSet = lsRaw.(*schema.Set) - } - - var remoteSGSet *schema.Set - if rsRaw == nil { - remoteSGSet = schema.NewSet(schema.HashString, nil) - } else { - remoteSGSet = rsRaw.(*schema.Set) - } - - // Build up a list of local security groups that are found in the remote set - for _, s := range localSGSet.List() { - if remoteSGSet.Contains(s) { - matchingSGs = append(matchingSGs, s.(string)) - } - } - - // compare equalities for matches. 
- // If we found the number of cidrs and number of sgs, we declare a - // match, and then remove those elements from the remote rule, so that - // this remote rule can still be considered by other local rules - if numExpectedCidrs == len(matchingCidrs) { - if numExpectedIpv6Cidrs == len(matchingIpv6Cidrs) { - if numExpectedPrefixLists == len(matchingPrefixLists) { - if numExpectedSGs == len(matchingSGs) { - // confirm that self references match - var lSelf bool - var rSelf bool - if _, ok := l["self"]; ok { - lSelf = l["self"].(bool) - } - if _, ok := r["self"]; ok { - rSelf = r["self"].(bool) - } - if rSelf == lSelf { - delete(r, "self") - // pop local cidrs from remote - diffCidr := remoteCidrSet.Difference(localCidrSet) - var newCidr []string - for _, cRaw := range diffCidr.List() { - newCidr = append(newCidr, cRaw.(string)) - } - - // reassigning - if len(newCidr) > 0 { - r["cidr_blocks"] = newCidr - } else { - delete(r, "cidr_blocks") - } - - //// IPV6 - //// Comparison - diffIpv6Cidr := remoteIpv6CidrSet.Difference(localIpv6CidrSet) - var newIpv6Cidr []string - for _, cRaw := range diffIpv6Cidr.List() { - newIpv6Cidr = append(newIpv6Cidr, cRaw.(string)) - } - - // reassigning - if len(newIpv6Cidr) > 0 { - r["ipv6_cidr_blocks"] = newIpv6Cidr - } else { - delete(r, "ipv6_cidr_blocks") - } - - // pop local prefix lists from remote - diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet) - var newPrefixLists []string - for _, pRaw := range diffPrefixLists.List() { - newPrefixLists = append(newPrefixLists, pRaw.(string)) - } - - // reassigning - if len(newPrefixLists) > 0 { - r["prefix_list_ids"] = newPrefixLists - } else { - delete(r, "prefix_list_ids") - } - - // pop local sgs from remote - diffSGs := remoteSGSet.Difference(localSGSet) - if len(diffSGs.List()) > 0 { - r["security_groups"] = diffSGs - } else { - delete(r, "security_groups") - } - - saves = append(saves, l) - } - } - } - - } - } - } - } - } - // Here we catch any remote rules that 
have not been stripped of all self, - // cidrs, and security groups. We'll add remote rules here that have not been - // matched locally, and let the graph sort things out. This will happen when - // rules are added externally to Terraform - for _, r := range remote { - var lenCidr, lenIpv6Cidr, lenPrefixLists, lenSGs int - if rCidrs, ok := r["cidr_blocks"]; ok { - lenCidr = len(rCidrs.([]string)) - } - if rIpv6Cidrs, ok := r["ipv6_cidr_blocks"]; ok { - lenIpv6Cidr = len(rIpv6Cidrs.([]string)) - } - if rPrefixLists, ok := r["prefix_list_ids"]; ok { - lenPrefixLists = len(rPrefixLists.([]string)) - } - if rawSGs, ok := r["security_groups"]; ok { - lenSGs = len(rawSGs.(*schema.Set).List()) - } - - if _, ok := r["self"]; ok { - if r["self"].(bool) == true { - lenSGs++ - } - } - - if lenSGs+lenCidr+lenIpv6Cidr+lenPrefixLists > 0 { - log.Printf("[DEBUG] Found a remote Rule that wasn't empty: (%#v)", r) - saves = append(saves, r) - } - } - - return saves -} - -// Creates a unique hash for the type, ports, and protocol, used as a key in -// maps -func idHash(rType, protocol string, toPort, fromPort int64, self bool) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", rType)) - buf.WriteString(fmt.Sprintf("%d-", toPort)) - buf.WriteString(fmt.Sprintf("%d-", fromPort)) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(protocol))) - buf.WriteString(fmt.Sprintf("%t-", self)) - - return fmt.Sprintf("rule-%d", hashcode.String(buf.String())) -} - -// protocolStateFunc ensures we only store a string in any protocol field -func protocolStateFunc(v interface{}) string { - switch v.(type) { - case string: - p := protocolForValue(v.(string)) - return p - default: - log.Printf("[WARN] Non String value given for Protocol: %#v", v) - return "" - } -} - -// protocolForValue converts a valid Internet Protocol number into it's name -// representation. If a name is given, it validates that it's a proper protocol -// name. 
Names/numbers are as defined at -// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml -func protocolForValue(v string) string { - // special case -1 - protocol := strings.ToLower(v) - if protocol == "-1" || protocol == "all" { - return "-1" - } - // if it's a name like tcp, return that - if _, ok := sgProtocolIntegers()[protocol]; ok { - return protocol - } - // convert to int, look for that value - p, err := strconv.Atoi(protocol) - if err != nil { - // we were unable to convert to int, suggesting a string name, but it wasn't - // found above - log.Printf("[WARN] Unable to determine valid protocol: %s", err) - return protocol - } - - for k, v := range sgProtocolIntegers() { - if p == v { - // guard against protocolIntegers sometime in the future not having lower - // case ids in the map - return strings.ToLower(k) - } - } - - // fall through - log.Printf("[WARN] Unable to determine valid protocol: no matching protocols found") - return protocol -} - -// a map of protocol names and their codes, defined at -// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml, -// documented to be supported by AWS Security Groups -// http://docs.aws.amazon.com/fr_fr/AWSEC2/latest/APIReference/API_IpPermission.html -// Similar to protocolIntegers() used by Network ACLs, but explicitly only -// supports "tcp", "udp", "icmp", and "all" -func sgProtocolIntegers() map[string]int { - var protocolIntegers = make(map[string]int) - protocolIntegers = map[string]int{ - "udp": 17, - "tcp": 6, - "icmp": 1, - "all": -1, - } - return protocolIntegers -} - -// The AWS Lambda service creates ENIs behind the scenes and keeps these around for a while -// which would prevent SGs attached to such ENIs from being destroyed -func deleteLingeringLambdaENIs(conn *ec2.EC2, d *schema.ResourceData) error { - // Here we carefully find the offenders - params := &ec2.DescribeNetworkInterfacesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("group-id"), - 
Values: []*string{aws.String(d.Id())}, - }, - { - Name: aws.String("description"), - Values: []*string{aws.String("AWS Lambda VPC ENI: *")}, - }, - }, - } - networkInterfaceResp, err := conn.DescribeNetworkInterfaces(params) - if err != nil { - return err - } - - // Then we detach and finally delete those - v := networkInterfaceResp.NetworkInterfaces - for _, eni := range v { - if eni.Attachment != nil { - detachNetworkInterfaceParams := &ec2.DetachNetworkInterfaceInput{ - AttachmentId: eni.Attachment.AttachmentId, - } - _, detachNetworkInterfaceErr := conn.DetachNetworkInterface(detachNetworkInterfaceParams) - - if detachNetworkInterfaceErr != nil { - return detachNetworkInterfaceErr - } - - log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", *eni.NetworkInterfaceId) - stateConf := &resource.StateChangeConf{ - Pending: []string{"true"}, - Target: []string{"false"}, - Refresh: networkInterfaceAttachedRefreshFunc(conn, *eni.NetworkInterfaceId), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for ENI (%s) to become detached: %s", *eni.NetworkInterfaceId, err) - } - } - - deleteNetworkInterfaceParams := &ec2.DeleteNetworkInterfaceInput{ - NetworkInterfaceId: eni.NetworkInterfaceId, - } - _, deleteNetworkInterfaceErr := conn.DeleteNetworkInterface(deleteNetworkInterfaceParams) - - if deleteNetworkInterfaceErr != nil { - return deleteNetworkInterfaceErr - } - } - - return nil -} - -func networkInterfaceAttachedRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - - describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ - NetworkInterfaceIds: []*string{aws.String(id)}, - } - describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) - - if err != nil { - log.Printf("[ERROR] Could not find network interface %s. 
%s", id, err) - return nil, "", err - } - - eni := describeResp.NetworkInterfaces[0] - hasAttachment := strconv.FormatBool(eni.Attachment != nil) - log.Printf("[DEBUG] ENI %s has attachment state %s", id, hasAttachment) - return eni, hasAttachment, nil - } -} diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go deleted file mode 100644 index 1372bc83d..000000000 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ /dev/null @@ -1,674 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "sort" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSecurityGroupRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSecurityGroupRuleCreate, - Read: resourceAwsSecurityGroupRuleRead, - Delete: resourceAwsSecurityGroupRuleDelete, - - SchemaVersion: 2, - MigrateState: resourceAwsSecurityGroupRuleMigrateState, - - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Type of rule, ingress (inbound) or egress (outbound).", - ValidateFunc: validateSecurityRuleType, - }, - - "from_port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "to_port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: protocolStateFunc, - }, - - "cidr_blocks": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "ipv6_cidr_blocks": { - Type: schema.TypeList, - Optional: true, - 
ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateCIDRNetworkAddress, - }, - }, - - "prefix_list_ids": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "security_group_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "source_security_group_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ConflictsWith: []string{"cidr_blocks", "self"}, - }, - - "self": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - ConflictsWith: []string{"cidr_blocks"}, - }, - }, - } -} - -func resourceAwsSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - sg_id := d.Get("security_group_id").(string) - - awsMutexKV.Lock(sg_id) - defer awsMutexKV.Unlock(sg_id) - - sg, err := findResourceSecurityGroup(conn, sg_id) - if err != nil { - return err - } - - perm, err := expandIPPerm(d, sg) - if err != nil { - return err - } - - // Verify that either 'cidr_blocks', 'self', or 'source_security_group_id' is set - // If they are not set the AWS API will silently fail. This causes TF to hit a timeout - // at 5-minutes waiting for the security group rule to appear, when it was never actually - // created. 
- if err := validateAwsSecurityGroupRule(d); err != nil { - return err - } - - ruleType := d.Get("type").(string) - isVPC := sg.VpcId != nil && *sg.VpcId != "" - - var autherr error - switch ruleType { - case "ingress": - log.Printf("[DEBUG] Authorizing security group %s %s rule: %s", - sg_id, "Ingress", perm) - - req := &ec2.AuthorizeSecurityGroupIngressInput{ - GroupId: sg.GroupId, - IpPermissions: []*ec2.IpPermission{perm}, - } - - if !isVPC { - req.GroupId = nil - req.GroupName = sg.GroupName - } - - _, autherr = conn.AuthorizeSecurityGroupIngress(req) - - case "egress": - log.Printf("[DEBUG] Authorizing security group %s %s rule: %#v", - sg_id, "Egress", perm) - - req := &ec2.AuthorizeSecurityGroupEgressInput{ - GroupId: sg.GroupId, - IpPermissions: []*ec2.IpPermission{perm}, - } - - _, autherr = conn.AuthorizeSecurityGroupEgress(req) - - default: - return fmt.Errorf("Security Group Rule must be type 'ingress' or type 'egress'") - } - - if autherr != nil { - if awsErr, ok := autherr.(awserr.Error); ok { - if awsErr.Code() == "InvalidPermission.Duplicate" { - return fmt.Errorf(`[WARN] A duplicate Security Group rule was found on (%s). This may be -a side effect of a now-fixed Terraform issue causing two security groups with -identical attributes but different source_security_group_ids to overwrite each -other in the state. See https://github.com/hashicorp/terraform/pull/2376 for more -information and instructions for recovery. 
Error message: %s`, sg_id, awsErr.Message()) - } - } - - return fmt.Errorf( - "Error authorizing security group rule type %s: %s", - ruleType, autherr) - } - - id := ipPermissionIDHash(sg_id, ruleType, perm) - log.Printf("[DEBUG] Computed group rule ID %s", id) - - retErr := resource.Retry(5*time.Minute, func() *resource.RetryError { - sg, err := findResourceSecurityGroup(conn, sg_id) - - if err != nil { - log.Printf("[DEBUG] Error finding Security Group (%s) for Rule (%s): %s", sg_id, id, err) - return resource.NonRetryableError(err) - } - - var rules []*ec2.IpPermission - switch ruleType { - case "ingress": - rules = sg.IpPermissions - default: - rules = sg.IpPermissionsEgress - } - - rule := findRuleMatch(perm, rules, isVPC) - - if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", - ruleType, id, sg_id) - return resource.RetryableError(fmt.Errorf("No match found")) - } - - log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", id, rule) - return nil - }) - - if retErr != nil { - return fmt.Errorf("Error finding matching %s Security Group Rule (%s) for Group %s", - ruleType, id, sg_id) - } - - d.SetId(id) - return nil -} - -func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - sg_id := d.Get("security_group_id").(string) - sg, err := findResourceSecurityGroup(conn, sg_id) - if _, notFound := err.(securityGroupNotFound); notFound { - // The security group containing this rule no longer exists. 
- d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error finding security group (%s) for rule (%s): %s", sg_id, d.Id(), err) - } - - isVPC := sg.VpcId != nil && *sg.VpcId != "" - - var rule *ec2.IpPermission - var rules []*ec2.IpPermission - ruleType := d.Get("type").(string) - switch ruleType { - case "ingress": - rules = sg.IpPermissions - default: - rules = sg.IpPermissionsEgress - } - - p, err := expandIPPerm(d, sg) - if err != nil { - return err - } - - if len(rules) == 0 { - log.Printf("[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", - ruleType, *sg.GroupName, d.Id()) - d.SetId("") - return nil - } - - rule = findRuleMatch(p, rules, isVPC) - - if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", - ruleType, d.Id(), sg_id) - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), rule) - - d.Set("type", ruleType) - if err := setFromIPPerm(d, sg, p); err != nil { - return errwrap.Wrapf("Error setting IP Permission for Security Group Rule: {{err}}", err) - } - return nil -} - -func resourceAwsSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - sg_id := d.Get("security_group_id").(string) - - awsMutexKV.Lock(sg_id) - defer awsMutexKV.Unlock(sg_id) - - sg, err := findResourceSecurityGroup(conn, sg_id) - if err != nil { - return err - } - - perm, err := expandIPPerm(d, sg) - if err != nil { - return err - } - ruleType := d.Get("type").(string) - switch ruleType { - case "ingress": - log.Printf("[DEBUG] Revoking rule (%s) from security group %s:\n%s", - "ingress", sg_id, perm) - req := &ec2.RevokeSecurityGroupIngressInput{ - GroupId: sg.GroupId, - IpPermissions: []*ec2.IpPermission{perm}, - } - - _, err = conn.RevokeSecurityGroupIngress(req) - - if err != nil { - return fmt.Errorf( - "Error revoking security group %s rules: %s", - sg_id, err) 
- } - case "egress": - - log.Printf("[DEBUG] Revoking security group %#v %s rule: %#v", - sg_id, "egress", perm) - req := &ec2.RevokeSecurityGroupEgressInput{ - GroupId: sg.GroupId, - IpPermissions: []*ec2.IpPermission{perm}, - } - - _, err = conn.RevokeSecurityGroupEgress(req) - - if err != nil { - return fmt.Errorf( - "Error revoking security group %s rules: %s", - sg_id, err) - } - } - - d.SetId("") - - return nil -} - -func findResourceSecurityGroup(conn *ec2.EC2, id string) (*ec2.SecurityGroup, error) { - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err, ok := err.(awserr.Error); ok && err.Code() == "InvalidGroup.NotFound" { - return nil, securityGroupNotFound{id, nil} - } - if err != nil { - return nil, err - } - if resp == nil { - return nil, securityGroupNotFound{id, nil} - } - if len(resp.SecurityGroups) != 1 || resp.SecurityGroups[0] == nil { - return nil, securityGroupNotFound{id, resp.SecurityGroups} - } - - return resp.SecurityGroups[0], nil -} - -type securityGroupNotFound struct { - id string - securityGroups []*ec2.SecurityGroup -} - -func (err securityGroupNotFound) Error() string { - if err.securityGroups == nil { - return fmt.Sprintf("No security group with ID %q", err.id) - } - return fmt.Sprintf("Expected to find one security group with ID %q, got: %#v", - err.id, err.securityGroups) -} - -// ByGroupPair implements sort.Interface for []*ec2.UserIDGroupPairs based on -// GroupID or GroupName field (only one should be set). 
-type ByGroupPair []*ec2.UserIdGroupPair - -func (b ByGroupPair) Len() int { return len(b) } -func (b ByGroupPair) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByGroupPair) Less(i, j int) bool { - if b[i].GroupId != nil && b[j].GroupId != nil { - return *b[i].GroupId < *b[j].GroupId - } - if b[i].GroupName != nil && b[j].GroupName != nil { - return *b[i].GroupName < *b[j].GroupName - } - - panic("mismatched security group rules, may be a terraform bug") -} - -func findRuleMatch(p *ec2.IpPermission, rules []*ec2.IpPermission, isVPC bool) *ec2.IpPermission { - var rule *ec2.IpPermission - for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } - - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } - - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } - - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.Ipv6Ranges) - for _, ipv6 := range p.Ipv6Ranges { - for _, ipv6ip := range r.Ipv6Ranges { - if *ipv6.CidrIpv6 == *ipv6ip.CidrIpv6 { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.PrefixListIds) - for _, pl := range p.PrefixListIds { - for _, rpl := range r.PrefixListIds { - if *pl.PrefixListId == *rpl.PrefixListId { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if isVPC { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } else { - if *ip.GroupName == *rip.GroupName { - remaining-- - } - } - } - } - - if remaining > 0 { - continue - } - - rule = r - } - return rule -} - -func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", sg_id)) - if ip.FromPort != nil 
&& *ip.FromPort > 0 { - buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) - } - if ip.ToPort != nil && *ip.ToPort > 0 { - buf.WriteString(fmt.Sprintf("%d-", *ip.ToPort)) - } - buf.WriteString(fmt.Sprintf("%s-", *ip.IpProtocol)) - buf.WriteString(fmt.Sprintf("%s-", ruleType)) - - // We need to make sure to sort the strings below so that we always - // generate the same hash code no matter what is in the set. - if len(ip.IpRanges) > 0 { - s := make([]string, len(ip.IpRanges)) - for i, r := range ip.IpRanges { - s[i] = *r.CidrIp - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - if len(ip.Ipv6Ranges) > 0 { - s := make([]string, len(ip.Ipv6Ranges)) - for i, r := range ip.Ipv6Ranges { - s[i] = *r.CidrIpv6 - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - if len(ip.PrefixListIds) > 0 { - s := make([]string, len(ip.PrefixListIds)) - for i, pl := range ip.PrefixListIds { - s[i] = *pl.PrefixListId - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - if len(ip.UserIdGroupPairs) > 0 { - sort.Sort(ByGroupPair(ip.UserIdGroupPairs)) - for _, pair := range ip.UserIdGroupPairs { - if pair.GroupId != nil { - buf.WriteString(fmt.Sprintf("%s-", *pair.GroupId)) - } else { - buf.WriteString("-") - } - if pair.GroupName != nil { - buf.WriteString(fmt.Sprintf("%s-", *pair.GroupName)) - } else { - buf.WriteString("-") - } - } - } - - return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String())) -} - -func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermission, error) { - var perm ec2.IpPermission - - perm.FromPort = aws.Int64(int64(d.Get("from_port").(int))) - perm.ToPort = aws.Int64(int64(d.Get("to_port").(int))) - protocol := protocolForValue(d.Get("protocol").(string)) - perm.IpProtocol = aws.String(protocol) - - // build a group map that behaves like a set - groups := make(map[string]bool) - if raw, ok := 
d.GetOk("source_security_group_id"); ok { - groups[raw.(string)] = true - } - - if v, ok := d.GetOk("self"); ok && v.(bool) { - if sg.VpcId != nil && *sg.VpcId != "" { - groups[*sg.GroupId] = true - } else { - groups[*sg.GroupName] = true - } - } - - if len(groups) > 0 { - perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) - // build string list of group name/ids - var gl []string - for k, _ := range groups { - gl = append(gl, k) - } - - for i, name := range gl { - ownerId, id := "", name - if items := strings.Split(id, "/"); len(items) > 1 { - ownerId, id = items[0], items[1] - } - - perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ - GroupId: aws.String(id), - UserId: aws.String(ownerId), - } - - if sg.VpcId == nil || *sg.VpcId == "" { - perm.UserIdGroupPairs[i].GroupId = nil - perm.UserIdGroupPairs[i].GroupName = aws.String(id) - perm.UserIdGroupPairs[i].UserId = nil - } - } - } - - if raw, ok := d.GetOk("cidr_blocks"); ok { - list := raw.([]interface{}) - perm.IpRanges = make([]*ec2.IpRange, len(list)) - for i, v := range list { - cidrIP, ok := v.(string) - if !ok { - return nil, fmt.Errorf("empty element found in cidr_blocks - consider using the compact function") - } - perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(cidrIP)} - } - } - - if raw, ok := d.GetOk("ipv6_cidr_blocks"); ok { - list := raw.([]interface{}) - perm.Ipv6Ranges = make([]*ec2.Ipv6Range, len(list)) - for i, v := range list { - cidrIP, ok := v.(string) - if !ok { - return nil, fmt.Errorf("empty element found in ipv6_cidr_blocks - consider using the compact function") - } - perm.Ipv6Ranges[i] = &ec2.Ipv6Range{CidrIpv6: aws.String(cidrIP)} - } - } - - if raw, ok := d.GetOk("prefix_list_ids"); ok { - list := raw.([]interface{}) - perm.PrefixListIds = make([]*ec2.PrefixListId, len(list)) - for i, v := range list { - prefixListID, ok := v.(string) - if !ok { - return nil, fmt.Errorf("empty element found in prefix_list_ids - consider using the compact function") - } - 
perm.PrefixListIds[i] = &ec2.PrefixListId{PrefixListId: aws.String(prefixListID)} - } - } - - return &perm, nil -} - -func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPermission) error { - isVPC := sg.VpcId != nil && *sg.VpcId != "" - - d.Set("from_port", rule.FromPort) - d.Set("to_port", rule.ToPort) - d.Set("protocol", rule.IpProtocol) - - var cb []string - for _, c := range rule.IpRanges { - cb = append(cb, *c.CidrIp) - } - - d.Set("cidr_blocks", cb) - - var ipv6 []string - for _, ip := range rule.Ipv6Ranges { - ipv6 = append(ipv6, *ip.CidrIpv6) - } - d.Set("ipv6_cidr_blocks", ipv6) - - var pl []string - for _, p := range rule.PrefixListIds { - pl = append(pl, *p.PrefixListId) - } - d.Set("prefix_list_ids", pl) - - if len(rule.UserIdGroupPairs) > 0 { - s := rule.UserIdGroupPairs[0] - - if isVPC { - d.Set("source_security_group_id", *s.GroupId) - } else { - d.Set("source_security_group_id", *s.GroupName) - } - } - - return nil -} - -// Validates that either 'cidr_blocks', 'ipv6_cidr_blocks', 'self', or 'source_security_group_id' is set -func validateAwsSecurityGroupRule(d *schema.ResourceData) error { - _, blocksOk := d.GetOk("cidr_blocks") - _, ipv6Ok := d.GetOk("ipv6_cidr_blocks") - _, sourceOk := d.GetOk("source_security_group_id") - _, selfOk := d.GetOk("self") - _, prefixOk := d.GetOk("prefix_list_ids") - if !blocksOk && !sourceOk && !selfOk && !prefixOk && !ipv6Ok { - return fmt.Errorf( - "One of ['cidr_blocks', 'ipv6_cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule") - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go deleted file mode 100644 index 12788054e..000000000 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strconv" - "strings" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsSecurityGroupRuleMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") - return migrateSGRuleStateV0toV1(is) - case 1: - log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") - // migrating to version 2 of the schema is the same as 0->1, since the - // method signature has changed now and will use the security group id in - // the hash - return migrateSGRuleStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - perm, err := migrateExpandIPPerm(is.Attributes) - - if err != nil { - return nil, fmt.Errorf("[WARN] Error making new IP Permission in Security Group migration") - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) - is.Attributes["id"] = newID - is.ID = newID - log.Printf("[DEBUG] Attributes after migration: %#v, new id: %s", is.Attributes, newID) - return is, nil -} - -func migrateExpandIPPerm(attrs map[string]string) (*ec2.IpPermission, error) { - var perm ec2.IpPermission - tp, err := strconv.Atoi(attrs["to_port"]) - if err != nil { - return nil, fmt.Errorf("Error converting to_port in Security Group migration") - } - - fp, err := strconv.Atoi(attrs["from_port"]) - if err != nil { - return nil, fmt.Errorf("Error converting from_port in Security Group migration") - } - - perm.ToPort = aws.Int64(int64(tp)) - perm.FromPort = aws.Int64(int64(fp)) - perm.IpProtocol = aws.String(attrs["protocol"]) 
- - groups := make(map[string]bool) - if attrs["self"] == "true" { - groups[attrs["security_group_id"]] = true - } - - if attrs["source_security_group_id"] != "" { - groups[attrs["source_security_group_id"]] = true - } - - if len(groups) > 0 { - perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) - // build string list of group name/ids - var gl []string - for k, _ := range groups { - gl = append(gl, k) - } - - for i, name := range gl { - perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ - GroupId: aws.String(name), - } - } - } - - var cb []string - for k, v := range attrs { - if k != "cidr_blocks.#" && strings.HasPrefix(k, "cidr_blocks") { - cb = append(cb, v) - } - } - if len(cb) > 0 { - perm.IpRanges = make([]*ec2.IpRange, len(cb)) - for i, v := range cb { - perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(v)} - } - } - - return &perm, nil -} diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go deleted file mode 100644 index 496834b8c..000000000 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta interface{} - }{ - "v0_1": { - StateVersion: 0, - ID: "sg-4235098228", - Attributes: map[string]string{ - "self": "false", - "to_port": "0", - "security_group_id": "sg-13877277", - "cidr_blocks.#": "0", - "type": "ingress", - "protocol": "-1", - "from_port": "0", - "source_security_group_id": "sg-11877275", - }, - Expected: "sgrule-2889201120", - }, - "v0_2": { - StateVersion: 0, - ID: "sg-1021609891", - Attributes: map[string]string{ - "security_group_id": "sg-0981746d", - "from_port": "0", - "to_port": "0", - "type": "ingress", - 
"self": "false", - "protocol": "-1", - "cidr_blocks.0": "172.16.1.0/24", - "cidr_blocks.1": "172.16.2.0/24", - "cidr_blocks.2": "172.16.3.0/24", - "cidr_blocks.3": "172.16.4.0/24", - "cidr_blocks.#": "4"}, - Expected: "sgrule-1826358977", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsSecurityGroupRuleMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.ID != tc.Expected { - t.Fatalf("bad sg rule id: %s\n\n expected: %s", is.ID, tc.Expected) - } - } -} diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go deleted file mode 100644 index 299276304..000000000 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ /dev/null @@ -1,1219 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestIpPermissionIDHash(t *testing.T) { - simple := &ec2.IpPermission{ - IpProtocol: aws.String("tcp"), - FromPort: aws.Int64(int64(80)), - ToPort: aws.Int64(int64(8000)), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String("10.0.0.0/8"), - }, - }, - } - - egress := &ec2.IpPermission{ - IpProtocol: aws.String("tcp"), - FromPort: aws.Int64(int64(80)), - ToPort: aws.Int64(int64(8000)), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String("10.0.0.0/8"), - }, - }, - } - - egress_all := &ec2.IpPermission{ - IpProtocol: aws.String("-1"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String("10.0.0.0/8"), - }, - }, - } - - vpc_security_group_source := &ec2.IpPermission{ - IpProtocol: aws.String("tcp"), - FromPort: 
aws.Int64(int64(80)), - ToPort: aws.Int64(int64(8000)), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - { - UserId: aws.String("987654321"), - GroupId: aws.String("sg-12345678"), - }, - { - UserId: aws.String("123456789"), - GroupId: aws.String("sg-987654321"), - }, - { - UserId: aws.String("123456789"), - GroupId: aws.String("sg-12345678"), - }, - }, - } - - security_group_source := &ec2.IpPermission{ - IpProtocol: aws.String("tcp"), - FromPort: aws.Int64(int64(80)), - ToPort: aws.Int64(int64(8000)), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - { - UserId: aws.String("987654321"), - GroupName: aws.String("my-security-group"), - }, - { - UserId: aws.String("123456789"), - GroupName: aws.String("my-security-group"), - }, - { - UserId: aws.String("123456789"), - GroupName: aws.String("my-other-security-group"), - }, - }, - } - - // hardcoded hashes, to detect future change - cases := []struct { - Input *ec2.IpPermission - Type string - Output string - }{ - {simple, "ingress", "sgrule-3403497314"}, - {egress, "egress", "sgrule-1173186295"}, - {egress_all, "egress", "sgrule-766323498"}, - {vpc_security_group_source, "egress", "sgrule-351225364"}, - {security_group_source, "egress", "sgrule-2198807188"}, - } - - for _, tc := range cases { - actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) - if actual != tc.Output { - t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual) - } - } -} - -func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) { - var group ec2.SecurityGroup - rInt := acctest.RandInt() - - testRuleCount := func(*terraform.State) error { - if len(group.IpPermissions) != 1 { - return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d", - 1, len(group.IpPermissions)) - } - - rule := group.IpPermissions[0] - if *rule.FromPort != int64(80) { - return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d", - 80, int(*rule.FromPort)) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() 
{ testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleIngressConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), - resource.TestCheckResourceAttr( - "aws_security_group_rule.ingress_1", "from_port", "80"), - testRuleCount, - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Ingress_Protocol(t *testing.T) { - var group ec2.SecurityGroup - - testRuleCount := func(*terraform.State) error { - if len(group.IpPermissions) != 1 { - return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d", - 1, len(group.IpPermissions)) - } - - rule := group.IpPermissions[0] - if *rule.FromPort != int64(80) { - return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d", - 80, int(*rule.FromPort)) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleIngress_protocolConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), - resource.TestCheckResourceAttr( - "aws_security_group_rule.ingress_1", "from_port", "80"), - testRuleCount, - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Ingress_Ipv6(t *testing.T) { - var group ec2.SecurityGroup - - testRuleCount := func(*terraform.State) error { - if len(group.IpPermissions) != 1 { - return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d", - 1, len(group.IpPermissions)) - } - - rule := 
group.IpPermissions[0] - if *rule.FromPort != int64(80) { - return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d", - 80, int(*rule.FromPort)) - } - - ipv6Address := rule.Ipv6Ranges[0] - if *ipv6Address.CidrIpv6 != "::/0" { - return fmt.Errorf("Wrong Security Group IPv6 address, expected %s, got %s", - "::/0", *ipv6Address.CidrIpv6) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleIngress_ipv6Config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testRuleCount, - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) { - var group ec2.SecurityGroup - rInt := acctest.RandInt() - - testRuleCount := func(*terraform.State) error { - if len(group.IpPermissions) != 1 { - return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d", - 1, len(group.IpPermissions)) - } - - rule := group.IpPermissions[0] - if *rule.FromPort != int64(80) { - return fmt.Errorf("Wrong Security Group port setting, expected %d, got %d", - 80, int(*rule.FromPort)) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleIngressClassicConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), - resource.TestCheckResourceAttr( - "aws_security_group_rule.ingress_1", "from_port", "80"), - testRuleCount, - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_MultiIngress(t 
*testing.T) { - var group ec2.SecurityGroup - - testMultiRuleCount := func(*terraform.State) error { - if len(group.IpPermissions) != 2 { - return fmt.Errorf("Wrong Security Group rule count, expected %d, got %d", - 2, len(group.IpPermissions)) - } - - var rule *ec2.IpPermission - for _, r := range group.IpPermissions { - if *r.FromPort == int64(80) { - rule = r - } - } - - if *rule.ToPort != int64(8000) { - return fmt.Errorf("Wrong Security Group port 2 setting, expected %d, got %d", - 8000, int(*rule.ToPort)) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleConfigMultiIngress, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testMultiRuleCount, - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Egress(t *testing.T) { - var group ec2.SecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleEgressConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleConfigSelfReference, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_ExpectInvalidTypeError(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleExpectInvalidType(rInt), - ExpectError: regexp.MustCompile(`\\"type\\" contains an invalid Security Group Rule type \\"foobar\\"`), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_ExpectInvalidCIDR(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleInvalidIPv4CIDR(rInt), - ExpectError: regexp.MustCompile("invalid CIDR address: 1.2.3.4/33"), - }, - { - Config: testAccAWSSecurityGroupRuleInvalidIPv6CIDR(rInt), - ExpectError: regexp.MustCompile("invalid CIDR address: ::/244"), - }, - }, - }) -} - -// testing partial match implementation -func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) { - var group ec2.SecurityGroup - rInt := acctest.RandInt() - - p := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - {CidrIp: aws.String("10.0.2.0/24")}, - {CidrIp: aws.String("10.0.3.0/24")}, - {CidrIp: aws.String("10.0.4.0/24")}, - }, - } - - o := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - {CidrIp: aws.String("10.0.5.0/24")}, - }, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAWSSecurityGroupRulePartialMatching(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) { - var group ec2.SecurityGroup - var nat ec2.SecurityGroup - var p ec2.IpPermission - rInt := acctest.RandInt() - - // This function creates the expected IPPermission with the group id from an - // external security group, needed because Security Group IDs are generated on - // AWS side and can't be known ahead of time. - setupSG := func(*terraform.State) error { - if nat.GroupId == nil { - return fmt.Errorf("Error: nat group has nil GroupID") - } - - p = ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - {GroupId: nat.GroupId}, - }, - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRulePartialMatching_Source(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), - setupSG, - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Issue5310(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleIssue5310, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.issue_5310", &group), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_Race(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleRace, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.race", &group), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_SelfSource(t *testing.T) { - var group ec2.SecurityGroup - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRuleSelfInSource(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroupRule_PrefixListEgress(t *testing.T) { - var group ec2.SecurityGroup - var endpoint ec2.VpcEndpoint - var p ec2.IpPermission - - // This function creates the expected IPPermission with the prefix list ID from - // the VPC Endpoint created in the test - setupSG := func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - prefixListInput := &ec2.DescribePrefixListsInput{ - Filters: []*ec2.Filter{ - {Name: aws.String("prefix-list-name"), Values: []*string{endpoint.ServiceName}}, - }, - } - - log.Printf("[DEBUG] Reading VPC Endpoint prefix list: %s", 
prefixListInput) - prefixListsOutput, err := conn.DescribePrefixLists(prefixListInput) - - if err != nil { - _, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error reading VPC Endpoint prefix list: %s", err.Error()) - } - } - - if len(prefixListsOutput.PrefixLists) != 1 { - return fmt.Errorf("There are multiple prefix lists associated with the service name '%s'. Unexpected", prefixListsOutput) - } - - p = ec2.IpPermission{ - IpProtocol: aws.String("-1"), - PrefixListIds: []*ec2.PrefixListId{ - {PrefixListId: prefixListsOutput.PrefixLists[0].PrefixListId}, - }, - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupRulePrefixListEgressConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.egress", &group), - // lookup info on the VPC Endpoint created, to populate the expected - // IP Perm - testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3-us-west-2", &endpoint), - setupSG, - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, &p, "egress"), - ), - }, - }, - }) -} - -func testAccCheckAWSSecurityGroupRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_security_group" { - continue - } - - // Retrieve our group - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err == nil { - if len(resp.SecurityGroups) > 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - return fmt.Errorf("Security Group (%s) still exists.", rs.Primary.ID) - } - - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - // Confirm error code is what we want - 
if ec2err.Code() != "InvalidGroup.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - return err - } - - if len(resp.SecurityGroups) > 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - *group = *resp.SecurityGroups[0] - return nil - } - - return fmt.Errorf("Security Group not found") - } -} - -func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Security Group Rule Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group Rule is set") - } - - if p == nil { - p = &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}}, - } - } - - var matchingRule *ec2.IpPermission - var rules []*ec2.IpPermission - if ruleType == "ingress" { - rules = group.IpPermissions - } else { - rules = group.IpPermissionsEgress - } - - if len(rules) == 0 { - return fmt.Errorf("No IPPerms") - } - - for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } - - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } - - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } - - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - 
for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.PrefixListIds) - for _, pip := range p.PrefixListIds { - for _, rpip := range r.PrefixListIds { - if *pip.PrefixListId == *rpip.PrefixListId { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - matchingRule = r - } - - if matchingRule != nil { - log.Printf("[DEBUG] Matching rule found : %s", matchingRule) - return nil - } - - return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) - } -} - -func testAccAWSSecurityGroupRuleIngressConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_security_group" "web" { - name = "terraform_test_%d" - description = "Used in the terraform acceptance tests" - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_security_group_rule" "ingress_1" { - type = "ingress" - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - - security_group_id = "${aws_security_group.web.id}" - }`, rInt) -} - -const testAccAWSSecurityGroupRuleIngress_ipv6Config = ` -resource "aws_vpc" "tftest" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "tf-testing" - } -} - -resource "aws_security_group" "web" { - vpc_id = "${aws_vpc.tftest.id}" - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_security_group_rule" "ingress_1" { - type = "ingress" - protocol = "6" - from_port = 80 - to_port = 8000 - ipv6_cidr_blocks = ["::/0"] - - security_group_id = "${aws_security_group.web.id}" -} -` - -const testAccAWSSecurityGroupRuleIngress_protocolConfig = ` -resource "aws_vpc" "tftest" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "tf-testing" - } -} - -resource "aws_security_group" "web" { - vpc_id = 
"${aws_vpc.tftest.id}" - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_security_group_rule" "ingress_1" { - type = "ingress" - protocol = "6" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - - security_group_id = "${aws_security_group.web.id}" -} - -` - -const testAccAWSSecurityGroupRuleIssue5310 = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "issue_5310" { - name = "terraform-test-issue_5310" - description = "SG for test of issue 5310" -} - -resource "aws_security_group_rule" "issue_5310" { - type = "ingress" - from_port = 0 - to_port = 65535 - protocol = "tcp" - security_group_id = "${aws_security_group.issue_5310.id}" - self = true -} -` - -func testAccAWSSecurityGroupRuleIngressClassicConfig(rInt int) string { - return fmt.Sprintf(` - provider "aws" { - region = "us-east-1" - } - - resource "aws_security_group" "web" { - name = "terraform_test_%d" - description = "Used in the terraform acceptance tests" - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_security_group_rule" "ingress_1" { - type = "ingress" - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - - security_group_id = "${aws_security_group.web.id}" - }`, rInt) -} - -func testAccAWSSecurityGroupRuleEgressConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_security_group" "web" { - name = "terraform_test_%d" - description = "Used in the terraform acceptance tests" - - tags { - Name = "tf-acc-test" - } - } - - resource "aws_security_group_rule" "egress_1" { - type = "egress" - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - - security_group_id = "${aws_security_group.web.id}" - }`, rInt) -} - -const testAccAWSSecurityGroupRuleConfigMultiIngress = ` -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example_2" - description = "Used in the terraform acceptance tests" -} - -resource "aws_security_group" "worker" { - name = 
"terraform_acceptance_test_example_worker" - description = "Used in the terraform acceptance tests" -} - - -resource "aws_security_group_rule" "ingress_1" { - type = "ingress" - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.0.0.0/8"] - - security_group_id = "${aws_security_group.web.id}" -} - -resource "aws_security_group_rule" "ingress_2" { - type = "ingress" - protocol = "tcp" - from_port = 80 - to_port = 8000 - self = true - - security_group_id = "${aws_security_group.web.id}" -} -` - -// check for GH-1985 regression -const testAccAWSSecurityGroupRuleConfigSelfReference = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - tags { - Name = "sg-self-test" - } -} - -resource "aws_security_group" "web" { - name = "main" - vpc_id = "${aws_vpc.main.id}" - tags { - Name = "sg-self-test" - } -} - -resource "aws_security_group_rule" "self" { - type = "ingress" - protocol = "-1" - from_port = 0 - to_port = 0 - self = true - security_group_id = "${aws_security_group.web.id}" -} -` - -func testAccAWSSecurityGroupRulePartialMatching(rInt int) string { - return fmt.Sprintf(` - resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf-sg-rule-bug" - } - } - - resource "aws_security_group" "web" { - name = "tf-other-%d" - vpc_id = "${aws_vpc.default.id}" - tags { - Name = "tf-other-sg" - } - } - - resource "aws_security_group" "nat" { - name = "tf-nat-%d" - vpc_id = "${aws_vpc.default.id}" - tags { - Name = "tf-nat-sg" - } - } - - resource "aws_security_group_rule" "ingress" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"] - - security_group_id = "${aws_security_group.web.id}" - } - - resource "aws_security_group_rule" "other" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["10.0.5.0/24"] - - security_group_id = "${aws_security_group.web.id}" - } - - 
// same a above, but different group, to guard against bad hashing - resource "aws_security_group_rule" "nat_ingress" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"] - - security_group_id = "${aws_security_group.nat.id}" - }`, rInt, rInt) -} - -func testAccAWSSecurityGroupRulePartialMatching_Source(rInt int) string { - return fmt.Sprintf(` - resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf-sg-rule-bug" - } - } - - resource "aws_security_group" "web" { - name = "tf-other-%d" - vpc_id = "${aws_vpc.default.id}" - tags { - Name = "tf-other-sg" - } - } - - resource "aws_security_group" "nat" { - name = "tf-nat-%d" - vpc_id = "${aws_vpc.default.id}" - tags { - Name = "tf-nat-sg" - } - } - - resource "aws_security_group_rule" "source_ingress" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - - source_security_group_id = "${aws_security_group.nat.id}" - security_group_id = "${aws_security_group.web.id}" - } - - resource "aws_security_group_rule" "other_ingress" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"] - - security_group_id = "${aws_security_group.web.id}" - }`, rInt, rInt) -} - -var testAccAWSSecurityGroupRuleRace = func() string { - var b bytes.Buffer - iterations := 50 - b.WriteString(fmt.Sprintf(` - resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - tags { Name = "tf-sg-rule-race" } - } - - resource "aws_security_group" "race" { - name = "tf-sg-rule-race-group-%d" - vpc_id = "${aws_vpc.default.id}" - } - `, acctest.RandInt())) - for i := 1; i < iterations; i++ { - b.WriteString(fmt.Sprintf(` - resource "aws_security_group_rule" "ingress%d" { - security_group_id = "${aws_security_group.race.id}" - type = "ingress" - from_port = %d - to_port = %d - protocol = "tcp" - cidr_blocks = ["10.0.0.%d/32"] - } - - resource 
"aws_security_group_rule" "egress%d" { - security_group_id = "${aws_security_group.race.id}" - type = "egress" - from_port = %d - to_port = %d - protocol = "tcp" - cidr_blocks = ["10.0.0.%d/32"] - } - `, i, i, i, i, i, i, i, i)) - } - return b.String() -}() - -const testAccAWSSecurityGroupRulePrefixListEgressConfig = ` - -resource "aws_vpc" "tf_sg_prefix_list_egress_test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf_sg_prefix_list_egress_test" - } -} - -resource "aws_route_table" "default" { - vpc_id = "${aws_vpc.tf_sg_prefix_list_egress_test.id}" -} - -resource "aws_vpc_endpoint" "s3-us-west-2" { - vpc_id = "${aws_vpc.tf_sg_prefix_list_egress_test.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = ["${aws_route_table.default.id}"] - policy = < 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - return fmt.Errorf("Security Group (%s) still exists.", rs.Primary.ID) - } - - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - // Confirm error code is what we want - if ec2err.Code() != "InvalidGroup.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSSecurityGroupGeneratedNamePrefix( - resource, prefix string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r, ok := s.RootModule().Resources[resource] - if !ok { - return fmt.Errorf("Resource not found") - } - name, ok := r.Primary.Attributes["name"] - if !ok { - return fmt.Errorf("Name attr not found: %#v", r.Primary.Attributes) - } - if !strings.HasPrefix(name, prefix) { - return fmt.Errorf("Name: %q, does not have prefix: %q", name, prefix) - } - return nil - } -} - -func testAccCheckAWSSecurityGroupExists(n string, group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - - conn := 
testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - return err - } - - if len(resp.SecurityGroups) > 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - *group = *resp.SecurityGroups[0] - return nil - } - - return fmt.Errorf("Security Group not found") - } -} - -func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - p := &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}}, - } - - if *group.GroupName != "terraform_acceptance_test_example" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - - if *group.Description != "Used in the terraform acceptance tests" { - return fmt.Errorf("Bad description: %s", *group.Description) - } - - if len(group.IpPermissions) == 0 { - return fmt.Errorf("No IPPerms") - } - - // Compare our ingress - if !reflect.DeepEqual(group.IpPermissions[0], p) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.IpPermissions[0], - p) - } - - return nil - } -} - -func testAccCheckAWSSecurityGroupAttributesNegOneProtocol(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - p := &ec2.IpPermission{ - IpProtocol: aws.String("-1"), - IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}}, - } - - if *group.GroupName != "terraform_acceptance_test_example" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - - if *group.Description != "Used in the terraform acceptance tests" { - return fmt.Errorf("Bad description: %s", *group.Description) - } - - if len(group.IpPermissions) == 0 { - return fmt.Errorf("No IPPerms") - } - - // Compare our ingress - if !reflect.DeepEqual(group.IpPermissions[0], p) { - return fmt.Errorf( 
- "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.IpPermissions[0], - p) - } - - return nil - } -} - -func TestAccAWSSecurityGroup_tags(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), - testAccCheckTags(&group.Tags, "foo", "bar"), - ), - }, - - { - Config: testAccAWSSecurityGroupConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), - testAccCheckTags(&group.Tags, "foo", ""), - testAccCheckTags(&group.Tags, "bar", "baz"), - testAccCheckTags(&group.Tags, "env", "Production"), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroup_CIDRandGroups(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupCombindCIDRandGroups, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.mixed", &group), - // testAccCheckAWSSecurityGroupAttributes(&group), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroup_ingressWithCidrAndSGs(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group), - 
testAccCheckAWSSecurityGroupSGandCidrAttributes(&group), - resource.TestCheckResourceAttr( - "aws_security_group.web", "name", "terraform_acceptance_test_example"), - resource.TestCheckResourceAttr( - "aws_security_group.web", "description", "Used in the terraform acceptance tests"), - resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.#", "2"), - ), - }, - }, - }) -} - -// This test requires an EC2 Classic region -func TestAccAWSSecurityGroup_ingressWithCidrAndSGs_classic(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs_classic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupSGandCidrAttributes(&group), - resource.TestCheckResourceAttr( - "aws_security_group.web", "name", "terraform_acceptance_test_example"), - resource.TestCheckResourceAttr( - "aws_security_group.web", "description", "Used in the terraform acceptance tests"), - resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSSecurityGroup_egressWithPrefixList(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfigPrefixListEgress, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.egress", &group), - testAccCheckAWSSecurityGroupPrefixListAttributes(&group), - resource.TestCheckResourceAttr( - "aws_security_group.egress", "egress.#", "1"), - ), - }, - }, - }) -} - -func 
TestAccAWSSecurityGroup_ipv4andipv6Egress(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfigIpv4andIpv6Egress, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.egress", &group), - resource.TestCheckResourceAttr( - "aws_security_group.egress", "egress.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckAWSSecurityGroupSGandCidrAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *group.GroupName != "terraform_acceptance_test_example" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - - if *group.Description != "Used in the terraform acceptance tests" { - return fmt.Errorf("Bad description: %s", *group.Description) - } - - if len(group.IpPermissions) == 0 { - return fmt.Errorf("No IPPerms") - } - - if len(group.IpPermissions) != 2 { - return fmt.Errorf("Expected 2 ingress rules, got %d", len(group.IpPermissions)) - } - - for _, p := range group.IpPermissions { - if *p.FromPort == int64(22) { - if len(p.IpRanges) != 1 || p.UserIdGroupPairs != nil { - return fmt.Errorf("Found ip perm of 22, but not the right ipranges / pairs: %s", p) - } - continue - } else if *p.FromPort == int64(80) { - if len(p.IpRanges) != 1 || len(p.UserIdGroupPairs) != 1 { - return fmt.Errorf("Found ip perm of 80, but not the right ipranges / pairs: %s", p) - } - continue - } - return fmt.Errorf("Found a rouge rule") - } - - return nil - } -} - -func testAccCheckAWSSecurityGroupPrefixListAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *group.GroupName != "terraform_acceptance_test_prefix_list_egress" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - if *group.Description != "Used 
in the terraform acceptance tests" { - return fmt.Errorf("Bad description: %s", *group.Description) - } - if len(group.IpPermissionsEgress) == 0 { - return fmt.Errorf("No egress IPPerms") - } - if len(group.IpPermissionsEgress) != 1 { - return fmt.Errorf("Expected 1 egress rule, got %d", len(group.IpPermissions)) - } - - p := group.IpPermissionsEgress[0] - - if len(p.PrefixListIds) != 1 { - return fmt.Errorf("Expected 1 prefix list, got %d", len(p.PrefixListIds)) - } - - return nil - } -} - -func testAccCheckAWSSecurityGroupAttributesChanged(group *ec2.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - p := []*ec2.IpPermission{ - { - FromPort: aws.Int64(80), - ToPort: aws.Int64(9000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{{CidrIp: aws.String("10.0.0.0/8")}}, - }, - { - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String("0.0.0.0/0"), - }, - { - CidrIp: aws.String("10.0.0.0/8"), - }, - }, - }, - } - - if *group.GroupName != "terraform_acceptance_test_example" { - return fmt.Errorf("Bad name: %s", *group.GroupName) - } - - if *group.Description != "Used in the terraform acceptance tests" { - return fmt.Errorf("Bad description: %s", *group.Description) - } - - // Compare our ingress - if len(group.IpPermissions) != 2 { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.IpPermissions, - p) - } - - if *group.IpPermissions[0].ToPort == 8000 { - group.IpPermissions[1], group.IpPermissions[0] = - group.IpPermissions[0], group.IpPermissions[1] - } - - if !reflect.DeepEqual(group.IpPermissions, p) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - group.IpPermissions, - p) - } - - return nil - } -} - -func testAccCheckAWSSecurityGroupExistsWithoutDefault(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not 
found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - req := &ec2.DescribeSecurityGroupsInput{ - GroupIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeSecurityGroups(req) - if err != nil { - return err - } - - if len(resp.SecurityGroups) > 0 && *resp.SecurityGroups[0].GroupId == rs.Primary.ID { - group := *resp.SecurityGroups[0] - - if len(group.IpPermissionsEgress) != 1 { - return fmt.Errorf("Security Group should have only 1 egress rule, got %d", len(group.IpPermissionsEgress)) - } - } - - return nil - } -} - -func TestAccAWSSecurityGroup_failWithDiffMismatch(t *testing.T) { - var group ec2.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_failWithDiffMismatch, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists("aws_security_group.nat", &group), - ), - }, - }, - }) -} - -const testAccAWSSecurityGroupConfigForTagsOrdering = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "6" - from_port = 80 - to_port = 80000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -}` - -const testAccAWSSecurityGroupConfigIpv6 = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "6" - 
from_port = 80 - to_port = 8000 - ipv6_cidr_blocks = ["::/0"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - ipv6_cidr_blocks = ["::/0"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -const testAccAWSSecurityGroupConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "6" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -const testAccAWSSecurityGroupConfigChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 9000 - cidr_blocks = ["10.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["0.0.0.0/0", "10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigSelf = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - self = true - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigVpc = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = 
"terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigVpcNegOneIngress = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "-1" - from_port = 0 - to_port = 0 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigVpcProtoNumIngress = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "50" - from_port = 0 - to_port = 0 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigMultiIngress = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "worker" { - name = "terraform_acceptance_test_example_1" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example_2" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - cidr_blocks = ["10.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 800 - to_port = 800 - cidr_blocks = 
["10.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - security_groups = ["${aws_security_group.worker.id}"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "foo" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - foo = "bar" - } -} -` - -const testAccAWSSecurityGroupConfigTagsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "foo" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - bar = "baz" - env = "Production" - } -} -` - -const testAccAWSSecurityGroupConfig_generatedName = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "web" { - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -const testAccAWSSecurityGroupConfigDefaultEgress = ` -resource "aws_vpc" "tf_sg_egress_test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf_sg_egress_test" - } -} - -resource "aws_security_group" "worker" { - name = 
"terraform_acceptance_test_example_1" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.tf_sg_egress_test.id}" - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } -} -` - -const testAccAWSSecurityGroupConfigClassic = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example_1" - description = "Used in the terraform acceptance tests" -} -` - -const testAccAWSSecurityGroupPrefixNameConfig = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "baz" { - name_prefix = "baz-" - description = "Used in the terraform acceptance tests" -} -` - -func testAccAWSSecurityGroupConfig_drift() string { - return fmt.Sprintf(` -resource "aws_security_group" "web" { - name = "tf_acc_%d" - description = "Used in the terraform acceptance tests" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["206.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -} -`, acctest.RandInt()) -} - -func testAccAWSSecurityGroupConfig_drift_complex() string { - return fmt.Sprintf(` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "otherweb" { - name = "tf_acc_%d" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group" "web" { - name = "tf_acc_%d" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["206.0.0.0/8"] - } - - ingress { - protocol = "tcp" - from_port = 22 - to_port = 22 - security_groups = ["${aws_security_group.otherweb.id}"] - } - - egress { - protocol = 
"tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["206.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - egress { - protocol = "tcp" - from_port = 22 - to_port = 22 - security_groups = ["${aws_security_group.otherweb.id}"] - } - - tags { - Name = "tf-acc-test" - } -}`, acctest.RandInt(), acctest.RandInt()) -} - -const testAccAWSSecurityGroupInvalidIngressCidr = ` -resource "aws_security_group" "foo" { - name = "testing-foo" - description = "foo-testing" - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["1.2.3.4/33"] - } -}` - -const testAccAWSSecurityGroupInvalidEgressCidr = ` -resource "aws_security_group" "foo" { - name = "testing-foo" - description = "foo-testing" - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["1.2.3.4/33"] - } -}` - -const testAccAWSSecurityGroupInvalidIPv6IngressCidr = ` -resource "aws_security_group" "foo" { - name = "testing-foo" - description = "foo-testing" - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - ipv6_cidr_blocks = ["::/244"] - } -}` - -const testAccAWSSecurityGroupInvalidIPv6EgressCidr = ` -resource "aws_security_group" "foo" { - name = "testing-foo" - description = "foo-testing" - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - ipv6_cidr_blocks = ["::/244"] - } -}` - -const testAccAWSSecurityGroupCombindCIDRandGroups = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "two" { - name = "tf-test-1" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-test-1" - } -} - -resource "aws_security_group" "one" { - name = "tf-test-2" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-test-w" - } -} - -resource "aws_security_group" "three" { - name = "tf-test-3" - vpc_id = "${aws_vpc.foo.id}" - tags { - Name = "tf-test-3" - } -} - -resource "aws_security_group" "mixed" { - name = "tf-mix-test" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - 
from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["10.0.0.0/16", "10.1.0.0/16", "10.7.0.0/16"] - - security_groups = [ - "${aws_security_group.one.id}", - "${aws_security_group.two.id}", - "${aws_security_group.three.id}", - ] - } - - tags { - Name = "tf-mix-test" - } -} -` - -const testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_security_group" "other_web" { - name = "tf_other_acc_tests" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - - ingress { - protocol = "tcp" - from_port = "22" - to_port = "22" - - cidr_blocks = [ - "192.168.0.1/32", - ] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - security_groups = ["${aws_security_group.other_web.id}"] - } - - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -const testAccAWSSecurityGroupConfig_ingressWithCidrAndSGs_classic = ` -provider "aws" { - region = "us-east-1" -} - -resource "aws_security_group" "other_web" { - name = "tf_other_acc_tests" - description = "Used in the terraform acceptance tests" - - tags { - Name = "tf-acc-test" - } -} - -resource "aws_security_group" "web" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - - ingress { - protocol = "tcp" - from_port = "22" - to_port = "22" - - cidr_blocks = [ - "192.168.0.1/32", - ] - } - - ingress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - security_groups = ["${aws_security_group.other_web.name}"] - } - - tags { - Name = "tf-acc-test" - } -} -` - -// fails to apply 
in one pass with the error "diffs didn't match during apply" -// GH-2027 -const testAccAWSSecurityGroupConfig_failWithDiffMismatch = ` -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" - - tags { - Name = "tf-test" - } -} - -resource "aws_security_group" "ssh_base" { - name = "test-ssh-base" - vpc_id = "${aws_vpc.main.id}" -} - -resource "aws_security_group" "jump" { - name = "test-jump" - vpc_id = "${aws_vpc.main.id}" -} - -resource "aws_security_group" "provision" { - name = "test-provision" - vpc_id = "${aws_vpc.main.id}" -} - -resource "aws_security_group" "nat" { - vpc_id = "${aws_vpc.main.id}" - name = "nat" - description = "For nat servers " - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - security_groups = ["${aws_security_group.jump.id}"] - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - security_groups = ["${aws_security_group.provision.id}"] - } -} -` -const testAccAWSSecurityGroupConfig_importSelf = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "tf_sg_import_test" - } -} - -resource "aws_security_group" "allow_all" { - name = "allow_all" - description = "Allow all inbound traffic" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group_rule" "allow_all" { - type = "ingress" - from_port = 0 - to_port = 65535 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = "${aws_security_group.allow_all.id}" -} - -resource "aws_security_group_rule" "allow_all-1" { - type = "ingress" - from_port = 65534 - to_port = 65535 - protocol = "tcp" - - self = true - security_group_id = "${aws_security_group.allow_all.id}" -} -` - -const testAccAWSSecurityGroupConfig_importSourceSecurityGroup = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "tf_sg_import_test" - } -} - -resource "aws_security_group" "test_group_1" { - name = "test group 1" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group" "test_group_2" { - name = "test 
group 2" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group" "test_group_3" { - name = "test group 3" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group_rule" "allow_test_group_2" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - source_security_group_id = "${aws_security_group.test_group_1.id}" - security_group_id = "${aws_security_group.test_group_2.id}" -} - -resource "aws_security_group_rule" "allow_test_group_3" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - source_security_group_id = "${aws_security_group.test_group_1.id}" - security_group_id = "${aws_security_group.test_group_3.id}" -} -` - -const testAccAWSSecurityGroupConfig_importIPRangeAndSecurityGroupWithSameRules = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "tf_sg_import_test" - } -} - -resource "aws_security_group" "test_group_1" { - name = "test group 1" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group" "test_group_2" { - name = "test group 2" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group_rule" "allow_security_group" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - source_security_group_id = "${aws_security_group.test_group_2.id}" - security_group_id = "${aws_security_group.test_group_1.id}" -} - -resource "aws_security_group_rule" "allow_cidr_block" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - cidr_blocks = ["10.0.0.0/32"] - security_group_id = "${aws_security_group.test_group_1.id}" -} - -resource "aws_security_group_rule" "allow_ipv6_cidr_block" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - ipv6_cidr_blocks = ["::/0"] - security_group_id = "${aws_security_group.test_group_1.id}" -} -` - -const testAccAWSSecurityGroupConfig_importIPRangesWithSameRules = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - Name = "tf_sg_import_test" - } -} - 
-resource "aws_security_group" "test_group_1" { - name = "test group 1" - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_security_group_rule" "allow_cidr_block" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - cidr_blocks = ["10.0.0.0/32"] - security_group_id = "${aws_security_group.test_group_1.id}" -} - -resource "aws_security_group_rule" "allow_ipv6_cidr_block" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "tcp" - - ipv6_cidr_blocks = ["::/0"] - security_group_id = "${aws_security_group.test_group_1.id}" -} -` - -const testAccAWSSecurityGroupConfigIpv4andIpv6Egress = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true - tags { - Name = "tf_sg_ipv4_and_ipv6_acc_test" - } -} - -resource "aws_security_group" "egress" { - name = "terraform_acceptance_test_example" - description = "Used in the terraform acceptance tests" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = 22 - to_port = 22 - protocol = "6" - cidr_blocks = ["0.0.0.0/0"] - } - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - ipv6_cidr_blocks = ["::/0"] - } -} -` - -const testAccAWSSecurityGroupConfigPrefixListEgress = ` -resource "aws_vpc" "tf_sg_prefix_list_egress_test" { - cidr_block = "10.0.0.0/16" - tags { - Name = "tf_sg_prefix_list_egress_test" - } -} - -resource "aws_route_table" "default" { - vpc_id = "${aws_vpc.tf_sg_prefix_list_egress_test.id}" -} - -resource "aws_vpc_endpoint" "s3-us-west-2" { - vpc_id = "${aws_vpc.tf_sg_prefix_list_egress_test.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = ["${aws_route_table.default.id}"] - policy = < 1 { - return fmt.Errorf("You can only define a single kinesis destination per record") - } - kinesis := destination[0].(map[string]interface{}) - createOpts.EventDestination.KinesisFirehoseDestination = &ses.KinesisFirehoseDestination{ - 
DeliveryStreamARN: aws.String(kinesis["stream_arn"].(string)), - IAMRoleARN: aws.String(kinesis["role_arn"].(string)), - } - log.Printf("[DEBUG] Creating kinesis destination: %#v", kinesis) - } - - _, err := conn.CreateConfigurationSetEventDestination(createOpts) - if err != nil { - return fmt.Errorf("Error creating SES configuration set event destination: %s", err) - } - - d.SetId(eventDestinationName) - - log.Printf("[WARN] SES DONE") - return resourceAwsSesEventDestinationRead(d, meta) -} - -func resourceAwsSesEventDestinationRead(d *schema.ResourceData, meta interface{}) error { - - return nil -} - -func resourceAwsSesEventDestinationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).sesConn - - log.Printf("[DEBUG] SES Delete Configuration Set Destination: %s", d.Id()) - _, err := conn.DeleteConfigurationSetEventDestination(&ses.DeleteConfigurationSetEventDestinationInput{ - ConfigurationSetName: aws.String(d.Get("configuration_set_name").(string)), - EventDestinationName: aws.String(d.Id()), - }) - - if err != nil { - return err - } - - return nil -} - -func validateMatchingTypes(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - matchingTypes := map[string]bool{ - "send": true, - "reject": true, - "bounce": true, - "complaint": true, - "delivery": true, - } - - if !matchingTypes[value] { - errors = append(errors, fmt.Errorf("%q must be a valid matching event type value: %q", k, value)) - } - return -} - -func validateDimensionValueSource(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - matchingSource := map[string]bool{ - "messageTag": true, - "emailHeader": true, - } - - if !matchingSource[value] { - errors = append(errors, fmt.Errorf("%q must be a valid dimension value: %q", k, value)) - } - return -} - -func generateCloudWatchDestination(v []interface{}) []*ses.CloudWatchDimensionConfiguration { - - b := make([]*ses.CloudWatchDimensionConfiguration, len(v)) - - 
for i, vI := range v { - cloudwatch := vI.(map[string]interface{}) - b[i] = &ses.CloudWatchDimensionConfiguration{ - DefaultDimensionValue: aws.String(cloudwatch["default_value"].(string)), - DimensionName: aws.String(cloudwatch["dimension_name"].(string)), - DimensionValueSource: aws.String(cloudwatch["value_source"].(string)), - } - } - - return b -} diff --git a/builtin/providers/aws/resource_aws_ses_event_destination_test.go b/builtin/providers/aws/resource_aws_ses_event_destination_test.go deleted file mode 100644 index 624ce0c83..000000000 --- a/builtin/providers/aws/resource_aws_ses_event_destination_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/ses" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSESEventDestination_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSESEventDestinationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSESEventDestinationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESEventDestinationExists("aws_ses_configuration_set.test"), - resource.TestCheckResourceAttr( - "aws_ses_event_destination.kinesis", "name", "event-destination-kinesis"), - resource.TestCheckResourceAttr( - "aws_ses_event_destination.cloudwatch", "name", "event-destination-cloudwatch"), - ), - }, - }, - }) -} - -func testAccCheckSESEventDestinationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).sesConn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ses_configuration_set" { - continue - } - - response, err := conn.ListConfigurationSets(&ses.ListConfigurationSetsInput{}) - if err != nil { - return err - } - - found := false - for _, element := range 
response.ConfigurationSets { - if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) { - found = true - } - } - - if found { - return fmt.Errorf("The configuration set still exists") - } - - } - - return nil - -} - -func testAccCheckAwsSESEventDestinationExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("SES event destination not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("SES event destination ID not set") - } - - conn := testAccProvider.Meta().(*AWSClient).sesConn - - response, err := conn.ListConfigurationSets(&ses.ListConfigurationSetsInput{}) - if err != nil { - return err - } - - found := false - for _, element := range response.ConfigurationSets { - if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) { - found = true - } - } - - if !found { - return fmt.Errorf("The configuration set was not created") - } - - return nil - } -} - -var edRandomInteger = acctest.RandInt() -var testAccAWSSESEventDestinationConfig = fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = "tf-test-bucket-format" - acl = "private" -} - -resource "aws_iam_role" "firehose_role" { - name = "firehose_test_role_test" - assume_role_policy = < 0 { - attrmap := attributeOutput.Attributes - resource := *resourceAwsSnsTopic() - // iKey = internal struct key, oKey = AWS Attribute Map key - for iKey, oKey := range SNSAttributeMap { - log.Printf("[DEBUG] Reading %s => %s", iKey, oKey) - - if attrmap[oKey] != nil { - // Some of the fetched attributes are stateful properties such as - // the number of subscriptions, the owner, etc. 
skip those - if resource.Schema[iKey] != nil { - var value string - if iKey == "policy" { - value, err = normalizeJsonString(*attrmap[oKey]) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - } else { - value = *attrmap[oKey] - } - log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, value) - d.Set(iKey, value) - } - } - } - } - - // If we have no name set (import) then determine it from the ARN. - // This is a bit of a heuristic for now since AWS provides no other - // way to get it. - if _, ok := d.GetOk("name"); !ok { - arn := d.Get("arn").(string) - idx := strings.LastIndex(arn, ":") - if idx > -1 { - d.Set("name", arn[idx+1:]) - } - } - - return nil -} - -func resourceAwsSnsTopicDelete(d *schema.ResourceData, meta interface{}) error { - snsconn := meta.(*AWSClient).snsconn - - log.Printf("[DEBUG] SNS Delete Topic: %s", d.Id()) - _, err := snsconn.DeleteTopic(&sns.DeleteTopicInput{ - TopicArn: aws.String(d.Id()), - }) - if err != nil { - return err - } - return nil -} diff --git a/builtin/providers/aws/resource_aws_sns_topic_policy.go b/builtin/providers/aws/resource_aws_sns_topic_policy.go deleted file mode 100644 index 288a9a449..000000000 --- a/builtin/providers/aws/resource_aws_sns_topic_policy.go +++ /dev/null @@ -1,179 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/sns" -) - -func resourceAwsSnsTopicPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSnsTopicPolicyUpsert, - Read: resourceAwsSnsTopicPolicyRead, - Update: resourceAwsSnsTopicPolicyUpsert, - Delete: resourceAwsSnsTopicPolicyDelete, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy": { - Type: schema.TypeString, 
- Required: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - }, - } -} - -func resourceAwsSnsTopicPolicyUpsert(d *schema.ResourceData, meta interface{}) error { - arn := d.Get("arn").(string) - req := sns.SetTopicAttributesInput{ - TopicArn: aws.String(arn), - AttributeName: aws.String("Policy"), - AttributeValue: aws.String(d.Get("policy").(string)), - } - - d.SetId(arn) - - // Retry the update in the event of an eventually consistent style of - // error, where say an IAM resource is successfully created but not - // actually available. See https://github.com/hashicorp/terraform/issues/3660 - log.Printf("[DEBUG] Updating SNS Topic Policy: %s", req) - stateConf := &resource.StateChangeConf{ - Pending: []string{"retrying"}, - Target: []string{"success"}, - Refresh: resourceAwsSNSUpdateRefreshFunc(meta, req), - Timeout: 3 * time.Minute, - MinTimeout: 3 * time.Second, - } - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsSnsTopicPolicyRead(d, meta) -} - -func resourceAwsSnsTopicPolicyRead(d *schema.ResourceData, meta interface{}) error { - snsconn := meta.(*AWSClient).snsconn - - attributeOutput, err := snsconn.GetTopicAttributes(&sns.GetTopicAttributesInput{ - TopicArn: aws.String(d.Id()), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFound" { - log.Printf("[WARN] SNS Topic (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - if attributeOutput.Attributes == nil { - log.Printf("[WARN] SNS Topic (%q) attributes not found (nil)", d.Id()) - d.SetId("") - return nil - } - attrmap := attributeOutput.Attributes - - policy, ok := attrmap["Policy"] - if !ok { - log.Printf("[WARN] SNS Topic (%q) policy not found in attributes", d.Id()) - d.SetId("") - return nil - } - - d.Set("policy", policy) - - return nil -} - -func resourceAwsSnsTopicPolicyDelete(d *schema.ResourceData, meta interface{}) 
error { - accountId, err := getAccountIdFromSnsTopicArn(d.Id(), meta.(*AWSClient).partition) - if err != nil { - return err - } - - req := sns.SetTopicAttributesInput{ - TopicArn: aws.String(d.Id()), - AttributeName: aws.String("Policy"), - // It is impossible to delete a policy or set to empty - // (confirmed by AWS Support representative) - // so we instead set it back to the default one - AttributeValue: aws.String(buildDefaultSnsTopicPolicy(d.Id(), accountId)), - } - - // Retry the update in the event of an eventually consistent style of - // error, where say an IAM resource is successfully created but not - // actually available. See https://github.com/hashicorp/terraform/issues/3660 - log.Printf("[DEBUG] Resetting SNS Topic Policy to default: %s", req) - stateConf := &resource.StateChangeConf{ - Pending: []string{"retrying"}, - Target: []string{"success"}, - Refresh: resourceAwsSNSUpdateRefreshFunc(meta, req), - Timeout: 3 * time.Minute, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - return nil -} - -func getAccountIdFromSnsTopicArn(arn, partition string) (string, error) { - // arn:aws:sns:us-west-2:123456789012:test-new - // arn:aws-us-gov:sns:us-west-2:123456789012:test-new - re := regexp.MustCompile(fmt.Sprintf("^arn:%s:sns:[^:]+:([0-9]{12}):.+", partition)) - matches := re.FindStringSubmatch(arn) - if len(matches) != 2 { - return "", fmt.Errorf("Unable to get account ID from ARN (%q)", arn) - } - return matches[1], nil -} - -func buildDefaultSnsTopicPolicy(topicArn, accountId string) string { - return fmt.Sprintf(`{ - "Version": "2008-10-17", - "Id": "__default_policy_ID", - "Statement": [ - { - "Sid": "__default_statement_ID", - "Effect": "Allow", - "Principal": { - "AWS": "*" - }, - "Action": [ - "SNS:GetTopicAttributes", - "SNS:SetTopicAttributes", - "SNS:AddPermission", - "SNS:RemovePermission", - "SNS:DeleteTopic", - "SNS:Subscribe", - "SNS:ListSubscriptionsByTopic", - "SNS:Publish", - 
"SNS:Receive" - ], - "Resource": "%s", - "Condition": { - "StringEquals": { - "AWS:SourceOwner": "%s" - } - } - } - ] -}`, topicArn, accountId) -} diff --git a/builtin/providers/aws/resource_aws_sns_topic_policy_test.go b/builtin/providers/aws/resource_aws_sns_topic_policy_test.go deleted file mode 100644 index 4aae9645b..000000000 --- a/builtin/providers/aws/resource_aws_sns_topic_policy_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package aws - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSNSTopicPolicy_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSNSTopicConfig_withPolicy, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test"), - resource.TestMatchResourceAttr("aws_sns_topic_policy.custom", "policy", - regexp.MustCompile("^{\"Version\":\"2012-10-17\".+")), - ), - }, - }, - }) -} - -const testAccAWSSNSTopicConfig_withPolicy = ` -resource "aws_sns_topic" "test" { - name = "tf-acc-test-topic-with-policy" -} - -resource "aws_sns_topic_policy" "custom" { - arn = "${aws_sns_topic.test.arn}" - policy = < 0 { - attrHash := attributeOutput.Attributes - resource := *resourceAwsSnsTopicSubscription() - - for iKey, oKey := range SNSSubscriptionAttributeMap { - log.Printf("[DEBUG] Reading %s => %s", iKey, oKey) - - if attrHash[oKey] != nil { - if resource.Schema[iKey] != nil { - var value string - value = *attrHash[oKey] - log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, value) - d.Set(iKey, value) - } - } - } - } - - return nil -} - -func resourceAwsSnsTopicSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - snsconn := meta.(*AWSClient).snsconn - - log.Printf("[DEBUG] SNS delete topic subscription: %s", d.Id()) - _, err := snsconn.Unsubscribe(&sns.UnsubscribeInput{ 
- SubscriptionArn: aws.String(d.Id()), - }) - if err != nil { - return err - } - return nil -} - -func subscribeToSNSTopic(d *schema.ResourceData, snsconn *sns.SNS) (output *sns.SubscribeOutput, err error) { - protocol := d.Get("protocol").(string) - endpoint := d.Get("endpoint").(string) - topic_arn := d.Get("topic_arn").(string) - endpoint_auto_confirms := d.Get("endpoint_auto_confirms").(bool) - confirmation_timeout_in_minutes := d.Get("confirmation_timeout_in_minutes").(int) - - if strings.Contains(protocol, "http") && !endpoint_auto_confirms { - return nil, fmt.Errorf("Protocol http/https is only supported for endpoints which auto confirms!") - } - - log.Printf("[DEBUG] SNS create topic subscription: %s (%s) @ '%s'", endpoint, protocol, topic_arn) - - req := &sns.SubscribeInput{ - Protocol: aws.String(protocol), - Endpoint: aws.String(endpoint), - TopicArn: aws.String(topic_arn), - } - - output, err = snsconn.Subscribe(req) - if err != nil { - return nil, fmt.Errorf("Error creating SNS topic: %s", err) - } - - log.Printf("[DEBUG] Finished subscribing to topic %s with subscription arn %s", topic_arn, *output.SubscriptionArn) - - if strings.Contains(protocol, "http") && subscriptionHasPendingConfirmation(output.SubscriptionArn) { - - log.Printf("[DEBUG] SNS create topic subscription is pending so fetching the subscription list for topic : %s (%s) @ '%s'", endpoint, protocol, topic_arn) - - err = resource.Retry(time.Duration(confirmation_timeout_in_minutes)*time.Minute, func() *resource.RetryError { - - subscription, err := findSubscriptionByNonID(d, snsconn) - - if subscription != nil { - output.SubscriptionArn = subscription.SubscriptionArn - return nil - } - - if err != nil { - return resource.RetryableError( - fmt.Errorf("Error fetching subscriptions for SNS topic %s: %s", topic_arn, err)) - } - - return resource.RetryableError( - fmt.Errorf("Endpoint (%s) did not autoconfirm the subscription for topic %s", endpoint, topic_arn)) - }) - - if err != nil { - 
return nil, err - } - } - - log.Printf("[DEBUG] Created new subscription! %s", *output.SubscriptionArn) - return output, nil -} - -// finds a subscription using protocol, endpoint and topic_arn (which is a key in sns subscription) -func findSubscriptionByNonID(d *schema.ResourceData, snsconn *sns.SNS) (*sns.Subscription, error) { - protocol := d.Get("protocol").(string) - endpoint := d.Get("endpoint").(string) - topic_arn := d.Get("topic_arn").(string) - - req := &sns.ListSubscriptionsByTopicInput{ - TopicArn: aws.String(topic_arn), - } - - for { - - res, err := snsconn.ListSubscriptionsByTopic(req) - - if err != nil { - return nil, fmt.Errorf("Error fetching subscripitions for topic %s : %s", topic_arn, err) - } - - for _, subscription := range res.Subscriptions { - log.Printf("[DEBUG] check subscription with EndPoint %s, Protocol %s, topicARN %s and SubscriptionARN %s", *subscription.Endpoint, *subscription.Protocol, *subscription.TopicArn, *subscription.SubscriptionArn) - if *subscription.Endpoint == endpoint && *subscription.Protocol == protocol && *subscription.TopicArn == topic_arn && !subscriptionHasPendingConfirmation(subscription.SubscriptionArn) { - return subscription, nil - } - } - - // if there are more than 100 subscriptions then go to the next 100 otherwise return nil - if res.NextToken != nil { - req.NextToken = res.NextToken - } else { - return nil, nil - } - } -} - -// returns true if arn is nil or has both pending and confirmation words in the arn -func subscriptionHasPendingConfirmation(arn *string) bool { - if arn != nil && !strings.Contains(strings.Replace(strings.ToLower(*arn), " ", "", -1), awsSNSPendingConfirmationMessageWithoutSpaces) { - return false - } - - return true -} diff --git a/builtin/providers/aws/resource_aws_sns_topic_subscription_test.go b/builtin/providers/aws/resource_aws_sns_topic_subscription_test.go deleted file mode 100644 index f39214ee4..000000000 --- a/builtin/providers/aws/resource_aws_sns_topic_subscription_test.go 
+++ /dev/null @@ -1,247 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSNSTopicSubscription_basic(t *testing.T) { - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicSubscriptionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSNSTopicSubscriptionConfig(ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic"), - testAccCheckAWSSNSTopicSubscriptionExists("aws_sns_topic_subscription.test_subscription"), - ), - }, - }, - }) -} - -func TestAccAWSSNSTopicSubscription_autoConfirmingEndpoint(t *testing.T) { - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicSubscriptionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSNSTopicSubscriptionConfig_autoConfirmingEndpoint(ri), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic"), - testAccCheckAWSSNSTopicSubscriptionExists("aws_sns_topic_subscription.test_subscription"), - ), - }, - }, - }) -} - -func testAccCheckAWSSNSTopicSubscriptionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).snsconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_sns_topic" { - continue - } - - // Try to find key pair - req := &sns.GetSubscriptionAttributesInput{ - SubscriptionArn: aws.String(rs.Primary.ID), - } - - _, err := conn.GetSubscriptionAttributes(req) - - if err == nil { - return 
fmt.Errorf("Subscription still exists, can't continue.") - } - - // Verify the error is an API error, not something else - _, ok := err.(awserr.Error) - if !ok { - return err - } - } - - return nil -} - -func testAccCheckAWSSNSTopicSubscriptionExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SNS subscription with that ARN exists") - } - - conn := testAccProvider.Meta().(*AWSClient).snsconn - - params := &sns.GetSubscriptionAttributesInput{ - SubscriptionArn: aws.String(rs.Primary.ID), - } - _, err := conn.GetSubscriptionAttributes(params) - - if err != nil { - return err - } - - return nil - } -} - -func testAccAWSSNSTopicSubscriptionConfig(i int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "test_topic" { - name = "terraform-test-topic-%d" -} - -resource "aws_sqs_queue" "test_queue" { - name = "terraform-subscription-test-queue-%d" -} - -resource "aws_sns_topic_subscription" "test_subscription" { - topic_arn = "${aws_sns_topic.test_topic.arn}" - protocol = "sqs" - endpoint = "${aws_sqs_queue.test_queue.arn}" -} -`, i, i) -} - -func testAccAWSSNSTopicSubscriptionConfig_autoConfirmingEndpoint(i int) string { - return fmt.Sprintf(` -resource "aws_sns_topic" "test_topic" { - name = "tf-acc-test-sns-%d" -} - -resource "aws_api_gateway_rest_api" "test" { - name = "tf-acc-test-sns-%d" - description = "Terraform Acceptance test for SNS subscription" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - http_method = "POST" - authorization = "NONE" -} - -resource "aws_api_gateway_method_response" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - http_method = 
"${aws_api_gateway_method.test.http_method}" - status_code = "200" - - response_parameters { - "method.response.header.Access-Control-Allow-Origin" = true - } -} - -resource "aws_api_gateway_integration" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - http_method = "${aws_api_gateway_method.test.http_method}" - integration_http_method = "POST" - type = "AWS" - uri = "${aws_lambda_function.lambda.invoke_arn}" -} - -resource "aws_api_gateway_integration_response" "test" { - depends_on = ["aws_api_gateway_integration.test"] - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - http_method = "${aws_api_gateway_method.test.http_method}" - status_code = "${aws_api_gateway_method_response.test.status_code}" - - response_parameters { - "method.response.header.Access-Control-Allow-Origin" = "'*'" - } -} - -resource "aws_iam_role" "iam_for_lambda" { - name = "tf-acc-test-sns-%d" - - assume_role_policy = < 0 { - for _, v := range s.List() { - securityGroupIds = append(securityGroupIds, aws.String(v.(string))) - } - } - } - - subnetId, hasSubnetId := d["subnet_id"] - if hasSubnetId { - opts.SubnetId = aws.String(subnetId.(string)) - } - - associatePublicIpAddress, hasPublicIpAddress := d["associate_public_ip_address"] - if hasPublicIpAddress && associatePublicIpAddress.(bool) == true && hasSubnetId { - - // If we have a non-default VPC / Subnet specified, we can flag - // AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided. 
- // You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise - // you get: Network interfaces and an instance-level subnet ID may not be specified on the same request - // You also need to attach Security Groups to the NetworkInterface instead of the instance, - // to avoid: Network interfaces and an instance-level security groups may not be specified on - // the same request - ni := &ec2.InstanceNetworkInterfaceSpecification{ - AssociatePublicIpAddress: aws.Bool(true), - DeleteOnTermination: aws.Bool(true), - DeviceIndex: aws.Int64(int64(0)), - SubnetId: aws.String(subnetId.(string)), - Groups: securityGroupIds, - } - - opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni} - opts.SubnetId = aws.String("") - } else { - for _, id := range securityGroupIds { - opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id}) - } - } - - blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn) - if err != nil { - return nil, err - } - if len(blockDevices) > 0 { - opts.BlockDeviceMappings = blockDevices - } - - return opts, nil -} - -func validateSpotFleetRequestKeyName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if value == "" { - errors = append(errors, fmt.Errorf("Key name cannot be empty.")) - } - - return -} - -func readSpotFleetBlockDeviceMappingsFromConfig( - d map[string]interface{}, conn *ec2.EC2) ([]*ec2.BlockDeviceMapping, error) { - blockDevices := make([]*ec2.BlockDeviceMapping, 0) - - if v, ok := d["ebs_block_device"]; ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["snapshot_id"].(string); ok && v != "" { - ebs.SnapshotId = aws.String(v) - } - - if v, ok := bd["encrypted"].(bool); ok && v { - ebs.Encrypted = aws.Bool(v) - } - - if v, ok := 
bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - Ebs: ebs, - }) - } - } - - if v, ok := d["ephemeral_block_device"]; ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]interface{}) - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: aws.String(bd["device_name"].(string)), - VirtualName: aws.String(bd["virtual_name"].(string)), - }) - } - } - - if v, ok := d["root_block_device"]; ok { - vL := v.(*schema.Set).List() - if len(vL) > 1 { - return nil, fmt.Errorf("Cannot specify more than one root_block_device.") - } - for _, v := range vL { - bd := v.(map[string]interface{}) - ebs := &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), - } - - if v, ok := bd["volume_size"].(int); ok && v != 0 { - ebs.VolumeSize = aws.Int64(int64(v)) - } - - if v, ok := bd["volume_type"].(string); ok && v != "" { - ebs.VolumeType = aws.String(v) - } - - if v, ok := bd["iops"].(int); ok && v > 0 { - ebs.Iops = aws.Int64(int64(v)) - } - - if dn, err := fetchRootDeviceName(d["ami"].(string), conn); err == nil { - if dn == nil { - return nil, fmt.Errorf( - "Expected 1 AMI for ID: %s, got none", - d["ami"].(string)) - } - - blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ - DeviceName: dn, - Ebs: ebs, - }) - } else { - return nil, err - } - } - } - - return blockDevices, nil -} - -func buildAwsSpotFleetLaunchSpecifications( - d *schema.ResourceData, meta interface{}) ([]*ec2.SpotFleetLaunchSpecification, error) { - - user_specs := d.Get("launch_specification").(*schema.Set).List() - specs := make([]*ec2.SpotFleetLaunchSpecification, len(user_specs)) - for i, 
user_spec := range user_specs { - user_spec_map := user_spec.(map[string]interface{}) - // panic: interface conversion: interface {} is map[string]interface {}, not *schema.ResourceData - opts, err := buildSpotFleetLaunchSpecification(user_spec_map, meta) - if err != nil { - return nil, err - } - specs[i] = opts - } - - return specs, nil -} - -func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{}) error { - // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html - conn := meta.(*AWSClient).ec2conn - - launch_specs, err := buildAwsSpotFleetLaunchSpecifications(d, meta) - if err != nil { - return err - } - - // http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-SpotFleetRequestConfigData - spotFleetConfig := &ec2.SpotFleetRequestConfigData{ - IamFleetRole: aws.String(d.Get("iam_fleet_role").(string)), - LaunchSpecifications: launch_specs, - SpotPrice: aws.String(d.Get("spot_price").(string)), - TargetCapacity: aws.Int64(int64(d.Get("target_capacity").(int))), - ClientToken: aws.String(resource.UniqueId()), - TerminateInstancesWithExpiration: aws.Bool(d.Get("terminate_instances_with_expiration").(bool)), - ReplaceUnhealthyInstances: aws.Bool(d.Get("replace_unhealthy_instances").(bool)), - } - - if v, ok := d.GetOk("excess_capacity_termination_policy"); ok { - spotFleetConfig.ExcessCapacityTerminationPolicy = aws.String(v.(string)) - } - - if v, ok := d.GetOk("allocation_strategy"); ok { - spotFleetConfig.AllocationStrategy = aws.String(v.(string)) - } else { - spotFleetConfig.AllocationStrategy = aws.String("lowestPrice") - } - - if v, ok := d.GetOk("valid_from"); ok { - valid_from, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string)) - if err != nil { - return err - } - spotFleetConfig.ValidFrom = &valid_from - } - - if v, ok := d.GetOk("valid_until"); ok { - valid_until, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string)) - if err != nil { - return err - } - 
spotFleetConfig.ValidUntil = &valid_until - } else { - valid_until := time.Now().Add(24 * time.Hour) - spotFleetConfig.ValidUntil = &valid_until - } - - // http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-RequestSpotFleetInput - spotFleetOpts := &ec2.RequestSpotFleetInput{ - SpotFleetRequestConfig: spotFleetConfig, - DryRun: aws.Bool(false), - } - - log.Printf("[DEBUG] Requesting spot fleet with these opts: %+v", spotFleetOpts) - - // Since IAM is eventually consistent, we retry creation as a newly created role may not - // take effect immediately, resulting in an InvalidSpotFleetRequestConfig error - var resp *ec2.RequestSpotFleetOutput - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - var err error - resp, err = conn.RequestSpotFleet(spotFleetOpts) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // IAM is eventually consistent :/ - if awsErr.Code() == "InvalidSpotFleetRequestConfig" { - return resource.RetryableError( - fmt.Errorf("[WARN] Error creating Spot fleet request, retrying: %s", err)) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error requesting spot fleet: %s", err) - } - - d.SetId(*resp.SpotFleetRequestId) - - log.Printf("[INFO] Spot Fleet Request ID: %s", d.Id()) - log.Println("[INFO] Waiting for Spot Fleet Request to be active") - stateConf := &resource.StateChangeConf{ - Pending: []string{"submitted"}, - Target: []string{"active"}, - Refresh: resourceAwsSpotFleetRequestStateRefreshFunc(d, meta), - Timeout: 10 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return resourceAwsSpotFleetRequestRead(d, meta) -} - -func resourceAwsSpotFleetRequestStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).ec2conn - req := 
&ec2.DescribeSpotFleetRequestsInput{ - SpotFleetRequestIds: []*string{aws.String(d.Id())}, - } - resp, err := conn.DescribeSpotFleetRequests(req) - - if err != nil { - log.Printf("Error on retrieving Spot Fleet Request when waiting: %s", err) - return nil, "", nil - } - - if resp == nil { - return nil, "", nil - } - - if len(resp.SpotFleetRequestConfigs) == 0 { - return nil, "", nil - } - - spotFleetRequest := resp.SpotFleetRequestConfigs[0] - - return spotFleetRequest, *spotFleetRequest.SpotFleetRequestState, nil - } -} - -func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) error { - // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotFleetRequests.html - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeSpotFleetRequestsInput{ - SpotFleetRequestIds: []*string{aws.String(d.Id())}, - } - resp, err := conn.DescribeSpotFleetRequests(req) - - if err != nil { - // If the spot request was not found, return nil so that we can show - // that it is gone. 
- ec2err, ok := err.(awserr.Error) - if ok && ec2err.Code() == "InvalidSpotFleetRequestId.NotFound" { - d.SetId("") - return nil - } - - // Some other error, report it - return err - } - - sfr := resp.SpotFleetRequestConfigs[0] - - // if the request is cancelled, then it is gone - cancelledStates := map[string]bool{ - "cancelled": true, - "cancelled_running": true, - "cancelled_terminating": true, - } - if _, ok := cancelledStates[*sfr.SpotFleetRequestState]; ok { - d.SetId("") - return nil - } - - d.SetId(*sfr.SpotFleetRequestId) - d.Set("spot_request_state", aws.StringValue(sfr.SpotFleetRequestState)) - - config := sfr.SpotFleetRequestConfig - - if config.AllocationStrategy != nil { - d.Set("allocation_strategy", aws.StringValue(config.AllocationStrategy)) - } - - if config.ClientToken != nil { - d.Set("client_token", aws.StringValue(config.ClientToken)) - } - - if config.ExcessCapacityTerminationPolicy != nil { - d.Set("excess_capacity_termination_policy", - aws.StringValue(config.ExcessCapacityTerminationPolicy)) - } - - if config.IamFleetRole != nil { - d.Set("iam_fleet_role", aws.StringValue(config.IamFleetRole)) - } - - if config.SpotPrice != nil { - d.Set("spot_price", aws.StringValue(config.SpotPrice)) - } - - if config.TargetCapacity != nil { - d.Set("target_capacity", aws.Int64Value(config.TargetCapacity)) - } - - if config.TerminateInstancesWithExpiration != nil { - d.Set("terminate_instances_with_expiration", - aws.BoolValue(config.TerminateInstancesWithExpiration)) - } - - if config.ValidFrom != nil { - d.Set("valid_from", - aws.TimeValue(config.ValidFrom).Format(awsAutoscalingScheduleTimeLayout)) - } - - if config.ValidUntil != nil { - d.Set("valid_until", - aws.TimeValue(config.ValidUntil).Format(awsAutoscalingScheduleTimeLayout)) - } - - d.Set("replace_unhealthy_instances", config.ReplaceUnhealthyInstances) - d.Set("launch_specification", launchSpecsToSet(config.LaunchSpecifications, conn)) - - return nil -} - -func launchSpecsToSet(launchSpecs 
[]*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set { - specSet := &schema.Set{F: hashLaunchSpecification} - for _, spec := range launchSpecs { - rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn) - if err != nil { - log.Panic(err) - } - - specSet.Add(launchSpecToMap(spec, rootDeviceName)) - } - return specSet -} - -func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} { - m := make(map[string]interface{}) - - m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName) - m["ebs_block_device"] = ebsBlockDevicesToSet(l.BlockDeviceMappings, rootDevName) - m["ephemeral_block_device"] = ephemeralBlockDevicesToSet(l.BlockDeviceMappings) - - if l.ImageId != nil { - m["ami"] = aws.StringValue(l.ImageId) - } - - if l.InstanceType != nil { - m["instance_type"] = aws.StringValue(l.InstanceType) - } - - if l.SpotPrice != nil { - m["spot_price"] = aws.StringValue(l.SpotPrice) - } - - if l.EbsOptimized != nil { - m["ebs_optimized"] = aws.BoolValue(l.EbsOptimized) - } - - if l.Monitoring != nil && l.Monitoring.Enabled != nil { - m["monitoring"] = aws.BoolValue(l.Monitoring.Enabled) - } - - if l.IamInstanceProfile != nil && l.IamInstanceProfile.Name != nil { - m["iam_instance_profile"] = aws.StringValue(l.IamInstanceProfile.Name) - } - - if l.UserData != nil { - m["user_data"] = userDataHashSum(aws.StringValue(l.UserData)) - } - - if l.KeyName != nil { - m["key_name"] = aws.StringValue(l.KeyName) - } - - if l.Placement != nil { - m["availability_zone"] = aws.StringValue(l.Placement.AvailabilityZone) - } - - if l.SubnetId != nil { - m["subnet_id"] = aws.StringValue(l.SubnetId) - } - - securityGroupIds := &schema.Set{F: schema.HashString} - if len(l.NetworkInterfaces) > 0 { - m["associate_public_ip_address"] = aws.BoolValue(l.NetworkInterfaces[0].AssociatePublicIpAddress) - m["subnet_id"] = aws.StringValue(l.NetworkInterfaces[0].SubnetId) - - for _, group := range 
l.NetworkInterfaces[0].Groups { - securityGroupIds.Add(aws.StringValue(group)) - } - } else { - for _, group := range l.SecurityGroups { - securityGroupIds.Add(aws.StringValue(group.GroupId)) - } - } - m["vpc_security_group_ids"] = securityGroupIds - - if l.WeightedCapacity != nil { - m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64) - } - - return m -} - -func ebsBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping, rootDevName *string) *schema.Set { - set := &schema.Set{F: hashEbsBlockDevice} - - for _, val := range bdm { - if val.Ebs != nil { - m := make(map[string]interface{}) - - ebs := val.Ebs - - if val.DeviceName != nil { - if aws.StringValue(rootDevName) == aws.StringValue(val.DeviceName) { - continue - } - - m["device_name"] = aws.StringValue(val.DeviceName) - } - - if ebs.DeleteOnTermination != nil { - m["delete_on_termination"] = aws.BoolValue(ebs.DeleteOnTermination) - } - - if ebs.SnapshotId != nil { - m["snapshot_id"] = aws.StringValue(ebs.SnapshotId) - } - - if ebs.Encrypted != nil { - m["encrypted"] = aws.BoolValue(ebs.Encrypted) - } - - if ebs.VolumeSize != nil { - m["volume_size"] = aws.Int64Value(ebs.VolumeSize) - } - - if ebs.VolumeType != nil { - m["volume_type"] = aws.StringValue(ebs.VolumeType) - } - - if ebs.Iops != nil { - m["iops"] = aws.Int64Value(ebs.Iops) - } - - set.Add(m) - } - } - - return set -} - -func ephemeralBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping) *schema.Set { - set := &schema.Set{F: hashEphemeralBlockDevice} - - for _, val := range bdm { - if val.VirtualName != nil { - m := make(map[string]interface{}) - m["virtual_name"] = aws.StringValue(val.VirtualName) - - if val.DeviceName != nil { - m["device_name"] = aws.StringValue(val.DeviceName) - } - - set.Add(m) - } - } - - return set -} - -func rootBlockDeviceToSet( - bdm []*ec2.BlockDeviceMapping, - rootDevName *string, -) *schema.Set { - set := &schema.Set{F: hashRootBlockDevice} - - if rootDevName != nil { - for _, val := range bdm { - if 
aws.StringValue(val.DeviceName) == aws.StringValue(rootDevName) { - m := make(map[string]interface{}) - if val.Ebs.DeleteOnTermination != nil { - m["delete_on_termination"] = aws.BoolValue(val.Ebs.DeleteOnTermination) - } - - if val.Ebs.VolumeSize != nil { - m["volume_size"] = aws.Int64Value(val.Ebs.VolumeSize) - } - - if val.Ebs.VolumeType != nil { - m["volume_type"] = aws.StringValue(val.Ebs.VolumeType) - } - - if val.Ebs.Iops != nil { - m["iops"] = aws.Int64Value(val.Ebs.Iops) - } - - set.Add(m) - } - } - } - - return set -} - -func resourceAwsSpotFleetRequestUpdate(d *schema.ResourceData, meta interface{}) error { - // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySpotFleetRequest.html - conn := meta.(*AWSClient).ec2conn - - d.Partial(true) - - req := &ec2.ModifySpotFleetRequestInput{ - SpotFleetRequestId: aws.String(d.Id()), - } - - if val, ok := d.GetOk("target_capacity"); ok { - req.TargetCapacity = aws.Int64(int64(val.(int))) - } - - if val, ok := d.GetOk("excess_capacity_termination_policy"); ok { - req.ExcessCapacityTerminationPolicy = aws.String(val.(string)) - } - - resp, err := conn.ModifySpotFleetRequest(req) - if err == nil && aws.BoolValue(resp.Return) { - // TODO: rollback to old values? 
- } - - return nil -} - -func resourceAwsSpotFleetRequestDelete(d *schema.ResourceData, meta interface{}) error { - // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests.html - conn := meta.(*AWSClient).ec2conn - terminateInstances := d.Get("terminate_instances_with_expiration").(bool) - - log.Printf("[INFO] Cancelling spot fleet request: %s", d.Id()) - resp, err := conn.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{ - SpotFleetRequestIds: []*string{aws.String(d.Id())}, - TerminateInstances: aws.Bool(terminateInstances), - }) - - if err != nil { - return fmt.Errorf("Error cancelling spot request (%s): %s", d.Id(), err) - } - - // check response successfulFleetRequestSet to make sure our request was canceled - var found bool - for _, s := range resp.SuccessfulFleetRequests { - if *s.SpotFleetRequestId == d.Id() { - found = true - } - } - - if !found { - return fmt.Errorf("[ERR] Spot Fleet request (%s) was not found to be successfully canceled, dangling resources may exit", d.Id()) - } - - // Only wait for instance termination if requested - if !terminateInstances { - return nil - } - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - resp, err := conn.DescribeSpotFleetInstances(&ec2.DescribeSpotFleetInstancesInput{ - SpotFleetRequestId: aws.String(d.Id()), - }) - if err != nil { - return resource.NonRetryableError(err) - } - - if len(resp.ActiveInstances) == 0 { - log.Printf("[DEBUG] Active instance count is 0 for Spot Fleet Request (%s), removing", d.Id()) - return nil - } - - log.Printf("[DEBUG] Active instance count in Spot Fleet Request (%s): %d", d.Id(), len(resp.ActiveInstances)) - - return resource.RetryableError( - fmt.Errorf("fleet still has (%d) running instances", len(resp.ActiveInstances))) - }) -} - -func hashEphemeralBlockDevice(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - 
buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) - return hashcode.String(buf.String()) -} - -func hashRootBlockDevice(v interface{}) int { - // there can be only one root device; no need to hash anything - return 0 -} - -func hashLaunchSpecification(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["ami"].(string))) - if m["availability_zone"] != "" { - buf.WriteString(fmt.Sprintf("%s-", m["availability_zone"].(string))) - } - if m["subnet_id"] != "" { - buf.WriteString(fmt.Sprintf("%s-", m["subnet_id"].(string))) - } - buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string))) - return hashcode.String(buf.String()) -} - -func hashEbsBlockDevice(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if name, ok := m["device_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", name.(string))) - } - if id, ok := m["snapshot_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", id.(string))) - } - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/aws/resource_aws_spot_fleet_request_migrate.go b/builtin/providers/aws/resource_aws_spot_fleet_request_migrate.go deleted file mode 100644 index dea0a32e8..000000000 --- a/builtin/providers/aws/resource_aws_spot_fleet_request_migrate.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsSpotFleetRequestMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Spot Fleet Request State v0; migrating to v1") - return migrateSpotFleetRequestV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateSpotFleetRequestV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - 
log.Println("[DEBUG] Empty Spot Fleet Request State; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - is.Attributes["associate_public_ip_address"] = "false" - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_spot_fleet_request_migrate_test.go b/builtin/providers/aws/resource_aws_spot_fleet_request_migrate_test.go deleted file mode 100644 index 28e750f59..000000000 --- a/builtin/providers/aws/resource_aws_spot_fleet_request_migrate_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSSpotFleetRequestMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta interface{} - }{ - "v0_1": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{ - "associate_public_ip_address": "true", - }, - Expected: "false", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsSpotFleetRequestMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.Attributes["associate_public_ip_address"] != tc.Expected { - t.Fatalf("bad Spot Fleet Request Migrate: %s\n\n expected: %s", is.Attributes["associate_public_ip_address"], tc.Expected) - } - } -} diff --git a/builtin/providers/aws/resource_aws_spot_fleet_request_test.go b/builtin/providers/aws/resource_aws_spot_fleet_request_test.go deleted file mode 100644 index d1098ed6d..000000000 --- a/builtin/providers/aws/resource_aws_spot_fleet_request_test.go +++ /dev/null @@ -1,1473 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSpotFleetRequest_associatePublicIpAddress(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigAssociatePublicIpAddress(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "1"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.2633484960.associate_public_ip_address", "true"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_changePriceForcesNewRequest(t *testing.T) { - var before, after ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfig(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &before), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_price", "0.005"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "1"), - ), - }, - { - Config: 
testAccAWSSpotFleetRequestConfigChangeSpotBidPrice(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &after), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "1"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_price", "0.01"), - testAccCheckAWSSpotFleetRequestConfigRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_lowestPriceAzOrSubnetInRegion(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfig(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_lowestPriceAzInGivenList(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigWithAzs(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - 
"aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "2"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.1671188867.availability_zone", "us-west-2b"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_lowestPriceSubnetInGivenList(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigWithSubnet(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameAz(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigMultipleInstanceTypesinSameAz(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - 
"aws_spot_fleet_request.foo", "launch_specification.#", "2"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.590403189.availability_zone", "us-west-2a"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameSubnet(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigMultipleInstanceTypesinSameSubnet(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_overriddingSpotPrice(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigOverridingSpotPrice(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - 
"aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_price", "0.005"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "2"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.4143232216.spot_price", "0.01"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.4143232216.instance_type", "m3.large"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.335709043.spot_price", ""), //there will not be a value here since it's not overriding - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_diversifiedAllocation(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigDiversifiedAllocation(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "3"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "allocation_strategy", "diversified"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_withWeightedCapacity(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - fulfillSleep := 
func() resource.TestCheckFunc { - // sleep so that EC2 can fuflill the request. We do this to guard against a - // regression and possible leak where we'll destroy the request and the - // associated IAM role before anything is actually provisioned and running, - // thus leaking when those newly started instances are attempted to be - // destroyed - // See https://github.com/hashicorp/terraform/pull/8938 - return func(s *terraform.State) error { - log.Print("[DEBUG] Test: Sleep to allow EC2 to actually begin fulfilling TestAccAWSSpotFleetRequest_withWeightedCapacity request") - time.Sleep(1 * time.Minute) - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestConfigWithWeightedCapacity(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - fulfillSleep(), - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.#", "2"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.4120185872.weighted_capacity", "3"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.4120185872.instance_type", "r3.large"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.590403189.weighted_capacity", "6"), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_withEBSDisk(t *testing.T) { - var config ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestEBSConfig(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &config), - testAccCheckAWSSpotFleetRequest_EBSAttributes( - &config), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_placementTenancy(t *testing.T) { - var sfr ec2.SpotFleetRequestConfig - rName := acctest.RandString(10) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotFleetRequestTenancyConfig(rName, rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSSpotFleetRequestExists( - "aws_spot_fleet_request.foo", &sfr), - resource.TestCheckResourceAttr( - "aws_spot_fleet_request.foo", "spot_request_state", "active"), - testAccCheckAWSSpotFleetRequest_PlacementAttributes(&sfr), - ), - }, - }, - }) -} - -func TestAccAWSSpotFleetRequest_CannotUseEmptyKeyName(t *testing.T) { - _, errs := validateSpotFleetRequestKeyName("", "key_name") - if len(errs) == 0 { - t.Fatal("Expected the key name to trigger a validation error") - } -} - -func testAccCheckAWSSpotFleetRequestConfigRecreated(t *testing.T, - before, after *ec2.SpotFleetRequestConfig) resource.TestCheckFunc { - return func(s *terraform.State) error { - if before.SpotFleetRequestId == after.SpotFleetRequestId { - t.Fatalf("Expected change of Spot Fleet Request IDs, but both were %v", before.SpotFleetRequestId) - } - return nil - } -} - -func testAccCheckAWSSpotFleetRequestExists( - n string, sfr *ec2.SpotFleetRequestConfig) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return 
fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No Spot fleet request with that id exists") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - params := &ec2.DescribeSpotFleetRequestsInput{ - SpotFleetRequestIds: []*string{&rs.Primary.ID}, - } - resp, err := conn.DescribeSpotFleetRequests(params) - - if err != nil { - return err - } - - if v := len(resp.SpotFleetRequestConfigs); v != 1 { - return fmt.Errorf("Expected 1 request returned, got %d", v) - } - - *sfr = *resp.SpotFleetRequestConfigs[0] - - return nil - } -} - -func testAccCheckAWSSpotFleetRequest_EBSAttributes( - sfr *ec2.SpotFleetRequestConfig) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(sfr.SpotFleetRequestConfig.LaunchSpecifications) == 0 { - return errors.New("Missing launch specification") - } - - spec := *sfr.SpotFleetRequestConfig.LaunchSpecifications[0] - - ebs := spec.BlockDeviceMappings - if len(ebs) < 2 { - return fmt.Errorf("Expected %d block device mappings, got %d", 2, len(ebs)) - } - - if *ebs[0].DeviceName != "/dev/xvda" { - return fmt.Errorf("Expected device 0's name to be %s, got %s", "/dev/xvda", *ebs[0].DeviceName) - } - if *ebs[1].DeviceName != "/dev/xvdcz" { - return fmt.Errorf("Expected device 1's name to be %s, got %s", "/dev/xvdcz", *ebs[1].DeviceName) - } - - return nil - } -} - -func testAccCheckAWSSpotFleetRequest_PlacementAttributes( - sfr *ec2.SpotFleetRequestConfig) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(sfr.SpotFleetRequestConfig.LaunchSpecifications) == 0 { - return errors.New("Missing launch specification") - } - - spec := *sfr.SpotFleetRequestConfig.LaunchSpecifications[0] - - placement := spec.Placement - if placement == nil { - return fmt.Errorf("Expected placement to be set, got nil") - } - if *placement.Tenancy != "dedicated" { - return fmt.Errorf("Expected placement tenancy to be %q, got %q", "dedicated", placement.Tenancy) - } - - return nil - } -} 
- -func testAccCheckAWSSpotFleetRequestDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_spot_fleet_request" { - continue - } - - _, err := conn.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{ - SpotFleetRequestIds: []*string{aws.String(rs.Primary.ID)}, - TerminateInstances: aws.Bool(true), - }) - - if err != nil { - return fmt.Errorf("Error cancelling spot request (%s): %s", rs.Primary.ID, err) - } - } - - return nil -} - -func testAccAWSSpotFleetRequestConfigAssociatePublicIpAddress(rName string, rInt int) string { - return fmt.Sprintf(` -resource "aws_key_pair" "debugging" { - key_name = "tmp-key-%s" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" -} - -resource "aws_iam_policy" "test-policy" { - name = "test-policy-%d" - path = "/" - description = "Spot Fleet Request ACCTest Policy" - policy = < 0 { - spotOpts.LaunchSpecification.SecurityGroupIds = instanceOpts.NetworkInterfaces[0].Groups - spotOpts.LaunchSpecification.SubnetId = instanceOpts.NetworkInterfaces[0].SubnetId - } - - // Make the spot instance request - log.Printf("[DEBUG] Requesting spot bid opts: %s", spotOpts) - - var resp *ec2.RequestSpotInstancesOutput - err = resource.Retry(15*time.Second, func() *resource.RetryError { - var err error - resp, err = conn.RequestSpotInstances(spotOpts) - // IAM instance profiles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if isAWSErr(err, 
"InvalidParameterValue", "Invalid IAM Instance Profile") { - log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") - return resource.RetryableError(err) - } - // IAM roles can also take time to propagate in AWS: - if isAWSErr(err, "InvalidParameterValue", " has no associated IAM Roles") { - log.Printf("[DEBUG] IAM Instance Profile appears to have no IAM roles, retrying...") - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - }) - - if err != nil { - return fmt.Errorf("Error requesting spot instances: %s", err) - } - if len(resp.SpotInstanceRequests) != 1 { - return fmt.Errorf( - "Expected response with length 1, got: %s", resp) - } - - sir := *resp.SpotInstanceRequests[0] - d.SetId(*sir.SpotInstanceRequestId) - - if d.Get("wait_for_fulfillment").(bool) { - spotStateConf := &resource.StateChangeConf{ - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html - Pending: []string{"start", "pending-evaluation", "pending-fulfillment"}, - Target: []string{"fulfilled"}, - Refresh: SpotInstanceStateRefreshFunc(conn, sir), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - log.Printf("[DEBUG] waiting for spot bid to resolve... this may take several minutes.") - _, err = spotStateConf.WaitForState() - - if err != nil { - return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", sir, err) - } - } - - return resourceAwsSpotInstanceRequestUpdate(d, meta) -} - -// Update spot state, etc -func resourceAwsSpotInstanceRequestRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - req := &ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []*string{aws.String(d.Id())}, - } - resp, err := conn.DescribeSpotInstanceRequests(req) - - if err != nil { - // If the spot request was not found, return nil so that we can show - // that it is gone. 
- if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" { - d.SetId("") - return nil - } - - // Some other error, report it - return err - } - - // If nothing was found, then return no state - if len(resp.SpotInstanceRequests) == 0 { - d.SetId("") - return nil - } - - request := resp.SpotInstanceRequests[0] - - // if the request is cancelled, then it is gone - if *request.State == "cancelled" { - d.SetId("") - return nil - } - - d.Set("spot_bid_status", *request.Status.Code) - // Instance ID is not set if the request is still pending - if request.InstanceId != nil { - d.Set("spot_instance_id", *request.InstanceId) - // Read the instance data, setting up connection information - if err := readInstance(d, meta); err != nil { - return fmt.Errorf("[ERR] Error reading Spot Instance Data: %s", err) - } - } - - d.Set("spot_request_state", request.State) - d.Set("block_duration_minutes", request.BlockDurationMinutes) - d.Set("tags", tagsToMap(request.Tags)) - - return nil -} - -func readInstance(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(d.Get("spot_instance_id").(string))}, - }) - if err != nil { - // If the instance was not found, return nil so that we can show - // that the instance is gone. 
- if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { - return fmt.Errorf("no instance found") - } - - // Some other error, report it - return err - } - - // If nothing was found, then return no state - if len(resp.Reservations) == 0 { - return fmt.Errorf("no instances found") - } - - instance := resp.Reservations[0].Instances[0] - - // Set these fields for connection information - if instance != nil { - d.Set("public_dns", instance.PublicDnsName) - d.Set("public_ip", instance.PublicIpAddress) - d.Set("private_dns", instance.PrivateDnsName) - d.Set("private_ip", instance.PrivateIpAddress) - - // set connection information - if instance.PublicIpAddress != nil { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": *instance.PublicIpAddress, - }) - } else if instance.PrivateIpAddress != nil { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": *instance.PrivateIpAddress, - }) - } - if err := readBlockDevices(d, instance, conn); err != nil { - return err - } - - var ipv6Addresses []string - if len(instance.NetworkInterfaces) > 0 { - for _, ni := range instance.NetworkInterfaces { - if *ni.Attachment.DeviceIndex == 0 { - d.Set("subnet_id", ni.SubnetId) - d.Set("network_interface_id", ni.NetworkInterfaceId) - d.Set("associate_public_ip_address", ni.Association != nil) - d.Set("ipv6_address_count", len(ni.Ipv6Addresses)) - - for _, address := range ni.Ipv6Addresses { - ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address) - } - } - } - } else { - d.Set("subnet_id", instance.SubnetId) - d.Set("network_interface_id", "") - } - - if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil { - log.Printf("[WARN] Error setting ipv6_addresses for AWS Spot Instance (%s): %s", d.Id(), err) - } - } - - return nil -} - -func resourceAwsSpotInstanceRequestUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - d.Partial(true) - if err := setTags(conn, d); err != nil { - return 
err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - - return resourceAwsSpotInstanceRequestRead(d, meta) -} - -func resourceAwsSpotInstanceRequestDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Cancelling spot request: %s", d.Id()) - _, err := conn.CancelSpotInstanceRequests(&ec2.CancelSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []*string{aws.String(d.Id())}, - }) - - if err != nil { - return fmt.Errorf("Error cancelling spot request (%s): %s", d.Id(), err) - } - - if instanceId := d.Get("spot_instance_id").(string); instanceId != "" { - log.Printf("[INFO] Terminating instance: %s", instanceId) - if err := awsTerminateInstance(conn, instanceId, d); err != nil { - return fmt.Errorf("Error terminating spot instance: %s", err) - } - } - - return nil -} - -// SpotInstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an EC2 spot instance request -func SpotInstanceStateRefreshFunc( - conn *ec2.EC2, sir ec2.SpotInstanceRequest) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []*string{sir.SpotInstanceRequestId}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" { - // Set this to nil as if we didn't find anything. - resp = nil - } else { - log.Printf("Error on StateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil || len(resp.SpotInstanceRequests) == 0 { - // Sometimes AWS just has consistency issues and doesn't see - // our request yet. Return an empty state. 
- return nil, "", nil - } - - req := resp.SpotInstanceRequests[0] - return req, *req.Status.Code, nil - } -} diff --git a/builtin/providers/aws/resource_aws_spot_instance_request_test.go b/builtin/providers/aws/resource_aws_spot_instance_request_test.go deleted file mode 100644 index 268bf7ee3..000000000 --- a/builtin/providers/aws/resource_aws_spot_instance_request_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSpotInstanceRequest_basic(t *testing.T) { - var sir ec2.SpotInstanceRequest - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotInstanceRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotInstanceRequestConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSpotInstanceRequestExists( - "aws_spot_instance_request.foo", &sir), - testAccCheckAWSSpotInstanceRequestAttributes(&sir), - testCheckKeyPair(fmt.Sprintf("tmp-key-%d", rInt), &sir), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo", "spot_bid_status", "fulfilled"), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo", "spot_request_state", "active"), - ), - }, - }, - }) -} - -func TestAccAWSSpotInstanceRequest_withBlockDuration(t *testing.T) { - var sir ec2.SpotInstanceRequest - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotInstanceRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotInstanceRequestConfig_withBlockDuration(rInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSSpotInstanceRequestExists( - "aws_spot_instance_request.foo", &sir), - testAccCheckAWSSpotInstanceRequestAttributes(&sir), - testCheckKeyPair(fmt.Sprintf("tmp-key-%d", rInt), &sir), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo", "spot_bid_status", "fulfilled"), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo", "spot_request_state", "active"), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo", "block_duration_minutes", "60"), - ), - }, - }, - }) -} - -func TestAccAWSSpotInstanceRequest_vpc(t *testing.T) { - var sir ec2.SpotInstanceRequest - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotInstanceRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotInstanceRequestConfigVPC(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSpotInstanceRequestExists( - "aws_spot_instance_request.foo_VPC", &sir), - testAccCheckAWSSpotInstanceRequestAttributes(&sir), - testCheckKeyPair(fmt.Sprintf("tmp-key-%d", rInt), &sir), - testAccCheckAWSSpotInstanceRequestAttributesVPC(&sir), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo_VPC", "spot_bid_status", "fulfilled"), - resource.TestCheckResourceAttr( - "aws_spot_instance_request.foo_VPC", "spot_request_state", "active"), - ), - }, - }, - }) -} - -func TestAccAWSSpotInstanceRequest_SubnetAndSG(t *testing.T) { - var sir ec2.SpotInstanceRequest - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSpotInstanceRequestDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSpotInstanceRequestConfig_SubnetAndSG(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSpotInstanceRequestExists( - "aws_spot_instance_request.foo", 
&sir), - testAccCheckAWSSpotInstanceRequest_InstanceAttributes(&sir, rInt), - ), - }, - }, - }) -} - -func testCheckKeyPair(keyName string, sir *ec2.SpotInstanceRequest) resource.TestCheckFunc { - return func(*terraform.State) error { - if sir.LaunchSpecification.KeyName == nil { - return fmt.Errorf("No Key Pair found, expected(%s)", keyName) - } - if sir.LaunchSpecification.KeyName != nil && *sir.LaunchSpecification.KeyName != keyName { - return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, *sir.LaunchSpecification.KeyName) - } - - return nil - } -} - -func testAccCheckAWSSpotInstanceRequestDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_spot_instance_request" { - continue - } - - req := &ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []*string{aws.String(rs.Primary.ID)}, - } - - resp, err := conn.DescribeSpotInstanceRequests(req) - var s *ec2.SpotInstanceRequest - if err == nil { - for _, sir := range resp.SpotInstanceRequests { - if sir.SpotInstanceRequestId != nil && *sir.SpotInstanceRequestId == rs.Primary.ID { - s = sir - } - continue - } - } - - if s == nil { - // not found - return nil - } - - if *s.State == "canceled" { - // Requests stick around for a while, so we make sure it's cancelled - return nil - } - - // Verify the error is what we expect - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidSpotInstanceRequestID.NotFound" { - return err - } - - // Now check if the associated Spot Instance was also destroyed - instId := rs.Primary.Attributes["spot_instance_id"] - instResp, instErr := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{aws.String(instId)}, - }) - if instErr == nil { - if len(instResp.Reservations) > 0 { - return fmt.Errorf("Instance still exists.") - } - - return nil - } - - // Verify the error is what we expect - ec2err, ok 
= err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidInstanceID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckAWSSpotInstanceRequestExists( - n string, sir *ec2.SpotInstanceRequest) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SNS subscription with that ARN exists") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - params := &ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []*string{&rs.Primary.ID}, - } - resp, err := conn.DescribeSpotInstanceRequests(params) - - if err != nil { - return err - } - - if v := len(resp.SpotInstanceRequests); v != 1 { - return fmt.Errorf("Expected 1 request returned, got %d", v) - } - - *sir = *resp.SpotInstanceRequests[0] - - return nil - } -} - -func testAccCheckAWSSpotInstanceRequestAttributes( - sir *ec2.SpotInstanceRequest) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *sir.SpotPrice != "0.050000" { - return fmt.Errorf("Unexpected spot price: %s", *sir.SpotPrice) - } - if *sir.State != "active" { - return fmt.Errorf("Unexpected request state: %s", *sir.State) - } - if *sir.Status.Code != "fulfilled" { - return fmt.Errorf("Unexpected bid status: %s", *sir.State) - } - return nil - } -} - -func testAccCheckAWSSpotInstanceRequest_InstanceAttributes( - sir *ec2.SpotInstanceRequest, rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ - InstanceIds: []*string{sir.InstanceId}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { - return fmt.Errorf("Spot Instance not found") - } - return err - } - - // If nothing was found, then return no state - if 
len(resp.Reservations) == 0 { - return fmt.Errorf("Spot Instance not found") - } - - instance := resp.Reservations[0].Instances[0] - - var sgMatch bool - for _, s := range instance.SecurityGroups { - // Hardcoded name for the security group that should be added inside the - // VPC - if *s.GroupName == fmt.Sprintf("tf_test_sg_ssh-%d", rInt) { - sgMatch = true - } - } - - if !sgMatch { - return fmt.Errorf("Error in matching Spot Instance Security Group, expected 'tf_test_sg_ssh-%d', got %s", rInt, instance.SecurityGroups) - } - - return nil - } -} - -func testAccCheckAWSSpotInstanceRequestAttributesVPC( - sir *ec2.SpotInstanceRequest) resource.TestCheckFunc { - return func(s *terraform.State) error { - if sir.LaunchSpecification.SubnetId == nil { - return fmt.Errorf("SubnetID was not passed, but should have been for this instance to belong to a VPC") - } - return nil - } -} - -func testAccAWSSpotInstanceRequestConfig(rInt int) string { - return fmt.Sprintf(` - resource "aws_key_pair" "debugging" { - key_name = "tmp-key-%d" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" - } - - resource "aws_spot_instance_request" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - key_name = "${aws_key_pair.debugging.key_name}" - - // base price is $0.044 hourly, so bidding above that should theoretically - // always fulfill - spot_price = "0.05" - - // we wait for fulfillment because we want to inspect the launched instance - // and verify termination behavior - wait_for_fulfillment = true - - tags { - Name = "terraform-test" - } - }`, rInt) -} - -func 
testAccAWSSpotInstanceRequestConfig_withBlockDuration(rInt int) string { - return fmt.Sprintf(` - resource "aws_key_pair" "debugging" { - key_name = "tmp-key-%d" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" - } - - resource "aws_spot_instance_request" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - key_name = "${aws_key_pair.debugging.key_name}" - - // base price is $0.044 hourly, so bidding above that should theoretically - // always fulfill - spot_price = "0.05" - - // we wait for fulfillment because we want to inspect the launched instance - // and verify termination behavior - wait_for_fulfillment = true - - block_duration_minutes = 60 - - tags { - Name = "terraform-test" - } - }`, rInt) -} - -func testAccAWSSpotInstanceRequestConfigVPC(rInt int) string { - return fmt.Sprintf(` - resource "aws_vpc" "foo_VPC" { - cidr_block = "10.1.0.0/16" - } - - resource "aws_subnet" "foo_VPC" { - cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo_VPC.id}" - } - - resource "aws_key_pair" "debugging" { - key_name = "tmp-key-%d" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" - } - - resource "aws_spot_instance_request" "foo_VPC" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - key_name = 
"${aws_key_pair.debugging.key_name}" - - // base price is $0.044 hourly, so bidding above that should theoretically - // always fulfill - spot_price = "0.05" - - // VPC settings - subnet_id = "${aws_subnet.foo_VPC.id}" - - // we wait for fulfillment because we want to inspect the launched instance - // and verify termination behavior - wait_for_fulfillment = true - - tags { - Name = "terraform-test-VPC" - } - }`, rInt) -} - -func testAccAWSSpotInstanceRequestConfig_SubnetAndSG(rInt int) string { - return fmt.Sprintf(` - resource "aws_spot_instance_request" "foo" { - ami = "ami-4fccb37f" - instance_type = "m1.small" - spot_price = "0.05" - wait_for_fulfillment = true - subnet_id = "${aws_subnet.tf_test_subnet.id}" - vpc_security_group_ids = ["${aws_security_group.tf_test_sg_ssh.id}"] - associate_public_ip_address = true - } - - resource "aws_vpc" "default" { - cidr_block = "10.0.0.0/16" - enable_dns_hostnames = true - - tags { - Name = "tf_test_vpc" - } - } - - resource "aws_subnet" "tf_test_subnet" { - vpc_id = "${aws_vpc.default.id}" - cidr_block = "10.0.0.0/24" - map_public_ip_on_launch = true - - tags { - Name = "tf_test_subnet-%d" - } - } - - resource "aws_security_group" "tf_test_sg_ssh" { - name = "tf_test_sg_ssh-%d" - description = "tf_test_sg_ssh" - vpc_id = "${aws_vpc.default.id}" - - tags { - Name = "tf_test_sg_ssh-%d" - } - }`, rInt, rInt, rInt) -} diff --git a/builtin/providers/aws/resource_aws_sqs_queue.go b/builtin/providers/aws/resource_aws_sqs_queue.go deleted file mode 100644 index b7ce4c52b..000000000 --- a/builtin/providers/aws/resource_aws_sqs_queue.go +++ /dev/null @@ -1,297 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "net/url" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/sqs" -) - -var AttributeMap = map[string]string{ - "delay_seconds": "DelaySeconds", - "max_message_size": 
"MaximumMessageSize", - "message_retention_seconds": "MessageRetentionPeriod", - "receive_wait_time_seconds": "ReceiveMessageWaitTimeSeconds", - "visibility_timeout_seconds": "VisibilityTimeout", - "policy": "Policy", - "redrive_policy": "RedrivePolicy", - "arn": "QueueArn", - "fifo_queue": "FifoQueue", - "content_based_deduplication": "ContentBasedDeduplication", -} - -// A number of these are marked as computed because if you don't -// provide a value, SQS will provide you with defaults (which are the -// default values specified below) -func resourceAwsSqsQueue() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSqsQueueCreate, - Read: resourceAwsSqsQueueRead, - Update: resourceAwsSqsQueueUpdate, - Delete: resourceAwsSqsQueueDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "delay_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "max_message_size": { - Type: schema.TypeInt, - Optional: true, - Default: 262144, - }, - "message_retention_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 345600, - }, - "receive_wait_time_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "visibility_timeout_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 30, - }, - "policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - "redrive_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateJsonString, - StateFunc: func(v interface{}) string { - json, _ := normalizeJsonString(v) - return json - }, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "fifo_queue": { - Type: schema.TypeBool, - Default: false, - ForceNew: true, - Optional: true, - }, - "content_based_deduplication": 
{ - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - }, - } -} - -func resourceAwsSqsQueueCreate(d *schema.ResourceData, meta interface{}) error { - sqsconn := meta.(*AWSClient).sqsconn - - name := d.Get("name").(string) - fq := d.Get("fifo_queue").(bool) - cbd := d.Get("content_based_deduplication").(bool) - - if fq { - if errors := validateSQSFifoQueueName(name, "name"); len(errors) > 0 { - return fmt.Errorf("Error validating the FIFO queue name: %v", errors) - } - } else { - if errors := validateSQSQueueName(name, "name"); len(errors) > 0 { - return fmt.Errorf("Error validating SQS queue name: %v", errors) - } - } - - if !fq && cbd { - return fmt.Errorf("Content based deduplication can only be set with FIFO queues") - } - - log.Printf("[DEBUG] SQS queue create: %s", name) - - req := &sqs.CreateQueueInput{ - QueueName: aws.String(name), - } - - attributes := make(map[string]*string) - - resource := *resourceAwsSqsQueue() - - for k, s := range resource.Schema { - if attrKey, ok := AttributeMap[k]; ok { - if value, ok := d.GetOk(k); ok { - switch s.Type { - case schema.TypeInt: - attributes[attrKey] = aws.String(strconv.Itoa(value.(int))) - case schema.TypeBool: - attributes[attrKey] = aws.String(strconv.FormatBool(value.(bool))) - default: - attributes[attrKey] = aws.String(value.(string)) - } - } - - } - } - - if len(attributes) > 0 { - req.Attributes = attributes - } - - output, err := sqsconn.CreateQueue(req) - if err != nil { - return fmt.Errorf("Error creating SQS queue: %s", err) - } - - d.SetId(*output.QueueUrl) - - return resourceAwsSqsQueueUpdate(d, meta) -} - -func resourceAwsSqsQueueUpdate(d *schema.ResourceData, meta interface{}) error { - sqsconn := meta.(*AWSClient).sqsconn - attributes := make(map[string]*string) - - resource := *resourceAwsSqsQueue() - - for k, s := range resource.Schema { - if attrKey, ok := AttributeMap[k]; ok { - if d.HasChange(k) { - log.Printf("[DEBUG] Updating %s", attrKey) - _, n := d.GetChange(k) - switch 
s.Type { - case schema.TypeInt: - attributes[attrKey] = aws.String(strconv.Itoa(n.(int))) - case schema.TypeBool: - attributes[attrKey] = aws.String(strconv.FormatBool(n.(bool))) - default: - attributes[attrKey] = aws.String(n.(string)) - } - } - } - } - - if len(attributes) > 0 { - req := &sqs.SetQueueAttributesInput{ - QueueUrl: aws.String(d.Id()), - Attributes: attributes, - } - if _, err := sqsconn.SetQueueAttributes(req); err != nil { - return fmt.Errorf("[ERR] Error updating SQS attributes: %s", err) - } - } - - return resourceAwsSqsQueueRead(d, meta) -} - -func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { - sqsconn := meta.(*AWSClient).sqsconn - - attributeOutput, err := sqsconn.GetQueueAttributes(&sqs.GetQueueAttributesInput{ - QueueUrl: aws.String(d.Id()), - AttributeNames: []*string{aws.String("All")}, - }) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - log.Printf("ERROR Found %s", awsErr.Code()) - if "AWS.SimpleQueueService.NonExistentQueue" == awsErr.Code() { - d.SetId("") - log.Printf("[DEBUG] SQS Queue (%s) not found", d.Get("name").(string)) - return nil - } - } - return err - } - - name, err := extractNameFromSqsQueueUrl(d.Id()) - if err != nil { - return err - } - d.Set("name", name) - - if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 { - attrmap := attributeOutput.Attributes - resource := *resourceAwsSqsQueue() - // iKey = internal struct key, oKey = AWS Attribute Map key - for iKey, oKey := range AttributeMap { - if attrmap[oKey] != nil { - switch resource.Schema[iKey].Type { - case schema.TypeInt: - value, err := strconv.Atoi(*attrmap[oKey]) - if err != nil { - return err - } - d.Set(iKey, value) - log.Printf("[DEBUG] Reading %s => %s -> %d", iKey, oKey, value) - case schema.TypeBool: - value, err := strconv.ParseBool(*attrmap[oKey]) - if err != nil { - return err - } - d.Set(iKey, value) - log.Printf("[DEBUG] Reading %s => %s -> %t", iKey, oKey, value) - default: - 
log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, *attrmap[oKey]) - d.Set(iKey, *attrmap[oKey]) - } - } - } - } - - // Since AWS does not send the FifoQueue attribute back when the queue - // is a standard one (even to false), this enforces the queue to be set - // to the correct value. - d.Set("fifo_queue", d.Get("fifo_queue").(bool)) - d.Set("content_based_deduplication", d.Get("content_based_deduplication").(bool)) - - return nil -} - -func resourceAwsSqsQueueDelete(d *schema.ResourceData, meta interface{}) error { - sqsconn := meta.(*AWSClient).sqsconn - - log.Printf("[DEBUG] SQS Delete Queue: %s", d.Id()) - _, err := sqsconn.DeleteQueue(&sqs.DeleteQueueInput{ - QueueUrl: aws.String(d.Id()), - }) - if err != nil { - return err - } - return nil -} - -func extractNameFromSqsQueueUrl(queue string) (string, error) { - //http://sqs.us-west-2.amazonaws.com/123456789012/queueName - u, err := url.Parse(queue) - if err != nil { - return "", err - } - segments := strings.Split(u.Path, "/") - if len(segments) != 3 { - return "", fmt.Errorf("SQS Url not parsed correctly") - } - - return segments[2], nil - -} diff --git a/builtin/providers/aws/resource_aws_sqs_queue_policy.go b/builtin/providers/aws/resource_aws_sqs_queue_policy.go deleted file mode 100644 index 343249799..000000000 --- a/builtin/providers/aws/resource_aws_sqs_queue_policy.go +++ /dev/null @@ -1,100 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/sqs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSqsQueuePolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSqsQueuePolicyUpsert, - Read: resourceAwsSqsQueuePolicyRead, - Update: resourceAwsSqsQueuePolicyUpsert, - Delete: resourceAwsSqsQueuePolicyDelete, - - Schema: map[string]*schema.Schema{ - "queue_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateJsonString, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - }, - } -} - -func resourceAwsSqsQueuePolicyUpsert(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).sqsconn - url := d.Get("queue_url").(string) - - _, err := conn.SetQueueAttributes(&sqs.SetQueueAttributesInput{ - QueueUrl: aws.String(url), - Attributes: aws.StringMap(map[string]string{ - "Policy": d.Get("policy").(string), - }), - }) - if err != nil { - return fmt.Errorf("Error updating SQS attributes: %s", err) - } - - d.SetId("sqs-policy-" + url) - - return resourceAwsSqsQueuePolicyRead(d, meta) -} - -func resourceAwsSqsQueuePolicyRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).sqsconn - url := d.Get("queue_url").(string) - out, err := conn.GetQueueAttributes(&sqs.GetQueueAttributesInput{ - QueueUrl: aws.String(url), - AttributeNames: []*string{aws.String("Policy")}, - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "AWS.SimpleQueueService.NonExistentQueue" { - log.Printf("[WARN] SQS Queue (%s) not found", d.Id()) - d.SetId("") - return nil - } - return err - } - if out == nil { - return fmt.Errorf("Received empty response for SQS queue %s", d.Id()) - } - - policy, ok := out.Attributes["Policy"] - if !ok { - return fmt.Errorf("SQS Queue policy not found for %s", d.Id()) - } - - d.Set("policy", policy) - - return nil -} - -func resourceAwsSqsQueuePolicyDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).sqsconn - - url := d.Get("queue_url").(string) - log.Printf("[DEBUG] Deleting SQS Queue Policy of %s", url) - _, err := conn.SetQueueAttributes(&sqs.SetQueueAttributesInput{ - QueueUrl: aws.String(url), - Attributes: aws.StringMap(map[string]string{ - "Policy": "", - }), - }) - if err != nil { - return fmt.Errorf("Error deleting SQS Queue policy: %s", err) - } - return 
nil -} diff --git a/builtin/providers/aws/resource_aws_sqs_queue_policy_test.go b/builtin/providers/aws/resource_aws_sqs_queue_policy_test.go deleted file mode 100644 index c663cac19..000000000 --- a/builtin/providers/aws/resource_aws_sqs_queue_policy_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSSQSQueuePolicy_basic(t *testing.T) { - queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSQSQueueDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSQSPolicyConfig_basic(queueName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.q"), - resource.TestMatchResourceAttr("aws_sqs_queue_policy.test", "policy", - regexp.MustCompile("^{\"Version\":\"2012-10-17\".+")), - ), - }, - }, - }) -} - -func testAccAWSSQSPolicyConfig_basic(r string) string { - return fmt.Sprintf(testAccAWSSQSPolicyConfig_basic_tpl, r) -} - -const testAccAWSSQSPolicyConfig_basic_tpl = ` -resource "aws_sqs_queue" "q" { - name = "%s" -} - -resource "aws_sqs_queue_policy" "test" { - queue_url = "${aws_sqs_queue.q.id}" - policy = < 0 { - return fmt.Errorf("Expected AWS SSM Activation to be gone, but was still found") - } - - return nil - } - - return fmt.Errorf("Default error in SSM Activation Test") -} - -func testAccAWSSSMActivationBasicConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role" "test_role" { - name = "test_role-%s" - assume_role_policy = < 1 { - ids = strings.Join(account_ids, ",") - } else { - ids = "" - } - - if ids == "" { - return nil, nil - } - - perms := make(map[string]interface{}) - perms["type"] = permissionType - perms["account_ids"] = ids - - return perms, nil -} - 
-func deleteDocumentPermissions(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Removing permissions from document: %s", d.Id()) - - permInput := &ssm.ModifyDocumentPermissionInput{ - Name: aws.String(d.Get("name").(string)), - PermissionType: aws.String("Share"), - AccountIdsToRemove: aws.StringSlice(strings.Split("all", ",")), - } - - _, err := ssmconn.ModifyDocumentPermission(permInput) - - if err != nil { - return errwrap.Wrapf("[ERROR] Error removing permissions for SSM document: {{err}}", err) - } - - return nil -} - -func updateAwsSSMDocument(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Updating SSM Document: %s", d.Id()) - - name := d.Get("name").(string) - - updateDocInput := &ssm.UpdateDocumentInput{ - Name: aws.String(name), - Content: aws.String(d.Get("content").(string)), - DocumentVersion: aws.String(d.Get("default_version").(string)), - } - - newDefaultVersion := d.Get("default_version").(string) - - ssmconn := meta.(*AWSClient).ssmconn - updated, err := ssmconn.UpdateDocument(updateDocInput) - - if isAWSErr(err, "DuplicateDocumentContent", "") { - log.Printf("[DEBUG] Content is a duplicate of the latest version so update is not necessary: %s", d.Id()) - log.Printf("[INFO] Updating the default version to the latest version %s: %s", newDefaultVersion, d.Id()) - - newDefaultVersion = d.Get("latest_version").(string) - } else if err != nil { - return errwrap.Wrapf("Error updating SSM document: {{err}}", err) - } else { - log.Printf("[INFO] Updating the default version to the new version %s: %s", newDefaultVersion, d.Id()) - newDefaultVersion = *updated.DocumentDescription.DocumentVersion - } - - updateDefaultInput := &ssm.UpdateDocumentDefaultVersionInput{ - Name: aws.String(name), - DocumentVersion: aws.String(newDefaultVersion), - } - - _, err = ssmconn.UpdateDocumentDefaultVersion(updateDefaultInput) - - if err != nil { - return errwrap.Wrapf("Error updating the 
default document version to that of the updated document: {{err}}", err) - } - return nil -} - -func validateAwsSSMDocumentType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "Command": true, - "Policy": true, - "Automation": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("Document type %s is invalid. Valid types are Command, Policy or Automation", value)) - } - return -} diff --git a/builtin/providers/aws/resource_aws_ssm_document_test.go b/builtin/providers/aws/resource_aws_ssm_document_test.go deleted file mode 100644 index dc34276e1..000000000 --- a/builtin/providers/aws/resource_aws_ssm_document_test.go +++ /dev/null @@ -1,473 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSSMDocument_basic(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMDocumentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSSMDocumentBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - ), - }, - }, - }) -} - -func TestAccAWSSSMDocument_update(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMDocumentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSSMDocument20Config(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - resource.TestCheckResourceAttr( 
- "aws_ssm_document.foo", "schema_version", "2.0"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "latest_version", "1"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "default_version", "1"), - ), - }, - resource.TestStep{ - Config: testAccAWSSSMDocument20UpdatedConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "latest_version", "2"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "default_version", "2"), - ), - }, - }, - }) -} - -func TestAccAWSSSMDocument_permission(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMDocumentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSSMDocumentPermissionConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "permissions.type", "Share"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "permissions.account_ids", "all"), - ), - }, - }, - }) -} - -func TestAccAWSSSMDocument_params(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMDocumentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSSMDocumentParamConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.0.name", "commands"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.0.type", "StringList"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.1.name", 
"workingDirectory"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.1.type", "String"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.2.name", "executionTimeout"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "parameter.2.type", "String"), - ), - }, - }, - }) -} - -func TestAccAWSSSMDocument_automation(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMDocumentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSSMDocumentTypeAutomationConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMDocumentExists("aws_ssm_document.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_document.foo", "document_type", "Automation"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSMDocumentExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SSM Document ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - _, err := conn.DescribeDocument(&ssm.DescribeDocumentInput{ - Name: aws.String(rs.Primary.ID), - }) - if err != nil { - return err - } - - return nil - } -} - -func testAccCheckAWSSSMDocumentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_document" { - continue - } - - out, err := conn.DescribeDocument(&ssm.DescribeDocumentInput{ - Name: aws.String(rs.Primary.Attributes["name"]), - }) - - if err != nil { - // InvalidDocument means it's gone, this is good - if wserr, ok := err.(awserr.Error); ok && wserr.Code() == "InvalidDocument" { - return nil - } - return err - } - - if out != nil { - return fmt.Errorf("Expected 
AWS SSM Document to be gone, but was still found") - } - - return nil - } - - return fmt.Errorf("Default error in SSM Document Test") -} - -/* -Based on examples from here: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/create-ssm-doc.html -*/ - -func testAccAWSSSMDocumentBasicConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_ssm_document" "foo" { - name = "test_document-%s" - document_type = "Command" - - content = < 0 { - return fmt.Errorf("Expected AWS SSM Maintenance Target to be gone, but was still found") - } - - return nil - } - - return nil -} - -func testAccAWSSSMMaintenanceWindowTargetBasicConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_ssm_maintenance_window" "foo" { - name = "maintenance-window-%s" - schedule = "cron(0 16 ? * TUE *)" - duration = 3 - cutoff = 1 -} - -resource "aws_ssm_maintenance_window_target" "target" { - window_id = "${aws_ssm_maintenance_window.foo.id}" - resource_type = "INSTANCE" - targets { - key = "tag:Name" - values = ["acceptance_test"] - } -} -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_ssm_maintenance_window_task.go b/builtin/providers/aws/resource_aws_ssm_maintenance_window_task.go deleted file mode 100644 index 1931d385a..000000000 --- a/builtin/providers/aws/resource_aws_ssm_maintenance_window_task.go +++ /dev/null @@ -1,283 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsmMaintenanceWindowTaskCreate, - Read: resourceAwsSsmMaintenanceWindowTaskRead, - Delete: resourceAwsSsmMaintenanceWindowTaskDelete, - - Schema: map[string]*schema.Schema{ - "window_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "max_concurrency": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"max_errors": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "task_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "task_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "service_role_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "targets": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "priority": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "logging_info": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "s3_bucket_name": { - Type: schema.TypeString, - Required: true, - }, - "s3_region": { - Type: schema.TypeString, - Required: true, - }, - "s3_bucket_prefix": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "task_parameters": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func expandAwsSsmMaintenanceWindowLoggingInfo(config []interface{}) *ssm.LoggingInfo { - - loggingConfig := config[0].(map[string]interface{}) - - loggingInfo := &ssm.LoggingInfo{ - S3BucketName: aws.String(loggingConfig["s3_bucket_name"].(string)), - S3Region: aws.String(loggingConfig["s3_region"].(string)), - } - - if s := loggingConfig["s3_bucket_prefix"].(string); s != "" { - loggingInfo.S3KeyPrefix = aws.String(s) - } - - return loggingInfo -} - -func 
flattenAwsSsmMaintenanceWindowLoggingInfo(loggingInfo *ssm.LoggingInfo) []interface{} { - - result := make(map[string]interface{}) - result["s3_bucket_name"] = *loggingInfo.S3BucketName - result["s3_region"] = *loggingInfo.S3Region - - if loggingInfo.S3KeyPrefix != nil { - result["s3_bucket_prefix"] = *loggingInfo.S3KeyPrefix - } - - return []interface{}{result} -} - -func expandAwsSsmTaskParameters(config []interface{}) map[string]*ssm.MaintenanceWindowTaskParameterValueExpression { - params := make(map[string]*ssm.MaintenanceWindowTaskParameterValueExpression) - for _, v := range config { - paramConfig := v.(map[string]interface{}) - params[paramConfig["name"].(string)] = &ssm.MaintenanceWindowTaskParameterValueExpression{ - Values: expandStringList(paramConfig["values"].([]interface{})), - } - } - return params -} - -func flattenAwsSsmTaskParameters(taskParameters map[string]*ssm.MaintenanceWindowTaskParameterValueExpression) []interface{} { - result := make([]interface{}, 0, len(taskParameters)) - for k, v := range taskParameters { - taskParam := map[string]interface{}{ - "name": k, - "values": flattenStringList(v.Values), - } - result = append(result, taskParam) - } - - return result -} - -func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Registering SSM Maintenance Window Task") - - params := &ssm.RegisterTaskWithMaintenanceWindowInput{ - WindowId: aws.String(d.Get("window_id").(string)), - MaxConcurrency: aws.String(d.Get("max_concurrency").(string)), - MaxErrors: aws.String(d.Get("max_errors").(string)), - TaskType: aws.String(d.Get("task_type").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), - TaskArn: aws.String(d.Get("task_arn").(string)), - Targets: expandAwsSsmTargets(d), - } - - if v, ok := d.GetOk("priority"); ok { - params.Priority = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("logging_info"); ok { - 
params.LoggingInfo = expandAwsSsmMaintenanceWindowLoggingInfo(v.([]interface{})) - } - - if v, ok := d.GetOk("task_parameters"); ok { - params.TaskParameters = expandAwsSsmTaskParameters(v.([]interface{})) - } - - resp, err := ssmconn.RegisterTaskWithMaintenanceWindow(params) - if err != nil { - return err - } - - d.SetId(*resp.WindowTaskId) - - return resourceAwsSsmMaintenanceWindowTaskRead(d, meta) -} - -func resourceAwsSsmMaintenanceWindowTaskRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - params := &ssm.DescribeMaintenanceWindowTasksInput{ - WindowId: aws.String(d.Get("window_id").(string)), - } - - resp, err := ssmconn.DescribeMaintenanceWindowTasks(params) - if err != nil { - return err - } - - found := false - for _, t := range resp.Tasks { - if *t.WindowTaskId == d.Id() { - found = true - - d.Set("window_id", t.WindowId) - d.Set("max_concurrency", t.MaxConcurrency) - d.Set("max_errors", t.MaxErrors) - d.Set("task_type", t.Type) - d.Set("service_role_arn", t.ServiceRoleArn) - d.Set("task_arn", t.TaskArn) - d.Set("priority", t.Priority) - - if t.LoggingInfo != nil { - if err := d.Set("logging_info", flattenAwsSsmMaintenanceWindowLoggingInfo(t.LoggingInfo)); err != nil { - return fmt.Errorf("[DEBUG] Error setting logging_info error: %#v", err) - } - } - - if t.TaskParameters != nil { - if err := d.Set("task_parameters", flattenAwsSsmTaskParameters(t.TaskParameters)); err != nil { - return fmt.Errorf("[DEBUG] Error setting task_parameters error: %#v", err) - } - } - - if err := d.Set("targets", flattenAwsSsmTargets(t.Targets)); err != nil { - return fmt.Errorf("[DEBUG] Error setting targets error: %#v", err) - } - } - } - - if !found { - log.Printf("[INFO] Maintenance Window Target not found. 
Removing from state") - d.SetId("") - return nil - } - - return nil -} - -func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Deregistering SSM Maintenance Window Task: %s", d.Id()) - - params := &ssm.DeregisterTaskFromMaintenanceWindowInput{ - WindowId: aws.String(d.Get("window_id").(string)), - WindowTaskId: aws.String(d.Id()), - } - - _, err := ssmconn.DeregisterTaskFromMaintenanceWindow(params) - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_ssm_maintenance_window_task_test.go b/builtin/providers/aws/resource_aws_ssm_maintenance_window_task_test.go deleted file mode 100644 index 88718137a..000000000 --- a/builtin/providers/aws/resource_aws_ssm_maintenance_window_task_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSSMMaintenanceWindowTask_basic(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMMaintenanceWindowTaskExists("aws_ssm_maintenance_window_task.target"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSMMaintenanceWindowTaskExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No 
SSM Maintenance Window Task Window ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - resp, err := conn.DescribeMaintenanceWindowTasks(&ssm.DescribeMaintenanceWindowTasksInput{ - WindowId: aws.String(rs.Primary.Attributes["window_id"]), - }) - if err != nil { - return err - } - - for _, i := range resp.Tasks { - if *i.WindowTaskId == rs.Primary.ID { - return nil - } - } - - return fmt.Errorf("No AWS SSM Maintenance window task found") - } -} - -func testAccCheckAWSSSMMaintenanceWindowTaskDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_maintenance_window_target" { - continue - } - - out, err := conn.DescribeMaintenanceWindowTasks(&ssm.DescribeMaintenanceWindowTasksInput{ - WindowId: aws.String(rs.Primary.Attributes["window_id"]), - }) - - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "DoesNotExistException" { - continue - } - return err - } - - if len(out.Tasks) > 0 { - return fmt.Errorf("Expected AWS SSM Maintenance Task to be gone, but was still found") - } - - return nil - } - - return nil -} - -func testAccAWSSSMMaintenanceWindowTaskBasicConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_ssm_maintenance_window" "foo" { - name = "maintenance-window-%s" - schedule = "cron(0 16 ? 
* TUE *)" - duration = 3 - cutoff = 1 -} - -resource "aws_ssm_maintenance_window_task" "target" { - window_id = "${aws_ssm_maintenance_window.foo.id}" - task_type = "RUN_COMMAND" - task_arn = "AWS-RunShellScript" - priority = 1 - service_role_arn = "${aws_iam_role.ssm_role.arn}" - max_concurrency = "2" - max_errors = "1" - targets { - key = "InstanceIds" - values = ["${aws_instance.foo.id}"] - } - task_parameters { - name = "commands" - values = ["pwd"] - } -} - -resource "aws_instance" "foo" { - ami = "ami-4fccb37f" - - instance_type = "m1.small" -} - -resource "aws_iam_role" "ssm_role" { - name = "ssm-role-%s" - - assume_role_policy = < 0 { - return fmt.Errorf("Expected AWS SSM Maintenance Document to be gone, but was still found") - } - - return nil - } - - return nil -} - -func testAccAWSSSMMaintenanceWindowBasicConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_ssm_maintenance_window" "foo" { - name = "maintenance-window-%s" - schedule = "cron(0 16 ? * TUE *)" - duration = 3 - cutoff = 1 -} - -`, rName) -} - -func testAccAWSSSMMaintenanceWindowBasicConfigUpdated(rName string) string { - return fmt.Sprintf(` -resource "aws_ssm_maintenance_window" "foo" { - name = "updated-maintenance-window-%s" - schedule = "cron(0 16 ? 
* WED *)" - duration = 10 - cutoff = 8 -} - -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_ssm_parameter.go b/builtin/providers/aws/resource_aws_ssm_parameter.go deleted file mode 100644 index 16b44bebd..000000000 --- a/builtin/providers/aws/resource_aws_ssm_parameter.go +++ /dev/null @@ -1,128 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSsmParameter() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsmParameterCreate, - Read: resourceAwsSsmParameterRead, - Update: resourceAwsSsmParameterUpdate, - Delete: resourceAwsSsmParameterDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateSsmParameterType, - }, - "value": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - "key_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsSsmParameterCreate(d *schema.ResourceData, meta interface{}) error { - return putAwsSSMParameter(d, meta) -} - -func resourceAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id()) - - paramInput := &ssm.GetParametersInput{ - Names: []*string{ - aws.String(d.Get("name").(string)), - }, - WithDecryption: aws.Bool(true), - } - - resp, err := ssmconn.GetParameters(paramInput) - - if err != nil { - return errwrap.Wrapf("[ERROR] Error describing SSM parameter: {{err}}", err) - } - - if len(resp.InvalidParameters) > 0 { - return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Id()) - } - - param := resp.Parameters[0] - d.Set("name", param.Name) - d.Set("type", param.Type) - d.Set("value", 
param.Value) - - return nil -} - -func resourceAwsSsmParameterUpdate(d *schema.ResourceData, meta interface{}) error { - return putAwsSSMParameter(d, meta) -} - -func resourceAwsSsmParameterDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Deleting SSM Parameter: %s", d.Id()) - - paramInput := &ssm.DeleteParameterInput{ - Name: aws.String(d.Get("name").(string)), - } - - _, err := ssmconn.DeleteParameter(paramInput) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func putAwsSSMParameter(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Creating SSM Parameter: %s", d.Get("name").(string)) - - paramInput := &ssm.PutParameterInput{ - Name: aws.String(d.Get("name").(string)), - Type: aws.String(d.Get("type").(string)), - Value: aws.String(d.Get("value").(string)), - Overwrite: aws.Bool(!d.IsNewResource()), - } - if keyID, ok := d.GetOk("key_id"); ok { - log.Printf("[DEBUG] Setting key_id for SSM Parameter %s: %s", d.Get("name").(string), keyID.(string)) - paramInput.SetKeyId(keyID.(string)) - } - - log.Printf("[DEBUG] Waiting for SSM Parameter %q to be updated", d.Get("name").(string)) - _, err := ssmconn.PutParameter(paramInput) - - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating SSM parameter: {{err}}", err) - } - - d.SetId(d.Get("name").(string)) - - return resourceAwsSsmParameterRead(d, meta) -} diff --git a/builtin/providers/aws/resource_aws_ssm_parameter_test.go b/builtin/providers/aws/resource_aws_ssm_parameter_test.go deleted file mode 100644 index b8c46b229..000000000 --- a/builtin/providers/aws/resource_aws_ssm_parameter_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSSMParameter_basic(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMParameterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMParameterBasicConfig(name, "bar"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMParameterHasValue("aws_ssm_parameter.foo", "bar"), - testAccCheckAWSSSMParameterType("aws_ssm_parameter.foo", "String"), - ), - }, - }, - }) -} - -func TestAccAWSSSMParameter_update(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMParameterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMParameterBasicConfig(name, "bar"), - }, - { - Config: testAccAWSSSMParameterBasicConfig(name, "baz"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMParameterHasValue("aws_ssm_parameter.foo", "baz"), - testAccCheckAWSSSMParameterType("aws_ssm_parameter.foo", "String"), - ), - }, - }, - }) -} - -func TestAccAWSSSMParameter_secure(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMParameterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMParameterSecureConfig(name, "secret"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMParameterHasValue("aws_ssm_parameter.secret_foo", "secret"), - testAccCheckAWSSSMParameterType("aws_ssm_parameter.secret_foo", "SecureString"), - ), - }, - }, - }) -} - -func TestAccAWSSSMParameter_secure_with_key(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckAWSSSMParameterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMParameterSecureConfigWithKey(name, "secret"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMParameterHasValue("aws_ssm_parameter.secret_foo", "secret"), - testAccCheckAWSSSMParameterType("aws_ssm_parameter.secret_foo", "SecureString"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSMGetParameter(s *terraform.State, n string) ([]*ssm.Parameter, error) { - rs, ok := s.RootModule().Resources[n] - if !ok { - return []*ssm.Parameter{}, fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return []*ssm.Parameter{}, fmt.Errorf("No SSM Parameter ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - paramInput := &ssm.GetParametersInput{ - Names: []*string{ - aws.String(rs.Primary.Attributes["name"]), - }, - WithDecryption: aws.Bool(true), - } - - resp, _ := conn.GetParameters(paramInput) - - if len(resp.Parameters) == 0 { - return resp.Parameters, fmt.Errorf("Expected AWS SSM Parameter to be created, but wasn't found") - } - return resp.Parameters, nil -} - -func testAccCheckAWSSSMParameterHasValue(n string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - parameters, err := testAccCheckAWSSSMGetParameter(s, n) - if err != nil { - return err - } - - parameterValue := parameters[0].Value - - if *parameterValue != v { - return fmt.Errorf("Expected AWS SSM Parameter to have value %s but had %s", v, *parameterValue) - } - - return nil - } -} - -func testAccCheckAWSSSMParameterType(n string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - parameters, err := testAccCheckAWSSSMGetParameter(s, n) - if err != nil { - return err - } - - parameterValue := parameters[0].Type - - if *parameterValue != v { - return fmt.Errorf("Expected AWS SSM Parameter to have type %s but had %s", v, *parameterValue) - } - - return nil - } -} - -func testAccCheckAWSSSMParameterDestroy(s 
*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_parameter" { - continue - } - - paramInput := &ssm.GetParametersInput{ - Names: []*string{ - aws.String(rs.Primary.Attributes["name"]), - }, - } - - resp, _ := conn.GetParameters(paramInput) - - if len(resp.Parameters) > 0 { - return fmt.Errorf("Expected AWS SSM Parameter to be gone, but was still found") - } - - return nil - } - - return fmt.Errorf("Default error in SSM Parameter Test") -} - -func testAccAWSSSMParameterBasicConfig(rName string, value string) string { - return fmt.Sprintf(` -resource "aws_ssm_parameter" "foo" { - name = "test_parameter-%s" - type = "String" - value = "%s" -} -`, rName, value) -} - -func testAccAWSSSMParameterSecureConfig(rName string, value string) string { - return fmt.Sprintf(` -resource "aws_ssm_parameter" "secret_foo" { - name = "test_secure_parameter-%s" - type = "SecureString" - value = "%s" -} -`, rName, value) -} - -func testAccAWSSSMParameterSecureConfigWithKey(rName string, value string) string { - return fmt.Sprintf(` -resource "aws_ssm_parameter" "secret_foo" { - name = "test_secure_parameter-%s" - type = "SecureString" - value = "%s" - key_id = "${aws_kms_key.test_key.id}" -} - -resource "aws_kms_key" "test_key" { - description = "KMS key 1" - deletion_window_in_days = 7 -} -`, rName, value) -} diff --git a/builtin/providers/aws/resource_aws_ssm_patch_baseline.go b/builtin/providers/aws/resource_aws_ssm_patch_baseline.go deleted file mode 100644 index 4109c5083..000000000 --- a/builtin/providers/aws/resource_aws_ssm_patch_baseline.go +++ /dev/null @@ -1,277 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSsmPatchBaseline() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsmPatchBaselineCreate, - Read: 
resourceAwsSsmPatchBaselineRead, - Delete: resourceAwsSsmPatchBaselineDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "global_filter": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 4, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "approval_rule": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "approve_after_days": { - Type: schema.TypeInt, - Required: true, - }, - - "patch_filter": { - Type: schema.TypeList, - Required: true, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - }, - }, - - "approved_patches": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "rejected_patches": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - params := &ssm.CreatePatchBaselineInput{ - Name: aws.String(d.Get("name").(string)), - } - - if v, ok := d.GetOk("description"); ok { - params.Description = aws.String(v.(string)) - } - - if v, ok := d.GetOk("approved_patches"); ok && v.(*schema.Set).Len() > 0 { - params.ApprovedPatches = 
expandStringList(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("rejected_patches"); ok && v.(*schema.Set).Len() > 0 { - params.RejectedPatches = expandStringList(v.(*schema.Set).List()) - } - - if _, ok := d.GetOk("global_filter"); ok { - params.GlobalFilters = expandAwsSsmPatchFilterGroup(d) - } - - if _, ok := d.GetOk("approval_rule"); ok { - params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) - } - - resp, err := ssmconn.CreatePatchBaseline(params) - if err != nil { - return err - } - - d.SetId(*resp.BaselineId) - return resourceAwsSsmPatchBaselineRead(d, meta) -} - -func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - params := &ssm.GetPatchBaselineInput{ - BaselineId: aws.String(d.Id()), - } - - resp, err := ssmconn.GetPatchBaseline(params) - if err != nil { - return err - } - - d.Set("name", resp.Name) - d.Set("description", resp.Description) - d.Set("approved_patches", flattenStringList(resp.ApprovedPatches)) - d.Set("rejected_patches", flattenStringList(resp.RejectedPatches)) - - if err := d.Set("global_filter", flattenAwsSsmPatchFilterGroup(resp.GlobalFilters)); err != nil { - return fmt.Errorf("[DEBUG] Error setting global filters error: %#v", err) - } - - if err := d.Set("approval_rule", flattenAwsSsmPatchRuleGroup(resp.ApprovalRules)); err != nil { - return fmt.Errorf("[DEBUG] Error setting approval rules error: %#v", err) - } - - return nil -} - -func resourceAwsSsmPatchBaselineDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Deleting SSM Patch Baseline: %s", d.Id()) - - params := &ssm.DeletePatchBaselineInput{ - BaselineId: aws.String(d.Id()), - } - - _, err := ssmconn.DeletePatchBaseline(params) - if err != nil { - return err - } - - return nil -} - -func expandAwsSsmPatchFilterGroup(d *schema.ResourceData) *ssm.PatchFilterGroup { - var filters []*ssm.PatchFilter - - filterConfig := 
d.Get("global_filter").([]interface{}) - - for _, fConfig := range filterConfig { - config := fConfig.(map[string]interface{}) - - filter := &ssm.PatchFilter{ - Key: aws.String(config["key"].(string)), - Values: expandStringList(config["values"].([]interface{})), - } - - filters = append(filters, filter) - } - - return &ssm.PatchFilterGroup{ - PatchFilters: filters, - } -} - -func flattenAwsSsmPatchFilterGroup(group *ssm.PatchFilterGroup) []map[string]interface{} { - if len(group.PatchFilters) == 0 { - return nil - } - - result := make([]map[string]interface{}, 0, len(group.PatchFilters)) - - for _, filter := range group.PatchFilters { - f := make(map[string]interface{}) - f["key"] = *filter.Key - f["values"] = flattenStringList(filter.Values) - - result = append(result, f) - } - - return result -} - -func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup { - var rules []*ssm.PatchRule - - ruleConfig := d.Get("approval_rule").([]interface{}) - - for _, rConfig := range ruleConfig { - rCfg := rConfig.(map[string]interface{}) - - var filters []*ssm.PatchFilter - filterConfig := rCfg["patch_filter"].([]interface{}) - - for _, fConfig := range filterConfig { - fCfg := fConfig.(map[string]interface{}) - - filter := &ssm.PatchFilter{ - Key: aws.String(fCfg["key"].(string)), - Values: expandStringList(fCfg["values"].([]interface{})), - } - - filters = append(filters, filter) - } - - filterGroup := &ssm.PatchFilterGroup{ - PatchFilters: filters, - } - - rule := &ssm.PatchRule{ - ApproveAfterDays: aws.Int64(int64(rCfg["approve_after_days"].(int))), - PatchFilterGroup: filterGroup, - } - - rules = append(rules, rule) - } - - return &ssm.PatchRuleGroup{ - PatchRules: rules, - } -} - -func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interface{} { - if len(group.PatchRules) == 0 { - return nil - } - - result := make([]map[string]interface{}, 0, len(group.PatchRules)) - - for _, rule := range group.PatchRules { - r := 
make(map[string]interface{}) - r["approve_after_days"] = *rule.ApproveAfterDays - r["patch_filter"] = flattenAwsSsmPatchFilterGroup(rule.PatchFilterGroup) - result = append(result, r) - } - - return result -} diff --git a/builtin/providers/aws/resource_aws_ssm_patch_baseline_test.go b/builtin/providers/aws/resource_aws_ssm_patch_baseline_test.go deleted file mode 100644 index 6df3c627b..000000000 --- a/builtin/providers/aws/resource_aws_ssm_patch_baseline_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMPatchBaselineBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMPatchBaselineExists("aws_ssm_patch_baseline.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "approved_patches.#", "1"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "approved_patches.2062620480", "KB123456"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "name", fmt.Sprintf("patch-baseline-%s", name)), - ), - }, - { - Config: testAccAWSSSMPatchBaselineBasicConfigUpdated(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMPatchBaselineExists("aws_ssm_patch_baseline.foo"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "approved_patches.#", "2"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "approved_patches.2062620480", "KB123456"), - resource.TestCheckResourceAttr( - 
"aws_ssm_patch_baseline.foo", "approved_patches.2291496788", "KB456789"), - resource.TestCheckResourceAttr( - "aws_ssm_patch_baseline.foo", "name", fmt.Sprintf("updated-patch-baseline-%s", name)), - ), - }, - }, - }) -} - -func testAccCheckAWSSSMPatchBaselineExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SSM Patch Baseline ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - resp, err := conn.DescribePatchBaselines(&ssm.DescribePatchBaselinesInput{ - Filters: []*ssm.PatchOrchestratorFilter{ - { - Key: aws.String("NAME_PREFIX"), - Values: []*string{aws.String(rs.Primary.Attributes["name"])}, - }, - }, - }) - - for _, i := range resp.BaselineIdentities { - if *i.BaselineId == rs.Primary.ID { - return nil - } - } - if err != nil { - return err - } - - return fmt.Errorf("No AWS SSM Patch Baseline found") - } -} - -func testAccCheckAWSSSMPatchBaselineDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_patch_baseline" { - continue - } - - out, err := conn.DescribePatchBaselines(&ssm.DescribePatchBaselinesInput{ - Filters: []*ssm.PatchOrchestratorFilter{ - { - Key: aws.String("NAME_PREFIX"), - Values: []*string{aws.String(rs.Primary.Attributes["name"])}, - }, - }, - }) - - if err != nil { - return err - } - - if len(out.BaselineIdentities) > 0 { - return fmt.Errorf("Expected AWS SSM Patch Baseline to be gone, but was still found") - } - - return nil - } - - return nil -} - -func testAccAWSSSMPatchBaselineBasicConfig(rName string) string { - return fmt.Sprintf(` - -resource "aws_ssm_patch_baseline" "foo" { - name = "patch-baseline-%s" - approved_patches = ["KB123456"] -} - -`, rName) -} - -func testAccAWSSSMPatchBaselineBasicConfigUpdated(rName string) 
string { - return fmt.Sprintf(` - -resource "aws_ssm_patch_baseline" "foo" { - name = "updated-patch-baseline-%s" - approved_patches = ["KB123456","KB456789"] -} - -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_ssm_patch_group.go b/builtin/providers/aws/resource_aws_ssm_patch_group.go deleted file mode 100644 index 20327b248..000000000 --- a/builtin/providers/aws/resource_aws_ssm_patch_group.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsSsmPatchGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsmPatchGroupCreate, - Read: resourceAwsSsmPatchGroupRead, - Delete: resourceAwsSsmPatchGroupDelete, - - Schema: map[string]*schema.Schema{ - "baseline_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "patch_group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsSsmPatchGroupCreate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - params := &ssm.RegisterPatchBaselineForPatchGroupInput{ - BaselineId: aws.String(d.Get("baseline_id").(string)), - PatchGroup: aws.String(d.Get("patch_group").(string)), - } - - resp, err := ssmconn.RegisterPatchBaselineForPatchGroup(params) - if err != nil { - return err - } - - d.SetId(*resp.PatchGroup) - return resourceAwsSsmPatchGroupRead(d, meta) -} - -func resourceAwsSsmPatchGroupRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - params := &ssm.DescribePatchGroupsInput{} - - resp, err := ssmconn.DescribePatchGroups(params) - if err != nil { - return err - } - - found := false - for _, t := range resp.Mappings { - if *t.PatchGroup == d.Id() { - found = true - - d.Set("patch_group", t.PatchGroup) - d.Set("baseline_id", t.BaselineIdentity.BaselineId) - } - } - - if !found 
{ - log.Printf("[INFO] Patch Group not found. Removing from state") - d.SetId("") - return nil - } - - return nil - -} - -func resourceAwsSsmPatchGroupDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn - - log.Printf("[INFO] Deleting SSM Patch Group: %s", d.Id()) - - params := &ssm.DeregisterPatchBaselineForPatchGroupInput{ - BaselineId: aws.String(d.Get("baseline_id").(string)), - PatchGroup: aws.String(d.Get("patch_group").(string)), - } - - _, err := ssmconn.DeregisterPatchBaselineForPatchGroup(params) - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_ssm_patch_group_test.go b/builtin/providers/aws/resource_aws_ssm_patch_group_test.go deleted file mode 100644 index a244beb23..000000000 --- a/builtin/providers/aws/resource_aws_ssm_patch_group_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ssm" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSSMPatchGroup_basic(t *testing.T) { - name := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSMPatchGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSSMPatchGroupBasicConfig(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSMPatchGroupExists("aws_ssm_patch_group.patchgroup"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSMPatchGroupExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SSM Patch Baseline ID is set") - } - - conn := 
testAccProvider.Meta().(*AWSClient).ssmconn - - resp, err := conn.DescribePatchGroups(&ssm.DescribePatchGroupsInput{}) - if err != nil { - return err - } - - for _, i := range resp.Mappings { - if *i.BaselineIdentity.BaselineId == rs.Primary.Attributes["baseline_id"] && *i.PatchGroup == rs.Primary.ID { - return nil - } - } - - return fmt.Errorf("No AWS SSM Patch Group found") - } -} - -func testAccCheckAWSSSMPatchGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_patch_group" { - continue - } - - resp, err := conn.DescribePatchGroups(&ssm.DescribePatchGroupsInput{}) - - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "DoesNotExistException" { - continue - } - return err - } - - for _, i := range resp.Mappings { - if *i.BaselineIdentity.BaselineId == rs.Primary.Attributes["baseline_id"] && *i.PatchGroup == rs.Primary.ID { - return fmt.Errorf("Expected AWS SSM Patch Group to be gone, but was still found") - } - } - - return nil - } - - return nil -} - -func testAccAWSSSMPatchGroupBasicConfig(rName string) string { - return fmt.Sprintf(` - -resource "aws_ssm_patch_baseline" "foo" { - name = "patch-baseline-%s" - approved_patches = ["KB123456"] -} - -resource "aws_ssm_patch_group" "patchgroup" { - baseline_id = "${aws_ssm_patch_baseline.foo.id}" - patch_group = "patch-group" -} - -`, rName) -} diff --git a/builtin/providers/aws/resource_aws_subnet.go b/builtin/providers/aws/resource_aws_subnet.go deleted file mode 100644 index 88d23e829..000000000 --- a/builtin/providers/aws/resource_aws_subnet.go +++ /dev/null @@ -1,387 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" 
-) - -func resourceAwsSubnet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSubnetCreate, - Read: resourceAwsSubnetRead, - Update: resourceAwsSubnetUpdate, - Delete: resourceAwsSubnetDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsSubnetMigrateState, - - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cidr_block": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "availability_zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "map_public_ip_on_launch": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "assign_ipv6_address_on_creation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "ipv6_cidr_block_association_id": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - createOpts := &ec2.CreateSubnetInput{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - CidrBlock: aws.String(d.Get("cidr_block").(string)), - VpcId: aws.String(d.Get("vpc_id").(string)), - } - - if v, ok := d.GetOk("ipv6_cidr_block"); ok { - createOpts.Ipv6CidrBlock = aws.String(v.(string)) - } - - var err error - resp, err := conn.CreateSubnet(createOpts) - - if err != nil { - return fmt.Errorf("Error creating subnet: %s", err) - } - - // Get the ID and store it - subnet := resp.Subnet - d.SetId(*subnet.SubnetId) - log.Printf("[INFO] Subnet ID: %s", *subnet.SubnetId) - - // Wait for the Subnet to become available - log.Printf("[DEBUG] Waiting for subnet (%s) to become available", *subnet.SubnetId) - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: SubnetStateRefreshFunc(conn, *subnet.SubnetId), - Timeout: 10 * time.Minute, - } - - _, err = stateConf.WaitForState() - - if err != nil { - return fmt.Errorf( - "Error waiting for subnet (%s) to become ready: %s", - d.Id(), err) - } - - return resourceAwsSubnetUpdate(d, meta) -} - -func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(d.Id())}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { - // Update state to indicate the subnet no longer exists. - d.SetId("") - return nil - } - return err - } - if resp == nil { - return nil - } - - subnet := resp.Subnets[0] - - d.Set("vpc_id", subnet.VpcId) - d.Set("availability_zone", subnet.AvailabilityZone) - d.Set("cidr_block", subnet.CidrBlock) - d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) - d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) - for _, a := range subnet.Ipv6CidrBlockAssociationSet { - if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once - d.Set("ipv6_cidr_block_association_id", a.AssociationId) - d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) - break - } else { - d.Set("ipv6_cidr_block_association_id", "") // we blank these out to remove old entries - d.Set("ipv6_cidr_block", "") - } - } - d.Set("tags", tagsToMap(subnet.Tags)) - - return nil -} - -func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - d.Partial(true) - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - if d.HasChange("map_public_ip_on_launch") { - modifyOpts := &ec2.ModifySubnetAttributeInput{ - SubnetId: 
aws.String(d.Id()), - MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ - Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)), - }, - } - - log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) - - _, err := conn.ModifySubnetAttribute(modifyOpts) - - if err != nil { - return err - } else { - d.SetPartial("map_public_ip_on_launch") - } - } - - // We have to be careful here to not go through a change of association if this is a new resource - // A New resource here would denote that the Update func is called by the Create func - if d.HasChange("ipv6_cidr_block") && !d.IsNewResource() { - // We need to handle that we disassociate the IPv6 CIDR block before we try and associate the new one - // This could be an issue as, we could error out when we try and add the new one - // We may need to roll back the state and reattach the old one if this is the case - - _, new := d.GetChange("ipv6_cidr_block") - - if v, ok := d.GetOk("ipv6_cidr_block_association_id"); ok { - - //Firstly we have to disassociate the old IPv6 CIDR Block - disassociateOps := &ec2.DisassociateSubnetCidrBlockInput{ - AssociationId: aws.String(v.(string)), - } - - _, err := conn.DisassociateSubnetCidrBlock(disassociateOps) - if err != nil { - return err - } - - // Wait for the CIDR to become disassociated - log.Printf( - "[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"disassociating", "associated"}, - Target: []string{"disassociated"}, - Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_cidr_block_association_id").(string)), - Timeout: 3 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for IPv6 CIDR (%s) to become disassociated: %s", - d.Id(), err) - } - } - - //Now we need to try and associate the new CIDR block - associatesOpts := &ec2.AssociateSubnetCidrBlockInput{ - SubnetId: aws.String(d.Id()), - Ipv6CidrBlock: 
aws.String(new.(string)), - } - - resp, err := conn.AssociateSubnetCidrBlock(associatesOpts) - if err != nil { - //The big question here is, do we want to try and reassociate the old one?? - //If we have a failure here, then we may be in a situation that we have nothing associated - return err - } - - // Wait for the CIDR to become associated - log.Printf( - "[DEBUG] Waiting for IPv6 CIDR (%s) to become associated", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"associating", "disassociated"}, - Target: []string{"associated"}, - Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId), - Timeout: 3 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for IPv6 CIDR (%s) to become associated: %s", - d.Id(), err) - } - - d.SetPartial("ipv6_cidr_block") - } - - if d.HasChange("assign_ipv6_address_on_creation") { - modifyOpts := &ec2.ModifySubnetAttributeInput{ - SubnetId: aws.String(d.Id()), - AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ - Value: aws.Bool(d.Get("assign_ipv6_address_on_creation").(bool)), - }, - } - - log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) - - _, err := conn.ModifySubnetAttribute(modifyOpts) - - if err != nil { - return err - } else { - d.SetPartial("assign_ipv6_address_on_creation") - } - } - - d.Partial(false) - - return resourceAwsSubnetRead(d, meta) -} - -func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Deleting subnet: %s", d.Id()) - req := &ec2.DeleteSubnetInput{ - SubnetId: aws.String(d.Id()), - } - - wait := resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"destroyed"}, - Timeout: 10 * time.Minute, - MinTimeout: 1 * time.Second, - Refresh: func() (interface{}, string, error) { - _, err := conn.DeleteSubnet(req) - if err != nil { - if apiErr, ok := err.(awserr.Error); ok 
{ - if apiErr.Code() == "DependencyViolation" { - // There is some pending operation, so just retry - // in a bit. - return 42, "pending", nil - } - - if apiErr.Code() == "InvalidSubnetID.NotFound" { - return 42, "destroyed", nil - } - } - - return 42, "failure", err - } - - return 42, "destroyed", nil - }, - } - - if _, err := wait.WaitForState(); err != nil { - return fmt.Errorf("Error deleting subnet: %s", err) - } - - return nil -} - -// SubnetStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch a Subnet. -func SubnetStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(id)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { - resp = nil - } else { - log.Printf("Error on SubnetStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - subnet := resp.Subnets[0] - return subnet, *subnet.State, nil - } -} - -func SubnetIpv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - opts := &ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeSubnets(opts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { - resp = nil - } else { - log.Printf("Error on SubnetIpv6CidrStateRefreshFunc: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - if resp.Subnets[0].Ipv6CidrBlockAssociationSet == nil { - return nil, "", nil - } - - for _, association := range resp.Subnets[0].Ipv6CidrBlockAssociationSet { - if *association.AssociationId == associationId { - return association, *association.Ipv6CidrBlockState.State, nil - } - } - - return nil, "", nil - } -} diff --git a/builtin/providers/aws/resource_aws_subnet_migrate.go b/builtin/providers/aws/resource_aws_subnet_migrate.go deleted file mode 100644 index 0e0f19cf6..000000000 --- a/builtin/providers/aws/resource_aws_subnet_migrate.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAwsSubnetMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AWS Subnet State v0; migrating to v1") - return migrateSubnetStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateSubnetStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() || is.Attributes == nil { - log.Println("[DEBUG] Empty Subnet State; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - is.Attributes["assign_ipv6_address_on_creation"] = "false" - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/aws/resource_aws_subnet_migrate_test.go b/builtin/providers/aws/resource_aws_subnet_migrate_test.go deleted file mode 100644 index c3bdae859..000000000 --- a/builtin/providers/aws/resource_aws_subnet_migrate_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAWSSubnetMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string 
- Expected string - Meta interface{} - }{ - "v0_1_without_value": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{}, - Expected: "false", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceAwsSubnetMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.Attributes["assign_ipv6_address_on_creation"] != tc.Expected { - t.Fatalf("bad Subnet Migrate: %s\n\n expected: %s", is.Attributes["assign_ipv6_address_on_creation"], tc.Expected) - } - } -} diff --git a/builtin/providers/aws/resource_aws_subnet_test.go b/builtin/providers/aws/resource_aws_subnet_test.go deleted file mode 100644 index 210a1f39b..000000000 --- a/builtin/providers/aws/resource_aws_subnet_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSSubnet_basic(t *testing.T) { - var v ec2.Subnet - - testCheck := func(*terraform.State) error { - if *v.CidrBlock != "10.1.1.0/24" { - return fmt.Errorf("bad cidr: %s", *v.CidrBlock) - } - - if *v.MapPublicIpOnLaunch != true { - return fmt.Errorf("bad MapPublicIpOnLaunch: %t", *v.MapPublicIpOnLaunch) - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_subnet.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckSubnetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSubnetConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &v), - testCheck, - ), - }, - }, - }) -} - -func TestAccAWSSubnet_ipv6(t *testing.T) { - var before, after ec2.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - IDRefreshName: "aws_subnet.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckSubnetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSubnetConfigIpv6, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &before), - testAccCheckAwsSubnetIpv6BeforeUpdate(t, &before), - ), - }, - { - Config: testAccSubnetConfigIpv6UpdateAssignIpv6OnCreation, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &after), - testAccCheckAwsSubnetIpv6AfterUpdate(t, &after), - ), - }, - { - Config: testAccSubnetConfigIpv6UpdateIpv6Cidr, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &after), - - testAccCheckAwsSubnetNotRecreated(t, &before, &after), - ), - }, - }, - }) -} - -func TestAccAWSSubnet_enableIpv6(t *testing.T) { - var subnet ec2.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_subnet.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckSubnetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSubnetConfigPreIpv6, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &subnet), - ), - }, - { - Config: testAccSubnetConfigIpv6, - Check: resource.ComposeTestCheckFunc( - testAccCheckSubnetExists( - "aws_subnet.foo", &subnet), - ), - }, - }, - }) -} - -func testAccCheckAwsSubnetIpv6BeforeUpdate(t *testing.T, subnet *ec2.Subnet) resource.TestCheckFunc { - return func(s *terraform.State) error { - if subnet.Ipv6CidrBlockAssociationSet == nil { - return fmt.Errorf("Expected IPV6 CIDR Block Association") - } - - if *subnet.AssignIpv6AddressOnCreation != true { - return fmt.Errorf("bad AssignIpv6AddressOnCreation: %t", *subnet.AssignIpv6AddressOnCreation) - } - - return nil - } -} - -func testAccCheckAwsSubnetIpv6AfterUpdate(t *testing.T, subnet *ec2.Subnet) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - if *subnet.AssignIpv6AddressOnCreation != false { - return fmt.Errorf("bad AssignIpv6AddressOnCreation: %t", *subnet.AssignIpv6AddressOnCreation) - } - - return nil - } -} - -func testAccCheckAwsSubnetNotRecreated(t *testing.T, - before, after *ec2.Subnet) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *before.SubnetId != *after.SubnetId { - t.Fatalf("Expected SubnetIDs not to change, but both got before: %s and after: %s", *before.SubnetId, *after.SubnetId) - } - return nil - } -} - -func testAccCheckSubnetDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_subnet" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.Subnets) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidSubnetID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckSubnetExists(n string, v *ec2.Subnet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ - SubnetIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.Subnets) == 0 { - return fmt.Errorf("Subnet not found") - } - - *v = *resp.Subnets[0] - - return nil - } -} - -const testAccSubnetConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_subnet" "foo" { - cidr_block = "10.1.1.0/24" - vpc_id = 
"${aws_vpc.foo.id}" - map_public_ip_on_launch = true - tags { - Name = "tf-subnet-acc-test" - } -} -` - -const testAccSubnetConfigPreIpv6 = ` -resource "aws_vpc" "foo" { - cidr_block = "10.10.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_subnet" "foo" { - cidr_block = "10.10.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - map_public_ip_on_launch = true - tags { - Name = "tf-subnet-acc-test" - } -} -` - -const testAccSubnetConfigIpv6 = ` -resource "aws_vpc" "foo" { - cidr_block = "10.10.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_subnet" "foo" { - cidr_block = "10.10.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" - map_public_ip_on_launch = true - assign_ipv6_address_on_creation = true - tags { - Name = "tf-subnet-acc-test" - } -} -` - -const testAccSubnetConfigIpv6UpdateAssignIpv6OnCreation = ` -resource "aws_vpc" "foo" { - cidr_block = "10.10.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_subnet" "foo" { - cidr_block = "10.10.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" - map_public_ip_on_launch = true - assign_ipv6_address_on_creation = false - tags { - Name = "tf-subnet-acc-test" - } -} -` - -const testAccSubnetConfigIpv6UpdateIpv6Cidr = ` -resource "aws_vpc" "foo" { - cidr_block = "10.10.0.0/16" - assign_generated_ipv6_cidr_block = true -} - -resource "aws_subnet" "foo" { - cidr_block = "10.10.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 3)}" - map_public_ip_on_launch = true - assign_ipv6_address_on_creation = false - tags { - Name = "tf-subnet-acc-test" - } -} -` diff --git a/builtin/providers/aws/resource_aws_volume_attachment.go b/builtin/providers/aws/resource_aws_volume_attachment.go deleted file mode 100644 index 2afcd6c67..000000000 --- a/builtin/providers/aws/resource_aws_volume_attachment.go +++ 
/dev/null @@ -1,251 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVolumeAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVolumeAttachmentCreate, - Read: resourceAwsVolumeAttachmentRead, - Delete: resourceAwsVolumeAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "volume_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "force_detach": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "skip_destroy": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - name := d.Get("device_name").(string) - iID := d.Get("instance_id").(string) - vID := d.Get("volume_id").(string) - - // Find out if the volume is already attached to the instance, in which case - // we have nothing to do - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(vID)}, - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("attachment.instance-id"), - Values: []*string{aws.String(iID)}, - }, - &ec2.Filter{ - Name: aws.String("attachment.device"), - Values: []*string{aws.String(name)}, - }, - }, - } - - vols, err := conn.DescribeVolumes(request) - if (err != nil) || (len(vols.Volumes) == 0) { - // This handles the situation where the instance is created by - // a spot request and whilst the request has been fulfilled the - // instance is not running yet - 
stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"running"}, - Refresh: InstanceStateRefreshFunc(conn, iID, "terminated"), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to become ready: %s", - iID, err) - } - - // not attached - opts := &ec2.AttachVolumeInput{ - Device: aws.String(name), - InstanceId: aws.String(iID), - VolumeId: aws.String(vID), - } - - log.Printf("[DEBUG] Attaching Volume (%s) to Instance (%s)", vID, iID) - _, err := conn.AttachVolume(opts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return fmt.Errorf("[WARN] Error attaching volume (%s) to instance (%s), message: \"%s\", code: \"%s\"", - vID, iID, awsErr.Message(), awsErr.Code()) - } - return err - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"attaching"}, - Target: []string{"attached"}, - Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Volume (%s) to attach to Instance: %s, error: %s", - vID, iID, err) - } - - d.SetId(volumeAttachmentID(name, vID, iID)) - return resourceAwsVolumeAttachmentRead(d, meta) -} - -func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, volumeID, instanceID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(volumeID)}, - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("attachment.instance-id"), - Values: []*string{aws.String(instanceID)}, - }, - }, - } - - resp, err := conn.DescribeVolumes(request) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - return nil, "failed", fmt.Errorf("code: %s, message: %s", 
awsErr.Code(), awsErr.Message()) - } - return nil, "failed", err - } - - if len(resp.Volumes) > 0 { - v := resp.Volumes[0] - for _, a := range v.Attachments { - if a.InstanceId != nil && *a.InstanceId == instanceID { - return a, *a.State, nil - } - } - } - // assume detached if volume count is 0 - return 42, "detached", nil - } -} -func resourceAwsVolumeAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - request := &ec2.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(d.Get("volume_id").(string))}, - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("attachment.instance-id"), - Values: []*string{aws.String(d.Get("instance_id").(string))}, - }, - }, - } - - vols, err := conn.DescribeVolumes(request) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVolume.NotFound" { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading EC2 volume %s for instance: %s: %#v", d.Get("volume_id").(string), d.Get("instance_id").(string), err) - } - - if len(vols.Volumes) == 0 || *vols.Volumes[0].State == "available" { - log.Printf("[DEBUG] Volume Attachment (%s) not found, removing from state", d.Id()) - d.SetId("") - } - - return nil -} - -func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if _, ok := d.GetOk("skip_destroy"); ok { - log.Printf("[INFO] Found skip_destroy to be true, removing attachment %q from state", d.Id()) - d.SetId("") - return nil - } - - vID := d.Get("volume_id").(string) - iID := d.Get("instance_id").(string) - - opts := &ec2.DetachVolumeInput{ - Device: aws.String(d.Get("device_name").(string)), - InstanceId: aws.String(iID), - VolumeId: aws.String(vID), - Force: aws.Bool(d.Get("force_detach").(bool)), - } - - _, err := conn.DetachVolume(opts) - if err != nil { - return fmt.Errorf("Failed to detach Volume (%s) from Instance (%s): %s", - vID, iID, err) - } - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"detaching"}, - Target: []string{"detached"}, - Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - log.Printf("[DEBUG] Detaching Volume (%s) from Instance (%s)", vID, iID) - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Volume (%s) to detach from Instance: %s", - vID, iID) - } - d.SetId("") - return nil -} - -func volumeAttachmentID(name, volumeID, instanceID string) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", name)) - buf.WriteString(fmt.Sprintf("%s-", instanceID)) - buf.WriteString(fmt.Sprintf("%s-", volumeID)) - - return fmt.Sprintf("vai-%d", hashcode.String(buf.String())) -} diff --git a/builtin/providers/aws/resource_aws_volume_attachment_test.go b/builtin/providers/aws/resource_aws_volume_attachment_test.go deleted file mode 100644 index 0b99d1ffa..000000000 --- a/builtin/providers/aws/resource_aws_volume_attachment_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVolumeAttachment_basic(t *testing.T) { - var i ec2.Instance - var v ec2.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVolumeAttachmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVolumeAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_volume_attachment.ebs_att", "device_name", "/dev/sdh"), - testAccCheckInstanceExists( - "aws_instance.web", &i), - testAccCheckVolumeExists( - "aws_ebs_volume.example", &v), - testAccCheckVolumeAttachmentExists( - "aws_volume_attachment.ebs_att", &i, &v), - ), - }, - }, - }) -} - -func 
TestAccAWSVolumeAttachment_skipDestroy(t *testing.T) { - var i ec2.Instance - var v ec2.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVolumeAttachmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVolumeAttachmentConfigSkipDestroy, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_volume_attachment.ebs_att", "device_name", "/dev/sdh"), - testAccCheckInstanceExists( - "aws_instance.web", &i), - testAccCheckVolumeExists( - "aws_ebs_volume.example", &v), - testAccCheckVolumeAttachmentExists( - "aws_volume_attachment.ebs_att", &i, &v), - ), - }, - }, - }) -} - -func testAccCheckVolumeAttachmentExists(n string, i *ec2.Instance, v *ec2.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - for _, b := range i.BlockDeviceMappings { - if rs.Primary.Attributes["device_name"] == *b.DeviceName { - if b.Ebs.VolumeId != nil && rs.Primary.Attributes["volume_id"] == *b.Ebs.VolumeId { - // pass - return nil - } - } - } - - return fmt.Errorf("Error finding instance/volume") - } -} - -func testAccCheckVolumeAttachmentDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - log.Printf("\n\n----- This is never called") - if rs.Type != "aws_volume_attachment" { - continue - } - } - return nil -} - -const testAccVolumeAttachmentConfig = ` -resource "aws_instance" "web" { - ami = "ami-21f78e11" - availability_zone = "us-west-2a" - instance_type = "t1.micro" - tags { - Name = "HelloWorld" - } -} - -resource "aws_ebs_volume" "example" { - availability_zone = "us-west-2a" - size = 1 -} - -resource "aws_volume_attachment" "ebs_att" { - device_name = "/dev/sdh" - volume_id = "${aws_ebs_volume.example.id}" - instance_id = 
"${aws_instance.web.id}" -} -` - -const testAccVolumeAttachmentConfigSkipDestroy = ` -resource "aws_instance" "web" { - ami = "ami-21f78e11" - availability_zone = "us-west-2a" - instance_type = "t1.micro" - tags { - Name = "HelloWorld" - } -} - -resource "aws_ebs_volume" "example" { - availability_zone = "us-west-2a" - size = 1 - tags { - Name = "TestVolume" - } -} - -data "aws_ebs_volume" "ebs_volume" { - filter { - name = "size" - values = ["${aws_ebs_volume.example.size}"] - } - filter { - name = "availability-zone" - values = ["${aws_ebs_volume.example.availability_zone}"] - } - filter { - name = "tag:Name" - values = ["TestVolume"] - } -} - -resource "aws_volume_attachment" "ebs_att" { - device_name = "/dev/sdh" - volume_id = "${data.aws_ebs_volume.ebs_volume.id}" - instance_id = "${aws_instance.web.id}" - skip_destroy = true -} -` diff --git a/builtin/providers/aws/resource_aws_vpc.go b/builtin/providers/aws/resource_aws_vpc.go deleted file mode 100644 index 6a8edca4b..000000000 --- a/builtin/providers/aws/resource_aws_vpc.go +++ /dev/null @@ -1,592 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpc() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpcCreate, - Read: resourceAwsVpcRead, - Update: resourceAwsVpcUpdate, - Delete: resourceAwsVpcDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsVpcInstanceImport, - }, - - SchemaVersion: 1, - MigrateState: resourceAwsVpcMigrateState, - - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateCIDRNetworkAddress, - }, - - "instance_tenancy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - 
"enable_dns_hostnames": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "enable_dns_support": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "enable_classiclink": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "assign_generated_ipv6_cidr_block": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "main_route_table_id": { - Type: schema.TypeString, - Computed: true, - }, - - "default_network_acl_id": { - Type: schema.TypeString, - Computed: true, - }, - - "dhcp_options_id": { - Type: schema.TypeString, - Computed: true, - }, - - "default_security_group_id": { - Type: schema.TypeString, - Computed: true, - }, - - "default_route_table_id": { - Type: schema.TypeString, - Computed: true, - }, - - "ipv6_association_id": { - Type: schema.TypeString, - Computed: true, - }, - - "ipv6_cidr_block": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - instance_tenancy := "default" - if v, ok := d.GetOk("instance_tenancy"); ok { - instance_tenancy = v.(string) - } - - // Create the VPC - createOpts := &ec2.CreateVpcInput{ - CidrBlock: aws.String(d.Get("cidr_block").(string)), - InstanceTenancy: aws.String(instance_tenancy), - AmazonProvidedIpv6CidrBlock: aws.Bool(d.Get("assign_generated_ipv6_cidr_block").(bool)), - } - - log.Printf("[DEBUG] VPC create config: %#v", *createOpts) - vpcResp, err := conn.CreateVpc(createOpts) - if err != nil { - return fmt.Errorf("Error creating VPC: %s", err) - } - - // Get the ID and store it - vpc := vpcResp.Vpc - d.SetId(*vpc.VpcId) - log.Printf("[INFO] VPC ID: %s", d.Id()) - - // Set partial mode and say that we setup the cidr block - d.Partial(true) - d.SetPartial("cidr_block") - - // Wait for the VPC to become available - log.Printf( - "[DEBUG] Waiting for VPC (%s) to become available", - d.Id()) - 
stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: VPCStateRefreshFunc(conn, d.Id()), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for VPC (%s) to become available: %s", - d.Id(), err) - } - - // Update our attributes and return - return resourceAwsVpcUpdate(d, meta) -} - -func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Refresh the VPC state - vpcRaw, _, err := VPCStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - if vpcRaw == nil { - d.SetId("") - return nil - } - - // VPC stuff - vpc := vpcRaw.(*ec2.Vpc) - vpcid := d.Id() - d.Set("cidr_block", vpc.CidrBlock) - d.Set("dhcp_options_id", vpc.DhcpOptionsId) - d.Set("instance_tenancy", vpc.InstanceTenancy) - - // Tags - d.Set("tags", tagsToMap(vpc.Tags)) - - for _, a := range vpc.Ipv6CidrBlockAssociationSet { - if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once - d.Set("assign_generated_ipv6_cidr_block", true) - d.Set("ipv6_association_id", a.AssociationId) - d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) - } else { - d.Set("assign_generated_ipv6_cidr_block", false) - d.Set("ipv6_association_id", "") // we blank these out to remove old entries - d.Set("ipv6_cidr_block", "") - } - } - - // Attributes - attribute := "enableDnsSupport" - DescribeAttrOpts := &ec2.DescribeVpcAttributeInput{ - Attribute: aws.String(attribute), - VpcId: aws.String(vpcid), - } - resp, err := conn.DescribeVpcAttribute(DescribeAttrOpts) - if err != nil { - return err - } - d.Set("enable_dns_support", *resp.EnableDnsSupport.Value) - attribute = "enableDnsHostnames" - DescribeAttrOpts = &ec2.DescribeVpcAttributeInput{ - Attribute: &attribute, - VpcId: &vpcid, - } - resp, err = conn.DescribeVpcAttribute(DescribeAttrOpts) - if err != nil { - return err - } - 
d.Set("enable_dns_hostnames", *resp.EnableDnsHostnames.Value) - - DescribeClassiclinkOpts := &ec2.DescribeVpcClassicLinkInput{ - VpcIds: []*string{&vpcid}, - } - - // Classic Link is only available in regions that support EC2 Classic - respClassiclink, err := conn.DescribeVpcClassicLink(DescribeClassiclinkOpts) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "UnsupportedOperation" { - log.Printf("[WARN] VPC Classic Link is not supported in this region") - } else { - return err - } - } else { - classiclink_enabled := false - for _, v := range respClassiclink.Vpcs { - if *v.VpcId == vpcid { - if v.ClassicLinkEnabled != nil { - classiclink_enabled = *v.ClassicLinkEnabled - } - break - } - } - d.Set("enable_classiclink", classiclink_enabled) - } - - // Get the main routing table for this VPC - // Really Ugly need to make this better - rmenn - filter1 := &ec2.Filter{ - Name: aws.String("association.main"), - Values: []*string{aws.String("true")}, - } - filter2 := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(d.Id())}, - } - DescribeRouteOpts := &ec2.DescribeRouteTablesInput{ - Filters: []*ec2.Filter{filter1, filter2}, - } - routeResp, err := conn.DescribeRouteTables(DescribeRouteOpts) - if err != nil { - return err - } - if v := routeResp.RouteTables; len(v) > 0 { - d.Set("main_route_table_id", *v[0].RouteTableId) - } - - if err := resourceAwsVpcSetDefaultNetworkAcl(conn, d); err != nil { - log.Printf("[WARN] Unable to set Default Network ACL: %s", err) - } - if err := resourceAwsVpcSetDefaultSecurityGroup(conn, d); err != nil { - log.Printf("[WARN] Unable to set Default Security Group: %s", err) - } - if err := resourceAwsVpcSetDefaultRouteTable(conn, d); err != nil { - log.Printf("[WARN] Unable to set Default Route Table: %s", err) - } - - return nil -} - -func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Turn on partial mode - 
d.Partial(true) - vpcid := d.Id() - if d.HasChange("enable_dns_hostnames") { - val := d.Get("enable_dns_hostnames").(bool) - modifyOpts := &ec2.ModifyVpcAttributeInput{ - VpcId: &vpcid, - EnableDnsHostnames: &ec2.AttributeBooleanValue{ - Value: &val, - }, - } - - log.Printf( - "[INFO] Modifying enable_dns_hostnames vpc attribute for %s: %s", - d.Id(), modifyOpts) - if _, err := conn.ModifyVpcAttribute(modifyOpts); err != nil { - return err - } - - d.SetPartial("enable_dns_hostnames") - } - - _, hasEnableDnsSupportOption := d.GetOk("enable_dns_support") - - if !hasEnableDnsSupportOption || d.HasChange("enable_dns_support") { - val := d.Get("enable_dns_support").(bool) - modifyOpts := &ec2.ModifyVpcAttributeInput{ - VpcId: &vpcid, - EnableDnsSupport: &ec2.AttributeBooleanValue{ - Value: &val, - }, - } - - log.Printf( - "[INFO] Modifying enable_dns_support vpc attribute for %s: %s", - d.Id(), modifyOpts) - if _, err := conn.ModifyVpcAttribute(modifyOpts); err != nil { - return err - } - - d.SetPartial("enable_dns_support") - } - - if d.HasChange("enable_classiclink") { - val := d.Get("enable_classiclink").(bool) - - if val { - modifyOpts := &ec2.EnableVpcClassicLinkInput{ - VpcId: &vpcid, - } - log.Printf( - "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v", - d.Id(), modifyOpts) - if _, err := conn.EnableVpcClassicLink(modifyOpts); err != nil { - return err - } - } else { - modifyOpts := &ec2.DisableVpcClassicLinkInput{ - VpcId: &vpcid, - } - log.Printf( - "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v", - d.Id(), modifyOpts) - if _, err := conn.DisableVpcClassicLink(modifyOpts); err != nil { - return err - } - } - - d.SetPartial("enable_classiclink") - } - - if d.HasChange("assign_generated_ipv6_cidr_block") && !d.IsNewResource() { - toAssign := d.Get("assign_generated_ipv6_cidr_block").(bool) - - log.Printf("[INFO] Modifying assign_generated_ipv6_cidr_block to %#v", toAssign) - - if toAssign { - modifyOpts := 
&ec2.AssociateVpcCidrBlockInput{ - VpcId: &vpcid, - AmazonProvidedIpv6CidrBlock: aws.Bool(toAssign), - } - log.Printf("[INFO] Enabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v", - d.Id(), modifyOpts) - resp, err := conn.AssociateVpcCidrBlock(modifyOpts) - if err != nil { - return err - } - - // Wait for the CIDR to become available - log.Printf( - "[DEBUG] Waiting for IPv6 CIDR (%s) to become associated", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"associating", "disassociated"}, - Target: []string{"associated"}, - Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId), - Timeout: 1 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for IPv6 CIDR (%s) to become associated: %s", - d.Id(), err) - } - } else { - modifyOpts := &ec2.DisassociateVpcCidrBlockInput{ - AssociationId: aws.String(d.Get("ipv6_association_id").(string)), - } - log.Printf("[INFO] Disabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v", - d.Id(), modifyOpts) - if _, err := conn.DisassociateVpcCidrBlock(modifyOpts); err != nil { - return err - } - - // Wait for the CIDR to become available - log.Printf( - "[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated", - d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"disassociating", "associated"}, - Target: []string{"disassociated"}, - Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_association_id").(string)), - Timeout: 1 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for IPv6 CIDR (%s) to become disassociated: %s", - d.Id(), err) - } - } - - d.SetPartial("assign_generated_ipv6_cidr_block") - } - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - return resourceAwsVpcRead(d, meta) -} - -func resourceAwsVpcDelete(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - vpcID := d.Id() - DeleteVpcOpts := &ec2.DeleteVpcInput{ - VpcId: &vpcID, - } - log.Printf("[INFO] Deleting VPC: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteVpc(DeleteVpcOpts) - if err == nil { - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.NonRetryableError(err) - } - - switch ec2err.Code() { - case "InvalidVpcID.NotFound": - return nil - case "DependencyViolation": - return resource.RetryableError(err) - } - - return resource.NonRetryableError(fmt.Errorf("Error deleting VPC: %s", err)) - }) -} - -// VPCStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a VPC. -func VPCStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - DescribeVpcOpts := &ec2.DescribeVpcsInput{ - VpcIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeVpcs(DescribeVpcOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { - resp = nil - } else { - log.Printf("Error on VPCStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - vpc := resp.Vpcs[0] - return vpc, *vpc.State, nil - } -} - -func Ipv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - describeVpcOpts := &ec2.DescribeVpcsInput{ - VpcIds: []*string{aws.String(id)}, - } - resp, err := conn.DescribeVpcs(describeVpcOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { - resp = nil - } else { - log.Printf("Error on VPCStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - if resp.Vpcs[0].Ipv6CidrBlockAssociationSet == nil { - return nil, "", nil - } - - for _, association := range resp.Vpcs[0].Ipv6CidrBlockAssociationSet { - if *association.AssociationId == associationId { - return association, *association.Ipv6CidrBlockState.State, nil - } - } - - return nil, "", nil - } -} - -func resourceAwsVpcSetDefaultNetworkAcl(conn *ec2.EC2, d *schema.ResourceData) error { - filter1 := &ec2.Filter{ - Name: aws.String("default"), - Values: []*string{aws.String("true")}, - } - filter2 := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(d.Id())}, - } - DescribeNetworkACLOpts := &ec2.DescribeNetworkAclsInput{ - Filters: []*ec2.Filter{filter1, filter2}, - } - networkAclResp, err := conn.DescribeNetworkAcls(DescribeNetworkACLOpts) - - if err != nil { - return err - } - if v := networkAclResp.NetworkAcls; len(v) > 0 { - d.Set("default_network_acl_id", v[0].NetworkAclId) - } - - return nil -} - -func resourceAwsVpcSetDefaultSecurityGroup(conn *ec2.EC2, d *schema.ResourceData) error { - filter1 := &ec2.Filter{ - Name: aws.String("group-name"), - Values: []*string{aws.String("default")}, - } - filter2 := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(d.Id())}, - } - 
DescribeSgOpts := &ec2.DescribeSecurityGroupsInput{ - Filters: []*ec2.Filter{filter1, filter2}, - } - securityGroupResp, err := conn.DescribeSecurityGroups(DescribeSgOpts) - - if err != nil { - return err - } - if v := securityGroupResp.SecurityGroups; len(v) > 0 { - d.Set("default_security_group_id", v[0].GroupId) - } - - return nil -} - -func resourceAwsVpcSetDefaultRouteTable(conn *ec2.EC2, d *schema.ResourceData) error { - filter1 := &ec2.Filter{ - Name: aws.String("association.main"), - Values: []*string{aws.String("true")}, - } - filter2 := &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: []*string{aws.String(d.Id())}, - } - - findOpts := &ec2.DescribeRouteTablesInput{ - Filters: []*ec2.Filter{filter1, filter2}, - } - - resp, err := conn.DescribeRouteTables(findOpts) - if err != nil { - return err - } - - if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { - return fmt.Errorf("Default Route table not found") - } - - // There Can Be Only 1 ... Default Route Table - d.Set("default_route_table_id", resp.RouteTables[0].RouteTableId) - - return nil -} - -func resourceAwsVpcInstanceImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("assign_generated_ipv6_cidr_block", false) - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options.go deleted file mode 100644 index ec2844cc7..000000000 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go +++ /dev/null @@ -1,292 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpcDhcpOptions() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpcDhcpOptionsCreate, - Read: 
resourceAwsVpcDhcpOptionsRead, - Update: resourceAwsVpcDhcpOptionsUpdate, - Delete: resourceAwsVpcDhcpOptionsDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "domain_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_name_servers": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "ntp_servers": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "netbios_node_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "netbios_name_servers": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "tags": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - } -} - -func resourceAwsVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - setDHCPOption := func(key string) *ec2.NewDhcpConfiguration { - log.Printf("[DEBUG] Setting DHCP option %s...", key) - tfKey := strings.Replace(key, "-", "_", -1) - - value, ok := d.GetOk(tfKey) - if !ok { - return nil - } - - if v, ok := value.(string); ok { - return &ec2.NewDhcpConfiguration{ - Key: aws.String(key), - Values: []*string{ - aws.String(v), - }, - } - } - - if v, ok := value.([]interface{}); ok { - var s []*string - for _, attr := range v { - s = append(s, aws.String(attr.(string))) - } - - return &ec2.NewDhcpConfiguration{ - Key: aws.String(key), - Values: s, - } - } - - return nil - } - - createOpts := &ec2.CreateDhcpOptionsInput{ - DhcpConfigurations: []*ec2.NewDhcpConfiguration{ - setDHCPOption("domain-name"), - setDHCPOption("domain-name-servers"), - setDHCPOption("ntp-servers"), - setDHCPOption("netbios-node-type"), - 
setDHCPOption("netbios-name-servers"), - }, - } - - resp, err := conn.CreateDhcpOptions(createOpts) - if err != nil { - return fmt.Errorf("Error creating DHCP Options Set: %s", err) - } - - dos := resp.DhcpOptions - d.SetId(*dos.DhcpOptionsId) - log.Printf("[INFO] DHCP Options Set ID: %s", d.Id()) - - // Wait for the DHCP Options to become available - log.Printf("[DEBUG] Waiting for DHCP Options (%s) to become available", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"created"}, - Refresh: resourceDHCPOptionsStateRefreshFunc(conn, d.Id()), - Timeout: 5 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for DHCP Options (%s) to become available: %s", - d.Id(), err) - } - - return resourceAwsVpcDhcpOptionsUpdate(d, meta) -} - -func resourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - req := &ec2.DescribeDhcpOptionsInput{ - DhcpOptionsIds: []*string{ - aws.String(d.Id()), - }, - } - - resp, err := conn.DescribeDhcpOptions(req) - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) - } - - if ec2err.Code() == "InvalidDhcpOptionID.NotFound" { - log.Printf("[WARN] DHCP Options (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) - } - - if len(resp.DhcpOptions) == 0 { - return nil - } - - opts := resp.DhcpOptions[0] - d.Set("tags", tagsToMap(opts.Tags)) - - for _, cfg := range opts.DhcpConfigurations { - tfKey := strings.Replace(*cfg.Key, "-", "_", -1) - - if _, ok := d.Get(tfKey).(string); ok { - d.Set(tfKey, cfg.Values[0].Value) - } else { - values := make([]string, 0, len(cfg.Values)) - for _, v := range cfg.Values { - values = append(values, *v.Value) - } - - d.Set(tfKey, values) - } - } - - return nil -} - -func 
resourceAwsVpcDhcpOptionsUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - return setTags(conn, d) -} - -func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - return resource.Retry(3*time.Minute, func() *resource.RetryError { - log.Printf("[INFO] Deleting DHCP Options ID %s...", d.Id()) - _, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ - DhcpOptionsId: aws.String(d.Id()), - }) - - if err == nil { - return nil - } - - log.Printf("[WARN] %s", err) - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidDhcpOptionsID.NotFound": - return nil - case "DependencyViolation": - // If it is a dependency violation, we want to disassociate - // all VPCs using the given DHCP Options ID, and retry deleting. - vpcs, err2 := findVPCsByDHCPOptionsID(conn, d.Id()) - if err2 != nil { - log.Printf("[ERROR] %s", err2) - return resource.RetryableError(err2) - } - - for _, vpc := range vpcs { - log.Printf("[INFO] Disassociating DHCP Options Set %s from VPC %s...", d.Id(), *vpc.VpcId) - if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{ - DhcpOptionsId: aws.String("default"), - VpcId: vpc.VpcId, - }); err != nil { - return resource.RetryableError(err) - } - } - return resource.RetryableError(err) - default: - return resource.NonRetryableError(err) - } - }) -} - -func findVPCsByDHCPOptionsID(conn *ec2.EC2, id string) ([]*ec2.Vpc, error) { - req := &ec2.DescribeVpcsInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("dhcp-options-id"), - Values: []*string{ - aws.String(id), - }, - }, - }, - } - - resp, err := conn.DescribeVpcs(req) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { - return nil, nil - } - return nil, err - } - - return resp.Vpcs, nil -} - -func 
resourceDHCPOptionsStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - DescribeDhcpOpts := &ec2.DescribeDhcpOptionsInput{ - DhcpOptionsIds: []*string{ - aws.String(id), - }, - } - - resp, err := conn.DescribeDhcpOptions(DescribeDhcpOpts) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidDhcpOptionsID.NotFound" { - resp = nil - } else { - log.Printf("Error on DHCPOptionsStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - dos := resp.DhcpOptions[0] - return dos, "created", nil - } -} diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options_association.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options_association.go deleted file mode 100644 index 7bdcb7a68..000000000 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options_association.go +++ /dev/null @@ -1,99 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpcDhcpOptionsAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpcDhcpOptionsAssociationCreate, - Read: resourceAwsVpcDhcpOptionsAssociationRead, - Update: resourceAwsVpcDhcpOptionsAssociationUpdate, - Delete: resourceAwsVpcDhcpOptionsAssociationDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "dhcp_options_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAwsVpcDhcpOptionsAssociationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf( - "[INFO] Creating DHCP Options association: %s => %s", - d.Get("vpc_id").(string), - 
d.Get("dhcp_options_id").(string)) - - optsID := aws.String(d.Get("dhcp_options_id").(string)) - vpcID := aws.String(d.Get("vpc_id").(string)) - - if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{ - DhcpOptionsId: optsID, - VpcId: vpcID, - }); err != nil { - return err - } - - // Set the ID and return - d.SetId(*optsID + "-" + *vpcID) - log.Printf("[INFO] Association ID: %s", d.Id()) - - return nil -} - -func resourceAwsVpcDhcpOptionsAssociationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - // Get the VPC that this association belongs to - vpcRaw, _, err := VPCStateRefreshFunc(conn, d.Get("vpc_id").(string))() - - if err != nil { - return err - } - - if vpcRaw == nil { - return nil - } - - vpc := vpcRaw.(*ec2.Vpc) - if *vpc.VpcId != d.Get("vpc_id") || *vpc.DhcpOptionsId != d.Get("dhcp_options_id") { - log.Printf("[INFO] It seems the DHCP Options association is gone. Deleting reference from Graph...") - d.SetId("") - } - - return nil -} - -// DHCP Options Asociations cannot be updated. -func resourceAwsVpcDhcpOptionsAssociationUpdate(d *schema.ResourceData, meta interface{}) error { - return resourceAwsVpcDhcpOptionsAssociationCreate(d, meta) -} - -// AWS does not provide an API to disassociate a DHCP Options set from a VPC. -// So, we do this by setting the VPC to the default DHCP Options Set. 
-func resourceAwsVpcDhcpOptionsAssociationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - log.Printf("[INFO] Disassociating DHCP Options Set %s from VPC %s...", d.Get("dhcp_options_id"), d.Get("vpc_id")) - if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{ - DhcpOptionsId: aws.String("default"), - VpcId: aws.String(d.Get("vpc_id").(string)), - }); err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options_association_test.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options_association_test.go deleted file mode 100644 index 84b58029a..000000000 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options_association_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDHCPOptionsAssociation_basic(t *testing.T) { - var v ec2.Vpc - var d ec2.DhcpOptions - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDHCPOptionsAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDHCPOptionsAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDHCPOptionsExists("aws_vpc_dhcp_options.foo", &d), - testAccCheckVpcExists("aws_vpc.foo", &v), - testAccCheckDHCPOptionsAssociationExist("aws_vpc_dhcp_options_association.foo", &v), - ), - }, - }, - }) -} - -func testAccCheckDHCPOptionsAssociationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc_dhcp_options_association" { - continue - } - - // Try to find the VPC associated to the DHCP Options set - vpcs, err := findVPCsByDHCPOptionsID(conn, 
rs.Primary.Attributes["dhcp_options_id"]) - if err != nil { - return err - } - - if len(vpcs) > 0 { - return fmt.Errorf("DHCP Options association is still associated to %d VPCs.", len(vpcs)) - } - } - - return nil -} - -func testAccCheckDHCPOptionsAssociationExist(n string, vpc *ec2.Vpc) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No DHCP Options Set association ID is set") - } - - if *vpc.DhcpOptionsId != rs.Primary.Attributes["dhcp_options_id"] { - return fmt.Errorf("VPC %s does not have DHCP Options Set %s associated", *vpc.VpcId, rs.Primary.Attributes["dhcp_options_id"]) - } - - if *vpc.VpcId != rs.Primary.Attributes["vpc_id"] { - return fmt.Errorf("DHCP Options Set %s is not associated with VPC %s", rs.Primary.Attributes["dhcp_options_id"], *vpc.VpcId) - } - - return nil - } -} - -const testAccDHCPOptionsAssociationConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpc_dhcp_options" "foo" { - domain_name = "service.consul" - domain_name_servers = ["127.0.0.1", "10.0.0.2"] - ntp_servers = ["127.0.0.1"] - netbios_name_servers = ["127.0.0.1"] - netbios_node_type = 2 - - tags { - Name = "foo" - } -} - -resource "aws_vpc_dhcp_options_association" "foo" { - vpc_id = "${aws_vpc.foo.id}" - dhcp_options_id = "${aws_vpc_dhcp_options.foo.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go deleted file mode 100644 index f101f95f3..000000000 --- a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSDHCPOptions_basic(t *testing.T) { - var d ec2.DhcpOptions - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDHCPOptionsDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDHCPOptionsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDHCPOptionsExists("aws_vpc_dhcp_options.foo", &d), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "domain_name", "service.consul"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "domain_name_servers.0", "127.0.0.1"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "domain_name_servers.1", "10.0.0.2"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "ntp_servers.0", "127.0.0.1"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "netbios_name_servers.0", "127.0.0.1"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "netbios_node_type", "2"), - resource.TestCheckResourceAttr("aws_vpc_dhcp_options.foo", "tags.Name", "foo-name"), - ), - }, - }, - }) -} - -func TestAccAWSDHCPOptions_deleteOptions(t *testing.T) { - var d ec2.DhcpOptions - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDHCPOptionsDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDHCPOptionsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDHCPOptionsExists("aws_vpc_dhcp_options.foo", &d), - testAccCheckDHCPOptionsDelete("aws_vpc_dhcp_options.foo"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckDHCPOptionsDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc_dhcp_options" { - continue - } - - // Try to find the resource - resp, err := 
conn.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{ - DhcpOptionsIds: []*string{ - aws.String(rs.Primary.ID), - }, - }) - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidDhcpOptionID.NotFound" { - continue - } - if err == nil { - if len(resp.DhcpOptions) > 0 { - return fmt.Errorf("still exists") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidDhcpOptionsID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckDHCPOptionsExists(n string, d *ec2.DhcpOptions) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{ - DhcpOptionsIds: []*string{ - aws.String(rs.Primary.ID), - }, - }) - if err != nil { - return err - } - if len(resp.DhcpOptions) == 0 { - return fmt.Errorf("DHCP Options not found") - } - - *d = *resp.DhcpOptions[0] - - return nil - } -} - -func testAccCheckDHCPOptionsDelete(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ - DhcpOptionsId: aws.String(rs.Primary.ID), - }) - - return err - } -} - -const testAccDHCPOptionsConfig = ` -resource "aws_vpc_dhcp_options" "foo" { - domain_name = "service.consul" - domain_name_servers = ["127.0.0.1", "10.0.0.2"] - ntp_servers = ["127.0.0.1"] - netbios_name_servers = ["127.0.0.1"] - netbios_node_type = 2 - - tags { - Name = "foo-name" - } -} -` diff --git 
a/builtin/providers/aws/resource_aws_vpc_endpoint.go b/builtin/providers/aws/resource_aws_vpc_endpoint.go deleted file mode 100644 index b07940326..000000000 --- a/builtin/providers/aws/resource_aws_vpc_endpoint.go +++ /dev/null @@ -1,237 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpcEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVPCEndpointCreate, - Read: resourceAwsVPCEndpointRead, - Update: resourceAwsVPCEndpointUpdate, - Delete: resourceAwsVPCEndpointDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateJsonString, - StateFunc: func(v interface{}) string { - json, _ := normalizeJsonString(v) - return json - }, - }, - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "service_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "route_table_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "prefix_list_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "cidr_blocks": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func resourceAwsVPCEndpointCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - input := &ec2.CreateVpcEndpointInput{ - VpcId: aws.String(d.Get("vpc_id").(string)), - ServiceName: aws.String(d.Get("service_name").(string)), - } - - if v, ok := d.GetOk("route_table_ids"); 
ok { - list := v.(*schema.Set).List() - if len(list) > 0 { - input.RouteTableIds = expandStringList(list) - } - } - - if v, ok := d.GetOk("policy"); ok { - policy, err := normalizeJsonString(v) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - input.PolicyDocument = aws.String(policy) - } - - log.Printf("[DEBUG] Creating VPC Endpoint: %#v", input) - output, err := conn.CreateVpcEndpoint(input) - if err != nil { - return fmt.Errorf("Error creating VPC Endpoint: %s", err) - } - log.Printf("[DEBUG] VPC Endpoint %q created.", *output.VpcEndpoint.VpcEndpointId) - - d.SetId(*output.VpcEndpoint.VpcEndpointId) - - return resourceAwsVPCEndpointRead(d, meta) -} - -func resourceAwsVPCEndpointRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - input := &ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: []*string{aws.String(d.Id())}, - } - - log.Printf("[DEBUG] Reading VPC Endpoint: %q", d.Id()) - output, err := conn.DescribeVpcEndpoints(input) - - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error reading VPC Endpoint: %s", err.Error()) - } - - if ec2err.Code() == "InvalidVpcEndpointId.NotFound" { - log.Printf("[WARN] VPC Endpoint (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - return fmt.Errorf("Error reading VPC Endpoint: %s", err.Error()) - } - - if len(output.VpcEndpoints) != 1 { - return fmt.Errorf("There's no unique VPC Endpoint, but %d endpoints: %#v", - len(output.VpcEndpoints), output.VpcEndpoints) - } - - vpce := output.VpcEndpoints[0] - - // A VPC Endpoint is associated with exactly one prefix list name (also called Service Name). - // The prefix list ID can be used in security groups, so retrieve it to support that capability. 
- prefixListServiceName := *vpce.ServiceName - prefixListInput := &ec2.DescribePrefixListsInput{ - Filters: []*ec2.Filter{ - {Name: aws.String("prefix-list-name"), Values: []*string{aws.String(prefixListServiceName)}}, - }, - } - - log.Printf("[DEBUG] Reading VPC Endpoint prefix list: %s", prefixListServiceName) - prefixListsOutput, err := conn.DescribePrefixLists(prefixListInput) - - if err != nil { - _, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error reading VPC Endpoint prefix list: %s", err.Error()) - } - } - - if len(prefixListsOutput.PrefixLists) != 1 { - return fmt.Errorf("There are multiple prefix lists associated with the service name '%s'. Unexpected", prefixListServiceName) - } - - policy, err := normalizeJsonString(*vpce.PolicyDocument) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - - d.Set("vpc_id", vpce.VpcId) - d.Set("policy", policy) - d.Set("service_name", vpce.ServiceName) - if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil { - return err - } - pl := prefixListsOutput.PrefixLists[0] - d.Set("prefix_list_id", pl.PrefixListId) - d.Set("cidr_blocks", aws.StringValueSlice(pl.Cidrs)) - - return nil -} - -func resourceAwsVPCEndpointUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - input := &ec2.ModifyVpcEndpointInput{ - VpcEndpointId: aws.String(d.Id()), - } - - if d.HasChange("route_table_ids") { - o, n := d.GetChange("route_table_ids") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - add := expandStringList(ns.Difference(os).List()) - if len(add) > 0 { - input.AddRouteTableIds = add - } - - remove := expandStringList(os.Difference(ns).List()) - if len(remove) > 0 { - input.RemoveRouteTableIds = remove - } - } - - if d.HasChange("policy") { - policy, err := normalizeJsonString(d.Get("policy")) - if err != nil { - return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) - } - 
input.PolicyDocument = aws.String(policy) - } - - log.Printf("[DEBUG] Updating VPC Endpoint: %#v", input) - _, err := conn.ModifyVpcEndpoint(input) - if err != nil { - return fmt.Errorf("Error updating VPC Endpoint: %s", err) - } - log.Printf("[DEBUG] VPC Endpoint %q updated", input.VpcEndpointId) - - return resourceAwsVPCEndpointRead(d, meta) -} - -func resourceAwsVPCEndpointDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - input := &ec2.DeleteVpcEndpointsInput{ - VpcEndpointIds: []*string{aws.String(d.Id())}, - } - - log.Printf("[DEBUG] Deleting VPC Endpoint: %#v", input) - _, err := conn.DeleteVpcEndpoints(input) - - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error deleting VPC Endpoint: %s", err.Error()) - } - - if ec2err.Code() == "InvalidVpcEndpointId.NotFound" { - log.Printf("[DEBUG] VPC Endpoint %q is already gone", d.Id()) - } else { - return fmt.Errorf("Error deleting VPC Endpoint: %s", err.Error()) - } - } - - log.Printf("[DEBUG] VPC Endpoint %q deleted", d.Id()) - d.SetId("") - - return nil -} diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association.go b/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association.go deleted file mode 100644 index 655638aa4..000000000 --- a/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association.go +++ /dev/null @@ -1,159 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpcEndpointRouteTableAssociation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVPCEndpointRouteTableAssociationCreate, - Read: resourceAwsVPCEndpointRouteTableAssociationRead, - Delete: resourceAwsVPCEndpointRouteTableAssociationDelete, - Importer: 
&schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "vpc_endpoint_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "route_table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsVPCEndpointRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - endpointId := d.Get("vpc_endpoint_id").(string) - rtId := d.Get("route_table_id").(string) - - _, err := findResourceVPCEndpoint(conn, endpointId) - if err != nil { - return err - } - - log.Printf( - "[INFO] Creating VPC Endpoint/Route Table association: %s => %s", - endpointId, rtId) - - input := &ec2.ModifyVpcEndpointInput{ - VpcEndpointId: aws.String(endpointId), - AddRouteTableIds: aws.StringSlice([]string{rtId}), - } - - _, err = conn.ModifyVpcEndpoint(input) - if err != nil { - return fmt.Errorf("Error creating VPC Endpoint/Route Table association: %s", err.Error()) - } - id := vpcEndpointIdRouteTableIdHash(endpointId, rtId) - log.Printf("[DEBUG] VPC Endpoint/Route Table association %q created.", id) - - d.SetId(id) - - return resourceAwsVPCEndpointRouteTableAssociationRead(d, meta) -} - -func resourceAwsVPCEndpointRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - endpointId := d.Get("vpc_endpoint_id").(string) - rtId := d.Get("route_table_id").(string) - - vpce, err := findResourceVPCEndpoint(conn, endpointId) - if err != nil { - if err, ok := err.(awserr.Error); ok && err.Code() == "InvalidVpcEndpointId.NotFound" { - d.SetId("") - return nil - } - - return err - } - - found := false - for _, id := range vpce.RouteTableIds { - if id != nil && *id == rtId { - found = true - break - } - } - if !found { - // The association no longer exists. 
- d.SetId("") - return nil - } - - id := vpcEndpointIdRouteTableIdHash(endpointId, rtId) - log.Printf("[DEBUG] Computed VPC Endpoint/Route Table ID %s", id) - d.SetId(id) - - return nil -} - -func resourceAwsVPCEndpointRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - endpointId := d.Get("vpc_endpoint_id").(string) - rtId := d.Get("route_table_id").(string) - - input := &ec2.ModifyVpcEndpointInput{ - VpcEndpointId: aws.String(endpointId), - RemoveRouteTableIds: aws.StringSlice([]string{rtId}), - } - - _, err := conn.ModifyVpcEndpoint(input) - if err != nil { - ec2err, ok := err.(awserr.Error) - if !ok { - return fmt.Errorf("Error deleting VPC Endpoint/Route Table association: %s", err.Error()) - } - - switch ec2err.Code() { - case "InvalidVpcEndpointId.NotFound": - fallthrough - case "InvalidRouteTableId.NotFound": - fallthrough - case "InvalidParameter": - log.Printf("[DEBUG] VPC Endpoint/Route Table association is already gone") - default: - return fmt.Errorf("Error deleting VPC Endpoint/Route Table association: %s", err.Error()) - } - } - - log.Printf("[DEBUG] VPC Endpoint/Route Table association %q deleted", d.Id()) - d.SetId("") - - return nil -} - -func findResourceVPCEndpoint(conn *ec2.EC2, id string) (*ec2.VpcEndpoint, error) { - input := &ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: aws.StringSlice([]string{id}), - } - - log.Printf("[DEBUG] Reading VPC Endpoint: %q", id) - output, err := conn.DescribeVpcEndpoints(input) - if err != nil { - return nil, err - } - - if output.VpcEndpoints == nil { - return nil, fmt.Errorf("No VPC Endpoints were found for %q", id) - } - - return output.VpcEndpoints[0], nil -} - -func vpcEndpointIdRouteTableIdHash(endpointId, rtId string) string { - return fmt.Sprintf("a-%s%d", endpointId, hashcode.String(rtId)) -} diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association_test.go 
b/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association_test.go deleted file mode 100644 index 450b6dd9b..000000000 --- a/builtin/providers/aws/resource_aws_vpc_endpoint_route_table_association_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpcEndpointRouteTableAssociation_basic(t *testing.T) { - var vpce ec2.VpcEndpoint - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcEndpointRouteTableAssociationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointRouteTableAssociationConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointRouteTableAssociationExists( - "aws_vpc_endpoint_route_table_association.a", &vpce), - ), - }, - }, - }) -} - -func testAccCheckVpcEndpointRouteTableAssociationDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc_endpoint_route_table_association" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: aws.StringSlice([]string{rs.Primary.Attributes["vpc_endpoint_id"]}), - }) - if err != nil { - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidVpcEndpointId.NotFound" { - return err - } - return nil - } - - vpce := resp.VpcEndpoints[0] - if len(vpce.RouteTableIds) > 0 { - return fmt.Errorf( - "VPC endpoint %s has route tables", *vpce.VpcEndpointId) - } - } - - return nil -} - -func testAccCheckVpcEndpointRouteTableAssociationExists(n string, 
vpce *ec2.VpcEndpoint) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: aws.StringSlice([]string{rs.Primary.Attributes["vpc_endpoint_id"]}), - }) - if err != nil { - return err - } - if len(resp.VpcEndpoints) == 0 { - return fmt.Errorf("VPC endpoint not found") - } - - *vpce = *resp.VpcEndpoints[0] - - if len(vpce.RouteTableIds) == 0 { - return fmt.Errorf("no route table associations") - } - - for _, id := range vpce.RouteTableIds { - if *id == rs.Primary.Attributes["route_table_id"] { - return nil - } - } - - return fmt.Errorf("route table association not found") - } -} - -const testAccVpcEndpointRouteTableAssociationConfig = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_vpc_endpoint" "s3" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" -} - -resource "aws_route_table" "rt" { - vpc_id = "${aws_vpc.foo.id}" - - tags { - Name = "test" - } -} - -resource "aws_vpc_endpoint_route_table_association" "a" { - vpc_endpoint_id = "${aws_vpc_endpoint.s3.id}" - route_table_id = "${aws_route_table.rt.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint_test.go b/builtin/providers/aws/resource_aws_vpc_endpoint_test.go deleted file mode 100644 index f04d36157..000000000 --- a/builtin/providers/aws/resource_aws_vpc_endpoint_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package aws - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpcEndpoint_basic(t *testing.T) { - var endpoint ec2.VpcEndpoint - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_endpoint.second-private-s3", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointWithRouteTableAndPolicyConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint), - testAccCheckVpcEndpointPrefixListAvailable("aws_vpc_endpoint.second-private-s3"), - ), - }, - }, - }) -} - -func TestAccAWSVpcEndpoint_withRouteTableAndPolicy(t *testing.T) { - var endpoint ec2.VpcEndpoint - var routeTable ec2.RouteTable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_endpoint.second-private-s3", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointWithRouteTableAndPolicyConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint), - testAccCheckRouteTableExists("aws_route_table.default", &routeTable), - ), - }, - resource.TestStep{ - Config: testAccVpcEndpointWithRouteTableAndPolicyConfigModified, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint), - testAccCheckRouteTableExists("aws_route_table.default", &routeTable), - ), - }, - }, - }) -} - -func TestAccAWSVpcEndpoint_WithoutRouteTableOrPolicyConfig(t *testing.T) { - var endpoint ec2.VpcEndpoint - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_endpoint.second-private-s3", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcEndpointDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointWithoutRouteTableOrPolicyConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint), - testAccCheckVpcEndpointPrefixListAvailable("aws_vpc_endpoint.second-private-s3"), - ), - }, - }, - }) -} - -func TestAccAWSVpcEndpoint_removed(t *testing.T) { - var endpoint ec2.VpcEndpoint - - // reach out and DELETE the VPC Endpoint outside of Terraform - testDestroy := func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - input := &ec2.DeleteVpcEndpointsInput{ - VpcEndpointIds: []*string{endpoint.VpcEndpointId}, - } - - _, err := conn.DeleteVpcEndpoints(input) - if err != nil { - return err - } - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcEndpointWithoutRouteTableOrPolicyConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint), - testDestroy, - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckVpcEndpointDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc_endpoint" { - continue - } - - // Try to find the VPC - input := &ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeVpcEndpoints(input) - if err != nil { - // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidVpcEndpointId.NotFound" { - continue - } - return err - } - if len(resp.VpcEndpoints) > 0 { - return fmt.Errorf("VPC Endpoints still exist.") - } - - return err - } - - return nil -} - -func testAccCheckVpcEndpointExists(n string, 
endpoint *ec2.VpcEndpoint) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC Endpoint ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - input := &ec2.DescribeVpcEndpointsInput{ - VpcEndpointIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeVpcEndpoints(input) - if err != nil { - return err - } - if len(resp.VpcEndpoints) == 0 { - return fmt.Errorf("VPC Endpoint not found") - } - - *endpoint = *resp.VpcEndpoints[0] - - return nil - } -} - -func testAccCheckVpcEndpointPrefixListAvailable(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - prefixListID := rs.Primary.Attributes["prefix_list_id"] - if prefixListID == "" { - return fmt.Errorf("Prefix list ID not available") - } - if !strings.HasPrefix(prefixListID, "pl") { - return fmt.Errorf("Prefix list ID does not appear to be a valid value: '%s'", prefixListID) - } - - var ( - cidrBlockSize int - err error - ) - - if cidrBlockSize, err = strconv.Atoi(rs.Primary.Attributes["cidr_blocks.#"]); err != nil { - return err - } - if cidrBlockSize < 1 { - return fmt.Errorf("cidr_blocks seem suspiciously low: %d", cidrBlockSize) - } - - return nil - } -} - -const testAccVpcEndpointWithRouteTableAndPolicyConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.0.1.0/24" -} - -resource "aws_vpc_endpoint" "second-private-s3" { - vpc_id = "${aws_vpc.foo.id}" - service_name = "com.amazonaws.us-west-2.s3" - route_table_ids = ["${aws_route_table.default.id}"] - policy = < 0 { - co := s.List()[0].(map[string]interface{}) - modifyOpts.AccepterPeeringConnectionOptions = expandPeeringOptions(co) - } - } - - 
if v, ok := d.GetOk("requester"); ok { - if s := v.(*schema.Set); len(s.List()) > 0 { - co := s.List()[0].(map[string]interface{}) - modifyOpts.RequesterPeeringConnectionOptions = expandPeeringOptions(co) - } - } - - log.Printf("[DEBUG] VPC Peering Connection modify options: %#v", modifyOpts) - if _, err := conn.ModifyVpcPeeringConnectionOptions(modifyOpts); err != nil { - return err - } - - return nil -} - -func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if err := setTags(conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - pcRaw, _, err := resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id())() - if err != nil { - return err - } - - if pcRaw == nil { - d.SetId("") - return nil - } - pc := pcRaw.(*ec2.VpcPeeringConnection) - - if _, ok := d.GetOk("auto_accept"); ok { - if pc.Status != nil && *pc.Status.Code == "pending-acceptance" { - status, err := resourceVPCPeeringConnectionAccept(conn, d.Id()) - if err != nil { - return errwrap.Wrapf("Unable to accept VPC Peering Connection: {{err}}", err) - } - log.Printf("[DEBUG] VPC Peering Connection accept status: %s", status) - } - } - - if d.HasChange("accepter") || d.HasChange("requester") { - _, ok := d.GetOk("auto_accept") - if !ok && pc.Status != nil && *pc.Status.Code != "active" { - return fmt.Errorf("Unable to modify peering options. The VPC Peering Connection "+ - "%q is not active. 
Please set `auto_accept` attribute to `true`, "+ - "or activate VPC Peering Connection manually.", d.Id()) - } - - if err := resourceVPCPeeringConnectionOptionsModify(d, meta); err != nil { - return errwrap.Wrapf("Error modifying VPC Peering Connection options: {{err}}", err) - } - } - - return resourceAwsVPCPeeringRead(d, meta) -} - -func resourceAwsVPCPeeringDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteVpcPeeringConnection( - &ec2.DeleteVpcPeeringConnectionInput{ - VpcPeeringConnectionId: aws.String(d.Id()), - }) - - return err -} - -// resourceAwsVPCPeeringConnectionStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a VPCPeeringConnection. -func resourceAwsVPCPeeringConnectionStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeVpcPeeringConnections(&ec2.DescribeVpcPeeringConnectionsInput{ - VpcPeeringConnectionIds: []*string{aws.String(id)}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcPeeringConnectionID.NotFound" { - resp = nil - } else { - log.Printf("Error reading VPC Peering Connection details: %s", err) - return nil, "error", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. - return nil, "", nil - } - - pc := resp.VpcPeeringConnections[0] - - // A VPC Peering Connection can exist in a failed state due to - // incorrect VPC ID, account ID, or overlapping IP address range, - // thus we short circuit before the time out would occur. 
- if pc != nil && *pc.Status.Code == "failed" { - return nil, "failed", errors.New(*pc.Status.Message) - } - - return pc, *pc.Status.Code, nil - } -} - -func vpcPeeringConnectionOptionsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_remote_vpc_dns_resolution": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "allow_classic_link_to_remote_vpc": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "allow_vpc_to_remote_classic_link": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - } -} - -func flattenPeeringOptions(options *ec2.VpcPeeringConnectionOptionsDescription) (results []map[string]interface{}) { - m := make(map[string]interface{}) - - if options.AllowDnsResolutionFromRemoteVpc != nil { - m["allow_remote_vpc_dns_resolution"] = *options.AllowDnsResolutionFromRemoteVpc - } - - if options.AllowEgressFromLocalClassicLinkToRemoteVpc != nil { - m["allow_classic_link_to_remote_vpc"] = *options.AllowEgressFromLocalClassicLinkToRemoteVpc - } - - if options.AllowEgressFromLocalVpcToRemoteClassicLink != nil { - m["allow_vpc_to_remote_classic_link"] = *options.AllowEgressFromLocalVpcToRemoteClassicLink - } - - results = append(results, m) - return -} - -func expandPeeringOptions(m map[string]interface{}) *ec2.PeeringConnectionOptionsRequest { - r := &ec2.PeeringConnectionOptionsRequest{} - - if v, ok := m["allow_remote_vpc_dns_resolution"]; ok { - r.AllowDnsResolutionFromRemoteVpc = aws.Bool(v.(bool)) - } - - if v, ok := m["allow_classic_link_to_remote_vpc"]; ok { - r.AllowEgressFromLocalClassicLinkToRemoteVpc = aws.Bool(v.(bool)) - } - - if v, ok := m["allow_vpc_to_remote_classic_link"]; ok { - r.AllowEgressFromLocalVpcToRemoteClassicLink = aws.Bool(v.(bool)) - } - - return r -} diff --git 
a/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter.go deleted file mode 100644 index 8b1efff50..000000000 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter.go +++ /dev/null @@ -1,76 +0,0 @@ -package aws - -import ( - "errors" - "log" - - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpcPeeringConnectionAccepter() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVPCPeeringAccepterCreate, - Read: resourceAwsVPCPeeringRead, - Update: resourceAwsVPCPeeringUpdate, - Delete: resourceAwsVPCPeeringAccepterDelete, - - Schema: map[string]*schema.Schema{ - "vpc_peering_connection_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Computed: false, - }, - "auto_accept": { - Type: schema.TypeBool, - Optional: true, - }, - "accept_status": { - Type: schema.TypeString, - Computed: true, - }, - "vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - "peer_vpc_id": { - Type: schema.TypeString, - Computed: true, - }, - "peer_owner_id": { - Type: schema.TypeString, - Computed: true, - }, - "accepter": vpcPeeringConnectionOptionsSchema(), - "requester": vpcPeeringConnectionOptionsSchema(), - "tags": tagsSchema(), - }, - } -} - -func resourceAwsVPCPeeringAccepterCreate(d *schema.ResourceData, meta interface{}) error { - id := d.Get("vpc_peering_connection_id").(string) - d.SetId(id) - - if err := resourceAwsVPCPeeringRead(d, meta); err != nil { - return err - } - if d.Id() == "" { - return fmt.Errorf("VPC Peering Connection %q not found", id) - } - - // Ensure that this IS as cross-account VPC peering connection. 
- if d.Get("peer_owner_id").(string) == meta.(*AWSClient).accountid { - return errors.New("aws_vpc_peering_connection_accepter can only adopt into management cross-account VPC peering connections") - } - - return resourceAwsVPCPeeringUpdate(d, meta) -} - -func resourceAwsVPCPeeringAccepterDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARN] Will not delete VPC peering connection. Terraform will remove this resource from the state file, however resources may remain.") - d.SetId("") - return nil -} diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter_test.go deleted file mode 100644 index 74b899c4e..000000000 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection_accepter_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAwsVPCPeeringConnectionAccepter_' -package aws - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAwsVPCPeeringConnectionAccepter_sameAccount(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAwsVPCPeeringConnectionAccepterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAwsVPCPeeringConnectionAccepterSameAccountConfig, - ExpectError: regexp.MustCompile(`aws_vpc_peering_connection_accepter can only adopt into management cross-account VPC peering connections`), - }, - }, - }) -} - -func testAccAwsVPCPeeringConnectionAccepterDestroy(s *terraform.State) error { - // We don't destroy the underlying VPC Peering Connection. - return nil -} - -const testAccAwsVPCPeeringConnectionAccepterSameAccountConfig = ` -provider "aws" { - region = "us-west-2" - // Requester's credentials. 
-} - -provider "aws" { - alias = "peer" - region = "us-west-2" - // Accepter's credentials. -} - -resource "aws_vpc" "main" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_vpc" "peer" { - provider = "aws.peer" - cidr_block = "10.1.0.0/16" -} - -data "aws_caller_identity" "peer" { - provider = "aws.peer" -} - -// Requester's side of the connection. -resource "aws_vpc_peering_connection" "peer" { - vpc_id = "${aws_vpc.main.id}" - peer_vpc_id = "${aws_vpc.peer.id}" - peer_owner_id = "${data.aws_caller_identity.peer.account_id}" - auto_accept = false - - tags { - Side = "Requester" - } -} - -// Accepter's side of the connection. -resource "aws_vpc_peering_connection_accepter" "peer" { - provider = "aws.peer" - vpc_peering_connection_id = "${aws_vpc_peering_connection.peer.id}" - auto_accept = true - - tags { - Side = "Accepter" - } -} -` diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go deleted file mode 100644 index fe9b6a7c5..000000000 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "reflect" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVPCPeeringConnection_basic(t *testing.T) { - var connection ec2.VpcPeeringConnection - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_peering_connection.foo", - IDRefreshIgnore: []string{"auto_accept"}, - - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSVpcPeeringConnectionExists( - "aws_vpc_peering_connection.foo", - 
&connection), - ), - }, - }, - }) -} - -func TestAccAWSVPCPeeringConnection_plan(t *testing.T) { - var connection ec2.VpcPeeringConnection - - // reach out and DELETE the VPC Peering connection outside of Terraform - testDestroy := func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - log.Printf("[DEBUG] Test deleting the VPC Peering Connection.") - _, err := conn.DeleteVpcPeeringConnection( - &ec2.DeleteVpcPeeringConnectionInput{ - VpcPeeringConnectionId: connection.VpcPeeringConnectionId, - }) - if err != nil { - return err - } - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshIgnore: []string{"auto_accept"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSVpcPeeringConnectionExists( - "aws_vpc_peering_connection.foo", - &connection), - testDestroy, - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSVPCPeeringConnection_tags(t *testing.T) { - var connection ec2.VpcPeeringConnection - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_peering_connection.foo", - IDRefreshIgnore: []string{"auto_accept"}, - - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSVpcPeeringConnectionExists( - "aws_vpc_peering_connection.foo", - &connection), - testAccCheckTags(&connection.Tags, "foo", "bar"), - ), - }, - }, - }) -} - -func TestAccAWSVPCPeeringConnection_options(t *testing.T) { - var connection ec2.VpcPeeringConnection - - testAccepterChange := func(*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - log.Printf("[DEBUG] Test change 
to the VPC Peering Connection Options.") - - _, err := conn.ModifyVpcPeeringConnectionOptions( - &ec2.ModifyVpcPeeringConnectionOptionsInput{ - VpcPeeringConnectionId: connection.VpcPeeringConnectionId, - AccepterPeeringConnectionOptions: &ec2.PeeringConnectionOptionsRequest{ - AllowDnsResolutionFromRemoteVpc: aws.Bool(false), - }, - }) - if err != nil { - return err - } - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpc_peering_connection.foo", - IDRefreshIgnore: []string{"auto_accept"}, - - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfigOptions, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSVpcPeeringConnectionExists( - "aws_vpc_peering_connection.foo", - &connection), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "accepter.#", "1"), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "accepter.1102046665.allow_remote_vpc_dns_resolution", "true"), - testAccCheckAWSVpcPeeringConnectionOptions( - "aws_vpc_peering_connection.foo", "accepter", - &ec2.VpcPeeringConnectionOptionsDescription{ - AllowDnsResolutionFromRemoteVpc: aws.Bool(true), - AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(false), - AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(false), - }), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "requester.#", "1"), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "requester.41753983.allow_classic_link_to_remote_vpc", "true"), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "requester.41753983.allow_vpc_to_remote_classic_link", "true"), - testAccCheckAWSVpcPeeringConnectionOptions( - "aws_vpc_peering_connection.foo", "requester", - &ec2.VpcPeeringConnectionOptionsDescription{ - AllowDnsResolutionFromRemoteVpc: 
aws.Bool(false), - AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(true), - AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(true), - }, - ), - testAccepterChange, - ), - ExpectNonEmptyPlan: true, - }, - resource.TestStep{ - Config: testAccVpcPeeringConfigOptions, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSVpcPeeringConnectionExists( - "aws_vpc_peering_connection.foo", - &connection), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "accepter.#", "1"), - resource.TestCheckResourceAttr( - "aws_vpc_peering_connection.foo", - "accepter.1102046665.allow_remote_vpc_dns_resolution", "true"), - testAccCheckAWSVpcPeeringConnectionOptions( - "aws_vpc_peering_connection.foo", "accepter", - &ec2.VpcPeeringConnectionOptionsDescription{ - AllowDnsResolutionFromRemoteVpc: aws.Bool(true), - AllowEgressFromLocalClassicLinkToRemoteVpc: aws.Bool(false), - AllowEgressFromLocalVpcToRemoteClassicLink: aws.Bool(false), - }, - ), - ), - }, - }, - }) -} - -func TestAccAWSVPCPeeringConnection_failedState(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshIgnore: []string{"auto_accept"}, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSVpcPeeringConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpcPeeringConfigFailedState, - ExpectError: regexp.MustCompile(`.*Error waiting.*\(pcx-\w+\).*incorrect.*VPC-ID.*`), - }, - }, - }) -} - -func testAccCheckAWSVpcPeeringConnectionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc_peering_connection" { - continue - } - - describe, err := conn.DescribeVpcPeeringConnections( - &ec2.DescribeVpcPeeringConnectionsInput{ - VpcPeeringConnectionIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - var pc *ec2.VpcPeeringConnection - for _, c := range 
describe.VpcPeeringConnections { - if rs.Primary.ID == *c.VpcPeeringConnectionId { - pc = c - } - } - - if pc == nil { - // not found - return nil - } - - if pc.Status != nil { - if *pc.Status.Code == "deleted" { - return nil - } - return fmt.Errorf("Found the VPC Peering Connection in an unexpected state: %s", pc) - } - - // return error here; we've found the vpc_peering object we want, however - // it's not in an expected state - return fmt.Errorf("Fall through error for testAccCheckAWSVpcPeeringConnectionDestroy.") - } - - return nil -} - -func testAccCheckAWSVpcPeeringConnectionExists(n string, connection *ec2.VpcPeeringConnection) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC Peering Connection ID is set.") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeVpcPeeringConnections( - &ec2.DescribeVpcPeeringConnectionsInput{ - VpcPeeringConnectionIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.VpcPeeringConnections) == 0 { - return fmt.Errorf("VPC Peering Connection could not be found") - } - - *connection = *resp.VpcPeeringConnections[0] - - return nil - } -} - -func testAccCheckAWSVpcPeeringConnectionOptions(n, block string, options *ec2.VpcPeeringConnectionOptionsDescription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC Peering Connection ID is set.") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeVpcPeeringConnections( - &ec2.DescribeVpcPeeringConnectionsInput{ - VpcPeeringConnectionIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - - pc := 
resp.VpcPeeringConnections[0] - - o := pc.AccepterVpcInfo - if block == "requester" { - o = pc.RequesterVpcInfo - } - - if !reflect.DeepEqual(o.PeeringOptions, options) { - return fmt.Errorf("Expected the VPC Peering Connection Options to be %#v, got %#v", - options, o.PeeringOptions) - } - - return nil - } -} - -const testAccVpcPeeringConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "TestAccAWSVPCPeeringConnection_basic" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpc_peering_connection" "foo" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" - auto_accept = true -} -` - -const testAccVpcPeeringConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "TestAccAWSVPCPeeringConnection_tags" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpc_peering_connection" "foo" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" - auto_accept = true - tags { - foo = "bar" - } -} -` - -const testAccVpcPeeringConfigOptions = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "TestAccAWSVPCPeeringConnection_options" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true -} - -resource "aws_vpc_peering_connection" "foo" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" - auto_accept = true - - accepter { - allow_remote_vpc_dns_resolution = true - } - - requester { - allow_vpc_to_remote_classic_link = true - allow_classic_link_to_remote_vpc = true - } -} -` - -const testAccVpcPeeringConfigFailedState = ` -resource "aws_vpc" "foo" { - cidr_block = "10.0.0.0/16" - tags { - Name = "TestAccAWSVPCPeeringConnection_failedState" - } -} - -resource "aws_vpc" "bar" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_vpc_peering_connection" "foo" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" -} -` diff 
--git a/builtin/providers/aws/resource_aws_vpc_test.go b/builtin/providers/aws/resource_aws_vpc_test.go deleted file mode 100644 index ca68bdfe8..000000000 --- a/builtin/providers/aws/resource_aws_vpc_test.go +++ /dev/null @@ -1,385 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpc_basic(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - resource.TestCheckResourceAttrSet( - "aws_vpc.foo", "default_route_table_id"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "enable_dns_support", "true"), - ), - }, - }, - }) -} - -func TestAccAWSVpc_enableIpv6(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfigIpv6Enabled, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - resource.TestCheckResourceAttrSet( - "aws_vpc.foo", "ipv6_association_id"), - resource.TestCheckResourceAttrSet( - "aws_vpc.foo", "ipv6_cidr_block"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "assign_generated_ipv6_cidr_block", "true"), - ), - }, - { - Config: testAccVpcConfigIpv6Disabled, - 
Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "assign_generated_ipv6_cidr_block", "false"), - ), - }, - { - Config: testAccVpcConfigIpv6Enabled, - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - resource.TestCheckResourceAttrSet( - "aws_vpc.foo", "ipv6_association_id"), - resource.TestCheckResourceAttrSet( - "aws_vpc.foo", "ipv6_cidr_block"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "assign_generated_ipv6_cidr_block", "true"), - ), - }, - }, - }) -} - -func TestAccAWSVpc_dedicatedTenancy(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcDedicatedConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.bar", &vpc), - resource.TestCheckResourceAttr( - "aws_vpc.bar", "instance_tenancy", "dedicated"), - ), - }, - }, - }) -} - -func TestAccAWSVpc_tags(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - testAccCheckTags(&vpc.Tags, "foo", "bar"), - ), - }, - - { - Config: testAccVpcConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckTags(&vpc.Tags, "foo", ""), - testAccCheckTags(&vpc.Tags, "bar", "baz"), - ), - }, - }, - }) -} - -func TestAccAWSVpc_update(t *testing.T) { - var vpc ec2.Vpc - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckVpcCidr(&vpc, "10.1.0.0/16"), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "cidr_block", "10.1.0.0/16"), - ), - }, - { - Config: testAccVpcConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc), - resource.TestCheckResourceAttr( - "aws_vpc.foo", "enable_dns_hostnames", "true"), - ), - }, - }, - }) -} - -func testAccCheckVpcDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpc" { - continue - } - - // Try to find the VPC - DescribeVpcOpts := &ec2.DescribeVpcsInput{ - VpcIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeVpcs(DescribeVpcOpts) - if err == nil { - if len(resp.Vpcs) > 0 { - return fmt.Errorf("VPCs still exist.") - } - - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidVpcID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckVpcCidr(vpc *ec2.Vpc, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - CIDRBlock := vpc.CidrBlock - if *CIDRBlock != expected { - return fmt.Errorf("Bad cidr: %s", *vpc.CidrBlock) - } - - return nil - } -} - -func testAccCheckVpcExists(n string, vpc *ec2.Vpc) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - 
return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - DescribeVpcOpts := &ec2.DescribeVpcsInput{ - VpcIds: []*string{aws.String(rs.Primary.ID)}, - } - resp, err := conn.DescribeVpcs(DescribeVpcOpts) - if err != nil { - return err - } - if len(resp.Vpcs) == 0 { - return fmt.Errorf("VPC not found") - } - - *vpc = *resp.Vpcs[0] - - return nil - } -} - -// https://github.com/hashicorp/terraform/issues/1301 -func TestAccAWSVpc_bothDnsOptionsSet(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfig_BothDnsOptions, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_vpc.bar", "enable_dns_hostnames", "true"), - resource.TestCheckResourceAttr( - "aws_vpc.bar", "enable_dns_support", "true"), - ), - }, - }, - }) -} - -// https://github.com/hashicorp/terraform/issues/10168 -func TestAccAWSVpc_DisabledDnsSupport(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfig_DisabledDnsSupport, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_vpc.bar", "enable_dns_support", "false"), - ), - }, - }, - }) -} - -func TestAccAWSVpc_classiclinkOptionSet(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpcDestroy, - Steps: []resource.TestStep{ - { - Config: testAccVpcConfig_ClassiclinkOption, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_vpc.bar", "enable_classiclink", "true"), - ), - }, - }, - }) -} - -const testAccVpcConfig = ` 
-resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} -` - -const testAccVpcConfigIpv6Enabled = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true -} -` - -const testAccVpcConfigIpv6Disabled = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} -` - -const testAccVpcConfigUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true -} -` - -const testAccVpcConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - foo = "bar" - } -} -` - -const testAccVpcConfigTagsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - - tags { - bar = "baz" - } -} -` -const testAccVpcDedicatedConfig = ` -resource "aws_vpc" "bar" { - instance_tenancy = "dedicated" - - cidr_block = "10.2.0.0/16" -} -` - -const testAccVpcConfig_BothDnsOptions = ` -provider "aws" { - region = "eu-central-1" -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" - - enable_dns_hostnames = true - enable_dns_support = true -} -` - -const testAccVpcConfig_DisabledDnsSupport = ` -provider "aws" { - region = "us-west-2" -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" - - enable_dns_support = false -} -` - -const testAccVpcConfig_ClassiclinkOption = ` -resource "aws_vpc" "bar" { - cidr_block = "172.2.0.0/16" - - enable_classiclink = true -} -` diff --git a/builtin/providers/aws/resource_aws_vpn_connection.go b/builtin/providers/aws/resource_aws_vpn_connection.go deleted file mode 100644 index 1bef00d3b..000000000 --- a/builtin/providers/aws/resource_aws_vpn_connection.go +++ /dev/null @@ -1,484 +0,0 @@ -package aws - -import ( - "bytes" - "encoding/xml" - "fmt" - "log" - "sort" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/helper/schema" -) - -type XmlVpnConnectionConfig struct { - Tunnels []XmlIpsecTunnel `xml:"ipsec_tunnel"` -} - -type XmlIpsecTunnel struct { - OutsideAddress string `xml:"vpn_gateway>tunnel_outside_address>ip_address"` - PreSharedKey string `xml:"ike>pre_shared_key"` - CgwInsideAddress string `xml:"customer_gateway>tunnel_inside_address>ip_address"` - VgwInsideAddress string `xml:"vpn_gateway>tunnel_inside_address>ip_address"` -} - -type TunnelInfo struct { - Tunnel1Address string - Tunnel1CgwInsideAddress string - Tunnel1VgwInsideAddress string - Tunnel1PreSharedKey string - Tunnel2Address string - Tunnel2CgwInsideAddress string - Tunnel2VgwInsideAddress string - Tunnel2PreSharedKey string -} - -func (slice XmlVpnConnectionConfig) Len() int { - return len(slice.Tunnels) -} - -func (slice XmlVpnConnectionConfig) Less(i, j int) bool { - return slice.Tunnels[i].OutsideAddress < slice.Tunnels[j].OutsideAddress -} - -func (slice XmlVpnConnectionConfig) Swap(i, j int) { - slice.Tunnels[i], slice.Tunnels[j] = slice.Tunnels[j], slice.Tunnels[i] -} - -func resourceAwsVpnConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpnConnectionCreate, - Read: resourceAwsVpnConnectionRead, - Update: resourceAwsVpnConnectionUpdate, - Delete: resourceAwsVpnConnectionDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "vpn_gateway_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "customer_gateway_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "static_routes_only": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - - // Begin read only attributes - "customer_gateway_configuration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - 
}, - - "tunnel1_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel1_cgw_inside_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel1_vgw_inside_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel1_preshared_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel2_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel2_cgw_inside_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel2_vgw_inside_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tunnel2_preshared_key": { - Type: schema.TypeString, - Computed: true, - }, - - "routes": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_cidr_block": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "source": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "state": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["destination_cidr_block"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["source"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["state"].(string))) - return hashcode.String(buf.String()) - }, - }, - - "vgw_telemetry": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "accepted_route_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - - "last_status_change": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "outside_ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "status_message": { - Type: schema.TypeString, - Computed: true, - 
Optional: true, - }, - }, - }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["outside_ip_address"].(string))) - return hashcode.String(buf.String()) - }, - }, - }, - } -} - -func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - connectOpts := &ec2.VpnConnectionOptionsSpecification{ - StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)), - } - - createOpts := &ec2.CreateVpnConnectionInput{ - CustomerGatewayId: aws.String(d.Get("customer_gateway_id").(string)), - Options: connectOpts, - Type: aws.String(d.Get("type").(string)), - VpnGatewayId: aws.String(d.Get("vpn_gateway_id").(string)), - } - - // Create the VPN Connection - log.Printf("[DEBUG] Creating vpn connection") - resp, err := conn.CreateVpnConnection(createOpts) - if err != nil { - return fmt.Errorf("Error creating vpn connection: %s", err) - } - - // Store the ID - vpnConnection := resp.VpnConnection - d.SetId(*vpnConnection.VpnConnectionId) - log.Printf("[INFO] VPN connection ID: %s", *vpnConnection.VpnConnectionId) - - // Wait for the connection to become available. This has an obscenely - // high default timeout because AWS VPN connections are notoriously - // slow at coming up or going down. There's also no point in checking - // more frequently than every ten seconds. - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"available"}, - Refresh: vpnConnectionRefreshFunc(conn, *vpnConnection.VpnConnectionId), - Timeout: 30 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - _, stateErr := stateConf.WaitForState() - if stateErr != nil { - return fmt.Errorf( - "Error waiting for VPN connection (%s) to become ready: %s", - *vpnConnection.VpnConnectionId, err) - } - - // Create tags. 
- if err := setTags(conn, d); err != nil { - return err - } - - // Read off the API to populate our RO fields. - return resourceAwsVpnConnectionRead(d, meta) -} - -func vpnConnectionRefreshFunc(conn *ec2.EC2, connectionId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(connectionId)}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - resp = nil - } else { - log.Printf("Error on VPNConnectionRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil || len(resp.VpnConnections) == 0 { - return nil, "", nil - } - - connection := resp.VpnConnections[0] - return connection, *connection.State, nil - } -} - -func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error finding VPN connection: %s", err) - return err - } - } - - if len(resp.VpnConnections) != 1 { - return fmt.Errorf("[ERROR] Error finding VPN connection: %s", d.Id()) - } - - vpnConnection := resp.VpnConnections[0] - if vpnConnection == nil || *vpnConnection.State == "deleted" { - // Seems we have lost our VPN Connection - d.SetId("") - return nil - } - - // Set attributes under the user's control. 
- d.Set("vpn_gateway_id", vpnConnection.VpnGatewayId) - d.Set("customer_gateway_id", vpnConnection.CustomerGatewayId) - d.Set("type", vpnConnection.Type) - d.Set("tags", tagsToMap(vpnConnection.Tags)) - - if vpnConnection.Options != nil { - if err := d.Set("static_routes_only", vpnConnection.Options.StaticRoutesOnly); err != nil { - return err - } - } else { - //If there no Options on the connection then we do not support *static_routes* - d.Set("static_routes_only", false) - } - - // Set read only attributes. - d.Set("customer_gateway_configuration", vpnConnection.CustomerGatewayConfiguration) - - if vpnConnection.CustomerGatewayConfiguration != nil { - if tunnelInfo, err := xmlConfigToTunnelInfo(*vpnConnection.CustomerGatewayConfiguration); err != nil { - log.Printf("[ERR] Error unmarshaling XML configuration for (%s): %s", d.Id(), err) - } else { - d.Set("tunnel1_address", tunnelInfo.Tunnel1Address) - d.Set("tunnel1_cgw_inside_address", tunnelInfo.Tunnel1CgwInsideAddress) - d.Set("tunnel1_vgw_inside_address", tunnelInfo.Tunnel1VgwInsideAddress) - d.Set("tunnel1_preshared_key", tunnelInfo.Tunnel1PreSharedKey) - d.Set("tunnel2_address", tunnelInfo.Tunnel2Address) - d.Set("tunnel2_preshared_key", tunnelInfo.Tunnel2PreSharedKey) - d.Set("tunnel2_cgw_inside_address", tunnelInfo.Tunnel2CgwInsideAddress) - d.Set("tunnel2_vgw_inside_address", tunnelInfo.Tunnel2VgwInsideAddress) - } - } - - if err := d.Set("vgw_telemetry", telemetryToMapList(vpnConnection.VgwTelemetry)); err != nil { - return err - } - if err := d.Set("routes", routesToMapList(vpnConnection.Routes)); err != nil { - return err - } - - return nil -} - -func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Update tags if required. 
- if err := setTags(conn, d); err != nil { - return err - } - - d.SetPartial("tags") - - return resourceAwsVpnConnectionRead(d, meta) -} - -func resourceAwsVpnConnectionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{ - VpnConnectionId: aws.String(d.Id()), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error deleting VPN connection: %s", err) - return err - } - } - - // These things can take quite a while to tear themselves down and any - // attempt to modify resources they reference (e.g. CustomerGateways or - // VPN Gateways) before deletion will result in an error. Furthermore, - // they don't just disappear. The go into "deleted" state. We need to - // wait to ensure any other modifications the user might make to their - // VPC stack can safely run. - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{"deleted"}, - Refresh: vpnConnectionRefreshFunc(conn, d.Id()), - Timeout: 30 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - _, stateErr := stateConf.WaitForState() - if stateErr != nil { - return fmt.Errorf( - "Error waiting for VPN connection (%s) to delete: %s", d.Id(), err) - } - - return nil -} - -// routesToMapList turns the list of routes into a list of maps. 
-func routesToMapList(routes []*ec2.VpnStaticRoute) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(routes)) - for _, r := range routes { - staticRoute := make(map[string]interface{}) - staticRoute["destination_cidr_block"] = *r.DestinationCidrBlock - staticRoute["state"] = *r.State - - if r.Source != nil { - staticRoute["source"] = *r.Source - } - - result = append(result, staticRoute) - } - - return result -} - -// telemetryToMapList turns the VGW telemetry into a list of maps. -func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(telemetry)) - for _, t := range telemetry { - vgw := make(map[string]interface{}) - vgw["accepted_route_count"] = *t.AcceptedRouteCount - vgw["outside_ip_address"] = *t.OutsideIpAddress - vgw["status"] = *t.Status - vgw["status_message"] = *t.StatusMessage - - // LastStatusChange is a time.Time(). Convert it into a string - // so it can be handled by schema's type system. 
- vgw["last_status_change"] = t.LastStatusChange.String() - result = append(result, vgw) - } - - return result -} - -func xmlConfigToTunnelInfo(xmlConfig string) (*TunnelInfo, error) { - var vpnConfig XmlVpnConnectionConfig - if err := xml.Unmarshal([]byte(xmlConfig), &vpnConfig); err != nil { - return nil, errwrap.Wrapf("Error Unmarshalling XML: {{err}}", err) - } - - // don't expect consistent ordering from the XML - sort.Sort(vpnConfig) - - tunnelInfo := TunnelInfo{ - Tunnel1Address: vpnConfig.Tunnels[0].OutsideAddress, - Tunnel1PreSharedKey: vpnConfig.Tunnels[0].PreSharedKey, - Tunnel1CgwInsideAddress: vpnConfig.Tunnels[0].CgwInsideAddress, - Tunnel1VgwInsideAddress: vpnConfig.Tunnels[0].VgwInsideAddress, - - Tunnel2Address: vpnConfig.Tunnels[1].OutsideAddress, - Tunnel2PreSharedKey: vpnConfig.Tunnels[1].PreSharedKey, - Tunnel2CgwInsideAddress: vpnConfig.Tunnels[1].CgwInsideAddress, - Tunnel2VgwInsideAddress: vpnConfig.Tunnels[1].VgwInsideAddress, - } - - return &tunnelInfo, nil -} diff --git a/builtin/providers/aws/resource_aws_vpn_connection_test.go b/builtin/providers/aws/resource_aws_vpn_connection_test.go deleted file mode 100644 index 142b6db89..000000000 --- a/builtin/providers/aws/resource_aws_vpn_connection_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpnConnection_basic(t *testing.T) { - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - var vpn ec2.VpnConnection - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_connection.foo", - Providers: testAccProviders, - CheckDestroy: testAccAwsVpnConnectionDestroy, - Steps: []resource.TestStep{ - 
{ - Config: testAccAwsVpnConnectionConfig(rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnection( - "aws_vpc.vpc", - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.foo", - &vpn, - ), - ), - }, - { - Config: testAccAwsVpnConnectionConfigUpdate(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnection( - "aws_vpc.vpc", - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.foo", - &vpn, - ), - ), - }, - }, - }) -} - -func TestAccAWSVpnConnection_withoutStaticRoutes(t *testing.T) { - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - var vpn ec2.VpnConnection - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_connection.foo", - Providers: testAccProviders, - CheckDestroy: testAccAwsVpnConnectionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsVpnConnectionConfigUpdate(rInt, rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnection( - "aws_vpc.vpc", - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.foo", - &vpn, - ), - resource.TestCheckResourceAttr("aws_vpn_connection.foo", "static_routes_only", "false"), - ), - }, - }, - }) -} - -func TestAccAWSVpnConnection_disappears(t *testing.T) { - rBgpAsn := acctest.RandIntRange(64512, 65534) - var vpn ec2.VpnConnection - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAwsVpnConnectionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsVpnConnectionConfig(rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnection( - "aws_vpc.vpc", - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.foo", - &vpn, - ), - testAccAWSVpnConnectionDisappears(&vpn), - ), - ExpectNonEmptyPlan: true, - 
}, - }, - }) -} - -func testAccAWSVpnConnectionDisappears(connection *ec2.VpnConnection) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - _, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{ - VpnConnectionId: connection.VpnConnectionId, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - return nil - } - if err != nil { - return err - } - } - - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{connection.VpnConnectionId}, - } - resp, err := conn.DescribeVpnConnections(opts) - if err != nil { - cgw, ok := err.(awserr.Error) - if ok && cgw.Code() == "InvalidVpnConnectionID.NotFound" { - return nil - } - if ok && cgw.Code() == "IncorrectState" { - return resource.RetryableError(fmt.Errorf( - "Waiting for VPN Connection to be in the correct state: %v", connection.VpnConnectionId)) - } - return resource.NonRetryableError( - fmt.Errorf("Error retrieving VPN Connection: %s", err)) - } - if *resp.VpnConnections[0].State == "deleted" { - return nil - } - return resource.RetryableError(fmt.Errorf( - "Waiting for VPN Connection: %v", connection.VpnConnectionId)) - }) - } -} - -func testAccAwsVpnConnectionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpn_connection" { - continue - } - - resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - // not found - return nil - } - return err - } - - var vpn *ec2.VpnConnection - for _, v := range resp.VpnConnections { - if v.VpnConnectionId != nil && *v.VpnConnectionId == 
rs.Primary.ID { - vpn = v - } - } - - if vpn == nil { - // vpn connection not found - return nil - } - - if vpn.State != nil && *vpn.State == "deleted" { - return nil - } - - } - - return nil -} - -func testAccAwsVpnConnection( - vpcResource string, - vpnGatewayResource string, - customerGatewayResource string, - vpnConnectionResource string, - vpnConnection *ec2.VpnConnection) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[vpnConnectionResource] - if !ok { - return fmt.Errorf("Not found: %s", vpnConnectionResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - connection, ok := s.RootModule().Resources[vpnConnectionResource] - if !ok { - return fmt.Errorf("Not found: %s", vpnConnectionResource) - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - resp, err := ec2conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(connection.Primary.ID)}, - }) - - if err != nil { - return err - } - - *vpnConnection = *resp.VpnConnections[0] - - return nil - } -} - -func TestAWSVpnConnection_xmlconfig(t *testing.T) { - tunnelInfo, err := xmlConfigToTunnelInfo(testAccAwsVpnTunnelInfoXML) - if err != nil { - t.Fatalf("Error unmarshalling XML: %s", err) - } - if tunnelInfo.Tunnel1Address != "FIRST_ADDRESS" { - t.Fatalf("First address from tunnel XML was incorrect.") - } - if tunnelInfo.Tunnel1CgwInsideAddress != "FIRST_CGW_INSIDE_ADDRESS" { - t.Fatalf("First Customer Gateway inside address from tunnel" + - " XML was incorrect.") - } - if tunnelInfo.Tunnel1VgwInsideAddress != "FIRST_VGW_INSIDE_ADDRESS" { - t.Fatalf("First VPN Gateway inside address from tunnel " + - " XML was incorrect.") - } - if tunnelInfo.Tunnel1PreSharedKey != "FIRST_KEY" { - t.Fatalf("First key from tunnel XML was incorrect.") - } - if tunnelInfo.Tunnel2Address != "SECOND_ADDRESS" { - t.Fatalf("Second address from tunnel XML was incorrect.") - } - if 
tunnelInfo.Tunnel2CgwInsideAddress != "SECOND_CGW_INSIDE_ADDRESS" { - t.Fatalf("Second Customer Gateway inside address from tunnel" + - " XML was incorrect.") - } - if tunnelInfo.Tunnel2VgwInsideAddress != "SECOND_VGW_INSIDE_ADDRESS" { - t.Fatalf("Second VPN Gateway inside address from tunnel " + - " XML was incorrect.") - } - if tunnelInfo.Tunnel2PreSharedKey != "SECOND_KEY" { - t.Fatalf("Second key from tunnel XML was incorrect.") - } -} - -func testAccAwsVpnConnectionConfig(rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } - - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "178.0.0.1" - type = "ipsec.1" - tags { - Name = "main-customer-gateway" - } - } - - resource "aws_vpn_connection" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = true - } - `, rBgpAsn) -} - -// Change static_routes_only to be false, forcing a refresh. 
-func testAccAwsVpnConnectionConfigUpdate(rInt, rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } - - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "178.0.0.1" - type = "ipsec.1" - tags { - Name = "main-customer-gateway-%d" - } - } - - resource "aws_vpn_connection" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = false - } - `, rBgpAsn, rInt) -} - -// Test our VPN tunnel config XML parsing -const testAccAwsVpnTunnelInfoXML = ` - - - - - 123.123.123.123 - - - SECOND_CGW_INSIDE_ADDRESS - 255.255.255.252 - 30 - - - - - SECOND_ADDRESS - - - SECOND_VGW_INSIDE_ADDRESS - 255.255.255.252 - 30 - - - - SECOND_KEY - - - - - - 123.123.123.123 - - - FIRST_CGW_INSIDE_ADDRESS - 255.255.255.252 - 30 - - - - - FIRST_ADDRESS - - - FIRST_VGW_INSIDE_ADDRESS - 255.255.255.252 - 30 - - - - FIRST_KEY - - - -` diff --git a/builtin/providers/aws/resource_aws_vpn_gateway.go b/builtin/providers/aws/resource_aws_vpn_gateway.go deleted file mode 100644 index 0c40d8c8c..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway.go +++ /dev/null @@ -1,326 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpnGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpnGatewayCreate, - Read: resourceAwsVpnGatewayRead, - Update: resourceAwsVpnGatewayUpdate, - Delete: resourceAwsVpnGatewayDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - ForceNew: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - createOpts := &ec2.CreateVpnGatewayInput{ - AvailabilityZone: aws.String(d.Get("availability_zone").(string)), - Type: aws.String("ipsec.1"), - } - - // Create the VPN gateway - log.Printf("[DEBUG] Creating VPN gateway") - resp, err := conn.CreateVpnGateway(createOpts) - if err != nil { - return fmt.Errorf("Error creating VPN gateway: %s", err) - } - - // Get the ID and store it - vpnGateway := resp.VpnGateway - d.SetId(*vpnGateway.VpnGatewayId) - log.Printf("[INFO] VPN Gateway ID: %s", *vpnGateway.VpnGatewayId) - - // Attach the VPN gateway to the correct VPC - return resourceAwsVpnGatewayUpdate(d, meta) -} - -func resourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(d.Id())}, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnGatewayID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error finding VpnGateway: %s", err) - return err - } - } - - vpnGateway := resp.VpnGateways[0] - if vpnGateway == nil || *vpnGateway.State == "deleted" { - // Seems we have lost our VPN gateway - d.SetId("") - return nil - } - - vpnAttachment := vpnGatewayGetAttachment(vpnGateway) - if len(vpnGateway.VpcAttachments) == 0 || *vpnAttachment.State == "detached" { - // Gateway exists but not attached to the VPC - d.Set("vpc_id", "") - } else { - d.Set("vpc_id", *vpnAttachment.VpcId) - } - - if vpnGateway.AvailabilityZone != nil && *vpnGateway.AvailabilityZone != "" { - d.Set("availability_zone", vpnGateway.AvailabilityZone) - } - d.Set("tags", 
tagsToMap(vpnGateway.Tags)) - - return nil -} - -func resourceAwsVpnGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - if d.HasChange("vpc_id") { - // If we're already attached, detach it first - if err := resourceAwsVpnGatewayDetach(d, meta); err != nil { - return err - } - - // Attach the VPN gateway to the new vpc - if err := resourceAwsVpnGatewayAttach(d, meta); err != nil { - return err - } - } - - conn := meta.(*AWSClient).ec2conn - - if err := setTags(conn, d); err != nil { - return err - } - - d.SetPartial("tags") - - return resourceAwsVpnGatewayRead(d, meta) -} - -func resourceAwsVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Detach if it is attached - if err := resourceAwsVpnGatewayDetach(d, meta); err != nil { - return err - } - - log.Printf("[INFO] Deleting VPN gateway: %s", d.Id()) - - return resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.DeleteVpnGateway(&ec2.DeleteVpnGatewayInput{ - VpnGatewayId: aws.String(d.Id()), - }) - if err == nil { - return nil - } - - ec2err, ok := err.(awserr.Error) - if !ok { - return resource.RetryableError(err) - } - - switch ec2err.Code() { - case "InvalidVpnGatewayID.NotFound": - return nil - case "IncorrectState": - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - }) -} - -func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - if d.Get("vpc_id").(string) == "" { - log.Printf( - "[DEBUG] Not attaching VPN Gateway '%s' as no VPC ID is set", - d.Id()) - return nil - } - - log.Printf( - "[INFO] Attaching VPN Gateway '%s' to VPC '%s'", - d.Id(), - d.Get("vpc_id").(string)) - - req := &ec2.AttachVpnGatewayInput{ - VpnGatewayId: aws.String(d.Id()), - VpcId: aws.String(d.Get("vpc_id").(string)), - } - - err := resource.Retry(30*time.Second, func() *resource.RetryError { - _, err := conn.AttachVpnGateway(req) - if err 
!= nil { - if ec2err, ok := err.(awserr.Error); ok { - if "InvalidVpnGatewayID.NotFound" == ec2err.Code() { - return resource.RetryableError( - fmt.Errorf("Gateway not found, retry for eventual consistancy")) - } - } - return resource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return err - } - - // Wait for it to be fully attached before continuing - log.Printf("[DEBUG] Waiting for VPN gateway (%s) to attach", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"detached", "attaching"}, - Target: []string{"attached"}, - Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "available"), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for VPN gateway (%s) to attach: %s", - d.Id(), err) - } - - return nil -} - -func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - // Get the old VPC ID to detach from - vpcID, _ := d.GetChange("vpc_id") - - if vpcID.(string) == "" { - log.Printf( - "[DEBUG] Not detaching VPN Gateway '%s' as no VPC ID is set", - d.Id()) - return nil - } - - log.Printf( - "[INFO] Detaching VPN Gateway '%s' from VPC '%s'", - d.Id(), - vpcID.(string)) - - wait := true - _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ - VpnGatewayId: aws.String(d.Id()), - VpcId: aws.String(vpcID.(string)), - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok { - if ec2err.Code() == "InvalidVpnGatewayID.NotFound" { - err = nil - wait = false - } else if ec2err.Code() == "InvalidVpnGatewayAttachment.NotFound" { - err = nil - wait = false - } - } - - if err != nil { - return err - } - } - - if !wait { - return nil - } - - // Wait for it to be fully detached before continuing - log.Printf("[DEBUG] Waiting for VPN gateway (%s) to detach", d.Id()) - stateConf := &resource.StateChangeConf{ - Pending: []string{"attached", "detaching", "available"}, - Target: 
[]string{"detached"}, - Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "detached"), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for vpn gateway (%s) to detach: %s", - d.Id(), err) - } - - return nil -} - -// vpnGatewayAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// the state of a VPN gateway's attachment -func vpnGatewayAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc { - var start time.Time - return func() (interface{}, string, error) { - if start.IsZero() { - start = time.Now() - } - - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(id)}, - }) - - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnGatewayID.NotFound" { - resp = nil - } else { - log.Printf("[ERROR] Error on VpnGatewayStateRefresh: %s", err) - return nil, "", err - } - } - - if resp == nil { - // Sometimes AWS just has consistency issues and doesn't see - // our instance yet. Return an empty state. 
- return nil, "", nil - } - - vpnGateway := resp.VpnGateways[0] - if len(vpnGateway.VpcAttachments) == 0 { - // No attachments, we're detached - return vpnGateway, "detached", nil - } - - vpnAttachment := vpnGatewayGetAttachment(vpnGateway) - return vpnGateway, *vpnAttachment.State, nil - } -} - -func vpnGatewayGetAttachment(vgw *ec2.VpnGateway) *ec2.VpcAttachment { - for _, v := range vgw.VpcAttachments { - if *v.State == "attached" { - return v - } - } - return &ec2.VpcAttachment{State: aws.String("detached")} -} diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go b/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go deleted file mode 100644 index db0110000..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go +++ /dev/null @@ -1,210 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpnGatewayAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpnGatewayAttachmentCreate, - Read: resourceAwsVpnGatewayAttachmentRead, - Delete: resourceAwsVpnGatewayAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "vpn_gateway_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsVpnGatewayAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - vpcId := d.Get("vpc_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) - - createOpts := &ec2.AttachVpnGatewayInput{ - VpcId: aws.String(vpcId), - VpnGatewayId: aws.String(vgwId), - } - log.Printf("[DEBUG] VPN Gateway attachment options: 
%#v", *createOpts) - - _, err := conn.AttachVpnGateway(createOpts) - if err != nil { - return fmt.Errorf("Error attaching VPN Gateway %q to VPC %q: %s", - vgwId, vpcId, err) - } - - d.SetId(vpnGatewayAttachmentId(vpcId, vgwId)) - log.Printf("[INFO] VPN Gateway %q attachment ID: %s", vgwId, d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"detached", "attaching"}, - Target: []string{"attached"}, - Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), - Timeout: 15 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for VPN Gateway %q to attach to VPC %q: %s", - vgwId, vpcId, err) - } - log.Printf("[DEBUG] VPN Gateway %q attached to VPC %q.", vgwId, vpcId) - - return resourceAwsVpnGatewayAttachmentRead(d, meta) -} - -func resourceAwsVpnGatewayAttachmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - vgwId := d.Get("vpn_gateway_id").(string) - - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(vgwId)}, - }) - - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "InvalidVPNGatewayID.NotFound" { - log.Printf("[WARN] VPN Gateway %q not found.", vgwId) - d.SetId("") - return nil - } - return err - } - - vgw := resp.VpnGateways[0] - if *vgw.State == "deleted" { - log.Printf("[INFO] VPN Gateway %q appears to have been deleted.", vgwId) - d.SetId("") - return nil - } - - vga := vpnGatewayGetAttachment(vgw) - if len(vgw.VpcAttachments) == 0 || *vga.State == "detached" { - d.Set("vpc_id", "") - return nil - } - - d.Set("vpc_id", *vga.VpcId) - return nil -} - -func resourceAwsVpnGatewayAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - vpcId := d.Get("vpc_id").(string) - vgwId := d.Get("vpn_gateway_id").(string) - - if vpcId == "" { - 
log.Printf("[DEBUG] Not detaching VPN Gateway %q as no VPC ID is set.", vgwId) - return nil - } - - _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ - VpcId: aws.String(vpcId), - VpnGatewayId: aws.String(vgwId), - }) - - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok { - switch awsErr.Code() { - case "InvalidVPNGatewayID.NotFound": - log.Printf("[WARN] VPN Gateway %q not found.", vgwId) - d.SetId("") - return nil - case "InvalidVpnGatewayAttachment.NotFound": - log.Printf( - "[WARN] VPN Gateway %q attachment to VPC %q not found.", - vgwId, vpcId) - d.SetId("") - return nil - } - } - - return fmt.Errorf("Error detaching VPN Gateway %q from VPC %q: %s", - vgwId, vpcId, err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"attached", "detaching"}, - Target: []string{"detached"}, - Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), - Timeout: 15 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for VPN Gateway %q to detach from VPC %q: %s", - vgwId, vpcId, err) - } - log.Printf("[DEBUG] VPN Gateway %q detached from VPC %q.", vgwId, vpcId) - - d.SetId("") - return nil -} - -func vpnGatewayAttachmentStateRefresh(conn *ec2.EC2, vpcId, vgwId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("attachment.vpc-id"), - Values: []*string{aws.String(vpcId)}, - }, - }, - VpnGatewayIds: []*string{aws.String(vgwId)}, - }) - - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok { - switch awsErr.Code() { - case "InvalidVPNGatewayID.NotFound": - fallthrough - case "InvalidVpnGatewayAttachment.NotFound": - return nil, "", nil - } - } - - return nil, "", err - } - - vgw := resp.VpnGateways[0] - if len(vgw.VpcAttachments) == 0 { - return vgw, 
"detached", nil - } - - vga := vpnGatewayGetAttachment(vgw) - - log.Printf("[DEBUG] VPN Gateway %q attachment status: %s", vgwId, *vga.State) - return vgw, *vga.State, nil - } -} - -func vpnGatewayAttachmentId(vpcId, vgwId string) string { - return fmt.Sprintf("vpn-attachment-%x", hashcode.String(fmt.Sprintf("%s-%s", vpcId, vgwId))) -} diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go deleted file mode 100644 index 5f12d6fb8..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpnGatewayAttachment_basic(t *testing.T) { - var vpc ec2.Vpc - var vgw ec2.VpnGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway_attachment.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists( - "aws_vpc.test", - &vpc), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.test", - &vgw), - testAccCheckVpnGatewayAttachmentExists( - "aws_vpn_gateway_attachment.test", - &vpc, &vgw), - ), - }, - }, - }) -} - -func TestAccAWSVpnGatewayAttachment_deleted(t *testing.T) { - var vpc ec2.Vpc - var vgw ec2.VpnGateway - - testDeleted := func(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[n] - if ok { - return fmt.Errorf("Expected VPN Gateway attachment resource %q to be deleted.", n) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway_attachment.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists( - "aws_vpc.test", - &vpc), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.test", - &vgw), - testAccCheckVpnGatewayAttachmentExists( - "aws_vpn_gateway_attachment.test", - &vpc, &vgw), - ), - }, - resource.TestStep{ - Config: testAccNoVpnGatewayAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testDeleted("aws_vpn_gateway_attachment.test"), - ), - }, - }, - }) -} - -func testAccCheckVpnGatewayAttachmentExists(n string, vpc *ec2.Vpc, vgw *ec2.VpnGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - vpcId := rs.Primary.Attributes["vpc_id"] - vgwId := rs.Primary.Attributes["vpn_gateway_id"] - - if len(vgw.VpcAttachments) == 0 { - return fmt.Errorf("VPN Gateway %q has no attachments.", vgwId) - } - - if *vgw.VpcAttachments[0].State != "attached" { - return fmt.Errorf("Expected VPN Gateway %q to be in attached state, but got: %q", - vgwId, *vgw.VpcAttachments[0].State) - } - - if *vgw.VpcAttachments[0].VpcId != *vpc.VpcId { - return fmt.Errorf("Expected VPN Gateway %q to be attached to VPC %q, but got: %q", - vgwId, vpcId, *vgw.VpcAttachments[0].VpcId) - } - - return nil - } -} - -func testAccCheckVpnGatewayAttachmentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpn_gateway_attachment" { - continue - } - - vgwId := rs.Primary.Attributes["vpn_gateway_id"] - - resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - 
VpnGatewayIds: []*string{aws.String(vgwId)}, - }) - if err != nil { - return err - } - - vgw := resp.VpnGateways[0] - if *vgw.VpcAttachments[0].State != "detached" { - return fmt.Errorf("Expected VPN Gateway %q to be in detached state, but got: %q", - vgwId, *vgw.VpcAttachments[0].State) - } - } - - return nil -} - -const testAccNoVpnGatewayAttachmentConfig = ` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_vpn_gateway" "test" { } -` - -const testAccVpnGatewayAttachmentConfig = ` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_vpn_gateway" "test" { } - -resource "aws_vpn_gateway_attachment" "test" { - vpc_id = "${aws_vpc.test.id}" - vpn_gateway_id = "${aws_vpn_gateway.test.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation.go b/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation.go deleted file mode 100644 index 46e4b2208..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation.go +++ /dev/null @@ -1,102 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpnGatewayRoutePropagation() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsVpnGatewayRoutePropagationEnable, - Read: resourceAwsVpnGatewayRoutePropagationRead, - Delete: resourceAwsVpnGatewayRoutePropagationDisable, - - Schema: map[string]*schema.Schema{ - "vpn_gateway_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "route_table_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsVpnGatewayRoutePropagationEnable(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - gwID := d.Get("vpn_gateway_id").(string) - rtID := d.Get("route_table_id").(string) - - log.Printf("[INFO] 
Enabling VGW propagation from %s to %s", gwID, rtID) - _, err := conn.EnableVgwRoutePropagation(&ec2.EnableVgwRoutePropagationInput{ - GatewayId: aws.String(gwID), - RouteTableId: aws.String(rtID), - }) - if err != nil { - return fmt.Errorf("error enabling VGW propagation: %s", err) - } - - d.SetId(fmt.Sprintf("%s_%s", gwID, rtID)) - return nil -} - -func resourceAwsVpnGatewayRoutePropagationDisable(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - gwID := d.Get("vpn_gateway_id").(string) - rtID := d.Get("route_table_id").(string) - - log.Printf("[INFO] Disabling VGW propagation from %s to %s", gwID, rtID) - _, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{ - GatewayId: aws.String(gwID), - RouteTableId: aws.String(rtID), - }) - if err != nil { - return fmt.Errorf("error disabling VGW propagation: %s", err) - } - - d.SetId("") - return nil -} - -func resourceAwsVpnGatewayRoutePropagationRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - gwID := d.Get("vpn_gateway_id").(string) - rtID := d.Get("route_table_id").(string) - - log.Printf("[INFO] Reading route table %s to check for VPN gateway %s", rtID, gwID) - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, rtID)() - if err != nil { - return err - } - if rtRaw == nil { - log.Printf("[INFO] Route table %q doesn't exist, so dropping %q route propagation from state", rtID, gwID) - d.SetId("") - return nil - } - - rt := rtRaw.(*ec2.RouteTable) - exists := false - for _, vgw := range rt.PropagatingVgws { - if *vgw.GatewayId == gwID { - exists = true - } - } - if !exists { - log.Printf("[INFO] %s is no longer propagating to %s, so dropping route propagation from state", rtID, gwID) - d.SetId("") - return nil - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation_test.go deleted file 
mode 100644 index 49b1764d3..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway_route_propagation_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVPNGatewayRoutePropagation_basic(t *testing.T) { - var rtID, gwID string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway_route_propagation.foo", - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccAWSVPNGatewayRoutePropagation_basic, - Check: func(state *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - rs := state.RootModule().Resources["aws_vpn_gateway_route_propagation.foo"] - if rs == nil { - return errors.New("missing resource state") - } - - rtID = rs.Primary.Attributes["route_table_id"] - gwID = rs.Primary.Attributes["vpn_gateway_id"] - - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, rtID)() - if err != nil { - return fmt.Errorf("failed to read route table: %s", err) - } - if rtRaw == nil { - return errors.New("route table doesn't exist") - } - - rt := rtRaw.(*ec2.RouteTable) - exists := false - for _, vgw := range rt.PropagatingVgws { - if *vgw.GatewayId == gwID { - exists = true - } - } - if !exists { - return errors.New("route table does not list VPN gateway as a propagator") - } - - return nil - }, - }, - }, - CheckDestroy: func(state *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, rtID)() - if err != nil { - return fmt.Errorf("failed to read route table: %s", err) - } - if rtRaw != nil { - return errors.New("route table still exists") - } - return nil - }, - }) - -} - -const testAccAWSVPNGatewayRoutePropagation_basic = ` -resource "aws_vpc" "foo" { - 
cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_route_table" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpn_gateway_route_propagation" "foo" { - vpn_gateway_id = "${aws_vpn_gateway.foo.id}" - route_table_id = "${aws_route_table.foo.id}" -} -` diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_test.go deleted file mode 100644 index 652e00a63..000000000 --- a/builtin/providers/aws/resource_aws_vpn_gateway_test.go +++ /dev/null @@ -1,468 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpnGateway_basic(t *testing.T) { - var v, v2 ec2.VpnGateway - - testNotEqual := func(*terraform.State) error { - if len(v.VpcAttachments) == 0 { - return fmt.Errorf("VPN Gateway A is not attached") - } - if len(v2.VpcAttachments) == 0 { - return fmt.Errorf("VPN Gateway B is not attached") - } - - id1 := v.VpcAttachments[0].VpcId - id2 := v2.VpcAttachments[0].VpcId - if id1 == id2 { - return fmt.Errorf("Both attachment IDs are the same") - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &v), - ), - }, - - resource.TestStep{ - Config: testAccVpnGatewayConfigChangeVPC, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &v2), - testNotEqual, - ), - }, - }, - }) -} - -func 
TestAccAWSVpnGateway_withAvailabilityZoneSetToState(t *testing.T) { - var v ec2.VpnGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayConfigWithAZ, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists("aws_vpn_gateway.foo", &v), - resource.TestCheckResourceAttr( - "aws_vpn_gateway.foo", "availability_zone", "us-west-2a"), - ), - }, - }, - }) -} - -func TestAccAWSVpnGateway_disappears(t *testing.T) { - var v ec2.VpnGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVpnGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists("aws_vpn_gateway.foo", &v), - testAccAWSVpnGatewayDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSVpnGateway_reattach(t *testing.T) { - var vpc1, vpc2 ec2.Vpc - var vgw1, vgw2 ec2.VpnGateway - - testAttachmentFunc := func(vgw *ec2.VpnGateway, vpc *ec2.Vpc) func(*terraform.State) error { - return func(*terraform.State) error { - if len(vgw.VpcAttachments) == 0 { - return fmt.Errorf("VPN Gateway %q has no VPC attachments.", - *vgw.VpnGatewayId) - } - - if len(vgw.VpcAttachments) > 1 { - count := 0 - for _, v := range vgw.VpcAttachments { - if *v.State == "attached" { - count += 1 - } - } - if count > 1 { - return fmt.Errorf( - "VPN Gateway %q has an unexpected number of VPC attachments (more than 1): %#v", - *vgw.VpnGatewayId, vgw.VpcAttachments) - } - } - - if *vgw.VpcAttachments[0].State != "attached" { - return fmt.Errorf("Expected VPN Gateway %q to be attached.", - *vgw.VpnGatewayId) - } - - if *vgw.VpcAttachments[0].VpcId != *vpc.VpcId { - return fmt.Errorf("Expected VPN Gateway %q to 
be attached to VPC %q, but got: %q", - *vgw.VpnGatewayId, *vpc.VpcId, *vgw.VpcAttachments[0].VpcId) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckVpnGatewayConfigReattach, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpcExists("aws_vpc.foo", &vpc1), - testAccCheckVpcExists("aws_vpc.bar", &vpc2), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &vgw1), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.bar", &vgw2), - testAttachmentFunc(&vgw1, &vpc1), - testAttachmentFunc(&vgw2, &vpc2), - ), - }, - resource.TestStep{ - Config: testAccCheckVpnGatewayConfigReattachChange, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &vgw1), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.bar", &vgw2), - testAttachmentFunc(&vgw2, &vpc1), - testAttachmentFunc(&vgw1, &vpc2), - ), - }, - resource.TestStep{ - Config: testAccCheckVpnGatewayConfigReattach, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &vgw1), - testAccCheckVpnGatewayExists( - "aws_vpn_gateway.bar", &vgw2), - testAttachmentFunc(&vgw1, &vpc1), - testAttachmentFunc(&vgw2, &vpc2), - ), - }, - }, - }) -} - -func TestAccAWSVpnGateway_delete(t *testing.T) { - var vpnGateway ec2.VpnGateway - - testDeleted := func(r string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[r] - if ok { - return fmt.Errorf("VPN Gateway %q should have been deleted.", r) - } - return nil - } - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccVpnGatewayConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists("aws_vpn_gateway.foo", &vpnGateway)), - }, - resource.TestStep{ - Config: testAccNoVpnGatewayConfig, - Check: resource.ComposeTestCheckFunc(testDeleted("aws_vpn_gateway.foo")), - }, - }, - }) -} - -func TestAccAWSVpnGateway_tags(t *testing.T) { - var v ec2.VpnGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_vpn_gateway.foo", - Providers: testAccProviders, - CheckDestroy: testAccCheckVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckVpnGatewayConfigTags, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists("aws_vpn_gateway.foo", &v), - testAccCheckTags(&v.Tags, "foo", "bar"), - ), - }, - resource.TestStep{ - Config: testAccCheckVpnGatewayConfigTagsUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayExists("aws_vpn_gateway.foo", &v), - testAccCheckTags(&v.Tags, "foo", ""), - testAccCheckTags(&v.Tags, "bar", "baz"), - ), - }, - }, - }) -} - -func testAccAWSVpnGatewayDisappears(gateway *ec2.VpnGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ - VpnGatewayId: gateway.VpnGatewayId, - VpcId: gateway.VpcAttachments[0].VpcId, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok { - if ec2err.Code() == "InvalidVpnGatewayID.NotFound" { - return nil - } else if ec2err.Code() == "InvalidVpnGatewayAttachment.NotFound" { - return nil - } - } - - if err != nil { - return err - } - } - - opts := &ec2.DeleteVpnGatewayInput{ - VpnGatewayId: gateway.VpnGatewayId, - } - if _, err := conn.DeleteVpnGateway(opts); err != nil { - return err - } - return resource.Retry(40*time.Minute, func() *resource.RetryError { - opts := &ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: 
[]*string{gateway.VpnGatewayId}, - } - resp, err := conn.DescribeVpnGateways(opts) - if err != nil { - cgw, ok := err.(awserr.Error) - if ok && cgw.Code() == "InvalidVpnGatewayID.NotFound" { - return nil - } - if ok && cgw.Code() == "IncorrectState" { - return resource.RetryableError(fmt.Errorf( - "Waiting for VPN Gateway to be in the correct state: %v", gateway.VpnGatewayId)) - } - return resource.NonRetryableError( - fmt.Errorf("Error retrieving VPN Gateway: %s", err)) - } - if *resp.VpnGateways[0].State == "deleted" { - return nil - } - return resource.RetryableError(fmt.Errorf( - "Waiting for VPN Gateway: %v", gateway.VpnGatewayId)) - }) - } -} - -func testAccCheckVpnGatewayDestroy(s *terraform.State) error { - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpn_gateway" { - continue - } - - // Try to find the resource - resp, err := ec2conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - var v *ec2.VpnGateway - for _, g := range resp.VpnGateways { - if *g.VpnGatewayId == rs.Primary.ID { - v = g - } - } - - if v == nil { - // wasn't found - return nil - } - - if *v.State != "deleted" { - return fmt.Errorf("Expected VPN Gateway to be in deleted state, but was not: %s", v) - } - return nil - } - - // Verify the error is what we want - ec2err, ok := err.(awserr.Error) - if !ok { - return err - } - if ec2err.Code() != "InvalidVpnGatewayID.NotFound" { - return err - } - } - - return nil -} - -func testAccCheckVpnGatewayExists(n string, ig *ec2.VpnGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := 
ec2conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ - VpnGatewayIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.VpnGateways) == 0 { - return fmt.Errorf("VPN Gateway not found") - } - - *ig = *resp.VpnGateways[0] - - return nil - } -} - -const testAccNoVpnGatewayConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} -` - -const testAccVpnGatewayConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} -` - -const testAccVpnGatewayConfigChangeVPC = ` -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.bar.id}" -} -` - -const testAccCheckVpnGatewayConfigTags = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" - tags { - foo = "bar" - } -} -` - -const testAccCheckVpnGatewayConfigTagsUpdate = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" - tags { - bar = "baz" - } -} -` - -const testAccCheckVpnGatewayConfigReattach = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" -} - -resource "aws_vpn_gateway" "bar" { - vpc_id = "${aws_vpc.bar.id}" -} -` - -const testAccCheckVpnGatewayConfigReattachChange = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpc" "bar" { - cidr_block = "10.2.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.bar.id}" -} - -resource "aws_vpn_gateway" "bar" { - vpc_id = "${aws_vpc.foo.id}" -} -` - -const testAccVpnGatewayConfigWithAZ = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "${aws_vpc.foo.id}" - 
availability_zone = "us-west-2a" -} -` diff --git a/builtin/providers/aws/resource_aws_waf_byte_match_set.go b/builtin/providers/aws/resource_aws_waf_byte_match_set.go deleted file mode 100644 index 53f3e93b8..000000000 --- a/builtin/providers/aws/resource_aws_waf_byte_match_set.go +++ /dev/null @@ -1,249 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafByteMatchSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafByteMatchSetCreate, - Read: resourceAwsWafByteMatchSetRead, - Update: resourceAwsWafByteMatchSetUpdate, - Delete: resourceAwsWafByteMatchSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "byte_match_tuples": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "positional_constraint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "target_string": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "text_transformation": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafByteMatchSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - log.Printf("[INFO] Creating ByteMatchSet: %s", d.Get("name").(string)) - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := 
&waf.CreateByteMatchSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - return conn.CreateByteMatchSet(params) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating ByteMatchSet: {{err}}", err) - } - resp := out.(*waf.CreateByteMatchSetOutput) - - d.SetId(*resp.ByteMatchSet.ByteMatchSetId) - - return resourceAwsWafByteMatchSetUpdate(d, meta) -} - -func resourceAwsWafByteMatchSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - log.Printf("[INFO] Reading ByteMatchSet: %s", d.Get("name").(string)) - params := &waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(d.Id()), - } - - resp, err := conn.GetByteMatchSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", resp.ByteMatchSet.Name) - d.Set("byte_match_tuples", flattenWafByteMatchTuples(resp.ByteMatchSet.ByteMatchTuples)) - - return nil -} - -func resourceAwsWafByteMatchSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - log.Printf("[INFO] Updating ByteMatchSet: %s", d.Get("name").(string)) - - if d.HasChange("byte_match_tuples") { - o, n := d.GetChange("byte_match_tuples") - oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() - err := updateByteMatchSetResource(d.Id(), oldT, newT, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - } - - return resourceAwsWafByteMatchSetRead(d, meta) -} - -func resourceAwsWafByteMatchSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldTuples := d.Get("byte_match_tuples").(*schema.Set).List() - if len(oldTuples) > 0 { - noTuples := []interface{}{} - err := updateByteMatchSetResource(d.Id(), oldTuples, noTuples, conn) - if err 
!= nil { - return fmt.Errorf("Error updating ByteMatchSet: %s", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF ByteMatchSet: %s", req) - return conn.DeleteByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) - } - - return nil -} - -func updateByteMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: aws.String(id), - Updates: diffWafByteMatchSetTuples(oldT, newT), - } - - return conn.UpdateByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - - return nil -} - -func flattenWafByteMatchTuples(bmt []*waf.ByteMatchTuple) []interface{} { - out := make([]interface{}, len(bmt), len(bmt)) - for i, t := range bmt { - m := make(map[string]interface{}) - - if t.FieldToMatch != nil { - m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch) - } - m["positional_constraint"] = *t.PositionalConstraint - m["target_string"] = string(t.TargetString) - m["text_transformation"] = *t.TextTransformation - - out[i] = m - } - return out -} - -func expandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { - return &waf.FieldToMatch{ - Type: aws.String(d["type"].(string)), - Data: aws.String(d["data"].(string)), - } -} - -func flattenFieldToMatch(fm *waf.FieldToMatch) []interface{} { - m := make(map[string]interface{}) - if fm.Data != nil { - m["data"] = *fm.Data - } - if fm.Type != nil { - m["type"] = *fm.Type - } - return []interface{}{m} -} - -func diffWafByteMatchSetTuples(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate 
{ - updates := make([]*waf.ByteMatchSetUpdate, 0) - - for _, ot := range oldT { - tuple := ot.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, tuple); contains { - newT = append(newT[:idx], newT[idx+1:]...) - continue - } - - updates = append(updates, &waf.ByteMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), - TargetString: []byte(tuple["target_string"].(string)), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - - for _, nt := range newT { - tuple := nt.(map[string]interface{}) - - updates = append(updates, &waf.ByteMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), - TargetString: []byte(tuple["target_string"].(string)), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go b/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go deleted file mode 100644 index 2e432befb..000000000 --- a/builtin/providers/aws/resource_aws_waf_byte_match_set_test.go +++ /dev/null @@ -1,369 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafByteMatchSet_basic(t *testing.T) { - var v waf.ByteMatchSet - 
byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafByteMatchSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &v), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "name", byteMatchSet), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "2"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.positional_constraint", "CONTAINS"), - 
resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafByteMatchSet_changeNameForceNew(t *testing.T) { - var before, after waf.ByteMatchSet - byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - byteMatchSetNewName := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "name", byteMatchSet), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "2"), - ), - }, - { - Config: testAccAWSWafByteMatchSetConfigChangeName(byteMatchSetNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "name", byteMatchSetNewName), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSWafByteMatchSet_changeTuples(t *testing.T) { - var before, after waf.ByteMatchSet - byteMatchSetName := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafByteMatchSetConfig(byteMatchSetName), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &before), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "2"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.839525137.text_transformation", "NONE"), - ), - }, - { - Config: testAccAWSWafByteMatchSetConfig_changeTuples(byteMatchSetName), - 
Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &after), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "2"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.field_to_match.#", "1"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.field_to_match.4253810390.data", "GET"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.field_to_match.4253810390.type", "METHOD"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.positional_constraint", "CONTAINS_WORD"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.target_string", "blah"), - resource.TestCheckResourceAttr("aws_waf_byte_match_set.byte_set", "byte_match_tuples.4224486115.text_transformation", "URL_DECODE"), - ), - }, - }, - }) -} - -func TestAccAWSWafByteMatchSet_noTuples(t 
*testing.T) { - var byteSet waf.ByteMatchSet - byteMatchSetName := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafByteMatchSetConfig_noTuples(byteMatchSetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &byteSet), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr( - "aws_waf_byte_match_set.byte_set", "byte_match_tuples.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSWafByteMatchSet_disappears(t *testing.T) { - var v waf.ByteMatchSet - byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.byte_set", &v), - testAccCheckAWSWafByteMatchSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckAWSWafByteMatchSetDisappears(v *waf.ByteMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: v.ByteMatchSetId, - } - - for _, ByteMatchTuple := range v.ByteMatchTuples { - ByteMatchUpdate := &waf.ByteMatchSetUpdate{ - Action: aws.String("DELETE"), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: 
ByteMatchTuple.FieldToMatch, - PositionalConstraint: ByteMatchTuple.PositionalConstraint, - TargetString: ByteMatchTuple.TargetString, - TextTransformation: ByteMatchTuple.TextTransformation, - }, - } - req.Updates = append(req.Updates, ByteMatchUpdate) - } - - return conn.UpdateByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: v.ByteMatchSetId, - } - return conn.DeleteByteMatchSet(opts) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) - } - - return nil - } -} - -func testAccCheckAWSWafByteMatchSetExists(n string, v *waf.ByteMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF ByteMatchSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetByteMatchSet(&waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.ByteMatchSet.ByteMatchSetId == rs.Primary.ID { - *v = *resp.ByteMatchSet - return nil - } - - return fmt.Errorf("WAF ByteMatchSet (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSWafByteMatchSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_byte_match_set" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetByteMatchSet( - &waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.ByteMatchSet.ByteMatchSetId == rs.Primary.ID { - return fmt.Errorf("WAF ByteMatchSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the ByteMatchSet 
is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccAWSWafByteMatchSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } - - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer2" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } -}`, name) -} - -func testAccAWSWafByteMatchSetConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_waf_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } - - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer2" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } -}`, name) -} - -func testAccAWSWafByteMatchSetConfig_changeTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } - - byte_match_tuples { - text_transformation = "URL_DECODE" - target_string = "blah" - positional_constraint = "CONTAINS_WORD" - field_to_match { - type = "METHOD" - data = "GET" - } - } -}`, name) -} - -func testAccAWSWafByteMatchSetConfig_noTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_byte_match_set" "byte_set" { - name = "%s" -}`, name) -} diff --git 
a/builtin/providers/aws/resource_aws_waf_ipset.go b/builtin/providers/aws/resource_aws_waf_ipset.go deleted file mode 100644 index 40ef54ff3..000000000 --- a/builtin/providers/aws/resource_aws_waf_ipset.go +++ /dev/null @@ -1,195 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafIPSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafIPSetCreate, - Read: resourceAwsWafIPSetRead, - Update: resourceAwsWafIPSetUpdate, - Delete: resourceAwsWafIPSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ip_set_descriptors": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafIPSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateIPSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - return conn.CreateIPSet(params) - }) - if err != nil { - return err - } - resp := out.(*waf.CreateIPSetOutput) - d.SetId(*resp.IPSet.IPSetId) - return resourceAwsWafIPSetUpdate(d, meta) -} - -func resourceAwsWafIPSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - params := &waf.GetIPSetInput{ - IPSetId: aws.String(d.Id()), - } - - resp, err := conn.GetIPSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - 
log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - var descriptors []map[string]interface{} - - for _, descriptor := range resp.IPSet.IPSetDescriptors { - d := map[string]interface{}{ - "type": *descriptor.Type, - "value": *descriptor.Value, - } - descriptors = append(descriptors, d) - } - - d.Set("ip_set_descriptors", descriptors) - - d.Set("name", resp.IPSet.Name) - - return nil -} - -func resourceAwsWafIPSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - if d.HasChange("ip_set_descriptors") { - o, n := d.GetChange("ip_set_descriptors") - oldD, newD := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateWafIpSetDescriptors(d.Id(), oldD, newD, conn) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - } - - return resourceAwsWafIPSetRead(d, meta) -} - -func resourceAwsWafIPSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldDescriptors := d.Get("ip_set_descriptors").(*schema.Set).List() - - if len(oldDescriptors) > 0 { - noDescriptors := []interface{}{} - err := updateWafIpSetDescriptors(d.Id(), oldDescriptors, noDescriptors, conn) - if err != nil { - return fmt.Errorf("Error updating IPSetDescriptors: %s", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteIPSetInput{ - ChangeToken: token, - IPSetId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF IPSet") - return conn.DeleteIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF IPSet: %s", err) - } - - return nil -} - -func updateWafIpSetDescriptors(id string, oldD, newD []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateIPSetInput{ - ChangeToken: token, - 
IPSetId: aws.String(id), - Updates: diffWafIpSetDescriptors(oldD, newD), - } - log.Printf("[INFO] Updating IPSet descriptors: %s", req) - return conn.UpdateIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - - return nil -} - -func diffWafIpSetDescriptors(oldD, newD []interface{}) []*waf.IPSetUpdate { - updates := make([]*waf.IPSetUpdate, 0) - - for _, od := range oldD { - descriptor := od.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newD, descriptor); contains { - newD = append(newD[:idx], newD[idx+1:]...) - continue - } - - updates = append(updates, &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String(descriptor["type"].(string)), - Value: aws.String(descriptor["value"].(string)), - }, - }) - } - - for _, nd := range newD { - descriptor := nd.(map[string]interface{}) - - updates = append(updates, &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String(descriptor["type"].(string)), - Value: aws.String(descriptor["value"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_ipset_test.go b/builtin/providers/aws/resource_aws_waf_ipset_test.go deleted file mode 100644 index ee7593116..000000000 --- a/builtin/providers/aws/resource_aws_waf_ipset_test.go +++ /dev/null @@ -1,400 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafIPSet_basic(t *testing.T) { - var v waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafIPSetConfig(ipsetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &v), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.value", "192.0.7.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafIPSet_disappears(t *testing.T) { - var v waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafIPSetConfig(ipsetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &v), - testAccCheckAWSWafIPSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSWafIPSet_changeNameForceNew(t *testing.T) { - var before, after waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - ipsetNewName := fmt.Sprintf("ip-set-new-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafIPSetConfig(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &before), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", 
"ip_set_descriptors.4037960608.value", "192.0.7.0/24"), - ), - }, - { - Config: testAccAWSWafIPSetConfigChangeName(ipsetNewName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &after), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetNewName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.value", "192.0.7.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafIPSet_changeDescriptors(t *testing.T) { - var before, after waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafIPSetConfig(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &before), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.4037960608.value", "192.0.7.0/24"), - ), - }, - { - Config: testAccAWSWafIPSetConfigChangeIPSetDescriptors(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &after), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.115741513.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", 
"ip_set_descriptors.115741513.value", "192.0.8.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafIPSet_noDescriptors(t *testing.T) { - var ipset waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafIPSetConfig_noDescriptors(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &ipset), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_waf_ipset.ipset", "ip_set_descriptors.#", "0"), - ), - }, - }, - }) -} - -func TestDiffWafIpSetDescriptors(t *testing.T) { - testCases := []struct { - Old []interface{} - New []interface{} - ExpectedUpdates []*waf.IPSetUpdate - }{ - { - // Change - Old: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.7.0/24"}, - }, - New: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.8.0/24"}, - }, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.7.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.8.0/24"), - }, - }, - }, - }, - { - // Fresh IPSet - Old: []interface{}{}, - New: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "10.0.1.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "10.0.2.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "10.0.3.0/24"}, - }, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: 
aws.String("IPV4"), - Value: aws.String("10.0.1.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("10.0.2.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("10.0.3.0/24"), - }, - }, - }, - }, - { - // Deletion - Old: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.7.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "192.0.8.0/24"}, - }, - New: []interface{}{}, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.7.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.8.0/24"), - }, - }, - }, - }, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - updates := diffWafIpSetDescriptors(tc.Old, tc.New) - if !reflect.DeepEqual(updates, tc.ExpectedUpdates) { - t.Fatalf("IPSet updates don't match.\nGiven: %s\nExpected: %s", - updates, tc.ExpectedUpdates) - } - }) - } -} - -func testAccCheckAWSWafIPSetDisappears(v *waf.IPSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateIPSetInput{ - ChangeToken: token, - IPSetId: v.IPSetId, - } - - for _, IPSetDescriptor := range v.IPSetDescriptors { - IPSetUpdate := &waf.IPSetUpdate{ - Action: aws.String("DELETE"), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: IPSetDescriptor.Type, - Value: IPSetDescriptor.Value, - }, - } - req.Updates = 
append(req.Updates, IPSetUpdate) - } - - return conn.UpdateIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteIPSetInput{ - ChangeToken: token, - IPSetId: v.IPSetId, - } - return conn.DeleteIPSet(opts) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF IPSet: %s", err) - } - return nil - } -} - -func testAccCheckAWSWafIPSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_ipset" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetIPSet( - &waf.GetIPSetInput{ - IPSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.IPSet.IPSetId == rs.Primary.ID { - return fmt.Errorf("WAF IPSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the IPSet is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSWafIPSetExists(n string, v *waf.IPSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF IPSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetIPSet(&waf.GetIPSetInput{ - IPSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.IPSet.IPSetId == rs.Primary.ID { - *v = *resp.IPSet - return nil - } - - return fmt.Errorf("WAF IPSet (%s) not found", rs.Primary.ID) - } -} - -func testAccAWSWafIPSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -}`, name) -} - -func 
testAccAWSWafIPSetConfigChangeName(name string) string { - return fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -}`, name) -} - -func testAccAWSWafIPSetConfigChangeIPSetDescriptors(name string) string { - return fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.8.0/24" - } -}`, name) -} - -func testAccAWSWafIPSetConfig_noDescriptors(name string) string { - return fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" -}`, name) -} diff --git a/builtin/providers/aws/resource_aws_waf_rule.go b/builtin/providers/aws/resource_aws_waf_rule.go deleted file mode 100644 index e7d44d7be..000000000 --- a/builtin/providers/aws/resource_aws_waf_rule.go +++ /dev/null @@ -1,225 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafRuleCreate, - Read: resourceAwsWafRuleRead, - Update: resourceAwsWafRuleUpdate, - Delete: resourceAwsWafRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "metric_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWafMetricName, - }, - "predicates": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "negated": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - }, - "data_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 128 { - errors = append(errors, fmt.Errorf( - "%q 
cannot be longer than 128 characters", k)) - } - return - }, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "IPMatch" && value != "ByteMatch" && value != "SqlInjectionMatch" && value != "SizeConstraint" && value != "XssMatch" { - errors = append(errors, fmt.Errorf( - "%q must be one of IPMatch | ByteMatch | SqlInjectionMatch | SizeConstraint | XssMatch", k)) - } - return - }, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateRuleInput{ - ChangeToken: token, - MetricName: aws.String(d.Get("metric_name").(string)), - Name: aws.String(d.Get("name").(string)), - } - - return conn.CreateRule(params) - }) - if err != nil { - return err - } - resp := out.(*waf.CreateRuleOutput) - d.SetId(*resp.Rule.RuleId) - return resourceAwsWafRuleUpdate(d, meta) -} - -func resourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - params := &waf.GetRuleInput{ - RuleId: aws.String(d.Id()), - } - - resp, err := conn.GetRule(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF Rule (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - var predicates []map[string]interface{} - - for _, predicateSet := range resp.Rule.Predicates { - predicate := map[string]interface{}{ - "negated": *predicateSet.Negated, - "type": *predicateSet.Type, - "data_id": *predicateSet.DataId, - } - predicates = append(predicates, predicate) - } - - d.Set("predicates", predicates) - d.Set("name", resp.Rule.Name) - d.Set("metric_name", resp.Rule.MetricName) 
- - return nil -} - -func resourceAwsWafRuleUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - if d.HasChange("predicates") { - o, n := d.GetChange("predicates") - oldP, newP := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateWafRuleResource(d.Id(), oldP, newP, conn) - if err != nil { - return fmt.Errorf("Error Updating WAF Rule: %s", err) - } - } - - return resourceAwsWafRuleRead(d, meta) -} - -func resourceAwsWafRuleDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldPredicates := d.Get("predicates").(*schema.Set).List() - if len(oldPredicates) > 0 { - noPredicates := []interface{}{} - err := updateWafRuleResource(d.Id(), oldPredicates, noPredicates, conn) - if err != nil { - return fmt.Errorf("Error updating WAF Rule Predicates: %s", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteRuleInput{ - ChangeToken: token, - RuleId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF Rule") - return conn.DeleteRule(req) - }) - if err != nil { - return fmt.Errorf("Error deleting WAF Rule: %s", err) - } - - return nil -} - -func updateWafRuleResource(id string, oldP, newP []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateRuleInput{ - ChangeToken: token, - RuleId: aws.String(id), - Updates: diffWafRulePredicates(oldP, newP), - } - - return conn.UpdateRule(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF Rule: %s", err) - } - - return nil -} - -func diffWafRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate { - updates := make([]*waf.RuleUpdate, 0) - - for _, op := range oldP { - predicate := op.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newP, predicate); contains { - newP = append(newP[:idx], 
newP[idx+1:]...) - continue - } - - updates = append(updates, &waf.RuleUpdate{ - Action: aws.String(waf.ChangeActionDelete), - Predicate: &waf.Predicate{ - Negated: aws.Bool(predicate["negated"].(bool)), - Type: aws.String(predicate["type"].(string)), - DataId: aws.String(predicate["data_id"].(string)), - }, - }) - } - - for _, np := range newP { - predicate := np.(map[string]interface{}) - - updates = append(updates, &waf.RuleUpdate{ - Action: aws.String(waf.ChangeActionInsert), - Predicate: &waf.Predicate{ - Negated: aws.Bool(predicate["negated"].(bool)), - Type: aws.String(predicate["type"].(string)), - DataId: aws.String(predicate["data_id"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_rule_test.go b/builtin/providers/aws/resource_aws_waf_rule_test.go deleted file mode 100644 index 456b1f5aa..000000000 --- a/builtin/providers/aws/resource_aws_waf_rule_test.go +++ /dev/null @@ -1,395 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafRule_basic(t *testing.T) { - var v waf.Rule - wafRuleName := fmt.Sprintf("wafrule%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafRuleConfig(wafRuleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &v), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "name", wafRuleName), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "predicates.#", "1"), - 
resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "metric_name", wafRuleName), - ), - }, - }, - }) -} - -func TestAccAWSWafRule_changeNameForceNew(t *testing.T) { - var before, after waf.Rule - wafRuleName := fmt.Sprintf("wafrule%s", acctest.RandString(5)) - wafRuleNewName := fmt.Sprintf("wafrulenew%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRuleConfig(wafRuleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &before), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "name", wafRuleName), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "predicates.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "metric_name", wafRuleName), - ), - }, - { - Config: testAccAWSWafRuleConfigChangeName(wafRuleNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &after), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "name", wafRuleNewName), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "predicates.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "metric_name", wafRuleNewName), - ), - }, - }, - }) -} - -func TestAccAWSWafRule_disappears(t *testing.T) { - var v waf.Rule - wafRuleName := fmt.Sprintf("wafrule%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRuleConfig(wafRuleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &v), - testAccCheckAWSWafRuleDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func 
TestAccAWSWafRule_changePredicates(t *testing.T) { - var ipset waf.IPSet - var byteMatchSet waf.ByteMatchSet - - var before, after waf.Rule - var idx int - ruleName := fmt.Sprintf("wafrule%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRuleConfig(ruleName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafIPSetExists("aws_waf_ipset.ipset", &ipset), - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &before), - resource.TestCheckResourceAttr("aws_waf_rule.wafrule", "name", ruleName), - resource.TestCheckResourceAttr("aws_waf_rule.wafrule", "predicates.#", "1"), - computeWafRulePredicateWithIpSet(&ipset, false, "IPMatch", &idx), - testCheckResourceAttrWithIndexesAddr("aws_waf_rule.wafrule", "predicates.%d.negated", &idx, "false"), - testCheckResourceAttrWithIndexesAddr("aws_waf_rule.wafrule", "predicates.%d.type", &idx, "IPMatch"), - ), - }, - { - Config: testAccAWSWafRuleConfig_changePredicates(ruleName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafByteMatchSetExists("aws_waf_byte_match_set.set", &byteMatchSet), - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &after), - resource.TestCheckResourceAttr("aws_waf_rule.wafrule", "name", ruleName), - resource.TestCheckResourceAttr("aws_waf_rule.wafrule", "predicates.#", "1"), - computeWafRulePredicateWithByteMatchSet(&byteMatchSet, true, "ByteMatch", &idx), - testCheckResourceAttrWithIndexesAddr("aws_waf_rule.wafrule", "predicates.%d.negated", &idx, "true"), - testCheckResourceAttrWithIndexesAddr("aws_waf_rule.wafrule", "predicates.%d.type", &idx, "ByteMatch"), - ), - }, - }, - }) -} - -// computeWafRulePredicateWithIpSet calculates index -// which isn't static because dataId is generated as part of the test -func computeWafRulePredicateWithIpSet(ipSet *waf.IPSet, negated 
bool, pType string, idx *int) resource.TestCheckFunc { - return func(s *terraform.State) error { - predicateResource := resourceAwsWafRule().Schema["predicates"].Elem.(*schema.Resource) - - m := map[string]interface{}{ - "data_id": *ipSet.IPSetId, - "negated": negated, - "type": pType, - } - - f := schema.HashResource(predicateResource) - *idx = f(m) - - return nil - } -} - -// computeWafRulePredicateWithByteMatchSet calculates index -// which isn't static because dataId is generated as part of the test -func computeWafRulePredicateWithByteMatchSet(set *waf.ByteMatchSet, negated bool, pType string, idx *int) resource.TestCheckFunc { - return func(s *terraform.State) error { - predicateResource := resourceAwsWafRule().Schema["predicates"].Elem.(*schema.Resource) - - m := map[string]interface{}{ - "data_id": *set.ByteMatchSetId, - "negated": negated, - "type": pType, - } - - f := schema.HashResource(predicateResource) - *idx = f(m) - - return nil - } -} - -func testCheckResourceAttrWithIndexesAddr(name, format string, idx *int, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - return resource.TestCheckResourceAttr(name, fmt.Sprintf(format, *idx), value)(s) - } -} - -func TestAccAWSWafRule_noPredicates(t *testing.T) { - var rule waf.Rule - ruleName := fmt.Sprintf("wafrule%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRuleConfig_noPredicates(ruleName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRuleExists("aws_waf_rule.wafrule", &rule), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "name", ruleName), - resource.TestCheckResourceAttr( - "aws_waf_rule.wafrule", "predicates.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckAWSWafRuleDisappears(v *waf.Rule) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateRuleInput{ - ChangeToken: token, - RuleId: v.RuleId, - } - - for _, Predicate := range v.Predicates { - Predicate := &waf.RuleUpdate{ - Action: aws.String("DELETE"), - Predicate: &waf.Predicate{ - Negated: Predicate.Negated, - Type: Predicate.Type, - DataId: Predicate.DataId, - }, - } - req.Updates = append(req.Updates, Predicate) - } - - return conn.UpdateRule(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF Rule: %s", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteRuleInput{ - ChangeToken: token, - RuleId: v.RuleId, - } - return conn.DeleteRule(opts) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF Rule: %s", err) - } - return nil - } -} - -func testAccCheckAWSWafRuleDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_rule" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetRule( - &waf.GetRuleInput{ - RuleId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.Rule.RuleId == rs.Primary.ID { - return fmt.Errorf("WAF Rule %s still exists", rs.Primary.ID) - } - } - - // Return nil if the Rule is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSWafRuleExists(n string, v *waf.Rule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF Rule ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := 
conn.GetRule(&waf.GetRuleInput{ - RuleId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.Rule.RuleId == rs.Primary.ID { - *v = *resp.Rule - return nil - } - - return fmt.Errorf("WAF Rule (%s) not found", rs.Primary.ID) - } -} - -func testAccAWSWafRuleConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_rule" "wafrule" { - depends_on = ["aws_waf_ipset.ipset"] - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_ipset.ipset.id}" - negated = false - type = "IPMatch" - } -}`, name, name, name) -} - -func testAccAWSWafRuleConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_rule" "wafrule" { - depends_on = ["aws_waf_ipset.ipset"] - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_ipset.ipset.id}" - negated = false - type = "IPMatch" - } -}`, name, name, name) -} - -func testAccAWSWafRuleConfig_changePredicates(name string) string { - return fmt.Sprintf(` -resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_byte_match_set" "set" { - name = "%s" - byte_match_tuples { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - - field_to_match { - type = "HEADER" - data = "referer" - } - } -} - -resource "aws_waf_rule" "wafrule" { - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_byte_match_set.set.id}" - negated = true - type = "ByteMatch" - } -}`, name, name, name, name) -} - -func testAccAWSWafRuleConfig_noPredicates(name string) string { - return fmt.Sprintf(` -resource "aws_waf_rule" "wafrule" { - name = "%s" - metric_name = "%s" -}`, name, name) -} diff 
--git a/builtin/providers/aws/resource_aws_waf_size_constraint_set.go b/builtin/providers/aws/resource_aws_waf_size_constraint_set.go deleted file mode 100644 index 5e9f46dd4..000000000 --- a/builtin/providers/aws/resource_aws_waf_size_constraint_set.go +++ /dev/null @@ -1,229 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafSizeConstraintSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafSizeConstraintSetCreate, - Read: resourceAwsWafSizeConstraintSetRead, - Update: resourceAwsWafSizeConstraintSetUpdate, - Delete: resourceAwsWafSizeConstraintSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "size_constraints": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "comparison_operator": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "text_transformation": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafSizeConstraintSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - log.Printf("[INFO] Creating SizeConstraintSet: %s", d.Get("name").(string)) - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := 
&waf.CreateSizeConstraintSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - - return conn.CreateSizeConstraintSet(params) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating SizeConstraintSet: {{err}}", err) - } - resp := out.(*waf.CreateSizeConstraintSetOutput) - - d.SetId(*resp.SizeConstraintSet.SizeConstraintSetId) - - return resourceAwsWafSizeConstraintSetUpdate(d, meta) -} - -func resourceAwsWafSizeConstraintSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - log.Printf("[INFO] Reading SizeConstraintSet: %s", d.Get("name").(string)) - params := &waf.GetSizeConstraintSetInput{ - SizeConstraintSetId: aws.String(d.Id()), - } - - resp, err := conn.GetSizeConstraintSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", resp.SizeConstraintSet.Name) - d.Set("size_constraints", flattenWafSizeConstraints(resp.SizeConstraintSet.SizeConstraints)) - - return nil -} - -func resourceAwsWafSizeConstraintSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - if d.HasChange("size_constraints") { - o, n := d.GetChange("size_constraints") - oldS, newS := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateSizeConstraintSetResource(d.Id(), oldS, newS, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err) - } - } - - return resourceAwsWafSizeConstraintSetRead(d, meta) -} - -func resourceAwsWafSizeConstraintSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldConstraints := d.Get("size_constraints").(*schema.Set).List() - - if len(oldConstraints) > 0 { - noConstraints := []interface{}{} - err := 
updateSizeConstraintSetResource(d.Id(), oldConstraints, noConstraints, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteSizeConstraintSetInput{ - ChangeToken: token, - SizeConstraintSetId: aws.String(d.Id()), - } - return conn.DeleteSizeConstraintSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err) - } - - return nil -} - -func updateSizeConstraintSetResource(id string, oldS, newS []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateSizeConstraintSetInput{ - ChangeToken: token, - SizeConstraintSetId: aws.String(id), - Updates: diffWafSizeConstraints(oldS, newS), - } - - log.Printf("[INFO] Updating WAF Size Constraint constraints: %s", req) - return conn.UpdateSizeConstraintSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err) - } - - return nil -} - -func flattenWafSizeConstraints(sc []*waf.SizeConstraint) []interface{} { - out := make([]interface{}, len(sc), len(sc)) - for i, c := range sc { - m := make(map[string]interface{}) - m["comparison_operator"] = *c.ComparisonOperator - if c.FieldToMatch != nil { - m["field_to_match"] = flattenFieldToMatch(c.FieldToMatch) - } - m["size"] = *c.Size - m["text_transformation"] = *c.TextTransformation - out[i] = m - } - return out -} - -func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUpdate { - updates := make([]*waf.SizeConstraintSetUpdate, 0) - - for _, os := range oldS { - constraint := os.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newS, constraint); contains { - newS = append(newS[:idx], newS[idx+1:]...) 
- continue - } - - updates = append(updates, &waf.SizeConstraintSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), - Size: aws.Int64(int64(constraint["size"].(int))), - TextTransformation: aws.String(constraint["text_transformation"].(string)), - }, - }) - } - - for _, ns := range newS { - constraint := ns.(map[string]interface{}) - - updates = append(updates, &waf.SizeConstraintSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: expandFieldToMatch(constraint["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), - Size: aws.Int64(int64(constraint["size"].(int))), - TextTransformation: aws.String(constraint["text_transformation"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go b/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go deleted file mode 100644 index dcfac5d20..000000000 --- a/builtin/providers/aws/resource_aws_waf_size_constraint_set_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafSizeConstraintSet_basic(t *testing.T) { - var v waf.SizeConstraintSet - sizeConstraintSet := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafSizeConstraintSetConfig(sizeConstraintSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &v), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", sizeConstraintSet), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.comparison_operator", "EQ"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.281401076.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.281401076.type", "BODY"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.size", "4096"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafSizeConstraintSet_changeNameForceNew(t *testing.T) { - var before, after waf.SizeConstraintSet - sizeConstraintSet := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - sizeConstraintSetNewName := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSWafSizeConstraintSetConfig(sizeConstraintSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", sizeConstraintSet), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "1"), - ), - }, - { - Config: testAccAWSWafSizeConstraintSetConfigChangeName(sizeConstraintSetNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", sizeConstraintSetNewName), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSWafSizeConstraintSet_disappears(t *testing.T) { - var v waf.SizeConstraintSet - sizeConstraintSet := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSizeConstraintSetConfig(sizeConstraintSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &v), - testAccCheckAWSWafSizeConstraintSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSWafSizeConstraintSet_changeConstraints(t *testing.T) { - var before, after waf.SizeConstraintSet - setName := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccAWSWafSizeConstraintSetConfig(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.comparison_operator", "EQ"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.281401076.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.field_to_match.281401076.type", "BODY"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.size", "4096"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.2029852522.text_transformation", "NONE"), - ), - }, - { - Config: testAccAWSWafSizeConstraintSetConfig_changeConstraints(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.comparison_operator", "GE"), - resource.TestCheckResourceAttr( - 
"aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.field_to_match.281401076.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.field_to_match.281401076.type", "BODY"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.size", "1024"), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.3222308386.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafSizeConstraintSet_noConstraints(t *testing.T) { - var ipset waf.SizeConstraintSet - setName := fmt.Sprintf("sizeConstraintSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSizeConstraintSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSizeConstraintSetConfig_noConstraints(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafSizeConstraintSetExists("aws_waf_size_constraint_set.size_constraint_set", &ipset), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_size_constraint_set.size_constraint_set", "size_constraints.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckAWSWafSizeConstraintSetDisappears(v *waf.SizeConstraintSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateSizeConstraintSetInput{ - ChangeToken: token, - SizeConstraintSetId: v.SizeConstraintSetId, - } - 
- for _, sizeConstraint := range v.SizeConstraints { - sizeConstraintUpdate := &waf.SizeConstraintSetUpdate{ - Action: aws.String("DELETE"), - SizeConstraint: &waf.SizeConstraint{ - FieldToMatch: sizeConstraint.FieldToMatch, - ComparisonOperator: sizeConstraint.ComparisonOperator, - Size: sizeConstraint.Size, - TextTransformation: sizeConstraint.TextTransformation, - }, - } - req.Updates = append(req.Updates, sizeConstraintUpdate) - } - return conn.UpdateSizeConstraintSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteSizeConstraintSetInput{ - ChangeToken: token, - SizeConstraintSetId: v.SizeConstraintSetId, - } - return conn.DeleteSizeConstraintSet(opts) - }) - if err != nil { - return err - } - return nil - } -} - -func testAccCheckAWSWafSizeConstraintSetExists(n string, v *waf.SizeConstraintSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF SizeConstraintSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetSizeConstraintSet(&waf.GetSizeConstraintSetInput{ - SizeConstraintSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.SizeConstraintSet.SizeConstraintSetId == rs.Primary.ID { - *v = *resp.SizeConstraintSet - return nil - } - - return fmt.Errorf("WAF SizeConstraintSet (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSWafSizeConstraintSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_byte_match_set" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetSizeConstraintSet( - &waf.GetSizeConstraintSetInput{ - SizeConstraintSetId: 
aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.SizeConstraintSet.SizeConstraintSetId == rs.Primary.ID { - return fmt.Errorf("WAF SizeConstraintSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the SizeConstraintSet is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccAWSWafSizeConstraintSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_size_constraint_set" "size_constraint_set" { - name = "%s" - size_constraints { - text_transformation = "NONE" - comparison_operator = "EQ" - size = "4096" - field_to_match { - type = "BODY" - } - } -}`, name) -} - -func testAccAWSWafSizeConstraintSetConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_waf_size_constraint_set" "size_constraint_set" { - name = "%s" - size_constraints { - text_transformation = "NONE" - comparison_operator = "EQ" - size = "4096" - field_to_match { - type = "BODY" - } - } -}`, name) -} - -func testAccAWSWafSizeConstraintSetConfig_changeConstraints(name string) string { - return fmt.Sprintf(` -resource "aws_waf_size_constraint_set" "size_constraint_set" { - name = "%s" - size_constraints { - text_transformation = "NONE" - comparison_operator = "GE" - size = "1024" - field_to_match { - type = "BODY" - } - } -}`, name) -} - -func testAccAWSWafSizeConstraintSetConfig_noConstraints(name string) string { - return fmt.Sprintf(` -resource "aws_waf_size_constraint_set" "size_constraint_set" { - name = "%s" -}`, name) -} diff --git a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go deleted file mode 100644 index 808373c4a..000000000 --- a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set.go +++ /dev/null @@ -1,214 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafSqlInjectionMatchSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafSqlInjectionMatchSetCreate, - Read: resourceAwsWafSqlInjectionMatchSetRead, - Update: resourceAwsWafSqlInjectionMatchSetUpdate, - Delete: resourceAwsWafSqlInjectionMatchSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "sql_injection_match_tuples": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "text_transformation": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafSqlInjectionMatchSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - log.Printf("[INFO] Creating SqlInjectionMatchSet: %s", d.Get("name").(string)) - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateSqlInjectionMatchSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - - return conn.CreateSqlInjectionMatchSet(params) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating SqlInjectionMatchSet: {{err}}", err) - } - resp := out.(*waf.CreateSqlInjectionMatchSetOutput) - d.SetId(*resp.SqlInjectionMatchSet.SqlInjectionMatchSetId) - - return resourceAwsWafSqlInjectionMatchSetUpdate(d, meta) -} - -func resourceAwsWafSqlInjectionMatchSetRead(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - log.Printf("[INFO] Reading SqlInjectionMatchSet: %s", d.Get("name").(string)) - params := &waf.GetSqlInjectionMatchSetInput{ - SqlInjectionMatchSetId: aws.String(d.Id()), - } - - resp, err := conn.GetSqlInjectionMatchSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", resp.SqlInjectionMatchSet.Name) - d.Set("sql_injection_match_tuples", resp.SqlInjectionMatchSet.SqlInjectionMatchTuples) - - return nil -} - -func resourceAwsWafSqlInjectionMatchSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - if d.HasChange("sql_injection_match_tuples") { - o, n := d.GetChange("sql_injection_match_tuples") - oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateSqlInjectionMatchSetResource(d.Id(), oldT, newT, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err) - } - } - - return resourceAwsWafSqlInjectionMatchSetRead(d, meta) -} - -func resourceAwsWafSqlInjectionMatchSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldTuples := d.Get("sql_injection_match_tuples").(*schema.Set).List() - - if len(oldTuples) > 0 { - noTuples := []interface{}{} - err := updateSqlInjectionMatchSetResource(d.Id(), oldTuples, noTuples, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteSqlInjectionMatchSetInput{ - ChangeToken: token, - SqlInjectionMatchSetId: aws.String(d.Id()), - } - - return conn.DeleteSqlInjectionMatchSet(req) - }) - if 
err != nil { - return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) - } - - return nil -} - -func updateSqlInjectionMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateSqlInjectionMatchSetInput{ - ChangeToken: token, - SqlInjectionMatchSetId: aws.String(id), - Updates: diffWafSqlInjectionMatchTuples(oldT, newT), - } - - log.Printf("[INFO] Updating SqlInjectionMatchSet: %s", req) - return conn.UpdateSqlInjectionMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err) - } - - return nil -} - -func flattenWafSqlInjectionMatchTuples(ts []*waf.SqlInjectionMatchTuple) []interface{} { - out := make([]interface{}, len(ts), len(ts)) - for i, t := range ts { - m := make(map[string]interface{}) - m["text_transformation"] = *t.TextTransformation - m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch) - out[i] = m - } - - return out -} - -func diffWafSqlInjectionMatchTuples(oldT, newT []interface{}) []*waf.SqlInjectionMatchSetUpdate { - updates := make([]*waf.SqlInjectionMatchSetUpdate, 0) - - for _, od := range oldT { - tuple := od.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, tuple); contains { - newT = append(newT[:idx], newT[idx+1:]...) 
- continue - } - - updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - - for _, nd := range newT { - tuple := nd.(map[string]interface{}) - - updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go b/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go deleted file mode 100644 index c9f081b3d..000000000 --- a/builtin/providers/aws/resource_aws_waf_sql_injection_match_set_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafSqlInjectionMatchSet_basic(t *testing.T) { - var v waf.SqlInjectionMatchSet - sqlInjectionMatchSet := fmt.Sprintf("sqlInjectionMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSqlInjectionMatchSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafSqlInjectionMatchSetConfig(sqlInjectionMatchSet), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &v), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", sqlInjectionMatchSet), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.2316364334.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.2316364334.type", "QUERY_STRING"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.text_transformation", "URL_DECODE"), - ), - }, - }, - }) -} - -func TestAccAWSWafSqlInjectionMatchSet_changeNameForceNew(t *testing.T) { - var before, after waf.SqlInjectionMatchSet - sqlInjectionMatchSet := fmt.Sprintf("sqlInjectionMatchSet-%s", acctest.RandString(5)) - sqlInjectionMatchSetNewName := fmt.Sprintf("sqlInjectionMatchSetNewName-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSqlInjectionMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSqlInjectionMatchSetConfig(sqlInjectionMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", sqlInjectionMatchSet), - 
resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "1"), - ), - }, - { - Config: testAccAWSWafSqlInjectionMatchSetConfigChangeName(sqlInjectionMatchSetNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", sqlInjectionMatchSetNewName), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSWafSqlInjectionMatchSet_disappears(t *testing.T) { - var v waf.SqlInjectionMatchSet - sqlInjectionMatchSet := fmt.Sprintf("sqlInjectionMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSqlInjectionMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSqlInjectionMatchSetConfig(sqlInjectionMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &v), - testAccCheckAWSWafSqlInjectionMatchSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSWafSqlInjectionMatchSet_changeTuples(t *testing.T) { - var before, after waf.SqlInjectionMatchSet - setName := fmt.Sprintf("sqlInjectionMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSqlInjectionMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSqlInjectionMatchSetConfig(setName), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.2316364334.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.field_to_match.2316364334.type", "QUERY_STRING"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3367958210.text_transformation", "URL_DECODE"), - ), - }, - { - Config: testAccAWSWafSqlInjectionMatchSetConfig_changeTuples(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3333510133.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3333510133.field_to_match.4253810390.data", "GET"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", 
"sql_injection_match_tuples.3333510133.field_to_match.4253810390.type", "METHOD"), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.3333510133.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafSqlInjectionMatchSet_noTuples(t *testing.T) { - var ipset waf.SqlInjectionMatchSet - setName := fmt.Sprintf("sqlInjectionMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafSqlInjectionMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafSqlInjectionMatchSetConfig_noTuples(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafSqlInjectionMatchSetExists("aws_waf_sql_injection_match_set.sql_injection_match_set", &ipset), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_sql_injection_match_set.sql_injection_match_set", "sql_injection_match_tuples.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckAWSWafSqlInjectionMatchSetDisappears(v *waf.SqlInjectionMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateSqlInjectionMatchSetInput{ - ChangeToken: token, - SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, - } - - for _, sqlInjectionMatchTuple := range v.SqlInjectionMatchTuples { - sqlInjectionMatchTupleUpdate := &waf.SqlInjectionMatchSetUpdate{ - Action: aws.String("DELETE"), - SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ - FieldToMatch: sqlInjectionMatchTuple.FieldToMatch, - TextTransformation: sqlInjectionMatchTuple.TextTransformation, - }, - } - req.Updates = append(req.Updates, 
sqlInjectionMatchTupleUpdate) - } - return conn.UpdateSqlInjectionMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteSqlInjectionMatchSetInput{ - ChangeToken: token, - SqlInjectionMatchSetId: v.SqlInjectionMatchSetId, - } - return conn.DeleteSqlInjectionMatchSet(opts) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err) - } - return nil - } -} - -func testAccCheckAWSWafSqlInjectionMatchSetExists(n string, v *waf.SqlInjectionMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF SqlInjectionMatchSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetSqlInjectionMatchSet(&waf.GetSqlInjectionMatchSetInput{ - SqlInjectionMatchSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.SqlInjectionMatchSet.SqlInjectionMatchSetId == rs.Primary.ID { - *v = *resp.SqlInjectionMatchSet - return nil - } - - return fmt.Errorf("WAF SqlInjectionMatchSet (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSWafSqlInjectionMatchSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_byte_match_set" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetSqlInjectionMatchSet( - &waf.GetSqlInjectionMatchSetInput{ - SqlInjectionMatchSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.SqlInjectionMatchSet.SqlInjectionMatchSetId == rs.Primary.ID { - return fmt.Errorf("WAF SqlInjectionMatchSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the SqlInjectionMatchSet is already destroyed - if 
awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccAWSWafSqlInjectionMatchSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" { - name = "%s" - sql_injection_match_tuples { - text_transformation = "URL_DECODE" - field_to_match { - type = "QUERY_STRING" - } - } -}`, name) -} - -func testAccAWSWafSqlInjectionMatchSetConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" { - name = "%s" - sql_injection_match_tuples { - text_transformation = "URL_DECODE" - field_to_match { - type = "QUERY_STRING" - } - } -}`, name) -} - -func testAccAWSWafSqlInjectionMatchSetConfig_changeTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" { - name = "%s" - sql_injection_match_tuples { - text_transformation = "NONE" - field_to_match { - type = "METHOD" - data = "GET" - } - } -}`, name) -} - -func testAccAWSWafSqlInjectionMatchSetConfig_noTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" { - name = "%s" -}`, name) -} diff --git a/builtin/providers/aws/resource_aws_waf_web_acl.go b/builtin/providers/aws/resource_aws_waf_web_acl.go deleted file mode 100644 index 7e3ac7237..000000000 --- a/builtin/providers/aws/resource_aws_waf_web_acl.go +++ /dev/null @@ -1,228 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafWebAcl() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafWebAclCreate, - Read: resourceAwsWafWebAclRead, - Update: resourceAwsWafWebAclUpdate, - Delete: 
resourceAwsWafWebAclDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "default_action": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "metric_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWafMetricName, - }, - "rules": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "action": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "priority": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "rule_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateWebACLInput{ - ChangeToken: token, - DefaultAction: expandDefaultAction(d), - MetricName: aws.String(d.Get("metric_name").(string)), - Name: aws.String(d.Get("name").(string)), - } - - return conn.CreateWebACL(params) - }) - if err != nil { - return err - } - resp := out.(*waf.CreateWebACLOutput) - d.SetId(*resp.WebACL.WebACLId) - return resourceAwsWafWebAclUpdate(d, meta) -} - -func resourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - params := &waf.GetWebACLInput{ - WebACLId: aws.String(d.Id()), - } - - resp, err := conn.GetWebACL(params) - if err != nil { - if awsErr, 
ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF ACL (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - defaultAction := flattenDefaultAction(resp.WebACL.DefaultAction) - if defaultAction != nil { - if err := d.Set("default_action", defaultAction); err != nil { - return fmt.Errorf("error setting default_action: %s", err) - } - } - d.Set("name", resp.WebACL.Name) - d.Set("metric_name", resp.WebACL.MetricName) - - return nil -} - -func resourceAwsWafWebAclUpdate(d *schema.ResourceData, meta interface{}) error { - err := updateWebAclResource(d, meta, waf.ChangeActionInsert) - if err != nil { - return fmt.Errorf("Error Updating WAF ACL: %s", err) - } - return resourceAwsWafWebAclRead(d, meta) -} - -func resourceAwsWafWebAclDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - err := updateWebAclResource(d, meta, waf.ChangeActionDelete) - if err != nil { - return fmt.Errorf("Error Removing WAF ACL Rules: %s", err) - } - - wr := newWafRetryer(conn, "global") - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteWebACLInput{ - ChangeToken: token, - WebACLId: aws.String(d.Id()), - } - - log.Printf("[INFO] Deleting WAF ACL") - return conn.DeleteWebACL(req) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF ACL: %s", err) - } - return nil -} - -func updateWebAclResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { - conn := meta.(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateWebACLInput{ - ChangeToken: token, - WebACLId: aws.String(d.Id()), - } - - if d.HasChange("default_action") { - req.DefaultAction = expandDefaultAction(d) - } - - rules := d.Get("rules").(*schema.Set) - for _, rule := range rules.List() { - aclRule := rule.(map[string]interface{}) - 
action := aclRule["action"].(*schema.Set).List()[0].(map[string]interface{}) - aclRuleUpdate := &waf.WebACLUpdate{ - Action: aws.String(ChangeAction), - ActivatedRule: &waf.ActivatedRule{ - Priority: aws.Int64(int64(aclRule["priority"].(int))), - RuleId: aws.String(aclRule["rule_id"].(string)), - Action: &waf.WafAction{Type: aws.String(action["type"].(string))}, - }, - } - req.Updates = append(req.Updates, aclRuleUpdate) - } - return conn.UpdateWebACL(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF ACL: %s", err) - } - return nil -} - -func expandDefaultAction(d *schema.ResourceData) *waf.WafAction { - set, ok := d.GetOk("default_action") - if !ok { - return nil - } - - s := set.(*schema.Set).List() - if s == nil || len(s) == 0 { - return nil - } - - if s[0] == nil { - log.Printf("[ERR] First element of Default Action is set to nil") - return nil - } - - dA := s[0].(map[string]interface{}) - - return &waf.WafAction{ - Type: aws.String(dA["type"].(string)), - } -} - -func flattenDefaultAction(n *waf.WafAction) []map[string]interface{} { - if n == nil { - return nil - } - - m := setMap(make(map[string]interface{})) - - m.SetString("type", n.Type) - return m.MapList() -} diff --git a/builtin/providers/aws/resource_aws_waf_web_acl_test.go b/builtin/providers/aws/resource_aws_waf_web_acl_test.go deleted file mode 100644 index 6591fed0e..000000000 --- a/builtin/providers/aws/resource_aws_waf_web_acl_test.go +++ /dev/null @@ -1,367 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafWebAcl_basic(t *testing.T) { - var v waf.WebACL - wafAclName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafWebAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafWebAclConfig(wafAclName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &v), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.4234791575.type", "ALLOW"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "name", wafAclName), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "rules.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "metric_name", wafAclName), - ), - }, - }, - }) -} - -func TestAccAWSWafWebAcl_changeNameForceNew(t *testing.T) { - var before, after waf.WebACL - wafAclName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - wafAclNewName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafWebAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafWebAclConfig(wafAclName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &before), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.4234791575.type", "ALLOW"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "name", wafAclName), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "rules.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "metric_name", wafAclName), - ), - }, - { - Config: testAccAWSWafWebAclConfigChangeName(wafAclNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &after), - 
resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.4234791575.type", "ALLOW"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "name", wafAclNewName), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "rules.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "metric_name", wafAclNewName), - ), - }, - }, - }) -} - -func TestAccAWSWafWebAcl_changeDefaultAction(t *testing.T) { - var before, after waf.WebACL - wafAclName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - wafAclNewName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafWebAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafWebAclConfig(wafAclName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &before), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.4234791575.type", "ALLOW"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "name", wafAclName), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "rules.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "metric_name", wafAclName), - ), - }, - { - Config: testAccAWSWafWebAclConfigDefaultAction(wafAclNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &after), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "default_action.2267395054.type", "BLOCK"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "name", wafAclNewName), - 
resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "rules.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_web_acl.waf_acl", "metric_name", wafAclNewName), - ), - }, - }, - }) -} - -func TestAccAWSWafWebAcl_disappears(t *testing.T) { - var v waf.WebACL - wafAclName := fmt.Sprintf("wafacl%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafWebAclDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafWebAclConfig(wafAclName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafWebAclExists("aws_waf_web_acl.waf_acl", &v), - testAccCheckAWSWafWebAclDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckAWSWafWebAclDisappears(v *waf.WebACL) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateWebACLInput{ - ChangeToken: token, - WebACLId: v.WebACLId, - } - - for _, ActivatedRule := range v.Rules { - WebACLUpdate := &waf.WebACLUpdate{ - Action: aws.String("DELETE"), - ActivatedRule: &waf.ActivatedRule{ - Priority: ActivatedRule.Priority, - RuleId: ActivatedRule.RuleId, - Action: ActivatedRule.Action, - }, - } - req.Updates = append(req.Updates, WebACLUpdate) - } - - return conn.UpdateWebACL(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF ACL: %s", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteWebACLInput{ - ChangeToken: token, - WebACLId: v.WebACLId, - } - return conn.DeleteWebACL(opts) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF ACL: %s", err) - } - return nil - } -} - -func testAccCheckAWSWafWebAclDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if 
rs.Type != "aws_waf_web_acl" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetWebACL( - &waf.GetWebACLInput{ - WebACLId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.WebACL.WebACLId == rs.Primary.ID { - return fmt.Errorf("WebACL %s still exists", rs.Primary.ID) - } - } - - // Return nil if the WebACL is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSWafWebAclExists(n string, v *waf.WebACL) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WebACL ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetWebACL(&waf.GetWebACLInput{ - WebACLId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.WebACL.WebACLId == rs.Primary.ID { - *v = *resp.WebACL - return nil - } - - return fmt.Errorf("WebACL (%s) not found", rs.Primary.ID) - } -} - -func testAccAWSWafWebAclConfig(name string) string { - return fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_rule" "wafrule" { - depends_on = ["aws_waf_ipset.ipset"] - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_ipset.ipset.id}" - negated = false - type = "IPMatch" - } -} -resource "aws_waf_web_acl" "waf_acl" { - depends_on = ["aws_waf_ipset.ipset", "aws_waf_rule.wafrule"] - name = "%s" - metric_name = "%s" - default_action { - type = "ALLOW" - } - rules { - action { - type = "BLOCK" - } - priority = 1 - rule_id = "${aws_waf_rule.wafrule.id}" - } -}`, name, name, name, name, name) -} - -func testAccAWSWafWebAclConfigChangeName(name string) string { - return 
fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_rule" "wafrule" { - depends_on = ["aws_waf_ipset.ipset"] - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_ipset.ipset.id}" - negated = false - type = "IPMatch" - } -} -resource "aws_waf_web_acl" "waf_acl" { - depends_on = ["aws_waf_ipset.ipset", "aws_waf_rule.wafrule"] - name = "%s" - metric_name = "%s" - default_action { - type = "ALLOW" - } - rules { - action { - type = "BLOCK" - } - priority = 1 - rule_id = "${aws_waf_rule.wafrule.id}" - } -}`, name, name, name, name, name) -} - -func testAccAWSWafWebAclConfigDefaultAction(name string) string { - return fmt.Sprintf(`resource "aws_waf_ipset" "ipset" { - name = "%s" - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } -} - -resource "aws_waf_rule" "wafrule" { - depends_on = ["aws_waf_ipset.ipset"] - name = "%s" - metric_name = "%s" - predicates { - data_id = "${aws_waf_ipset.ipset.id}" - negated = false - type = "IPMatch" - } -} -resource "aws_waf_web_acl" "waf_acl" { - depends_on = ["aws_waf_ipset.ipset", "aws_waf_rule.wafrule"] - name = "%s" - metric_name = "%s" - default_action { - type = "BLOCK" - } - rules { - action { - type = "BLOCK" - } - priority = 1 - rule_id = "${aws_waf_rule.wafrule.id}" - } -}`, name, name, name, name, name) -} diff --git a/builtin/providers/aws/resource_aws_waf_xss_match_set.go b/builtin/providers/aws/resource_aws_waf_xss_match_set.go deleted file mode 100644 index c6ea0d630..000000000 --- a/builtin/providers/aws/resource_aws_waf_xss_match_set.go +++ /dev/null @@ -1,214 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafXssMatchSet() *schema.Resource { - return 
&schema.Resource{ - Create: resourceAwsWafXssMatchSetCreate, - Read: resourceAwsWafXssMatchSetRead, - Update: resourceAwsWafXssMatchSetUpdate, - Delete: resourceAwsWafXssMatchSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "xss_match_tuples": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "text_transformation": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafXssMatchSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - log.Printf("[INFO] Creating XssMatchSet: %s", d.Get("name").(string)) - - wr := newWafRetryer(conn, "global") - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateXssMatchSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - - return conn.CreateXssMatchSet(params) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating XssMatchSet: {{err}}", err) - } - resp := out.(*waf.CreateXssMatchSetOutput) - - d.SetId(*resp.XssMatchSet.XssMatchSetId) - - return resourceAwsWafXssMatchSetUpdate(d, meta) -} - -func resourceAwsWafXssMatchSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - log.Printf("[INFO] Reading XssMatchSet: %s", d.Get("name").(string)) - params := &waf.GetXssMatchSetInput{ - XssMatchSetId: aws.String(d.Id()), - } - - resp, err := conn.GetXssMatchSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == 
"WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", resp.XssMatchSet.Name) - d.Set("xss_match_tuples", flattenWafXssMatchTuples(resp.XssMatchSet.XssMatchTuples)) - - return nil -} - -func resourceAwsWafXssMatchSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - if d.HasChange("xss_match_tuples") { - o, n := d.GetChange("xss_match_tuples") - oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateXssMatchSetResource(d.Id(), oldT, newT, conn) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err) - } - } - - return resourceAwsWafXssMatchSetRead(d, meta) -} - -func resourceAwsWafXssMatchSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafconn - - oldTuples := d.Get("xss_match_tuples").(*schema.Set).List() - if len(oldTuples) > 0 { - noTuples := []interface{}{} - err := updateXssMatchSetResource(d.Id(), oldTuples, noTuples, conn) - if err != nil { - return fmt.Errorf("Error updating IPSetDescriptors: %s", err) - } - } - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteXssMatchSetInput{ - ChangeToken: token, - XssMatchSetId: aws.String(d.Id()), - } - - return conn.DeleteXssMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err) - } - - return nil -} - -func updateXssMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error { - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateXssMatchSetInput{ - ChangeToken: token, - XssMatchSetId: aws.String(id), - Updates: diffWafXssMatchSetTuples(oldT, newT), - } - - log.Printf("[INFO] Updating XssMatchSet tuples: %s", req) - return 
conn.UpdateXssMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err) - } - - return nil -} - -func flattenWafXssMatchTuples(ts []*waf.XssMatchTuple) []interface{} { - out := make([]interface{}, len(ts), len(ts)) - for i, t := range ts { - m := make(map[string]interface{}) - m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch) - m["text_transformation"] = *t.TextTransformation - out[i] = m - } - return out -} - -func diffWafXssMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate { - updates := make([]*waf.XssMatchSetUpdate, 0) - - for _, od := range oldT { - tuple := od.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, tuple); contains { - newT = append(newT[:idx], newT[idx+1:]...) - continue - } - - updates = append(updates, &waf.XssMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - - for _, nd := range newT { - tuple := nd.(map[string]interface{}) - - updates = append(updates, &waf.XssMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - return updates -} diff --git a/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go b/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go deleted file mode 100644 index 175e61946..000000000 --- a/builtin/providers/aws/resource_aws_waf_xss_match_set_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafXssMatchSet_basic(t *testing.T) { - var v waf.XssMatchSet - xssMatchSet := fmt.Sprintf("xssMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafXssMatchSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafXssMatchSetConfig(xssMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &v), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", xssMatchSet), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "2"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.2316364334.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.2316364334.type", "QUERY_STRING"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.3756326843.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.3756326843.type", "URI"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", 
"xss_match_tuples.2786024938.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafXssMatchSet_changeNameForceNew(t *testing.T) { - var before, after waf.XssMatchSet - xssMatchSet := fmt.Sprintf("xssMatchSet-%s", acctest.RandString(5)) - xssMatchSetNewName := fmt.Sprintf("xssMatchSetNewName-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafXssMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafXssMatchSetConfig(xssMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", xssMatchSet), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "2"), - ), - }, - { - Config: testAccAWSWafXssMatchSetConfigChangeName(xssMatchSetNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", xssMatchSetNewName), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "2"), - ), - }, - }, - }) -} - -func TestAccAWSWafXssMatchSet_disappears(t *testing.T) { - var v waf.XssMatchSet - xssMatchSet := fmt.Sprintf("xssMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafXssMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafXssMatchSetConfig(xssMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &v), - testAccCheckAWSWafXssMatchSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - 
-func TestAccAWSWafXssMatchSet_changeTuples(t *testing.T) { - var before, after waf.XssMatchSet - setName := fmt.Sprintf("xssMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafXssMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafXssMatchSetConfig(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &before), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "2"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.2316364334.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.field_to_match.2316364334.type", "QUERY_STRING"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2018581549.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.3756326843.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.field_to_match.3756326843.type", "URI"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2786024938.text_transformation", "NONE"), - ), - }, - { - Config: testAccAWSWafXssMatchSetConfig_changeTuples(setName), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &after), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "2"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2893682529.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2893682529.field_to_match.4253810390.data", "GET"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2893682529.field_to_match.4253810390.type", "METHOD"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.2893682529.text_transformation", "HTML_ENTITY_DECODE"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.4270311415.field_to_match.#", "1"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.4270311415.field_to_match.281401076.data", ""), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.4270311415.field_to_match.281401076.type", "BODY"), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.4270311415.text_transformation", "CMD_LINE"), - ), - }, - }, - }) -} - -func TestAccAWSWafXssMatchSet_noTuples(t *testing.T) { - var ipset waf.XssMatchSet - setName := fmt.Sprintf("xssMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafXssMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafXssMatchSetConfig_noTuples(setName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafXssMatchSetExists("aws_waf_xss_match_set.xss_match_set", &ipset), - 
resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "name", setName), - resource.TestCheckResourceAttr( - "aws_waf_xss_match_set.xss_match_set", "xss_match_tuples.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckAWSWafXssMatchSetDisappears(v *waf.XssMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafconn - - wr := newWafRetryer(conn, "global") - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateXssMatchSetInput{ - ChangeToken: token, - XssMatchSetId: v.XssMatchSetId, - } - - for _, xssMatchTuple := range v.XssMatchTuples { - xssMatchTupleUpdate := &waf.XssMatchSetUpdate{ - Action: aws.String("DELETE"), - XssMatchTuple: &waf.XssMatchTuple{ - FieldToMatch: xssMatchTuple.FieldToMatch, - TextTransformation: xssMatchTuple.TextTransformation, - }, - } - req.Updates = append(req.Updates, xssMatchTupleUpdate) - } - return conn.UpdateXssMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteXssMatchSetInput{ - ChangeToken: token, - XssMatchSetId: v.XssMatchSetId, - } - return conn.DeleteXssMatchSet(opts) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err) - } - return nil - } -} - -func testAccCheckAWSWafXssMatchSetExists(n string, v *waf.XssMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF XssMatchSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetXssMatchSet(&waf.GetXssMatchSetInput{ - XssMatchSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.XssMatchSet.XssMatchSetId == 
rs.Primary.ID { - *v = *resp.XssMatchSet - return nil - } - - return fmt.Errorf("WAF XssMatchSet (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSWafXssMatchSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_waf_byte_match_set" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafconn - resp, err := conn.GetXssMatchSet( - &waf.GetXssMatchSetInput{ - XssMatchSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.XssMatchSet.XssMatchSetId == rs.Primary.ID { - return fmt.Errorf("WAF XssMatchSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the XssMatchSet is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccAWSWafXssMatchSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_waf_xss_match_set" "xss_match_set" { - name = "%s" - xss_match_tuples { - text_transformation = "NONE" - field_to_match { - type = "URI" - } - } - - xss_match_tuples { - text_transformation = "NONE" - field_to_match { - type = "QUERY_STRING" - } - } -}`, name) -} - -func testAccAWSWafXssMatchSetConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_waf_xss_match_set" "xss_match_set" { - name = "%s" - xss_match_tuples { - text_transformation = "NONE" - field_to_match { - type = "URI" - } - } - - xss_match_tuples { - text_transformation = "NONE" - field_to_match { - type = "QUERY_STRING" - } - } -}`, name) -} - -func testAccAWSWafXssMatchSetConfig_changeTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_xss_match_set" "xss_match_set" { - name = "%s" - xss_match_tuples { - text_transformation = "CMD_LINE" - field_to_match { - type = "BODY" - } - } - - xss_match_tuples { - text_transformation = "HTML_ENTITY_DECODE" - field_to_match { - type = "METHOD" - data = "GET" - } - } -}`, name) -} - -func 
testAccAWSWafXssMatchSetConfig_noTuples(name string) string { - return fmt.Sprintf(` -resource "aws_waf_xss_match_set" "xss_match_set" { - name = "%s" -}`, name) -} diff --git a/builtin/providers/aws/resource_aws_wafregional_byte_match_set.go b/builtin/providers/aws/resource_aws_wafregional_byte_match_set.go deleted file mode 100644 index d7f916ad3..000000000 --- a/builtin/providers/aws/resource_aws_wafregional_byte_match_set.go +++ /dev/null @@ -1,266 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafRegionalByteMatchSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafRegionalByteMatchSetCreate, - Read: resourceAwsWafRegionalByteMatchSetRead, - Update: resourceAwsWafRegionalByteMatchSetUpdate, - Delete: resourceAwsWafRegionalByteMatchSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "byte_match_tuple": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_to_match": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "positional_constraint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "target_string": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "text_transformation": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafRegionalByteMatchSetCreate(d *schema.ResourceData, meta interface{}) error { 
- conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - - log.Printf("[INFO] Creating ByteMatchSet: %s", d.Get("name").(string)) - - wr := newWafRegionalRetryer(conn, region) - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateByteMatchSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - return conn.CreateByteMatchSet(params) - }) - - if err != nil { - return errwrap.Wrapf("[ERROR] Error creating ByteMatchSet: {{err}}", err) - } - resp := out.(*waf.CreateByteMatchSetOutput) - - d.SetId(*resp.ByteMatchSet.ByteMatchSetId) - - return resourceAwsWafRegionalByteMatchSetUpdate(d, meta) -} - -func resourceAwsWafRegionalByteMatchSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - - log.Printf("[INFO] Reading ByteMatchSet: %s", d.Get("name").(string)) - - params := &waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(d.Id()), - } - - resp, err := conn.GetByteMatchSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("byte_match_tuple", flattenWafByteMatchTuplesWR(resp.ByteMatchSet.ByteMatchTuples)) - d.Set("name", resp.ByteMatchSet.Name) - - return nil -} - -func flattenWafByteMatchTuplesWR(in []*waf.ByteMatchTuple) []interface{} { - tuples := make([]interface{}, len(in), len(in)) - - for i, tuple := range in { - field_to_match := tuple.FieldToMatch - m := map[string]interface{}{ - "type": *field_to_match.Type, - } - - if field_to_match.Data == nil { - m["data"] = "" - } else { - m["data"] = *field_to_match.Data - } - - var ms []map[string]interface{} - ms = append(ms, m) - - tuple := map[string]interface{}{ - "field_to_match": ms, - "positional_constraint": *tuple.PositionalConstraint, - "target_string": 
tuple.TargetString, - "text_transformation": *tuple.TextTransformation, - } - tuples[i] = tuple - } - - return tuples -} - -func resourceAwsWafRegionalByteMatchSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - log.Printf("[INFO] Updating ByteMatchSet: %s", d.Get("name").(string)) - - if d.HasChange("byte_match_tuple") { - o, n := d.GetChange("byte_match_tuple") - oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateByteMatchSetResourceWR(d, oldT, newT, conn, region) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - } - return resourceAwsWafRegionalByteMatchSetRead(d, meta) -} - -func resourceAwsWafRegionalByteMatchSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - - log.Printf("[INFO] Deleting ByteMatchSet: %s", d.Get("name").(string)) - - oldT := d.Get("byte_match_tuple").(*schema.Set).List() - - if len(oldT) > 0 { - var newT []interface{} - - err := updateByteMatchSetResourceWR(d, oldT, newT, conn, region) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) - } - } - - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: aws.String(d.Id()), - } - return conn.DeleteByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) - } - - return nil -} - -func updateByteMatchSetResourceWR(d *schema.ResourceData, oldT, newT []interface{}, conn *wafregional.WAFRegional, region string) error { - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: token, - 
ByteMatchSetId: aws.String(d.Id()), - Updates: diffByteMatchSetTuple(oldT, newT), - } - - return conn.UpdateByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - - return nil -} - -func expandFieldToMatchWR(d map[string]interface{}) *waf.FieldToMatch { - return &waf.FieldToMatch{ - Type: aws.String(d["type"].(string)), - Data: aws.String(d["data"].(string)), - } -} - -func flattenFieldToMatchWR(fm *waf.FieldToMatch) map[string]interface{} { - m := make(map[string]interface{}) - m["data"] = *fm.Data - m["type"] = *fm.Type - return m -} - -func diffByteMatchSetTuple(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate { - updates := make([]*waf.ByteMatchSetUpdate, 0) - - for _, ot := range oldT { - tuple := ot.(map[string]interface{}) - - if idx, contains := sliceContainsMap(newT, tuple); contains { - newT = append(newT[:idx], newT[idx+1:]...) - continue - } - - updates = append(updates, &waf.ByteMatchSetUpdate{ - Action: aws.String(waf.ChangeActionDelete), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), - TargetString: []byte(tuple["target_string"].(string)), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - - for _, nt := range newT { - tuple := nt.(map[string]interface{}) - - updates = append(updates, &waf.ByteMatchSetUpdate{ - Action: aws.String(waf.ChangeActionInsert), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), - PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), - TargetString: []byte(tuple["target_string"].(string)), - TextTransformation: aws.String(tuple["text_transformation"].(string)), - }, - }) - } - return updates -} diff --git 
a/builtin/providers/aws/resource_aws_wafregional_byte_match_set_test.go b/builtin/providers/aws/resource_aws_wafregional_byte_match_set_test.go deleted file mode 100644 index 2600349ae..000000000 --- a/builtin/providers/aws/resource_aws_wafregional_byte_match_set_test.go +++ /dev/null @@ -1,437 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafRegionalByteMatchSet_basic(t *testing.T) { - var v waf.ByteMatchSet - byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalByteMatchSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafRegionalByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &v), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "name", byteMatchSet), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.#", "2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", 
"byte_match_tuple.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalByteMatchSet_changeNameForceNew(t *testing.T) { - var before, after waf.ByteMatchSet - byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - byteMatchSetNewName := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &before), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "name", byteMatchSet), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.#", "2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.data", "referer"), - 
resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.text_transformation", "NONE"), - ), - }, - { - Config: testAccAWSWafRegionalByteMatchSetConfigChangeName(byteMatchSetNewName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &after), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "name", byteMatchSetNewName), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.#", "2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", 
"byte_match_tuple.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.text_transformation", "NONE"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalByteMatchSet_changeByteMatchTuple(t *testing.T) { - var before, after waf.ByteMatchSet - byteMatchSetName := fmt.Sprintf("byte-batch-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalByteMatchSetConfig(byteMatchSetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &before), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr( - 
"aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.#", "2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.target_string", "badrefer1"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2174619346.text_transformation", "NONE"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.data", "referer"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.field_to_match.2991901334.type", "HEADER"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.positional_constraint", "CONTAINS"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.target_string", "badrefer2"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.839525137.text_transformation", "NONE"), - ), - }, - { - Config: testAccAWSWafRegionalByteMatchSetConfigChangeByteMatchTuple(byteMatchSetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &after), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.#", "2"), - 
resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2397850647.field_to_match.4253810390.data", "GET"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2397850647.field_to_match.4253810390.type", "METHOD"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2397850647.positional_constraint", "STARTS_WITH"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2397850647.target_string", "badrefer1+"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.2397850647.text_transformation", "LOWERCASE"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.4153613423.field_to_match.3756326843.data", ""), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.4153613423.field_to_match.3756326843.type", "URI"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.4153613423.positional_constraint", "ENDS_WITH"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.4153613423.target_string", "badrefer2+"), - resource.TestCheckResourceAttr( - "aws_wafregional_byte_match_set.byte_set", "byte_match_tuple.4153613423.text_transformation", "LOWERCASE"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalByteMatchSet_noByteMatchTuples(t *testing.T) { - var byteMatchSet waf.ByteMatchSet - byteMatchSetName := fmt.Sprintf("byte-batch-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalByteMatchSetConfig_noDescriptors(byteMatchSetName), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_match_set", &byteMatchSet), - resource.TestCheckResourceAttr("aws_wafregional_byte_match_set.byte_match_set", "name", byteMatchSetName), - resource.TestCheckResourceAttr("aws_wafregional_byte_match_set.byte_match_set", "byte_match_tuple.#", "0"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalByteMatchSet_disappears(t *testing.T) { - var v waf.ByteMatchSet - byteMatchSet := fmt.Sprintf("byteMatchSet-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalByteMatchSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalByteMatchSetConfig(byteMatchSet), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalByteMatchSetExists("aws_wafregional_byte_match_set.byte_set", &v), - testAccCheckAWSWafRegionalByteMatchSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckAWSWafRegionalByteMatchSetDisappears(v *waf.ByteMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - region := testAccProvider.Meta().(*AWSClient).region - - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: v.ByteMatchSetId, - } - - for _, ByteMatchTuple := range v.ByteMatchTuples { - ByteMatchUpdate := &waf.ByteMatchSetUpdate{ - Action: aws.String("DELETE"), - ByteMatchTuple: &waf.ByteMatchTuple{ - FieldToMatch: ByteMatchTuple.FieldToMatch, - PositionalConstraint: ByteMatchTuple.PositionalConstraint, - TargetString: ByteMatchTuple.TargetString, - TextTransformation: ByteMatchTuple.TextTransformation, - }, - } - req.Updates = append(req.Updates, 
ByteMatchUpdate) - } - - return conn.UpdateByteMatchSet(req) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteByteMatchSetInput{ - ChangeToken: token, - ByteMatchSetId: v.ByteMatchSetId, - } - return conn.DeleteByteMatchSet(opts) - }) - if err != nil { - return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) - } - - return nil - } -} - -func testAccCheckAWSWafRegionalByteMatchSetExists(n string, v *waf.ByteMatchSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF ByteMatchSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - resp, err := conn.GetByteMatchSet(&waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.ByteMatchSet.ByteMatchSetId == rs.Primary.ID { - *v = *resp.ByteMatchSet - return nil - } - - return fmt.Errorf("WAF ByteMatchSet (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSWafRegionalByteMatchSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_wafregional_byte_match_set" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - resp, err := conn.GetByteMatchSet( - &waf.GetByteMatchSetInput{ - ByteMatchSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.ByteMatchSet.ByteMatchSetId == rs.Primary.ID { - return fmt.Errorf("WAF ByteMatchSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the ByteMatchSet is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func 
testAccAWSWafRegionalByteMatchSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_wafregional_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuple { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } - - byte_match_tuple { - text_transformation = "NONE" - target_string = "badrefer2" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } -}`, name) -} - -func testAccAWSWafRegionalByteMatchSetConfigChangeName(name string) string { - return fmt.Sprintf(` -resource "aws_wafregional_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuple { - text_transformation = "NONE" - target_string = "badrefer1" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } - - byte_match_tuple { - text_transformation = "NONE" - target_string = "badrefer2" - positional_constraint = "CONTAINS" - field_to_match { - type = "HEADER" - data = "referer" - } - } -}`, name) -} - -func testAccAWSWafRegionalByteMatchSetConfig_noDescriptors(name string) string { - return fmt.Sprintf(` -resource "aws_wafregional_byte_match_set" "byte_match_set" { - name = "%s" -}`, name) -} - -func testAccAWSWafRegionalByteMatchSetConfigChangeByteMatchTuple(name string) string { - return fmt.Sprintf(` -resource "aws_wafregional_byte_match_set" "byte_set" { - name = "%s" - byte_match_tuple { - text_transformation = "LOWERCASE" - target_string = "badrefer1+" - positional_constraint = "STARTS_WITH" - field_to_match { - type = "METHOD" - data = "GET" - } - } - - byte_match_tuple { - text_transformation = "LOWERCASE" - target_string = "badrefer2+" - positional_constraint = "ENDS_WITH" - field_to_match { - type = "URI" - } - } -}`, name) -} diff --git a/builtin/providers/aws/resource_aws_wafregional_ipset.go b/builtin/providers/aws/resource_aws_wafregional_ipset.go deleted file mode 100644 
index 0507621ee..000000000 --- a/builtin/providers/aws/resource_aws_wafregional_ipset.go +++ /dev/null @@ -1,170 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsWafRegionalIPSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsWafRegionalIPSetCreate, - Read: resourceAwsWafRegionalIPSetRead, - Update: resourceAwsWafRegionalIPSetUpdate, - Delete: resourceAwsWafRegionalIPSetDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ip_set_descriptor": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceAwsWafRegionalIPSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - - wr := newWafRegionalRetryer(conn, region) - out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - params := &waf.CreateIPSetInput{ - ChangeToken: token, - Name: aws.String(d.Get("name").(string)), - } - return conn.CreateIPSet(params) - }) - if err != nil { - return err - } - resp := out.(*waf.CreateIPSetOutput) - d.SetId(*resp.IPSet.IPSetId) - return resourceAwsWafRegionalIPSetUpdate(d, meta) -} - -func resourceAwsWafRegionalIPSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - - params := &waf.GetIPSetInput{ - IPSetId: aws.String(d.Id()), - } - - resp, err := conn.GetIPSet(params) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok 
&& awsErr.Code() == "WAFNonexistentItemException" { - log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("ip_set_descriptor", flattenWafIpSetDescriptorWR(resp.IPSet.IPSetDescriptors)) - d.Set("name", resp.IPSet.Name) - - return nil -} - -func flattenWafIpSetDescriptorWR(in []*waf.IPSetDescriptor) []interface{} { - descriptors := make([]interface{}, len(in), len(in)) - - for i, descriptor := range in { - d := map[string]interface{}{ - "type": *descriptor.Type, - "value": *descriptor.Value, - } - descriptors[i] = d - } - - return descriptors -} - -func resourceAwsWafRegionalIPSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - - if d.HasChange("ip_set_descriptor") { - o, n := d.GetChange("ip_set_descriptor") - oldD, newD := o.(*schema.Set).List(), n.(*schema.Set).List() - - err := updateIPSetResourceWR(d.Id(), oldD, newD, conn, region) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - } - return resourceAwsWafRegionalIPSetRead(d, meta) -} - -func resourceAwsWafRegionalIPSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).wafregionalconn - region := meta.(*AWSClient).region - - oldD := d.Get("ip_set_descriptor").(*schema.Set).List() - - if len(oldD) > 0 { - noD := []interface{}{} - err := updateIPSetResourceWR(d.Id(), oldD, noD, conn, region) - - if err != nil { - return fmt.Errorf("Error Removing IPSetDescriptors: %s", err) - } - } - - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.DeleteIPSetInput{ - ChangeToken: token, - IPSetId: aws.String(d.Id()), - } - log.Printf("[INFO] Deleting WAF IPSet") - return conn.DeleteIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF IPSet: %s", err) - } - - return nil -} - -func 
updateIPSetResourceWR(id string, oldD, newD []interface{}, conn *wafregional.WAFRegional, region string) error { - - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateIPSetInput{ - ChangeToken: token, - IPSetId: aws.String(id), - Updates: diffWafIpSetDescriptors(oldD, newD), - } - log.Printf("[INFO] Updating IPSet descriptor: %s", req) - - return conn.UpdateIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - - return nil -} diff --git a/builtin/providers/aws/resource_aws_wafregional_ipset_test.go b/builtin/providers/aws/resource_aws_wafregional_ipset_test.go deleted file mode 100644 index 3aa251d43..000000000 --- a/builtin/providers/aws/resource_aws_wafregional_ipset_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/terraform/helper/acctest" -) - -func TestAccAWSWafRegionalIPSet_basic(t *testing.T) { - var v waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalIPSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSWafRegionalIPSetConfig(ipsetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &v), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - 
"aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.value", "192.0.7.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalIPSet_disappears(t *testing.T) { - var v waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalIPSetConfig(ipsetName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &v), - testAccCheckAWSWafRegionalIPSetDisappears(&v), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSWafRegionalIPSet_changeNameForceNew(t *testing.T) { - var before, after waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - ipsetNewName := fmt.Sprintf("ip-set-new-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalIPSetConfig(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &before), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.value", "192.0.7.0/24"), - ), - }, - { - Config: testAccAWSWafRegionalIPSetConfigChangeName(ipsetNewName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &after), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetNewName), - 
resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.value", "192.0.7.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalIPSet_changeDescriptors(t *testing.T) { - var before, after waf.IPSet - ipsetName := fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalIPSetConfig(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &before), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.#", "1"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.4037960608.value", "192.0.7.0/24"), - ), - }, - { - Config: testAccAWSWafRegionalIPSetConfigChangeIPSetDescriptors(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &after), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.#", "1"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.115741513.type", "IPV4"), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.115741513.value", "192.0.8.0/24"), - ), - }, - }, - }) -} - -func TestAccAWSWafRegionalIPSet_noDescriptors(t *testing.T) { - var ipset waf.IPSet - ipsetName := 
fmt.Sprintf("ip-set-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSWafRegionalIPSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSWafRegionalIPSetConfig_noDescriptors(ipsetName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSWafRegionalIPSetExists("aws_wafregional_ipset.ipset", &ipset), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "name", ipsetName), - resource.TestCheckResourceAttr( - "aws_wafregional_ipset.ipset", "ip_set_descriptor.#", "0"), - ), - }, - }, - }) -} - -func TestDiffWafRegionalIpSetDescriptors(t *testing.T) { - testCases := []struct { - Old []interface{} - New []interface{} - ExpectedUpdates []*waf.IPSetUpdate - }{ - { - // Change - Old: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.7.0/24"}, - }, - New: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.8.0/24"}, - }, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.7.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.8.0/24"), - }, - }, - }, - }, - { - // Fresh IPSet - Old: []interface{}{}, - New: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "10.0.1.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "10.0.2.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "10.0.3.0/24"}, - }, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("10.0.1.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: 
aws.String(wafregional.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("10.0.2.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionInsert), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("10.0.3.0/24"), - }, - }, - }, - }, - { - // Deletion - Old: []interface{}{ - map[string]interface{}{"type": "IPV4", "value": "192.0.7.0/24"}, - map[string]interface{}{"type": "IPV4", "value": "192.0.8.0/24"}, - }, - New: []interface{}{}, - ExpectedUpdates: []*waf.IPSetUpdate{ - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.7.0/24"), - }, - }, - &waf.IPSetUpdate{ - Action: aws.String(wafregional.ChangeActionDelete), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: aws.String("IPV4"), - Value: aws.String("192.0.8.0/24"), - }, - }, - }, - }, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - updates := diffWafIpSetDescriptors(tc.Old, tc.New) - if !reflect.DeepEqual(updates, tc.ExpectedUpdates) { - t.Fatalf("IPSet updates don't match.\nGiven: %s\nExpected: %s", - updates, tc.ExpectedUpdates) - } - }) - } -} - -func testAccCheckAWSWafRegionalIPSetDisappears(v *waf.IPSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - region := testAccProvider.Meta().(*AWSClient).region - - wr := newWafRegionalRetryer(conn, region) - _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { - req := &waf.UpdateIPSetInput{ - ChangeToken: token, - IPSetId: v.IPSetId, - } - - for _, IPSetDescriptor := range v.IPSetDescriptors { - IPSetUpdate := &waf.IPSetUpdate{ - Action: aws.String("DELETE"), - IPSetDescriptor: &waf.IPSetDescriptor{ - Type: IPSetDescriptor.Type, - Value: IPSetDescriptor.Value, - }, - } - req.Updates = 
append(req.Updates, IPSetUpdate) - } - - return conn.UpdateIPSet(req) - }) - if err != nil { - return fmt.Errorf("Error Updating WAF IPSet: %s", err) - } - - _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { - opts := &waf.DeleteIPSetInput{ - ChangeToken: token, - IPSetId: v.IPSetId, - } - return conn.DeleteIPSet(opts) - }) - if err != nil { - return fmt.Errorf("Error Deleting WAF IPSet: %s", err) - } - return nil - } -} - -func testAccCheckAWSWafRegionalIPSetDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_wafregional_ipset" { - continue - } - - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - resp, err := conn.GetIPSet( - &waf.GetIPSetInput{ - IPSetId: aws.String(rs.Primary.ID), - }) - - if err == nil { - if *resp.IPSet.IPSetId == rs.Primary.ID { - return fmt.Errorf("WAF IPSet %s still exists", rs.Primary.ID) - } - } - - // Return nil if the IPSet is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "WAFNonexistentItemException" { - return nil - } - } - - return err - } - - return nil -} - -func testAccCheckAWSWafRegionalIPSetExists(n string, v *waf.IPSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No WAF IPSet ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).wafregionalconn - resp, err := conn.GetIPSet(&waf.GetIPSetInput{ - IPSetId: aws.String(rs.Primary.ID), - }) - - if err != nil { - return err - } - - if *resp.IPSet.IPSetId == rs.Primary.ID { - *v = *resp.IPSet - return nil - } - - return fmt.Errorf("WAF IPSet (%s) not found", rs.Primary.ID) - } -} - -func testAccAWSWafRegionalIPSetConfig(name string) string { - return fmt.Sprintf(` -resource "aws_wafregional_ipset" "ipset" { - name = "%s" - ip_set_descriptor { - type = "IPV4" - value = "192.0.7.0/24" - } 
-}`, name) -} - -func testAccAWSWafRegionalIPSetConfigChangeName(name string) string { - return fmt.Sprintf(`resource "aws_wafregional_ipset" "ipset" { - name = "%s" - ip_set_descriptor { - type = "IPV4" - value = "192.0.7.0/24" - } -}`, name) -} - -func testAccAWSWafRegionalIPSetConfigChangeIPSetDescriptors(name string) string { - return fmt.Sprintf(`resource "aws_wafregional_ipset" "ipset" { - name = "%s" - ip_set_descriptor { - type = "IPV4" - value = "192.0.8.0/24" - } -}`, name) -} - -func testAccAWSWafRegionalIPSetConfig_noDescriptors(name string) string { - return fmt.Sprintf(`resource "aws_wafregional_ipset" "ipset" { - name = "%s" - }`, name) -} diff --git a/builtin/providers/aws/resource_vpn_connection_route.go b/builtin/providers/aws/resource_vpn_connection_route.go deleted file mode 100644 index e6863f721..000000000 --- a/builtin/providers/aws/resource_vpn_connection_route.go +++ /dev/null @@ -1,140 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAwsVpnConnectionRoute() *schema.Resource { - return &schema.Resource{ - // You can't update a route. You can just delete one and make - // a new one. 
- Create: resourceAwsVpnConnectionRouteCreate, - Read: resourceAwsVpnConnectionRouteRead, - Delete: resourceAwsVpnConnectionRouteDelete, - - Schema: map[string]*schema.Schema{ - "destination_cidr_block": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vpn_connection_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAwsVpnConnectionRouteCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - createOpts := &ec2.CreateVpnConnectionRouteInput{ - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - VpnConnectionId: aws.String(d.Get("vpn_connection_id").(string)), - } - - // Create the route. - log.Printf("[DEBUG] Creating VPN connection route") - _, err := conn.CreateVpnConnectionRoute(createOpts) - if err != nil { - return fmt.Errorf("Error creating VPN connection route: %s", err) - } - - // Store the ID by the only two data we have available to us. - d.SetId(fmt.Sprintf("%s:%s", *createOpts.DestinationCidrBlock, *createOpts.VpnConnectionId)) - - return resourceAwsVpnConnectionRouteRead(d, meta) -} - -func resourceAwsVpnConnectionRouteRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - cidrBlock, vpnConnectionId := resourceAwsVpnConnectionRouteParseId(d.Id()) - - routeFilters := []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("route.destination-cidr-block"), - Values: []*string{aws.String(cidrBlock)}, - }, - &ec2.Filter{ - Name: aws.String("vpn-connection-id"), - Values: []*string{aws.String(vpnConnectionId)}, - }, - } - - // Technically, we know everything there is to know about the route - // from its ID, but we still want to catch cases where it changes - // outside of terraform and results in a stale state file. Hence, - // conduct a read. 
- resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - Filters: routeFilters, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error finding VPN connection route: %s", err) - return err - } - } - if resp == nil || len(resp.VpnConnections) == 0 { - // This is kind of a weird edge case. I'd rather return an error - // instead of just blindly setting the ID to ""... since I don't know - // what might cause this. - return fmt.Errorf("No VPN connections returned") - } - - vpnConnection := resp.VpnConnections[0] - - var found bool - for _, r := range vpnConnection.Routes { - if *r.DestinationCidrBlock == cidrBlock { - d.Set("destination_cidr_block", *r.DestinationCidrBlock) - d.Set("vpn_connection_id", *vpnConnection.VpnConnectionId) - found = true - } - } - if !found { - // Something other than terraform eliminated the route. - d.SetId("") - } - - return nil -} - -func resourceAwsVpnConnectionRouteDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteVpnConnectionRoute(&ec2.DeleteVpnConnectionRouteInput{ - DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), - VpnConnectionId: aws.String(d.Get("vpn_connection_id").(string)), - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - d.SetId("") - return nil - } else { - log.Printf("[ERROR] Error deleting VPN connection route: %s", err) - return err - } - } - - return nil -} - -func resourceAwsVpnConnectionRouteParseId(id string) (string, string) { - parts := strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} diff --git a/builtin/providers/aws/resource_vpn_connection_route_test.go b/builtin/providers/aws/resource_vpn_connection_route_test.go deleted file mode 100644 index 23229b0f9..000000000 --- 
a/builtin/providers/aws/resource_vpn_connection_route_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/ec2" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAWSVpnConnectionRoute_basic(t *testing.T) { - rBgpAsn := acctest.RandIntRange(64512, 65534) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAwsVpnConnectionRouteDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAwsVpnConnectionRouteConfig(rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnectionRoute( - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.vpn_connection", - "aws_vpn_connection_route.foo", - ), - ), - }, - resource.TestStep{ - Config: testAccAwsVpnConnectionRouteConfigUpdate(rBgpAsn), - Check: resource.ComposeTestCheckFunc( - testAccAwsVpnConnectionRoute( - "aws_vpn_gateway.vpn_gateway", - "aws_customer_gateway.customer_gateway", - "aws_vpn_connection.vpn_connection", - "aws_vpn_connection_route.foo", - ), - ), - }, - }, - }) -} - -func testAccAwsVpnConnectionRouteDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_vpn_connection_route" { - continue - } - - cidrBlock, vpnConnectionId := resourceAwsVpnConnectionRouteParseId(rs.Primary.ID) - - routeFilters := []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("route.destination-cidr-block"), - Values: []*string{aws.String(cidrBlock)}, - }, - &ec2.Filter{ - Name: aws.String("vpn-connection-id"), - Values: []*string{aws.String(vpnConnectionId)}, - }, - } - - resp, err := 
conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - Filters: routeFilters, - }) - if err != nil { - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { - // not found, all good - return nil - } - return err - } - - var vpnc *ec2.VpnConnection - if resp != nil { - // range over the connections and isolate the one we created - for _, v := range resp.VpnConnections { - if *v.VpnConnectionId == vpnConnectionId { - vpnc = v - } - } - - if vpnc == nil { - // vpn connection not found, so that's good... - return nil - } - - if vpnc.State != nil && *vpnc.State == "deleted" { - return nil - } - } - - } - return fmt.Errorf("Fall through error, Check Destroy criteria not met") -} - -func testAccAwsVpnConnectionRoute( - vpnGatewayResource string, - customerGatewayResource string, - vpnConnectionResource string, - vpnConnectionRouteResource string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[vpnConnectionRouteResource] - if !ok { - return fmt.Errorf("Not found: %s", vpnConnectionRouteResource) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - route, ok := s.RootModule().Resources[vpnConnectionRouteResource] - if !ok { - return fmt.Errorf("Not found: %s", vpnConnectionRouteResource) - } - - cidrBlock, vpnConnectionId := resourceAwsVpnConnectionRouteParseId(route.Primary.ID) - - routeFilters := []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("route.destination-cidr-block"), - Values: []*string{aws.String(cidrBlock)}, - }, - &ec2.Filter{ - Name: aws.String("vpn-connection-id"), - Values: []*string{aws.String(vpnConnectionId)}, - }, - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - _, err := ec2conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - Filters: routeFilters, - }) - if err != nil { - return err - } - - return nil - } -} - -func testAccAwsVpnConnectionRouteConfig(rBgpAsn int) string { - return fmt.Sprintf(` 
- resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } - - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "182.0.0.1" - type = "ipsec.1" - } - - resource "aws_vpn_connection" "vpn_connection" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = true - } - - resource "aws_vpn_connection_route" "foo" { - destination_cidr_block = "172.168.10.0/24" - vpn_connection_id = "${aws_vpn_connection.vpn_connection.id}" - } - `, rBgpAsn) -} - -// Change destination_cidr_block -func testAccAwsVpnConnectionRouteConfigUpdate(rBgpAsn int) string { - return fmt.Sprintf(` - resource "aws_vpn_gateway" "vpn_gateway" { - tags { - Name = "vpn_gateway" - } - } - - resource "aws_customer_gateway" "customer_gateway" { - bgp_asn = %d - ip_address = "182.0.0.1" - type = "ipsec.1" - } - - resource "aws_vpn_connection" "vpn_connection" { - vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}" - customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}" - type = "ipsec.1" - static_routes_only = true - } - - resource "aws_vpn_connection_route" "foo" { - destination_cidr_block = "172.168.20.0/24" - vpn_connection_id = "${aws_vpn_connection.vpn_connection.id}" - } - `, rBgpAsn) -} diff --git a/builtin/providers/aws/s3_tags.go b/builtin/providers/aws/s3_tags.go deleted file mode 100644 index f691cff46..000000000 --- a/builtin/providers/aws/s3_tags.go +++ /dev/null @@ -1,133 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsS3(conn *s3.S3, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsS3(tagsFromMapS3(o), tagsFromMapS3(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - _, err := conn.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{ - Bucket: aws.String(d.Get("bucket").(string)), - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - req := &s3.PutBucketTaggingInput{ - Bucket: aws.String(d.Get("bucket").(string)), - Tagging: &s3.Tagging{ - TagSet: create, - }, - } - - _, err := conn.PutBucketTagging(req) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsS3(oldTags, newTags []*s3.Tag) ([]*s3.Tag, []*s3.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*s3.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapS3(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapS3(m map[string]interface{}) []*s3.Tag { - result := make([]*s3.Tag, 0, len(m)) - for k, v := range m { - t := &s3.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredS3(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. 
-func tagsToMapS3(ts []*s3.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredS3(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// return a slice of s3 tags associated with the given s3 bucket. Essentially -// s3.GetBucketTagging, except returns an empty slice instead of an error when -// there are no tags. -func getTagSetS3(s3conn *s3.S3, bucket string) ([]*s3.Tag, error) { - request := &s3.GetBucketTaggingInput{ - Bucket: aws.String(bucket), - } - - response, err := s3conn.GetBucketTagging(request) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" { - // There is no tag set associated with the bucket. - return []*s3.Tag{}, nil - } else if err != nil { - return nil, err - } - - return response.TagSet, nil -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredS3(t *s3.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/s3_tags_test.go b/builtin/providers/aws/s3_tags_test.go deleted file mode 100644 index 42d5b605d..000000000 --- a/builtin/providers/aws/s3_tags_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" -) - -func TestDiffTagsS3(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: 
map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsS3(tagsFromMapS3(tc.Old), tagsFromMapS3(tc.New)) - cm := tagsToMapS3(c) - rm := tagsToMapS3(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsS3(t *testing.T) { - var ignoredTags []*s3.Tag - ignoredTags = append(ignoredTags, &s3.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &s3.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredS3(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} diff --git a/builtin/providers/aws/sort.go b/builtin/providers/aws/sort.go deleted file mode 100644 index 0d90458eb..000000000 --- a/builtin/providers/aws/sort.go +++ /dev/null @@ -1,53 +0,0 @@ -package aws - -import ( - "sort" - "time" - - "github.com/aws/aws-sdk-go/service/ec2" -) - -type imageSort []*ec2.Image -type snapshotSort []*ec2.Snapshot - -func (a imageSort) Len() int { - return len(a) -} - -func (a imageSort) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a imageSort) Less(i, j int) bool { - itime, _ := time.Parse(time.RFC3339, *a[i].CreationDate) - jtime, _ := time.Parse(time.RFC3339, *a[j].CreationDate) - return itime.Unix() < jtime.Unix() -} - -// Sort images by creation date, in descending order. 
-func sortImages(images []*ec2.Image) []*ec2.Image { - sortedImages := images - sort.Sort(sort.Reverse(imageSort(sortedImages))) - return sortedImages -} - -func (a snapshotSort) Len() int { - return len(a) -} - -func (a snapshotSort) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a snapshotSort) Less(i, j int) bool { - itime := *a[i].StartTime - jtime := *a[j].StartTime - return itime.Unix() < jtime.Unix() -} - -// Sort snapshots by creation date, in descending order. -func sortSnapshots(snapshots []*ec2.Snapshot) []*ec2.Snapshot { - sortedSnapshots := snapshots - sort.Sort(sort.Reverse(snapshotSort(sortedSnapshots))) - return sortedSnapshots -} diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go deleted file mode 100644 index 262dccfdc..000000000 --- a/builtin/providers/aws/structure.go +++ /dev/null @@ -1,2127 +0,0 @@ -package aws - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/aws/aws-sdk-go/service/cognitoidentity" - "github.com/aws/aws-sdk-go/service/configservice" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/aws/aws-sdk-go/service/ssm" - 
"github.com/hashicorp/terraform/helper/schema" - "gopkg.in/yaml.v2" -) - -// Takes the result of flatmap.Expand for an array of listeners and -// returns ELB API compatible objects -func expandListeners(configured []interface{}) ([]*elb.Listener, error) { - listeners := make([]*elb.Listener, 0, len(configured)) - - // Loop over our configured listeners and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - ip := int64(data["instance_port"].(int)) - lp := int64(data["lb_port"].(int)) - l := &elb.Listener{ - InstancePort: &ip, - InstanceProtocol: aws.String(data["instance_protocol"].(string)), - LoadBalancerPort: &lp, - Protocol: aws.String(data["lb_protocol"].(string)), - } - - if v, ok := data["ssl_certificate_id"]; ok { - l.SSLCertificateId = aws.String(v.(string)) - } - - var valid bool - if l.SSLCertificateId != nil && *l.SSLCertificateId != "" { - // validate the protocol is correct - for _, p := range []string{"https", "ssl"} { - if (strings.ToLower(*l.InstanceProtocol) == p) || (strings.ToLower(*l.Protocol) == p) { - valid = true - } - } - } else { - valid = true - } - - if valid { - listeners = append(listeners, l) - } else { - return nil, fmt.Errorf("[ERR] ELB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'") - } - } - - return listeners, nil -} - -// Takes the result of flatmap. 
Expand for an array of listeners and -// returns ECS Volume compatible objects -func expandEcsVolumes(configured []interface{}) ([]*ecs.Volume, error) { - volumes := make([]*ecs.Volume, 0, len(configured)) - - // Loop over our configured volumes and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - l := &ecs.Volume{ - Name: aws.String(data["name"].(string)), - } - - hostPath := data["host_path"].(string) - if hostPath != "" { - l.Host = &ecs.HostVolumeProperties{ - SourcePath: aws.String(hostPath), - } - } - - volumes = append(volumes, l) - } - - return volumes, nil -} - -// Takes JSON in a string. Decodes JSON into -// an array of ecs.ContainerDefinition compatible objects -func expandEcsContainerDefinitions(rawDefinitions string) ([]*ecs.ContainerDefinition, error) { - var definitions []*ecs.ContainerDefinition - - err := json.Unmarshal([]byte(rawDefinitions), &definitions) - if err != nil { - return nil, fmt.Errorf("Error decoding JSON: %s", err) - } - - return definitions, nil -} - -// Takes the result of flatmap. 
Expand for an array of load balancers and -// returns ecs.LoadBalancer compatible objects -func expandEcsLoadBalancers(configured []interface{}) []*ecs.LoadBalancer { - loadBalancers := make([]*ecs.LoadBalancer, 0, len(configured)) - - // Loop over our configured load balancers and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - l := &ecs.LoadBalancer{ - ContainerName: aws.String(data["container_name"].(string)), - ContainerPort: aws.Int64(int64(data["container_port"].(int))), - } - - if v, ok := data["elb_name"]; ok && v.(string) != "" { - l.LoadBalancerName = aws.String(v.(string)) - } - if v, ok := data["target_group_arn"]; ok && v.(string) != "" { - l.TargetGroupArn = aws.String(v.(string)) - } - - loadBalancers = append(loadBalancers, l) - } - - return loadBalancers -} - -// Takes the result of flatmap.Expand for an array of ingress/egress security -// group rules and returns EC2 API compatible objects. This function will error -// if it finds invalid permissions input, namely a protocol of "-1" with either -// to_port or from_port set to a non-zero value. -func expandIPPerms( - group *ec2.SecurityGroup, configured []interface{}) ([]*ec2.IpPermission, error) { - vpc := group.VpcId != nil && *group.VpcId != "" - - perms := make([]*ec2.IpPermission, len(configured)) - for i, mRaw := range configured { - var perm ec2.IpPermission - m := mRaw.(map[string]interface{}) - - perm.FromPort = aws.Int64(int64(m["from_port"].(int))) - perm.ToPort = aws.Int64(int64(m["to_port"].(int))) - perm.IpProtocol = aws.String(m["protocol"].(string)) - - // When protocol is "-1", AWS won't store any ports for the - // rule, but also won't error if the user specifies ports other - // than '0'. Force the user to make a deliberate '0' port - // choice when specifying a "-1" protocol, and tell them about - // AWS's behavior in the error message. 
- if *perm.IpProtocol == "-1" && (*perm.FromPort != 0 || *perm.ToPort != 0) { - return nil, fmt.Errorf( - "from_port (%d) and to_port (%d) must both be 0 to use the 'ALL' \"-1\" protocol!", - *perm.FromPort, *perm.ToPort) - } - - var groups []string - if raw, ok := m["security_groups"]; ok { - list := raw.(*schema.Set).List() - for _, v := range list { - groups = append(groups, v.(string)) - } - } - if v, ok := m["self"]; ok && v.(bool) { - if vpc { - groups = append(groups, *group.GroupId) - } else { - groups = append(groups, *group.GroupName) - } - } - - if len(groups) > 0 { - perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) - for i, name := range groups { - ownerId, id := "", name - if items := strings.Split(id, "/"); len(items) > 1 { - ownerId, id = items[0], items[1] - } - - perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ - GroupId: aws.String(id), - } - - if ownerId != "" { - perm.UserIdGroupPairs[i].UserId = aws.String(ownerId) - } - - if !vpc { - perm.UserIdGroupPairs[i].GroupId = nil - perm.UserIdGroupPairs[i].GroupName = aws.String(id) - } - } - } - - if raw, ok := m["cidr_blocks"]; ok { - list := raw.([]interface{}) - for _, v := range list { - perm.IpRanges = append(perm.IpRanges, &ec2.IpRange{CidrIp: aws.String(v.(string))}) - } - } - if raw, ok := m["ipv6_cidr_blocks"]; ok { - list := raw.([]interface{}) - for _, v := range list { - perm.Ipv6Ranges = append(perm.Ipv6Ranges, &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))}) - } - } - - if raw, ok := m["prefix_list_ids"]; ok { - list := raw.([]interface{}) - for _, v := range list { - perm.PrefixListIds = append(perm.PrefixListIds, &ec2.PrefixListId{PrefixListId: aws.String(v.(string))}) - } - } - - perms[i] = &perm - } - - return perms, nil -} - -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func expandParameters(configured []interface{}) ([]*rds.Parameter, error) { - var parameters []*rds.Parameter - - // Loop over 
our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - if data["name"].(string) == "" { - continue - } - - p := &rds.Parameter{ - ApplyMethod: aws.String(data["apply_method"].(string)), - ParameterName: aws.String(data["name"].(string)), - ParameterValue: aws.String(data["value"].(string)), - } - - parameters = append(parameters, p) - } - - return parameters, nil -} - -func expandRedshiftParameters(configured []interface{}) ([]*redshift.Parameter, error) { - var parameters []*redshift.Parameter - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - if data["name"].(string) == "" { - continue - } - - p := &redshift.Parameter{ - ParameterName: aws.String(data["name"].(string)), - ParameterValue: aws.String(data["value"].(string)), - } - - parameters = append(parameters, p) - } - - return parameters, nil -} - -func expandOptionConfiguration(configured []interface{}) ([]*rds.OptionConfiguration, error) { - var option []*rds.OptionConfiguration - - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - o := &rds.OptionConfiguration{ - OptionName: aws.String(data["option_name"].(string)), - } - - if raw, ok := data["port"]; ok { - port := raw.(int) - if port != 0 { - o.Port = aws.Int64(int64(port)) - } - } - - if raw, ok := data["db_security_group_memberships"]; ok { - memberships := expandStringList(raw.(*schema.Set).List()) - if len(memberships) > 0 { - o.DBSecurityGroupMemberships = memberships - } - } - - if raw, ok := data["vpc_security_group_memberships"]; ok { - memberships := expandStringList(raw.(*schema.Set).List()) - if len(memberships) > 0 { - o.VpcSecurityGroupMemberships = memberships - } - } - - if raw, ok := data["option_settings"]; ok { - o.OptionSettings = 
expandOptionSetting(raw.(*schema.Set).List()) - } - - option = append(option, o) - } - - return option, nil -} - -func expandOptionSetting(list []interface{}) []*rds.OptionSetting { - options := make([]*rds.OptionSetting, 0, len(list)) - - for _, oRaw := range list { - data := oRaw.(map[string]interface{}) - - o := &rds.OptionSetting{ - Name: aws.String(data["name"].(string)), - Value: aws.String(data["value"].(string)), - } - - options = append(options, o) - } - - return options -} - -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.ParameterNameValue, error) { - parameters := make([]*elasticache.ParameterNameValue, 0, len(configured)) - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - p := &elasticache.ParameterNameValue{ - ParameterName: aws.String(data["name"].(string)), - ParameterValue: aws.String(data["value"].(string)), - } - - parameters = append(parameters, p) - } - - return parameters, nil -} - -// Flattens an access log into something that flatmap.Flatten() can handle -func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if l == nil { - return nil - } - - r := make(map[string]interface{}) - if l.S3BucketName != nil { - r["bucket"] = *l.S3BucketName - } - - if l.S3BucketPrefix != nil { - r["bucket_prefix"] = *l.S3BucketPrefix - } - - if l.EmitInterval != nil { - r["interval"] = *l.EmitInterval - } - - if l.Enabled != nil { - r["enabled"] = *l.Enabled - } - - result = append(result, r) - - return result -} - -// Takes the result of flatmap.Expand for an array of step adjustments and -// returns a []*autoscaling.StepAdjustment. 
-func expandStepAdjustments(configured []interface{}) ([]*autoscaling.StepAdjustment, error) { - var adjustments []*autoscaling.StepAdjustment - - // Loop over our configured step adjustments and create an array - // of aws-sdk-go compatible objects. We're forced to convert strings - // to floats here because there's no way to detect whether or not - // an uninitialized, optional schema element is "0.0" deliberately. - // With strings, we can test for "", which is definitely an empty - // struct value. - for _, raw := range configured { - data := raw.(map[string]interface{}) - a := &autoscaling.StepAdjustment{ - ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), - } - if data["metric_interval_lower_bound"] != "" { - bound := data["metric_interval_lower_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_lower_bound must be a float value represented as a string") - } - a.MetricIntervalLowerBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_lower_bound isn't a string. This is a bug. Please file an issue.") - } - } - if data["metric_interval_upper_bound"] != "" { - bound := data["metric_interval_upper_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_upper_bound must be a float value represented as a string") - } - a.MetricIntervalUpperBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_upper_bound isn't a string. This is a bug. 
Please file an issue.") - } - } - adjustments = append(adjustments, a) - } - - return adjustments, nil -} - -// Flattens a health check into something that flatmap.Flatten() -// can handle -func flattenHealthCheck(check *elb.HealthCheck) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - chk := make(map[string]interface{}) - chk["unhealthy_threshold"] = *check.UnhealthyThreshold - chk["healthy_threshold"] = *check.HealthyThreshold - chk["target"] = *check.Target - chk["timeout"] = *check.Timeout - chk["interval"] = *check.Interval - - result = append(result, chk) - - return result -} - -// Flattens an array of UserSecurityGroups into a []*ec2.GroupIdentifier -func flattenSecurityGroups(list []*ec2.UserIdGroupPair, ownerId *string) []*ec2.GroupIdentifier { - result := make([]*ec2.GroupIdentifier, 0, len(list)) - for _, g := range list { - var userId *string - if g.UserId != nil && *g.UserId != "" && (ownerId == nil || *ownerId != *g.UserId) { - userId = g.UserId - } - // userid nil here for same vpc groups - - vpc := g.GroupName == nil || *g.GroupName == "" - var id *string - if vpc { - id = g.GroupId - } else { - id = g.GroupName - } - - // id is groupid for vpcs - // id is groupname for non vpc (classic) - - if userId != nil { - id = aws.String(*userId + "/" + *id) - } - - if vpc { - result = append(result, &ec2.GroupIdentifier{ - GroupId: id, - }) - } else { - result = append(result, &ec2.GroupIdentifier{ - GroupId: g.GroupId, - GroupName: id, - }) - } - } - return result -} - -// Flattens an array of Instances into a []string -func flattenInstances(list []*elb.Instance) []string { - result := make([]string, 0, len(list)) - for _, i := range list { - result = append(result, *i.InstanceId) - } - return result -} - -// Expands an array of String Instance IDs into a []Instances -func expandInstanceString(list []interface{}) []*elb.Instance { - result := make([]*elb.Instance, 0, len(list)) - for _, i := range list { - result = 
append(result, &elb.Instance{InstanceId: aws.String(i.(string))}) - } - return result -} - -// Flattens an array of Backend Descriptions into a a map of instance_port to policy names. -func flattenBackendPolicies(backends []*elb.BackendServerDescription) map[int64][]string { - policies := make(map[int64][]string) - for _, i := range backends { - for _, p := range i.PolicyNames { - policies[*i.InstancePort] = append(policies[*i.InstancePort], *p) - } - sort.Strings(policies[*i.InstancePort]) - } - return policies -} - -// Flattens an array of Listeners into a []map[string]interface{} -func flattenListeners(list []*elb.ListenerDescription) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "instance_port": *i.Listener.InstancePort, - "instance_protocol": strings.ToLower(*i.Listener.InstanceProtocol), - "lb_port": *i.Listener.LoadBalancerPort, - "lb_protocol": strings.ToLower(*i.Listener.Protocol), - } - // SSLCertificateID is optional, and may be nil - if i.Listener.SSLCertificateId != nil { - l["ssl_certificate_id"] = *i.Listener.SSLCertificateId - } - result = append(result, l) - } - return result -} - -// Flattens an array of Volumes into a []map[string]interface{} -func flattenEcsVolumes(list []*ecs.Volume) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, volume := range list { - l := map[string]interface{}{ - "name": *volume.Name, - } - - if volume.Host.SourcePath != nil { - l["host_path"] = *volume.Host.SourcePath - } - - result = append(result, l) - } - return result -} - -// Flattens an array of ECS LoadBalancers into a []map[string]interface{} -func flattenEcsLoadBalancers(list []*ecs.LoadBalancer) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, loadBalancer := range list { - l := map[string]interface{}{ - "container_name": *loadBalancer.ContainerName, - "container_port": 
*loadBalancer.ContainerPort, - } - - if loadBalancer.LoadBalancerName != nil { - l["elb_name"] = *loadBalancer.LoadBalancerName - } - - if loadBalancer.TargetGroupArn != nil { - l["target_group_arn"] = *loadBalancer.TargetGroupArn - } - - result = append(result, l) - } - return result -} - -// Encodes an array of ecs.ContainerDefinitions into a JSON string -func flattenEcsContainerDefinitions(definitions []*ecs.ContainerDefinition) (string, error) { - byteArray, err := json.Marshal(definitions) - if err != nil { - return "", fmt.Errorf("Error encoding to JSON: %s", err) - } - - n := bytes.Index(byteArray, []byte{0}) - return string(byteArray[:n]), nil -} - -// Flattens an array of Options into a []map[string]interface{} -func flattenOptions(list []*rds.Option) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.OptionName != nil { - r := make(map[string]interface{}) - r["option_name"] = strings.ToLower(*i.OptionName) - // Default empty string, guard against nil parameter values - r["port"] = "" - if i.Port != nil { - r["port"] = int(*i.Port) - } - if i.VpcSecurityGroupMemberships != nil { - vpcs := make([]string, 0, len(i.VpcSecurityGroupMemberships)) - for _, vpc := range i.VpcSecurityGroupMemberships { - id := vpc.VpcSecurityGroupId - vpcs = append(vpcs, *id) - } - - r["vpc_security_group_memberships"] = vpcs - } - if i.DBSecurityGroupMemberships != nil { - dbs := make([]string, 0, len(i.DBSecurityGroupMemberships)) - for _, db := range i.DBSecurityGroupMemberships { - id := db.DBSecurityGroupName - dbs = append(dbs, *id) - } - - r["db_security_group_memberships"] = dbs - } - if i.OptionSettings != nil { - settings := make([]map[string]interface{}, 0, len(i.OptionSettings)) - for _, j := range i.OptionSettings { - setting := map[string]interface{}{ - "name": *j.Name, - } - if j.Value != nil { - setting["value"] = *j.Value - } - - settings = append(settings, setting) - } - - r["option_settings"] = 
settings - } - result = append(result, r) - } - } - return result -} - -// Flattens an array of Parameters into a []map[string]interface{} -func flattenParameters(list []*rds.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.ParameterName != nil { - r := make(map[string]interface{}) - r["name"] = strings.ToLower(*i.ParameterName) - // Default empty string, guard against nil parameter values - r["value"] = "" - if i.ParameterValue != nil { - r["value"] = strings.ToLower(*i.ParameterValue) - } - if i.ApplyMethod != nil { - r["apply_method"] = strings.ToLower(*i.ApplyMethod) - } - - result = append(result, r) - } - } - return result -} - -// Flattens an array of Redshift Parameters into a []map[string]interface{} -func flattenRedshiftParameters(list []*redshift.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - result = append(result, map[string]interface{}{ - "name": strings.ToLower(*i.ParameterName), - "value": strings.ToLower(*i.ParameterValue), - }) - } - return result -} - -// Flattens an array of Parameters into a []map[string]interface{} -func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.ParameterValue != nil { - result = append(result, map[string]interface{}{ - "name": strings.ToLower(*i.ParameterName), - "value": *i.ParameterValue, - }) - } - } - return result -} - -// Takes the result of flatmap.Expand for an array of strings -// and returns a []*string -func expandStringList(configured []interface{}) []*string { - vs := make([]*string, 0, len(configured)) - for _, v := range configured { - val, ok := v.(string) - if ok && val != "" { - vs = append(vs, aws.String(v.(string))) - } - } - return vs -} - -// Takes the result of schema.Set of strings and returns a []*string -func 
expandStringSet(configured *schema.Set) []*string { - return expandStringList(configured.List()) -} - -// Takes list of pointers to strings. Expand to an array -// of raw strings and returns a []interface{} -// to keep compatibility w/ schema.NewSetschema.NewSet -func flattenStringList(list []*string) []interface{} { - vs := make([]interface{}, 0, len(list)) - for _, v := range list { - vs = append(vs, *v) - } - return vs -} - -//Flattens an array of private ip addresses into a []string, where the elements returned are the IP strings e.g. "192.168.0.0" -func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string { - ips := make([]string, 0, len(dtos)) - for _, v := range dtos { - ip := *v.PrivateIpAddress - ips = append(ips, ip) - } - return ips -} - -//Flattens security group identifiers into a []string, where the elements returned are the GroupIDs -func flattenGroupIdentifiers(dtos []*ec2.GroupIdentifier) []string { - ids := make([]string, 0, len(dtos)) - for _, v := range dtos { - group_id := *v.GroupId - ids = append(ids, group_id) - } - return ids -} - -//Expands an array of IPs into a ec2 Private IP Address Spec -func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification { - dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips)) - for i, v := range ips { - new_private_ip := &ec2.PrivateIpAddressSpecification{ - PrivateIpAddress: aws.String(v.(string)), - } - - new_private_ip.Primary = aws.Bool(i == 0) - - dtos = append(dtos, new_private_ip) - } - return dtos -} - -//Flattens network interface attachment into a map[string]interface -func flattenAttachment(a *ec2.NetworkInterfaceAttachment) map[string]interface{} { - att := make(map[string]interface{}) - if a.InstanceId != nil { - att["instance"] = *a.InstanceId - } - att["device_index"] = *a.DeviceIndex - att["attachment_id"] = *a.AttachmentId - return att -} - -func flattenElastiCacheSecurityGroupNames(securityGroups 
[]*elasticache.CacheSecurityGroupMembership) []string { - result := make([]string, 0, len(securityGroups)) - for _, sg := range securityGroups { - if sg.CacheSecurityGroupName != nil { - result = append(result, *sg.CacheSecurityGroupName) - } - } - return result -} - -func flattenElastiCacheSecurityGroupIds(securityGroups []*elasticache.SecurityGroupMembership) []string { - result := make([]string, 0, len(securityGroups)) - for _, sg := range securityGroups { - if sg.SecurityGroupId != nil { - result = append(result, *sg.SecurityGroupId) - } - } - return result -} - -// Flattens step adjustments into a list of map[string]interface. -func flattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(adjustments)) - for _, raw := range adjustments { - a := map[string]interface{}{ - "scaling_adjustment": *raw.ScalingAdjustment, - } - if raw.MetricIntervalUpperBound != nil { - a["metric_interval_upper_bound"] = *raw.MetricIntervalUpperBound - } - if raw.MetricIntervalLowerBound != nil { - a["metric_interval_lower_bound"] = *raw.MetricIntervalLowerBound - } - result = append(result, a) - } - return result -} - -func flattenResourceRecords(recs []*route53.ResourceRecord, typeStr string) []string { - strs := make([]string, 0, len(recs)) - for _, r := range recs { - if r.Value != nil { - s := *r.Value - if typeStr == "TXT" || typeStr == "SPF" { - s = expandTxtEntry(s) - } - strs = append(strs, s) - } - } - return strs -} - -func expandResourceRecords(recs []interface{}, typeStr string) []*route53.ResourceRecord { - records := make([]*route53.ResourceRecord, 0, len(recs)) - for _, r := range recs { - s := r.(string) - if typeStr == "TXT" || typeStr == "SPF" { - s = flattenTxtEntry(s) - } - records = append(records, &route53.ResourceRecord{Value: aws.String(s)}) - } - return records -} - -// How 'flattenTxtEntry' and 'expandTxtEntry' work. 
-// -// In the Route 53, TXT entries are written using quoted strings, one per line. -// Example: -// "x=foo" -// "bar=12" -// -// In Terraform, there are two differences: -// - We use a list of strings instead of separating strings with newlines. -// - Within each string, we dont' include the surrounding quotes. -// Example: -// records = ["x=foo", "bar=12"] # Instead of ["\"x=foo\", \"bar=12\""] -// -// When we pull from Route 53, `expandTxtEntry` removes the surrounding quotes; -// when we push to Route 53, `flattenTxtEntry` adds them back. -// -// One complication is that a single TXT entry can have multiple quoted strings. -// For example, here are two TXT entries, one with two quoted strings and the -// other with three. -// "x=" "foo" -// "ba" "r" "=12" -// -// DNS clients are expected to merge the quoted strings before interpreting the -// value. Since `expandTxtEntry` only removes the quotes at the end we can still -// (hackily) represent the above configuration in Terraform: -// records = ["x=\" \"foo", "ba\" \"r\" \"=12"] -// -// The primary reason to use multiple strings for an entry is that DNS (and Route -// 53) doesn't allow a quoted string to be more than 255 characters long. If you -// want a longer TXT entry, you must use multiple quoted strings. -// -// It would be nice if this Terraform automatically split strings longer than 255 -// characters. For example, imagine "xxx..xxx" has 256 "x" characters. -// records = ["xxx..xxx"] -// When pushing to Route 53, this could be converted to: -// "xxx..xx" "x" -// -// This could also work when the user is already using multiple quoted strings: -// records = ["xxx.xxx\" \"yyy..yyy"] -// When pushing to Route 53, this could be converted to: -// "xxx..xx" "xyyy...y" "yy" -// -// If you want to add this feature, make sure to follow all the quoting rules in -// . If you make a mistake, people -// might end up relying on that mistake so fixing it would be a breaking change. 
- -func flattenTxtEntry(s string) string { - return fmt.Sprintf(`"%s"`, s) -} - -func expandTxtEntry(s string) string { - last := len(s) - 1 - if last != 0 && s[0] == '"' && s[last] == '"' { - s = s[1:last] - } - return s -} - -func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig { - config := elasticsearch.ElasticsearchClusterConfig{} - - if v, ok := m["dedicated_master_enabled"]; ok { - isEnabled := v.(bool) - config.DedicatedMasterEnabled = aws.Bool(isEnabled) - - if isEnabled { - if v, ok := m["dedicated_master_count"]; ok && v.(int) > 0 { - config.DedicatedMasterCount = aws.Int64(int64(v.(int))) - } - if v, ok := m["dedicated_master_type"]; ok && v.(string) != "" { - config.DedicatedMasterType = aws.String(v.(string)) - } - } - } - - if v, ok := m["instance_count"]; ok { - config.InstanceCount = aws.Int64(int64(v.(int))) - } - if v, ok := m["instance_type"]; ok { - config.InstanceType = aws.String(v.(string)) - } - - if v, ok := m["zone_awareness_enabled"]; ok { - config.ZoneAwarenessEnabled = aws.Bool(v.(bool)) - } - - return &config -} - -func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} { - m := map[string]interface{}{} - - if c.DedicatedMasterCount != nil { - m["dedicated_master_count"] = *c.DedicatedMasterCount - } - if c.DedicatedMasterEnabled != nil { - m["dedicated_master_enabled"] = *c.DedicatedMasterEnabled - } - if c.DedicatedMasterType != nil { - m["dedicated_master_type"] = *c.DedicatedMasterType - } - if c.InstanceCount != nil { - m["instance_count"] = *c.InstanceCount - } - if c.InstanceType != nil { - m["instance_type"] = *c.InstanceType - } - if c.ZoneAwarenessEnabled != nil { - m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled - } - - return []map[string]interface{}{m} -} - -func flattenESEBSOptions(o *elasticsearch.EBSOptions) []map[string]interface{} { - m := map[string]interface{}{} - - if o.EBSEnabled != nil { - m["ebs_enabled"] = *o.EBSEnabled - 
} - if o.Iops != nil { - m["iops"] = *o.Iops - } - if o.VolumeSize != nil { - m["volume_size"] = *o.VolumeSize - } - if o.VolumeType != nil { - m["volume_type"] = *o.VolumeType - } - - return []map[string]interface{}{m} -} - -func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions { - options := elasticsearch.EBSOptions{} - - if v, ok := m["ebs_enabled"]; ok { - options.EBSEnabled = aws.Bool(v.(bool)) - } - if v, ok := m["iops"]; ok && v.(int) > 0 { - options.Iops = aws.Int64(int64(v.(int))) - } - if v, ok := m["volume_size"]; ok && v.(int) > 0 { - options.VolumeSize = aws.Int64(int64(v.(int))) - } - if v, ok := m["volume_type"]; ok && v.(string) != "" { - options.VolumeType = aws.String(v.(string)) - } - - return &options -} - -func expandConfigRecordingGroup(configured []interface{}) *configservice.RecordingGroup { - recordingGroup := configservice.RecordingGroup{} - group := configured[0].(map[string]interface{}) - - if v, ok := group["all_supported"]; ok { - recordingGroup.AllSupported = aws.Bool(v.(bool)) - } - - if v, ok := group["include_global_resource_types"]; ok { - recordingGroup.IncludeGlobalResourceTypes = aws.Bool(v.(bool)) - } - - if v, ok := group["resource_types"]; ok { - recordingGroup.ResourceTypes = expandStringList(v.(*schema.Set).List()) - } - return &recordingGroup -} - -func flattenConfigRecordingGroup(g *configservice.RecordingGroup) []map[string]interface{} { - m := make(map[string]interface{}, 1) - - if g.AllSupported != nil { - m["all_supported"] = *g.AllSupported - } - - if g.IncludeGlobalResourceTypes != nil { - m["include_global_resource_types"] = *g.IncludeGlobalResourceTypes - } - - if g.ResourceTypes != nil && len(g.ResourceTypes) > 0 { - m["resource_types"] = schema.NewSet(schema.HashString, flattenStringList(g.ResourceTypes)) - } - - return []map[string]interface{}{m} -} - -func flattenConfigSnapshotDeliveryProperties(p *configservice.ConfigSnapshotDeliveryProperties) []map[string]interface{} { - m := 
make(map[string]interface{}, 0) - - if p.DeliveryFrequency != nil { - m["delivery_frequency"] = *p.DeliveryFrequency - } - - return []map[string]interface{}{m} -} - -func pointersMapToStringList(pointers map[string]*string) map[string]interface{} { - list := make(map[string]interface{}, len(pointers)) - for i, v := range pointers { - list[i] = *v - } - return list -} - -func stringMapToPointers(m map[string]interface{}) map[string]*string { - list := make(map[string]*string, len(m)) - for i, v := range m { - list[i] = aws.String(v.(string)) - } - return list -} - -func flattenDSVpcSettings( - s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} { - settings := make(map[string]interface{}, 0) - - if s == nil { - return nil - } - - settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) - settings["vpc_id"] = *s.VpcId - - return []map[string]interface{}{settings} -} - -func flattenLambdaEnvironment(lambdaEnv *lambda.EnvironmentResponse) []interface{} { - envs := make(map[string]interface{}) - en := make(map[string]string) - - if lambdaEnv == nil { - return nil - } - - for k, v := range lambdaEnv.Variables { - en[k] = *v - } - if len(en) > 0 { - envs["variables"] = en - } - - return []interface{}{envs} -} - -func flattenLambdaVpcConfigResponse(s *lambda.VpcConfigResponse) []map[string]interface{} { - settings := make(map[string]interface{}, 0) - - if s == nil { - return nil - } - - var emptyVpc bool - if s.VpcId == nil || *s.VpcId == "" { - emptyVpc = true - } - if len(s.SubnetIds) == 0 && len(s.SecurityGroupIds) == 0 && emptyVpc { - return nil - } - - settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) - settings["security_group_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SecurityGroupIds)) - if s.VpcId != nil { - settings["vpc_id"] = *s.VpcId - } - - return []map[string]interface{}{settings} -} - -func flattenDSConnectSettings( - customerDnsIps 
[]*string, - s *directoryservice.DirectoryConnectSettingsDescription) []map[string]interface{} { - if s == nil { - return nil - } - - settings := make(map[string]interface{}, 0) - - settings["customer_dns_ips"] = schema.NewSet(schema.HashString, flattenStringList(customerDnsIps)) - settings["connect_ips"] = schema.NewSet(schema.HashString, flattenStringList(s.ConnectIps)) - settings["customer_username"] = *s.CustomerUserName - settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) - settings["vpc_id"] = *s.VpcId - - return []map[string]interface{}{settings} -} - -func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter { - var cfParams []*cloudformation.Parameter - for k, v := range params { - cfParams = append(cfParams, &cloudformation.Parameter{ - ParameterKey: aws.String(k), - ParameterValue: aws.String(v.(string)), - }) - } - - return cfParams -} - -// flattenCloudFormationParameters is flattening list of -// *cloudformation.Parameters and only returning existing -// parameters to avoid clash with default values -func flattenCloudFormationParameters(cfParams []*cloudformation.Parameter, - originalParams map[string]interface{}) map[string]interface{} { - params := make(map[string]interface{}, len(cfParams)) - for _, p := range cfParams { - _, isConfigured := originalParams[*p.ParameterKey] - if isConfigured { - params[*p.ParameterKey] = *p.ParameterValue - } - } - return params -} - -func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) map[string]interface{} { - params := make(map[string]interface{}, len(cfParams)) - for _, p := range cfParams { - params[*p.ParameterKey] = *p.ParameterValue - } - return params -} - -func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag { - var cfTags []*cloudformation.Tag - for k, v := range tags { - cfTags = append(cfTags, &cloudformation.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - 
} - return cfTags -} - -func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string { - tags := make(map[string]string, len(cfTags)) - for _, t := range cfTags { - tags[*t.Key] = *t.Value - } - return tags -} - -func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { - outputs := make(map[string]string, len(cfOutputs)) - for _, o := range cfOutputs { - outputs[*o.OutputKey] = *o.OutputValue - } - return outputs -} - -func flattenAsgSuspendedProcesses(list []*autoscaling.SuspendedProcess) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.ProcessName != nil { - strs = append(strs, *r.ProcessName) - } - } - return strs -} - -func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Metric != nil { - strs = append(strs, *r.Metric) - } - } - return strs -} - -func flattenKinesisShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { - if len(list) == 0 { - return []string{} - } - strs := make([]string, 0, len(list[0].ShardLevelMetrics)) - for _, s := range list[0].ShardLevelMetrics { - strs = append(strs, *s) - } - return strs -} - -func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} { - stageKeys := make([]map[string]interface{}, 0, len(keys)) - for _, o := range keys { - key := make(map[string]interface{}) - parts := strings.Split(*o, "/") - key["stage_name"] = parts[1] - key["rest_api_id"] = parts[0] - - stageKeys = append(stageKeys, key) - } - return stageKeys -} - -func expandApiGatewayStageKeys(d *schema.ResourceData) []*apigateway.StageKey { - var stageKeys []*apigateway.StageKey - - if stageKeyData, ok := d.GetOk("stage_key"); ok { - params := stageKeyData.(*schema.Set).List() - for k := range params { - data := params[k].(map[string]interface{}) - stageKeys = append(stageKeys, &apigateway.StageKey{ - RestApiId: aws.String(data["rest_api_id"].(string)), - 
StageName: aws.String(data["stage_name"].(string)), - }) - } - } - - return stageKeys -} - -func expandApiGatewayRequestResponseModelOperations(d *schema.ResourceData, key string, prefix string) []*apigateway.PatchOperation { - operations := make([]*apigateway.PatchOperation, 0) - - oldModels, newModels := d.GetChange(key) - oldModelMap := oldModels.(map[string]interface{}) - newModelMap := newModels.(map[string]interface{}) - - for k, _ := range oldModelMap { - operation := apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), - } - - for nK, nV := range newModelMap { - if nK == k { - operation.Op = aws.String("replace") - operation.Value = aws.String(nV.(string)) - } - } - - operations = append(operations, &operation) - } - - for nK, nV := range newModelMap { - exists := false - for k, _ := range oldModelMap { - if k == nK { - exists = true - } - } - if !exists { - operation := apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(nK, "/", "~1", -1))), - Value: aws.String(nV.(string)), - } - operations = append(operations, &operation) - } - } - - return operations -} - -func deprecatedExpandApiGatewayMethodParametersJSONOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) { - operations := make([]*apigateway.PatchOperation, 0) - oldParameters, newParameters := d.GetChange(key) - oldParametersMap := make(map[string]interface{}) - newParametersMap := make(map[string]interface{}) - - if err := json.Unmarshal([]byte(oldParameters.(string)), &oldParametersMap); err != nil { - err := fmt.Errorf("Error unmarshaling old %s: %s", key, err) - return operations, err - } - - if err := json.Unmarshal([]byte(newParameters.(string)), &newParametersMap); err != nil { - err := fmt.Errorf("Error unmarshaling new %s: %s", key, err) - return operations, err - } - - for k, _ := range 
oldParametersMap { - operation := apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)), - } - - for nK, nV := range newParametersMap { - if nK == k { - operation.Op = aws.String("replace") - operation.Value = aws.String(strconv.FormatBool(nV.(bool))) - } - } - - operations = append(operations, &operation) - } - - for nK, nV := range newParametersMap { - exists := false - for k, _ := range oldParametersMap { - if k == nK { - exists = true - } - } - if !exists { - operation := apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)), - Value: aws.String(strconv.FormatBool(nV.(bool))), - } - operations = append(operations, &operation) - } - } - - return operations, nil -} - -func expandApiGatewayMethodParametersOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) { - operations := make([]*apigateway.PatchOperation, 0) - - oldParameters, newParameters := d.GetChange(key) - oldParametersMap := oldParameters.(map[string]interface{}) - newParametersMap := newParameters.(map[string]interface{}) - - for k, _ := range oldParametersMap { - operation := apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)), - } - - for nK, nV := range newParametersMap { - b, ok := nV.(bool) - if !ok { - value, _ := strconv.ParseBool(nV.(string)) - b = value - } - if nK == k { - operation.Op = aws.String("replace") - operation.Value = aws.String(strconv.FormatBool(b)) - } - } - - operations = append(operations, &operation) - } - - for nK, nV := range newParametersMap { - exists := false - for k, _ := range oldParametersMap { - if k == nK { - exists = true - } - } - if !exists { - b, ok := nV.(bool) - if !ok { - value, _ := strconv.ParseBool(nV.(string)) - b = value - } - operation := apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)), - 
Value: aws.String(strconv.FormatBool(b)), - } - operations = append(operations, &operation) - } - } - - return operations, nil -} - -func expandApiGatewayStageKeyOperations(d *schema.ResourceData) []*apigateway.PatchOperation { - operations := make([]*apigateway.PatchOperation, 0) - - prev, curr := d.GetChange("stage_key") - prevList := prev.(*schema.Set).List() - currList := curr.(*schema.Set).List() - - for i := range prevList { - p := prevList[i].(map[string]interface{}) - exists := false - - for j := range currList { - c := currList[j].(map[string]interface{}) - if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) { - exists = true - } - } - - if !exists { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("remove"), - Path: aws.String("/stages"), - Value: aws.String(fmt.Sprintf("%s/%s", p["rest_api_id"].(string), p["stage_name"].(string))), - }) - } - } - - for i := range currList { - c := currList[i].(map[string]interface{}) - exists := false - - for j := range prevList { - p := prevList[j].(map[string]interface{}) - if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) { - exists = true - } - } - - if !exists { - operations = append(operations, &apigateway.PatchOperation{ - Op: aws.String("add"), - Path: aws.String("/stages"), - Value: aws.String(fmt.Sprintf("%s/%s", c["rest_api_id"].(string), c["stage_name"].(string))), - }) - } - } - - return operations -} - -func expandCloudWachLogMetricTransformations(m map[string]interface{}) []*cloudwatchlogs.MetricTransformation { - transformation := cloudwatchlogs.MetricTransformation{ - MetricName: aws.String(m["name"].(string)), - MetricNamespace: aws.String(m["namespace"].(string)), - MetricValue: aws.String(m["value"].(string)), - } - - return []*cloudwatchlogs.MetricTransformation{&transformation} -} - -func flattenCloudWachLogMetricTransformations(ts 
[]*cloudwatchlogs.MetricTransformation) map[string]string { - m := make(map[string]string, 0) - - m["name"] = *ts[0].MetricName - m["namespace"] = *ts[0].MetricNamespace - m["value"] = *ts[0].MetricValue - - return m -} - -func flattenBeanstalkAsg(list []*elasticbeanstalk.AutoScalingGroup) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Name != nil { - strs = append(strs, *r.Name) - } - } - return strs -} - -func flattenBeanstalkInstances(list []*elasticbeanstalk.Instance) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Id != nil { - strs = append(strs, *r.Id) - } - } - return strs -} - -func flattenBeanstalkLc(list []*elasticbeanstalk.LaunchConfiguration) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Name != nil { - strs = append(strs, *r.Name) - } - } - return strs -} - -func flattenBeanstalkElb(list []*elasticbeanstalk.LoadBalancer) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Name != nil { - strs = append(strs, *r.Name) - } - } - return strs -} - -func flattenBeanstalkSqs(list []*elasticbeanstalk.Queue) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.URL != nil { - strs = append(strs, *r.URL) - } - } - return strs -} - -func flattenBeanstalkTrigger(list []*elasticbeanstalk.Trigger) []string { - strs := make([]string, 0, len(list)) - for _, r := range list { - if r.Name != nil { - strs = append(strs, *r.Name) - } - } - return strs -} - -// There are several parts of the AWS API that will sort lists of strings, -// causing diffs inbetween resources that use lists. This avoids a bit of -// code duplication for pre-sorts that can be used for things like hash -// functions, etc. 
-func sortInterfaceSlice(in []interface{}) []interface{} { - a := []string{} - b := []interface{}{} - for _, v := range in { - a = append(a, v.(string)) - } - - sort.Strings(a) - - for _, v := range a { - b = append(b, v) - } - - return b -} - -// This function sorts List A to look like a list found in the tf file. -func sortListBasedonTFFile(in []string, d *schema.ResourceData, listName string) ([]string, error) { - if attributeCount, ok := d.Get(listName + ".#").(int); ok { - for i := 0; i < attributeCount; i++ { - currAttributeId := d.Get(listName + "." + strconv.Itoa(i)) - for j := 0; j < len(in); j++ { - if currAttributeId == in[j] { - in[i], in[j] = in[j], in[i] - } - } - } - return in, nil - } - return in, fmt.Errorf("Could not find list: %s", listName) -} - -func flattenApiGatewayThrottleSettings(settings *apigateway.ThrottleSettings) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if settings != nil { - r := make(map[string]interface{}) - if settings.BurstLimit != nil { - r["burst_limit"] = *settings.BurstLimit - } - - if settings.RateLimit != nil { - r["rate_limit"] = *settings.RateLimit - } - - result = append(result, r) - } - - return result -} - -// TODO: refactor some of these helper functions and types in the terraform/helper packages - -// getStringPtr returns a *string version of the value taken from m, where m -// can be a map[string]interface{} or a *schema.ResourceData. If the key isn't -// present or is empty, getNilString returns nil. 
-func getStringPtr(m interface{}, key string) *string { - switch m := m.(type) { - case map[string]interface{}: - v := m[key] - - if v == nil { - return nil - } - - s := v.(string) - if s == "" { - return nil - } - - return &s - - case *schema.ResourceData: - if v, ok := m.GetOk(key); ok { - if v == nil || v.(string) == "" { - return nil - } - s := v.(string) - return &s - } - - default: - panic("unknown type in getStringPtr") - } - - return nil -} - -// getStringPtrList returns a []*string version of the map value. If the key -// isn't present, getNilStringList returns nil. -func getStringPtrList(m map[string]interface{}, key string) []*string { - if v, ok := m[key]; ok { - var stringList []*string - for _, i := range v.([]interface{}) { - s := i.(string) - stringList = append(stringList, &s) - } - - return stringList - } - - return nil -} - -// a convenience wrapper type for the schema.Set map[string]interface{} -// Set operations only alter the underlying map if the value is not nil -type setMap map[string]interface{} - -// SetString sets m[key] = *value only if `value != nil` -func (s setMap) SetString(key string, value *string) { - if value == nil { - return - } - - s[key] = *value -} - -// SetStringMap sets key to value as a map[string]interface{}, stripping any nil -// values. The value parameter can be a map[string]interface{}, a -// map[string]*string, or a map[string]string. -func (s setMap) SetStringMap(key string, value interface{}) { - // because these methods are meant to be chained without intermediate - // checks for nil, we are likely to get interfaces with dynamic types but - // a nil value. 
- if reflect.ValueOf(value).IsNil() { - return - } - - m := make(map[string]interface{}) - - switch value := value.(type) { - case map[string]string: - for k, v := range value { - m[k] = v - } - case map[string]*string: - for k, v := range value { - if v == nil { - continue - } - m[k] = *v - } - case map[string]interface{}: - for k, v := range value { - if v == nil { - continue - } - - switch v := v.(type) { - case string: - m[k] = v - case *string: - if v != nil { - m[k] = *v - } - default: - panic(fmt.Sprintf("unknown type for SetString: %T", v)) - } - } - } - - // catch the case where the interface wasn't nil, but we had no non-nil values - if len(m) > 0 { - s[key] = m - } -} - -// Set assigns value to s[key] if value isn't nil -func (s setMap) Set(key string, value interface{}) { - if reflect.ValueOf(value).IsNil() { - return - } - - s[key] = value -} - -// Map returns the raw map type for a shorter type conversion -func (s setMap) Map() map[string]interface{} { - return map[string]interface{}(s) -} - -// MapList returns the map[string]interface{} as a single element in a slice to -// match the schema.Set data type used for structs. 
-func (s setMap) MapList() []map[string]interface{} { - return []map[string]interface{}{s.Map()} -} - -// Takes the result of flatmap.Expand for an array of policy attributes and -// returns ELB API compatible objects -func expandPolicyAttributes(configured []interface{}) ([]*elb.PolicyAttribute, error) { - attributes := make([]*elb.PolicyAttribute, 0, len(configured)) - - // Loop over our configured attributes and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - a := &elb.PolicyAttribute{ - AttributeName: aws.String(data["name"].(string)), - AttributeValue: aws.String(data["value"].(string)), - } - - attributes = append(attributes, a) - - } - - return attributes, nil -} - -// Flattens an array of PolicyAttributes into a []interface{} -func flattenPolicyAttributes(list []*elb.PolicyAttributeDescription) []interface{} { - attributes := []interface{}{} - for _, attrdef := range list { - attribute := map[string]string{ - "name": *attrdef.AttributeName, - "value": *attrdef.AttributeValue, - } - - attributes = append(attributes, attribute) - - } - - return attributes -} - -func flattenConfigRuleSource(source *configservice.Source) []interface{} { - var result []interface{} - m := make(map[string]interface{}) - m["owner"] = *source.Owner - m["source_identifier"] = *source.SourceIdentifier - if len(source.SourceDetails) > 0 { - m["source_detail"] = schema.NewSet(configRuleSourceDetailsHash, flattenConfigRuleSourceDetails(source.SourceDetails)) - } - result = append(result, m) - return result -} - -func flattenConfigRuleSourceDetails(details []*configservice.SourceDetail) []interface{} { - var items []interface{} - for _, d := range details { - m := make(map[string]interface{}) - if d.MessageType != nil { - m["message_type"] = *d.MessageType - } - if d.EventSource != nil { - m["event_source"] = *d.EventSource - } - if d.MaximumExecutionFrequency != nil { - m["maximum_execution_frequency"] 
= *d.MaximumExecutionFrequency - } - - items = append(items, m) - } - - return items -} - -func expandConfigRuleSource(configured []interface{}) *configservice.Source { - cfg := configured[0].(map[string]interface{}) - source := configservice.Source{ - Owner: aws.String(cfg["owner"].(string)), - SourceIdentifier: aws.String(cfg["source_identifier"].(string)), - } - if details, ok := cfg["source_detail"]; ok { - source.SourceDetails = expandConfigRuleSourceDetails(details.(*schema.Set)) - } - return &source -} - -func expandConfigRuleSourceDetails(configured *schema.Set) []*configservice.SourceDetail { - var results []*configservice.SourceDetail - - for _, item := range configured.List() { - detail := item.(map[string]interface{}) - src := configservice.SourceDetail{} - - if msgType, ok := detail["message_type"].(string); ok && msgType != "" { - src.MessageType = aws.String(msgType) - } - if eventSource, ok := detail["event_source"].(string); ok && eventSource != "" { - src.EventSource = aws.String(eventSource) - } - if maxExecFreq, ok := detail["maximum_execution_frequency"].(string); ok && maxExecFreq != "" { - src.MaximumExecutionFrequency = aws.String(maxExecFreq) - } - - results = append(results, &src) - } - - return results -} - -func flattenConfigRuleScope(scope *configservice.Scope) []interface{} { - var items []interface{} - - m := make(map[string]interface{}) - if scope.ComplianceResourceId != nil { - m["compliance_resource_id"] = *scope.ComplianceResourceId - } - if scope.ComplianceResourceTypes != nil { - m["compliance_resource_types"] = schema.NewSet(schema.HashString, flattenStringList(scope.ComplianceResourceTypes)) - } - if scope.TagKey != nil { - m["tag_key"] = *scope.TagKey - } - if scope.TagValue != nil { - m["tag_value"] = *scope.TagValue - } - - items = append(items, m) - return items -} - -func expandConfigRuleScope(configured map[string]interface{}) *configservice.Scope { - scope := &configservice.Scope{} - - if v, ok := 
configured["compliance_resource_id"].(string); ok && v != "" { - scope.ComplianceResourceId = aws.String(v) - } - if v, ok := configured["compliance_resource_types"]; ok { - l := v.(*schema.Set) - if l.Len() > 0 { - scope.ComplianceResourceTypes = expandStringList(l.List()) - } - } - if v, ok := configured["tag_key"].(string); ok && v != "" { - scope.TagKey = aws.String(v) - } - if v, ok := configured["tag_value"].(string); ok && v != "" { - scope.TagValue = aws.String(v) - } - - return scope -} - -// Takes a value containing JSON string and passes it through -// the JSON parser to normalize it, returns either a parsing -// error or normalized JSON string. -func normalizeJsonString(jsonString interface{}) (string, error) { - var j interface{} - - if jsonString == nil || jsonString.(string) == "" { - return "", nil - } - - s := jsonString.(string) - - err := json.Unmarshal([]byte(s), &j) - if err != nil { - return s, err - } - - // The error is intentionally ignored here to allow empty policies to passthrough validation. - // This covers any interpolated values - bytes, _ := json.Marshal(j) - - return string(bytes[:]), nil -} - -// Takes a value containing YAML string and passes it through -// the YAML parser. Returns either a parsing -// error or original YAML string. 
-func checkYamlString(yamlString interface{}) (string, error) { - var y interface{} - - if yamlString == nil || yamlString.(string) == "" { - return "", nil - } - - s := yamlString.(string) - - err := yaml.Unmarshal([]byte(s), &y) - if err != nil { - return s, err - } - - return s, nil -} - -func normalizeCloudFormationTemplate(templateString interface{}) (string, error) { - if looksLikeJsonString(templateString) { - return normalizeJsonString(templateString) - } else { - return checkYamlString(templateString) - } -} - -func flattenInspectorTags(cfTags []*cloudformation.Tag) map[string]string { - tags := make(map[string]string, len(cfTags)) - for _, t := range cfTags { - tags[*t.Key] = *t.Value - } - return tags -} - -func flattenApiGatewayUsageApiStages(s []*apigateway.ApiStage) []map[string]interface{} { - stages := make([]map[string]interface{}, 0) - - for _, bd := range s { - if bd.ApiId != nil && bd.Stage != nil { - stage := make(map[string]interface{}) - stage["api_id"] = *bd.ApiId - stage["stage"] = *bd.Stage - - stages = append(stages, stage) - } - } - - if len(stages) > 0 { - return stages - } - - return nil -} - -func flattenApiGatewayUsagePlanThrottling(s *apigateway.ThrottleSettings) []map[string]interface{} { - settings := make(map[string]interface{}, 0) - - if s == nil { - return nil - } - - if s.BurstLimit != nil { - settings["burst_limit"] = *s.BurstLimit - } - - if s.RateLimit != nil { - settings["rate_limit"] = *s.RateLimit - } - - return []map[string]interface{}{settings} -} - -func flattenApiGatewayUsagePlanQuota(s *apigateway.QuotaSettings) []map[string]interface{} { - settings := make(map[string]interface{}, 0) - - if s == nil { - return nil - } - - if s.Limit != nil { - settings["limit"] = *s.Limit - } - - if s.Offset != nil { - settings["offset"] = *s.Offset - } - - if s.Period != nil { - settings["period"] = *s.Period - } - - return []map[string]interface{}{settings} -} - -func buildApiGatewayInvokeURL(restApiId, region, stageName string) 
string { - return fmt.Sprintf("https://%s.execute-api.%s.amazonaws.com/%s", - restApiId, region, stageName) -} - -func buildApiGatewayExecutionARN(restApiId, region, accountId string) (string, error) { - if accountId == "" { - return "", fmt.Errorf("Unable to build execution ARN for %s as account ID is missing", - restApiId) - } - return fmt.Sprintf("arn:aws:execute-api:%s:%s:%s", - region, accountId, restApiId), nil -} - -func expandCognitoSupportedLoginProviders(config map[string]interface{}) map[string]*string { - m := map[string]*string{} - for k, v := range config { - s := v.(string) - m[k] = &s - } - return m -} - -func flattenCognitoSupportedLoginProviders(config map[string]*string) map[string]string { - m := map[string]string{} - for k, v := range config { - m[k] = *v - } - return m -} - -func expandCognitoIdentityProviders(s *schema.Set) []*cognitoidentity.Provider { - ips := make([]*cognitoidentity.Provider, 0) - - for _, v := range s.List() { - s := v.(map[string]interface{}) - - ip := &cognitoidentity.Provider{} - - if sv, ok := s["client_id"].(string); ok { - ip.ClientId = aws.String(sv) - } - - if sv, ok := s["provider_name"].(string); ok { - ip.ProviderName = aws.String(sv) - } - - if sv, ok := s["server_side_token_check"].(bool); ok { - ip.ServerSideTokenCheck = aws.Bool(sv) - } - - ips = append(ips, ip) - } - - return ips -} - -func flattenCognitoIdentityProviders(ips []*cognitoidentity.Provider) []map[string]interface{} { - values := make([]map[string]interface{}, 0) - - for _, v := range ips { - ip := make(map[string]interface{}) - - if v == nil { - return nil - } - - if v.ClientId != nil { - ip["client_id"] = *v.ClientId - } - - if v.ProviderName != nil { - ip["provider_name"] = *v.ProviderName - } - - if v.ServerSideTokenCheck != nil { - ip["server_side_token_check"] = *v.ServerSideTokenCheck - } - - values = append(values, ip) - } - - return values -} - -func buildLambdaInvokeArn(lambdaArn, region string) string { - apiVersion := "2015-03-31" 
- return fmt.Sprintf("arn:aws:apigateway:%s:lambda:path/%s/functions/%s/invocations", - region, apiVersion, lambdaArn) -} - -func sliceContainsMap(l []interface{}, m map[string]interface{}) (int, bool) { - for i, t := range l { - if reflect.DeepEqual(m, t.(map[string]interface{})) { - return i, true - } - } - - return -1, false -} - -func expandAwsSsmTargets(d *schema.ResourceData) []*ssm.Target { - var targets []*ssm.Target - - targetConfig := d.Get("targets").([]interface{}) - - for _, tConfig := range targetConfig { - config := tConfig.(map[string]interface{}) - - target := &ssm.Target{ - Key: aws.String(config["key"].(string)), - Values: expandStringList(config["values"].([]interface{})), - } - - targets = append(targets, target) - } - - return targets -} - -func flattenAwsSsmTargets(targets []*ssm.Target) []map[string]interface{} { - if len(targets) == 0 { - return nil - } - - result := make([]map[string]interface{}, 0, len(targets)) - target := targets[0] - - t := make(map[string]interface{}) - t["key"] = *target.Key - t["values"] = flattenStringList(target.Values) - - result = append(result, t) - - return result -} diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go deleted file mode 100644 index 11e74d2ba..000000000 --- a/builtin/providers/aws/structure_test.go +++ /dev/null @@ -1,1287 +0,0 @@ -package aws - -import ( - "reflect" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/schema" -) - -// Returns test 
configuration -func testConf() map[string]string { - return map[string]string{ - "listener.#": "1", - "listener.0.lb_port": "80", - "listener.0.lb_protocol": "http", - "listener.0.instance_port": "8000", - "listener.0.instance_protocol": "http", - "availability_zones.#": "2", - "availability_zones.0": "us-east-1a", - "availability_zones.1": "us-east-1b", - "ingress.#": "1", - "ingress.0.protocol": "icmp", - "ingress.0.from_port": "1", - "ingress.0.to_port": "-1", - "ingress.0.cidr_blocks.#": "1", - "ingress.0.cidr_blocks.0": "0.0.0.0/0", - "ingress.0.security_groups.#": "2", - "ingress.0.security_groups.0": "sg-11111", - "ingress.0.security_groups.1": "foo/sg-22222", - } -} - -func TestExpandIPPerms(t *testing.T) { - hash := schema.HashString - - expanded := []interface{}{ - map[string]interface{}{ - "protocol": "icmp", - "from_port": 1, - "to_port": -1, - "cidr_blocks": []interface{}{"0.0.0.0/0"}, - "security_groups": schema.NewSet(hash, []interface{}{ - "sg-11111", - "foo/sg-22222", - }), - }, - map[string]interface{}{ - "protocol": "icmp", - "from_port": 1, - "to_port": -1, - "self": true, - }, - } - group := &ec2.SecurityGroup{ - GroupId: aws.String("foo"), - VpcId: aws.String("bar"), - } - perms, err := expandIPPerms(group, expanded) - if err != nil { - t.Fatalf("error expanding perms: %v", err) - } - - expected := []ec2.IpPermission{ - ec2.IpPermission{ - IpProtocol: aws.String("icmp"), - FromPort: aws.Int64(int64(1)), - ToPort: aws.Int64(int64(-1)), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("0.0.0.0/0")}}, - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - UserId: aws.String("foo"), - GroupId: aws.String("sg-22222"), - }, - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-11111"), - }, - }, - }, - ec2.IpPermission{ - IpProtocol: aws.String("icmp"), - FromPort: aws.Int64(int64(1)), - ToPort: aws.Int64(int64(-1)), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupId: aws.String("foo"), - }, - }, - }, 
- } - - exp := expected[0] - perm := perms[0] - - if *exp.FromPort != *perm.FromPort { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.FromPort, - *exp.FromPort) - } - - if *exp.IpRanges[0].CidrIp != *perm.IpRanges[0].CidrIp { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.IpRanges[0].CidrIp, - *exp.IpRanges[0].CidrIp) - } - - if *exp.UserIdGroupPairs[0].UserId != *perm.UserIdGroupPairs[0].UserId { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].UserId, - *exp.UserIdGroupPairs[0].UserId) - } - - if *exp.UserIdGroupPairs[0].GroupId != *perm.UserIdGroupPairs[0].GroupId { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].GroupId, - *exp.UserIdGroupPairs[0].GroupId) - } - - if *exp.UserIdGroupPairs[1].GroupId != *perm.UserIdGroupPairs[1].GroupId { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[1].GroupId, - *exp.UserIdGroupPairs[1].GroupId) - } - - exp = expected[1] - perm = perms[1] - - if *exp.UserIdGroupPairs[0].GroupId != *perm.UserIdGroupPairs[0].GroupId { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].GroupId, - *exp.UserIdGroupPairs[0].GroupId) - } -} - -func TestExpandIPPerms_NegOneProtocol(t *testing.T) { - hash := schema.HashString - - expanded := []interface{}{ - map[string]interface{}{ - "protocol": "-1", - "from_port": 0, - "to_port": 0, - "cidr_blocks": []interface{}{"0.0.0.0/0"}, - "security_groups": schema.NewSet(hash, []interface{}{ - "sg-11111", - "foo/sg-22222", - }), - }, - } - group := &ec2.SecurityGroup{ - GroupId: aws.String("foo"), - VpcId: aws.String("bar"), - } - - perms, err := expandIPPerms(group, expanded) - if err != nil { - t.Fatalf("error expanding perms: %v", err) - } - - expected := []ec2.IpPermission{ - ec2.IpPermission{ - IpProtocol: aws.String("-1"), - FromPort: aws.Int64(int64(0)), - ToPort: aws.Int64(int64(0)), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("0.0.0.0/0")}}, - 
UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - UserId: aws.String("foo"), - GroupId: aws.String("sg-22222"), - }, - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-11111"), - }, - }, - }, - } - - exp := expected[0] - perm := perms[0] - - if *exp.FromPort != *perm.FromPort { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.FromPort, - *exp.FromPort) - } - - if *exp.IpRanges[0].CidrIp != *perm.IpRanges[0].CidrIp { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.IpRanges[0].CidrIp, - *exp.IpRanges[0].CidrIp) - } - - if *exp.UserIdGroupPairs[0].UserId != *perm.UserIdGroupPairs[0].UserId { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].UserId, - *exp.UserIdGroupPairs[0].UserId) - } - - // Now test the error case. This *should* error when either from_port - // or to_port is not zero, but protocol is "-1". - errorCase := []interface{}{ - map[string]interface{}{ - "protocol": "-1", - "from_port": 0, - "to_port": 65535, - "cidr_blocks": []interface{}{"0.0.0.0/0"}, - "security_groups": schema.NewSet(hash, []interface{}{ - "sg-11111", - "foo/sg-22222", - }), - }, - } - securityGroups := &ec2.SecurityGroup{ - GroupId: aws.String("foo"), - VpcId: aws.String("bar"), - } - - _, expandErr := expandIPPerms(securityGroups, errorCase) - if expandErr == nil { - t.Fatal("expandIPPerms should have errored!") - } -} - -func TestExpandIPPerms_nonVPC(t *testing.T) { - hash := schema.HashString - - expanded := []interface{}{ - map[string]interface{}{ - "protocol": "icmp", - "from_port": 1, - "to_port": -1, - "cidr_blocks": []interface{}{"0.0.0.0/0"}, - "security_groups": schema.NewSet(hash, []interface{}{ - "sg-11111", - "foo/sg-22222", - }), - }, - map[string]interface{}{ - "protocol": "icmp", - "from_port": 1, - "to_port": -1, - "self": true, - }, - } - group := &ec2.SecurityGroup{ - GroupName: aws.String("foo"), - } - perms, err := expandIPPerms(group, expanded) - if err != nil { - t.Fatalf("error expanding perms: 
%v", err) - } - - expected := []ec2.IpPermission{ - ec2.IpPermission{ - IpProtocol: aws.String("icmp"), - FromPort: aws.Int64(int64(1)), - ToPort: aws.Int64(int64(-1)), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("0.0.0.0/0")}}, - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupName: aws.String("sg-22222"), - }, - &ec2.UserIdGroupPair{ - GroupName: aws.String("sg-11111"), - }, - }, - }, - ec2.IpPermission{ - IpProtocol: aws.String("icmp"), - FromPort: aws.Int64(int64(1)), - ToPort: aws.Int64(int64(-1)), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupName: aws.String("foo"), - }, - }, - }, - } - - exp := expected[0] - perm := perms[0] - - if *exp.FromPort != *perm.FromPort { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.FromPort, - *exp.FromPort) - } - - if *exp.IpRanges[0].CidrIp != *perm.IpRanges[0].CidrIp { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.IpRanges[0].CidrIp, - *exp.IpRanges[0].CidrIp) - } - - if *exp.UserIdGroupPairs[0].GroupName != *perm.UserIdGroupPairs[0].GroupName { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].GroupName, - *exp.UserIdGroupPairs[0].GroupName) - } - - if *exp.UserIdGroupPairs[1].GroupName != *perm.UserIdGroupPairs[1].GroupName { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[1].GroupName, - *exp.UserIdGroupPairs[1].GroupName) - } - - exp = expected[1] - perm = perms[1] - - if *exp.UserIdGroupPairs[0].GroupName != *perm.UserIdGroupPairs[0].GroupName { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - *perm.UserIdGroupPairs[0].GroupName, - *exp.UserIdGroupPairs[0].GroupName) - } -} - -func TestExpandListeners(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": "http", - "lb_protocol": "http", - }, - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": 
"https", - "lb_protocol": "https", - "ssl_certificate_id": "something", - }, - } - listeners, err := expandListeners(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &elb.Listener{ - InstancePort: aws.Int64(int64(8000)), - LoadBalancerPort: aws.Int64(int64(80)), - InstanceProtocol: aws.String("http"), - Protocol: aws.String("http"), - } - - if !reflect.DeepEqual(listeners[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - listeners[0], - expected) - } -} - -// this test should produce an error from expandlisteners on an invalid -// combination -func TestExpandListeners_invalid(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": "http", - "lb_protocol": "http", - "ssl_certificate_id": "something", - }, - } - _, err := expandListeners(expanded) - if err != nil { - // Check the error we got - if !strings.Contains(err.Error(), "ssl_certificate_id may be set only when protocol") { - t.Fatalf("Got error in TestExpandListeners_invalid, but not what we expected: %s", err) - } - } - - if err == nil { - t.Fatalf("Expected TestExpandListeners_invalid to fail, but passed") - } -} - -func TestFlattenHealthCheck(t *testing.T) { - cases := []struct { - Input *elb.HealthCheck - Output []map[string]interface{} - }{ - { - Input: &elb.HealthCheck{ - UnhealthyThreshold: aws.Int64(int64(10)), - HealthyThreshold: aws.Int64(int64(10)), - Target: aws.String("HTTP:80/"), - Timeout: aws.Int64(int64(30)), - Interval: aws.Int64(int64(30)), - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - "unhealthy_threshold": int64(10), - "healthy_threshold": int64(10), - "target": "HTTP:80/", - "timeout": int64(30), - "interval": int64(30), - }, - }, - }, - } - - for _, tc := range cases { - output := flattenHealthCheck(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func 
TestExpandStringList(t *testing.T) { - expanded := flatmap.Expand(testConf(), "availability_zones").([]interface{}) - stringList := expandStringList(expanded) - expected := []*string{ - aws.String("us-east-1a"), - aws.String("us-east-1b"), - } - - if !reflect.DeepEqual(stringList, expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - stringList, - expected) - } -} - -func TestExpandStringListEmptyItems(t *testing.T) { - initialList := []string{"foo", "bar", "", "baz"} - l := make([]interface{}, len(initialList)) - for i, v := range initialList { - l[i] = v - } - stringList := expandStringList(l) - expected := []*string{ - aws.String("foo"), - aws.String("bar"), - aws.String("baz"), - } - - if !reflect.DeepEqual(stringList, expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - stringList, - expected) - } -} - -func TestExpandParameters(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "character_set_client", - "value": "utf8", - "apply_method": "immediate", - }, - } - parameters, err := expandParameters(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &rds.Parameter{ - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - ApplyMethod: aws.String("immediate"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func TestExpandRedshiftParameters(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "character_set_client", - "value": "utf8", - }, - } - parameters, err := expandRedshiftParameters(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &redshift.Parameter{ - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func 
TestExpandElasticacheParameters(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - "apply_method": "immediate", - }, - } - parameters, err := expandElastiCacheParameters(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &elasticache.ParameterNameValue{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func TestExpandStepAdjustments(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "metric_interval_lower_bound": "1.0", - "metric_interval_upper_bound": "2.0", - "scaling_adjustment": 1, - }, - } - parameters, err := expandStepAdjustments(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &autoscaling.StepAdjustment{ - MetricIntervalLowerBound: aws.Float64(1.0), - MetricIntervalUpperBound: aws.Float64(2.0), - ScalingAdjustment: aws.Int64(int64(1)), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func TestFlattenParameters(t *testing.T) { - cases := []struct { - Input []*rds.Parameter - Output []map[string]interface{} - }{ - { - Input: []*rds.Parameter{ - &rds.Parameter{ - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - }, - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - "name": "character_set_client", - "value": "utf8", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestFlattenRedshiftParameters(t *testing.T) { - cases := []struct { - Input []*redshift.Parameter - Output []map[string]interface{} - }{ - { - Input: 
[]*redshift.Parameter{ - &redshift.Parameter{ - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - }, - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - "name": "character_set_client", - "value": "utf8", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenRedshiftParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestFlattenElasticacheParameters(t *testing.T) { - cases := []struct { - Input []*elasticache.Parameter - Output []map[string]interface{} - }{ - { - Input: []*elasticache.Parameter{ - &elasticache.Parameter{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - }, - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenElastiCacheParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestExpandInstanceString(t *testing.T) { - - expected := []*elb.Instance{ - &elb.Instance{InstanceId: aws.String("test-one")}, - &elb.Instance{InstanceId: aws.String("test-two")}, - } - - ids := []interface{}{ - "test-one", - "test-two", - } - - expanded := expandInstanceString(ids) - - if !reflect.DeepEqual(expanded, expected) { - t.Fatalf("Expand Instance String output did not match.\nGot:\n%#v\n\nexpected:\n%#v", expanded, expected) - } -} - -func TestFlattenNetworkInterfacesPrivateIPAddresses(t *testing.T) { - expanded := []*ec2.NetworkInterfacePrivateIpAddress{ - &ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.1")}, - &ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.2")}, - } - - result := flattenNetworkInterfacesPrivateIPAddresses(expanded) - - if result == nil { - t.Fatal("result was nil") - 
} - - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - - if result[0] != "192.168.0.1" { - t.Fatalf("expected ip to be 192.168.0.1, but was %s", result[0]) - } - - if result[1] != "192.168.0.2" { - t.Fatalf("expected ip to be 192.168.0.2, but was %s", result[1]) - } -} - -func TestFlattenGroupIdentifiers(t *testing.T) { - expanded := []*ec2.GroupIdentifier{ - &ec2.GroupIdentifier{GroupId: aws.String("sg-001")}, - &ec2.GroupIdentifier{GroupId: aws.String("sg-002")}, - } - - result := flattenGroupIdentifiers(expanded) - - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - - if result[0] != "sg-001" { - t.Fatalf("expected id to be sg-001, but was %s", result[0]) - } - - if result[1] != "sg-002" { - t.Fatalf("expected id to be sg-002, but was %s", result[1]) - } -} - -func TestExpandPrivateIPAddresses(t *testing.T) { - - ip1 := "192.168.0.1" - ip2 := "192.168.0.2" - flattened := []interface{}{ - ip1, - ip2, - } - - result := expandPrivateIPAddresses(flattened) - - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - - if *result[0].PrivateIpAddress != "192.168.0.1" || !*result[0].Primary { - t.Fatalf("expected ip to be 192.168.0.1 and Primary, but got %v, %t", *result[0].PrivateIpAddress, *result[0].Primary) - } - - if *result[1].PrivateIpAddress != "192.168.0.2" || *result[1].Primary { - t.Fatalf("expected ip to be 192.168.0.2 and not Primary, but got %v, %t", *result[1].PrivateIpAddress, *result[1].Primary) - } -} - -func TestFlattenAttachment(t *testing.T) { - expanded := &ec2.NetworkInterfaceAttachment{ - InstanceId: aws.String("i-00001"), - DeviceIndex: aws.Int64(int64(1)), - AttachmentId: aws.String("at-002"), - } - - result := flattenAttachment(expanded) - - if result == nil { - t.Fatal("expected result to have value, but got nil") - } - - if result["instance"] != "i-00001" { - t.Fatalf("expected instance to be 
i-00001, but got %s", result["instance"]) - } - - if result["device_index"] != int64(1) { - t.Fatalf("expected device_index to be 1, but got %d", result["device_index"]) - } - - if result["attachment_id"] != "at-002" { - t.Fatalf("expected attachment_id to be at-002, but got %s", result["attachment_id"]) - } -} - -func TestFlattenAttachmentWhenNoInstanceId(t *testing.T) { - expanded := &ec2.NetworkInterfaceAttachment{ - DeviceIndex: aws.Int64(int64(1)), - AttachmentId: aws.String("at-002"), - } - - result := flattenAttachment(expanded) - - if result == nil { - t.Fatal("expected result to have value, but got nil") - } - - if result["instance"] != nil { - t.Fatalf("expected instance to be nil, but got %s", result["instance"]) - } -} - -func TestFlattenStepAdjustments(t *testing.T) { - expanded := []*autoscaling.StepAdjustment{ - &autoscaling.StepAdjustment{ - MetricIntervalLowerBound: aws.Float64(1.0), - MetricIntervalUpperBound: aws.Float64(2.0), - ScalingAdjustment: aws.Int64(int64(1)), - }, - } - - result := flattenStepAdjustments(expanded)[0] - if result == nil { - t.Fatal("expected result to have value, but got nil") - } - if result["metric_interval_lower_bound"] != float64(1.0) { - t.Fatalf("expected metric_interval_lower_bound to be 1.0, but got %d", result["metric_interval_lower_bound"]) - } - if result["metric_interval_upper_bound"] != float64(2.0) { - t.Fatalf("expected metric_interval_upper_bound to be 1.0, but got %d", result["metric_interval_upper_bound"]) - } - if result["scaling_adjustment"] != int64(1) { - t.Fatalf("expected scaling_adjustment to be 1, but got %d", result["scaling_adjustment"]) - } -} - -func TestFlattenResourceRecords(t *testing.T) { - original := []string{ - `127.0.0.1`, - `"abc def"`, - `"abc" "def"`, - `"abc" ""`, - } - - dequoted := []string{ - `127.0.0.1`, - `abc def`, - `abc" "def`, - `abc" "`, - } - - var wrapped []*route53.ResourceRecord = nil - for _, original := range original { - wrapped = append(wrapped, 
&route53.ResourceRecord{Value: aws.String(original)}) - } - - sub := func(recordType string, expected []string) { - t.Run(recordType, func(t *testing.T) { - checkFlattenResourceRecords(t, recordType, wrapped, expected) - }) - } - - // These record types should be dequoted. - sub("TXT", dequoted) - sub("SPF", dequoted) - - // These record types should not be touched. - sub("CNAME", original) - sub("MX", original) -} - -func checkFlattenResourceRecords( - t *testing.T, - recordType string, - expanded []*route53.ResourceRecord, - expected []string) { - - result := flattenResourceRecords(expanded, recordType) - - if result == nil { - t.Fatal("expected result to have value, but got nil") - } - - if len(result) != len(expected) { - t.Fatalf("expected %v, got %v", expected, result) - } - - for i, e := range expected { - if result[i] != e { - t.Fatalf("expected %v, got %v", expected, result) - } - } -} - -func TestFlattenAsgEnabledMetrics(t *testing.T) { - expanded := []*autoscaling.EnabledMetric{ - &autoscaling.EnabledMetric{Granularity: aws.String("1Minute"), Metric: aws.String("GroupTotalInstances")}, - &autoscaling.EnabledMetric{Granularity: aws.String("1Minute"), Metric: aws.String("GroupMaxSize")}, - } - - result := flattenAsgEnabledMetrics(expanded) - - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - - if result[0] != "GroupTotalInstances" { - t.Fatalf("expected id to be GroupTotalInstances, but was %s", result[0]) - } - - if result[1] != "GroupMaxSize" { - t.Fatalf("expected id to be GroupMaxSize, but was %s", result[1]) - } -} - -func TestFlattenKinesisShardLevelMetrics(t *testing.T) { - expanded := []*kinesis.EnhancedMetrics{ - &kinesis.EnhancedMetrics{ - ShardLevelMetrics: []*string{ - aws.String("IncomingBytes"), - aws.String("IncomingRecords"), - }, - }, - } - result := flattenKinesisShardLevelMetrics(expanded) - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) 
- } - if result[0] != "IncomingBytes" { - t.Fatalf("expected element 0 to be IncomingBytes, but was %s", result[0]) - } - if result[1] != "IncomingRecords" { - t.Fatalf("expected element 0 to be IncomingRecords, but was %s", result[1]) - } -} - -func TestFlattenSecurityGroups(t *testing.T) { - cases := []struct { - ownerId *string - pairs []*ec2.UserIdGroupPair - expected []*ec2.GroupIdentifier - }{ - // simple, no user id included (we ignore it mostly) - { - ownerId: aws.String("user1234"), - pairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-12345"), - }, - }, - expected: []*ec2.GroupIdentifier{ - &ec2.GroupIdentifier{ - GroupId: aws.String("sg-12345"), - }, - }, - }, - // include the owner id, but keep it consitent with the same account. Tests - // EC2 classic situation - { - ownerId: aws.String("user1234"), - pairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-12345"), - UserId: aws.String("user1234"), - }, - }, - expected: []*ec2.GroupIdentifier{ - &ec2.GroupIdentifier{ - GroupId: aws.String("sg-12345"), - }, - }, - }, - - // include the owner id, but from a different account. This is reflects - // EC2 Classic when referring to groups by name - { - ownerId: aws.String("user1234"), - pairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-12345"), - GroupName: aws.String("somegroup"), // GroupName is only included in Classic - UserId: aws.String("user4321"), - }, - }, - expected: []*ec2.GroupIdentifier{ - &ec2.GroupIdentifier{ - GroupId: aws.String("sg-12345"), - GroupName: aws.String("user4321/somegroup"), - }, - }, - }, - - // include the owner id, but from a different account. 
This reflects in - // EC2 VPC when referring to groups by id - { - ownerId: aws.String("user1234"), - pairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{ - GroupId: aws.String("sg-12345"), - UserId: aws.String("user4321"), - }, - }, - expected: []*ec2.GroupIdentifier{ - &ec2.GroupIdentifier{ - GroupId: aws.String("user4321/sg-12345"), - }, - }, - }, - } - - for _, c := range cases { - out := flattenSecurityGroups(c.pairs, c.ownerId) - if !reflect.DeepEqual(out, c.expected) { - t.Fatalf("Error matching output and expected: %#v vs %#v", out, c.expected) - } - } -} - -func TestFlattenApiGatewayThrottleSettings(t *testing.T) { - expectedBurstLimit := int64(140) - expectedRateLimit := 120.0 - - ts := &apigateway.ThrottleSettings{ - BurstLimit: aws.Int64(expectedBurstLimit), - RateLimit: aws.Float64(expectedRateLimit), - } - result := flattenApiGatewayThrottleSettings(ts) - - if len(result) != 1 { - t.Fatalf("Expected map to have exactly 1 element, got %d", len(result)) - } - - burstLimit, ok := result[0]["burst_limit"] - if !ok { - t.Fatal("Expected 'burst_limit' key in the map") - } - burstLimitInt, ok := burstLimit.(int64) - if !ok { - t.Fatal("Expected 'burst_limit' to be int") - } - if burstLimitInt != expectedBurstLimit { - t.Fatalf("Expected 'burst_limit' to equal %d, got %d", expectedBurstLimit, burstLimitInt) - } - - rateLimit, ok := result[0]["rate_limit"] - if !ok { - t.Fatal("Expected 'rate_limit' key in the map") - } - rateLimitFloat, ok := rateLimit.(float64) - if !ok { - t.Fatal("Expected 'rate_limit' to be float64") - } - if rateLimitFloat != expectedRateLimit { - t.Fatalf("Expected 'rate_limit' to equal %f, got %f", expectedRateLimit, rateLimitFloat) - } -} - -func TestFlattenApiGatewayStageKeys(t *testing.T) { - cases := []struct { - Input []*string - Output []map[string]interface{} - }{ - { - Input: []*string{ - aws.String("a1b2c3d4e5/dev"), - aws.String("e5d4c3b2a1/test"), - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - 
"stage_name": "dev", - "rest_api_id": "a1b2c3d4e5", - }, - map[string]interface{}{ - "stage_name": "test", - "rest_api_id": "e5d4c3b2a1", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenApiGatewayStageKeys(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestExpandPolicyAttributes(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "Protocol-TLSv1", - "value": "false", - }, - map[string]interface{}{ - "name": "Protocol-TLSv1.1", - "value": "false", - }, - map[string]interface{}{ - "name": "Protocol-TLSv1.2", - "value": "true", - }, - } - attributes, err := expandPolicyAttributes(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - if len(attributes) != 3 { - t.Fatalf("expected number of attributes to be 3, but got %d", len(attributes)) - } - - expected := &elb.PolicyAttribute{ - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String("true"), - } - - if !reflect.DeepEqual(attributes[2], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - attributes[2], - expected) - } -} - -func TestExpandPolicyAttributes_invalid(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "Protocol-TLSv1.2", - "value": "true", - }, - } - attributes, err := expandPolicyAttributes(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &elb.PolicyAttribute{ - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String("false"), - } - - if reflect.DeepEqual(attributes[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - attributes[0], - expected) - } -} - -func TestExpandPolicyAttributes_empty(t *testing.T) { - var expanded []interface{} - - attributes, err := expandPolicyAttributes(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - if len(attributes) != 0 { - t.Fatalf("expected number of attributes to be 0, but got 
%d", len(attributes)) - } -} - -func TestFlattenPolicyAttributes(t *testing.T) { - cases := []struct { - Input []*elb.PolicyAttributeDescription - Output []interface{} - }{ - { - Input: []*elb.PolicyAttributeDescription{ - &elb.PolicyAttributeDescription{ - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String("true"), - }, - }, - Output: []interface{}{ - map[string]string{ - "name": "Protocol-TLSv1.2", - "value": "true", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenPolicyAttributes(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestNormalizeJsonString(t *testing.T) { - var err error - var actual string - - // Well formatted and valid. - validJson := `{ - "abc": { - "def": 123, - "xyz": [ - { - "a": "ホリネズミ" - }, - { - "b": "1\\n2" - } - ] - } -}` - expected := `{"abc":{"def":123,"xyz":[{"a":"ホリネズミ"},{"b":"1\\n2"}]}}` - - actual, err = normalizeJsonString(validJson) - if err != nil { - t.Fatalf("Expected not to throw an error while parsing JSON, but got: %s", err) - } - - if actual != expected { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", actual, expected) - } - - // Well formatted but not valid, - // missing closing squre bracket. - invalidJson := `{ - "abc": { - "def": 123, - "xyz": [ - { - "a": "1" - } - } - } -}` - actual, err = normalizeJsonString(invalidJson) - if err == nil { - t.Fatalf("Expected to throw an error while parsing JSON, but got: %s", err) - } - - // We expect the invalid JSON to be shown back to us again. 
- if actual != invalidJson { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", expected, invalidJson) - } -} - -func TestCheckYamlString(t *testing.T) { - var err error - var actual string - - validYaml := `--- -abc: - def: 123 - xyz: - - - a: "ホリネズミ" - b: "1" -` - - actual, err = checkYamlString(validYaml) - if err != nil { - t.Fatalf("Expected not to throw an error while parsing YAML, but got: %s", err) - } - - // We expect the same YAML string back - if actual != validYaml { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", actual, validYaml) - } - - invalidYaml := `abc: [` - - actual, err = checkYamlString(invalidYaml) - if err == nil { - t.Fatalf("Expected to throw an error while parsing YAML, but got: %s", err) - } - - // We expect the invalid YAML to be shown back to us again. - if actual != invalidYaml { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", actual, invalidYaml) - } -} - -func TestNormalizeCloudFormationTemplate(t *testing.T) { - var err error - var actual string - - validNormalizedJson := `{"abc":"1"}` - actual, err = normalizeCloudFormationTemplate(validNormalizedJson) - if err != nil { - t.Fatalf("Expected not to throw an error while parsing template, but got: %s", err) - } - if actual != validNormalizedJson { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", actual, validNormalizedJson) - } - - validNormalizedYaml := `abc: 1 -` - actual, err = normalizeCloudFormationTemplate(validNormalizedYaml) - if err != nil { - t.Fatalf("Expected not to throw an error while parsing template, but got: %s", err) - } - if actual != validNormalizedYaml { - t.Fatalf("Got:\n\n%s\n\nExpected:\n\n%s\n", actual, validNormalizedYaml) - } -} diff --git a/builtin/providers/aws/tags.go b/builtin/providers/aws/tags.go deleted file mode 100644 index 46438c0fd..000000000 --- a/builtin/providers/aws/tags.go +++ /dev/null @@ -1,407 +0,0 @@ -package aws - -import ( - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -// tagsSchema returns the schema to use for tags. -// -func tagsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - } -} - -func tagsSchemaComputed() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Computed: true, - } -} - -func setElbV2Tags(conn *elbv2.ELBV2, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffElbV2Tags(tagsFromMapELBv2(o), tagsFromMapELBv2(n)) - - // Set tags - if len(remove) > 0 { - var tagKeys []*string - for _, tag := range remove { - tagKeys = append(tagKeys, tag.Key) - } - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.RemoveTags(&elbv2.RemoveTagsInput{ - ResourceArns: []*string{aws.String(d.Id())}, - TagKeys: tagKeys, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := conn.AddTags(&elbv2.AddTagsInput{ - ResourceArns: []*string{aws.String(d.Id())}, - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -func setVolumeTags(conn *ec2.EC2, d *schema.ResourceData) error { - if d.HasChange("volume_tags") { - oraw, nraw := d.GetChange("volume_tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTags(tagsFromMap(o), tagsFromMap(n)) - - volumeIds, err := getAwsInstanceVolumeIds(conn, d) - if err != nil { - return err - } - - if len(remove) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing volume tags: %#v from %s", remove, d.Id()) - _, err 
:= conn.DeleteTags(&ec2.DeleteTagsInput{ - Resources: volumeIds, - Tags: remove, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), ".NotFound") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - if len(create) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating vol tags: %s for %s", create, d.Id()) - _, err := conn.CreateTags(&ec2.CreateTagsInput{ - Resources: volumeIds, - Tags: create, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), ".NotFound") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - } - - return nil -} - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTags(conn *ec2.EC2, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTags(tagsFromMap(o), tagsFromMap(n)) - - // Set tags - if len(remove) > 0 { - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.DeleteTags(&ec2.DeleteTagsInput{ - Resources: []*string{aws.String(d.Id())}, - Tags: remove, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), ".NotFound") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - if len(create) > 0 { - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := 
conn.CreateTags(&ec2.CreateTagsInput{ - Resources: []*string{aws.String(d.Id())}, - Tags: create, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), ".NotFound") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTags(oldTags, newTags []*ec2.Tag) ([]*ec2.Tag, []*ec2.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*ec2.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - remove = append(remove, t) - } - } - - return tagsFromMap(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMap(m map[string]interface{}) []*ec2.Tag { - result := make([]*ec2.Tag, 0, len(m)) - for k, v := range m { - t := &ec2.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnored(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMap(ts []*ec2.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnored(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -func diffElbV2Tags(oldTags, newTags []*elbv2.Tag) ([]*elbv2.Tag, []*elbv2.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*elbv2.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! 
- remove = append(remove, t) - } - } - - return tagsFromMapELBv2(create), remove -} - -// tagsToMapELBv2 turns the list of tags into a map. -func tagsToMapELBv2(ts []*elbv2.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredELBv2(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// tagsFromMapELBv2 returns the tags for the given map of data. -func tagsFromMapELBv2(m map[string]interface{}) []*elbv2.Tag { - var result []*elbv2.Tag - for k, v := range m { - t := &elbv2.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredELBv2(t) { - result = append(result, t) - } - } - - return result -} - -// tagIgnored compares a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnored(t *ec2.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} - -// and for ELBv2 as well -func tagIgnoredELBv2(t *elbv2.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} - -// tagsToMapDynamoDb turns the list of tags into a map for dynamoDB -func tagsToMapDynamoDb(ts []*dynamodb.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - result[*t.Key] = *t.Value - } - return result -} - -// tagsFromMapDynamoDb returns the tags for a given map -func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag { - result := make([]*dynamodb.Tag, 0, len(m)) - for k, v := range m { - t := &dynamodb.Tag{ - Key: aws.String(k), - 
Value: aws.String(v.(string)), - } - result = append(result, t) - } - return result -} - -// setTagsDynamoDb is a helper to set the tags for a dynamoDB resource -// This is needed because dynamodb requires a completely different set and delete -// method from the ec2 tag resource handling. Also the `UntagResource` method -// for dynamoDB only requires a list of tag keys, instead of the full map of keys. -func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error { - if d.HasChange("tags") { - arn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) - - // Set tags - if len(remove) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.UntagResource(&dynamodb.UntagResourceInput{ - ResourceArn: aws.String(arn), - TagKeys: remove, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - if len(create) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := conn.TagResource(&dynamodb.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: create, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTagsDynamoDb takes a local set of dynamodb tags and the ones found remotely -// and returns the set of tags 
that must be created as a map, and returns a list of tag keys -// that must be destroyed. -func diffTagsDynamoDb(oldTags, newTags []*dynamodb.Tag) ([]*dynamodb.Tag, []*string) { - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - var remove []*string - for _, t := range oldTags { - // Verify the old tag is not a tag we're currently attempting to create - old, ok := create[*t.Key] - if !ok || old != *t.Value { - remove = append(remove, t.Key) - } - } - return tagsFromMapDynamoDb(create), remove -} diff --git a/builtin/providers/aws/tagsBeanstalk.go b/builtin/providers/aws/tagsBeanstalk.go deleted file mode 100644 index 7b85d6116..000000000 --- a/builtin/providers/aws/tagsBeanstalk.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" -) - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsBeanstalk(oldTags, newTags []*elasticbeanstalk.Tag) ([]*elasticbeanstalk.Tag, []*elasticbeanstalk.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*elasticbeanstalk.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapBeanstalk(create), remove -} - -// tagsFromMap returns the tags for the given map of data. 
-func tagsFromMapBeanstalk(m map[string]interface{}) []*elasticbeanstalk.Tag { - var result []*elasticbeanstalk.Tag - for k, v := range m { - t := &elasticbeanstalk.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredBeanstalk(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapBeanstalk(ts []*elasticbeanstalk.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredBeanstalk(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredBeanstalk(t *elasticbeanstalk.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsBeanstalk_test.go b/builtin/providers/aws/tagsBeanstalk_test.go deleted file mode 100644 index 0c9233863..000000000 --- a/builtin/providers/aws/tagsBeanstalk_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticbeanstalk" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffBeanstalkTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: 
map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsBeanstalk(tagsFromMapBeanstalk(tc.Old), tagsFromMapBeanstalk(tc.New)) - cm := tagsToMapBeanstalk(c) - rm := tagsToMapBeanstalk(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsBeanstalk(t *testing.T) { - var ignoredTags []*elasticbeanstalk.Tag - ignoredTags = append(ignoredTags, &elasticbeanstalk.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &elasticbeanstalk.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredBeanstalk(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckBeanstalkTags( - ts *[]*elasticbeanstalk.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapBeanstalk(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsCloudFront.go b/builtin/providers/aws/tagsCloudFront.go deleted file mode 100644 index d2b60c73c..000000000 --- a/builtin/providers/aws/tagsCloudFront.go +++ /dev/null @@ -1,98 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/terraform/helper/schema" -) - -func setTagsCloudFront(conn *cloudfront.CloudFront, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsCloudFront(tagsFromMapCloudFront(o), tagsFromMapCloudFront(n)) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %s", remove) - k := make([]*string, 0, len(remove)) - for _, t := range remove { - k = append(k, t.Key) - } - - _, err := conn.UntagResource(&cloudfront.UntagResourceInput{ - Resource: aws.String(arn), - TagKeys: &cloudfront.TagKeys{ - Items: k, - }, - }) - if err != nil { - return err - } - } - - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s", create) - _, err := conn.TagResource(&cloudfront.TagResourceInput{ - Resource: aws.String(arn), - Tags: &cloudfront.Tags{ - Items: create, - }, - }) - if err != nil { - return err - } - } - - } - - return nil -} -func diffTagsCloudFront(oldTags, newTags *cloudfront.Tags) ([]*cloudfront.Tag, []*cloudfront.Tag) { - // First, we're creating everything we have - create 
:= make(map[string]interface{}) - for _, t := range newTags.Items { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*cloudfront.Tag - for _, t := range oldTags.Items { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - createTags := tagsFromMapCloudFront(create) - return createTags.Items, remove -} - -func tagsFromMapCloudFront(m map[string]interface{}) *cloudfront.Tags { - result := make([]*cloudfront.Tag, 0, len(m)) - for k, v := range m { - result = append(result, &cloudfront.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - tags := &cloudfront.Tags{ - Items: result, - } - - return tags -} - -func tagsToMapCloudFront(ts *cloudfront.Tags) map[string]string { - result := make(map[string]string) - - for _, t := range ts.Items { - result[*t.Key] = *t.Value - } - - return result -} diff --git a/builtin/providers/aws/tagsCloudtrail.go b/builtin/providers/aws/tagsCloudtrail.go deleted file mode 100644 index b4302ddd1..000000000 --- a/builtin/providers/aws/tagsCloudtrail.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsCloudtrail(conn *cloudtrail.CloudTrail, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsCloudtrail(tagsFromMapCloudtrail(o), tagsFromMapCloudtrail(n)) - - // Set tags - if len(remove) > 0 { - input := cloudtrail.RemoveTagsInput{ - ResourceId: aws.String(d.Get("arn").(string)), - TagsList: remove, - } - log.Printf("[DEBUG] Removing CloudTrail tags: %s", input) - _, err := conn.RemoveTags(&input) - if err != nil { - return err - } - } - if len(create) > 0 { - input := cloudtrail.AddTagsInput{ - ResourceId: aws.String(d.Get("arn").(string)), - TagsList: create, - } - log.Printf("[DEBUG] Adding CloudTrail tags: %s", input) - _, err := conn.AddTags(&input) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsCloudtrail(oldTags, newTags []*cloudtrail.Tag) ([]*cloudtrail.Tag, []*cloudtrail.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*cloudtrail.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapCloudtrail(create), remove -} - -// tagsFromMap returns the tags for the given map of data. 
-func tagsFromMapCloudtrail(m map[string]interface{}) []*cloudtrail.Tag { - var result []*cloudtrail.Tag - for k, v := range m { - t := &cloudtrail.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredCloudtrail(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapCloudtrail(ts []*cloudtrail.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredCloudtrail(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredCloudtrail(t *cloudtrail.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsCloudtrail_test.go b/builtin/providers/aws/tagsCloudtrail_test.go deleted file mode 100644 index a1d7b0463..000000000 --- a/builtin/providers/aws/tagsCloudtrail_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudtrail" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffCloudtrailTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, 
- Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsCloudtrail(tagsFromMapCloudtrail(tc.Old), tagsFromMapCloudtrail(tc.New)) - cm := tagsToMapCloudtrail(c) - rm := tagsToMapCloudtrail(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsCloudtrail(t *testing.T) { - var ignoredTags []*cloudtrail.Tag - ignoredTags = append(ignoredTags, &cloudtrail.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &cloudtrail.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredCloudtrail(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckCloudTrailCheckTags can be used to check the tags on a trail -func testAccCheckCloudTrailCheckTags(tags *[]*cloudtrail.Tag, expectedTags map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !reflect.DeepEqual(expectedTags, tagsToMapCloudtrail(*tags)) { - return fmt.Errorf("Tags mismatch.\nExpected: %#v\nGiven: %#v", - expectedTags, tagsToMapCloudtrail(*tags)) - } - return nil - } -} diff --git a/builtin/providers/aws/tagsCodeBuild.go b/builtin/providers/aws/tagsCodeBuild.go deleted file mode 100644 index 3302d7426..000000000 --- a/builtin/providers/aws/tagsCodeBuild.go +++ /dev/null @@ -1,67 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codebuild" -) - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. 
-func diffTagsCodeBuild(oldTags, newTags []*codebuild.Tag) ([]*codebuild.Tag, []*codebuild.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*codebuild.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapCodeBuild(create), remove -} - -func tagsFromMapCodeBuild(m map[string]interface{}) []*codebuild.Tag { - result := []*codebuild.Tag{} - for k, v := range m { - result = append(result, &codebuild.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func tagsToMapCodeBuild(ts []*codebuild.Tag) map[string]string { - result := map[string]string{} - for _, t := range ts { - result[*t.Key] = *t.Value - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredCodeBuild(t *codebuild.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsCodeBuild_test.go b/builtin/providers/aws/tagsCodeBuild_test.go deleted file mode 100644 index 82a3af899..000000000 --- a/builtin/providers/aws/tagsCodeBuild_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/codebuild" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffTagsCodeBuild(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove 
map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsCodeBuild(tagsFromMapCodeBuild(tc.Old), tagsFromMapCodeBuild(tc.New)) - cm := tagsToMapCodeBuild(c) - rm := tagsToMapCodeBuild(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsCodeBuild(t *testing.T) { - var ignoredTags []*codebuild.Tag - ignoredTags = append(ignoredTags, &codebuild.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &codebuild.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredCodeBuild(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckTagsCodeBuild( - ts *[]*codebuild.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapCodeBuild(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsEC.go b/builtin/providers/aws/tagsEC.go deleted file mode 100644 index b9b22af9c..000000000 --- a/builtin/providers/aws/tagsEC.go +++ /dev/null @@ -1,115 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsEC(conn *elasticache.ElastiCache, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsEC(tagsFromMapEC(o), tagsFromMapEC(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.RemoveTagsFromResource(&elasticache.RemoveTagsFromResourceInput{ - ResourceName: aws.String(arn), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.AddTagsToResource(&elasticache.AddTagsToResourceInput{ - ResourceName: aws.String(arn), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, 
and the set of tags that must -// be destroyed. -func diffTagsEC(oldTags, newTags []*elasticache.Tag) ([]*elasticache.Tag, []*elasticache.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*elasticache.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapEC(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapEC(m map[string]interface{}) []*elasticache.Tag { - result := make([]*elasticache.Tag, 0, len(m)) - for k, v := range m { - t := &elasticache.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredEC(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapEC(ts []*elasticache.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredEC(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredEC(t *elasticache.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsEC_test.go b/builtin/providers/aws/tagsEC_test.go deleted file mode 100644 index 3ea3a8d70..000000000 --- a/builtin/providers/aws/tagsEC_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffelasticacheTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsEC(tagsFromMapEC(tc.Old), tagsFromMapEC(tc.New)) - cm := tagsToMapEC(c) - rm := tagsToMapEC(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsEC(t *testing.T) { - var ignoredTags []*elasticache.Tag - ignoredTags = append(ignoredTags, &elasticache.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &elasticache.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredEC(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckelasticacheTags( - ts []*elasticache.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapEC(ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsEFS.go b/builtin/providers/aws/tagsEFS.go deleted file mode 100644 index b61973165..000000000 --- a/builtin/providers/aws/tagsEFS.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsEFS(conn *efs.EFS, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsEFS(tagsFromMapEFS(o), tagsFromMapEFS(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, 0, len(remove)) - for _, t := range remove { - k = append(k, t.Key) - } - _, err := conn.DeleteTags(&efs.DeleteTagsInput{ - FileSystemId: aws.String(d.Id()), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.CreateTags(&efs.CreateTagsInput{ - FileSystemId: aws.String(d.Id()), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. 
-func diffTagsEFS(oldTags, newTags []*efs.Tag) ([]*efs.Tag, []*efs.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*efs.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapEFS(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapEFS(m map[string]interface{}) []*efs.Tag { - var result []*efs.Tag - for k, v := range m { - t := &efs.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredEFS(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapEFS(ts []*efs.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredEFS(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredEFS(t *efs.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsEFS_test.go b/builtin/providers/aws/tagsEFS_test.go deleted file mode 100644 index 58ed72da1..000000000 --- a/builtin/providers/aws/tagsEFS_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffEFSTags(t *testing.T) { - cases := []struct { 
- Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsEFS(tagsFromMapEFS(tc.Old), tagsFromMapEFS(tc.New)) - cm := tagsToMapEFS(c) - rm := tagsToMapEFS(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsEFS(t *testing.T) { - var ignoredTags []*efs.Tag - ignoredTags = append(ignoredTags, &efs.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &efs.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredEFS(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckEFSTags( - ts *[]*efs.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapEFS(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsELB.go b/builtin/providers/aws/tagsELB.go deleted file mode 100644 index 081de9cc1..000000000 --- a/builtin/providers/aws/tagsELB.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsELB(conn *elb.ELB, d *schema.ResourceData) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsELB(tagsFromMapELB(o), tagsFromMapELB(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*elb.TagKeyOnly, 0, len(remove)) - for _, t := range remove { - k = append(k, &elb.TagKeyOnly{Key: t.Key}) - } - _, err := conn.RemoveTags(&elb.RemoveTagsInput{ - LoadBalancerNames: []*string{aws.String(d.Get("name").(string))}, - Tags: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.AddTags(&elb.AddTagsInput{ - LoadBalancerNames: []*string{aws.String(d.Get("name").(string))}, - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set 
of tags that must -// be destroyed. -func diffTagsELB(oldTags, newTags []*elb.Tag) ([]*elb.Tag, []*elb.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*elb.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapELB(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapELB(m map[string]interface{}) []*elb.Tag { - var result []*elb.Tag - for k, v := range m { - t := &elb.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredELB(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapELB(ts []*elb.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredELB(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredELB(t *elb.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsELB_test.go b/builtin/providers/aws/tagsELB_test.go deleted file mode 100644 index 3fc97d661..000000000 --- a/builtin/providers/aws/tagsELB_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffELBTags(t 
*testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsELB(tagsFromMapELB(tc.Old), tagsFromMapELB(tc.New)) - cm := tagsToMapELB(c) - rm := tagsToMapELB(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsELB(t *testing.T) { - var ignoredTags []*elb.Tag - ignoredTags = append(ignoredTags, &elb.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &elb.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredELB(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckELBTags( - ts *[]*elb.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapELB(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsGeneric.go b/builtin/providers/aws/tagsGeneric.go deleted file mode 100644 index d494a4972..000000000 --- a/builtin/providers/aws/tagsGeneric.go +++ /dev/null @@ -1,69 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" -) - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsGeneric(oldTags, newTags map[string]interface{}) (map[string]*string, map[string]*string) { - // First, we're creating everything we have - create := make(map[string]*string) - for k, v := range newTags { - create[k] = aws.String(v.(string)) - } - - // Build the map of what to remove - remove := make(map[string]*string) - for k, v := range oldTags { - old, ok := create[k] - if !ok || old != aws.String(v.(string)) { - // Delete it! - remove[k] = aws.String(v.(string)) - } - } - - return create, remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapGeneric(m map[string]interface{}) map[string]*string { - result := make(map[string]*string) - for k, v := range m { - if !tagIgnoredGeneric(k) { - result[k] = aws.String(v.(string)) - } - } - - return result -} - -// tagsToMap turns the tags into a map. 
-func tagsToMapGeneric(ts map[string]*string) map[string]string { - result := make(map[string]string) - for k, v := range ts { - if !tagIgnoredGeneric(k) { - result[k] = aws.StringValue(v) - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredGeneric(k string) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, k) - if r, _ := regexp.MatchString(v, k); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s, ignoring.\n", k) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsGeneric_test.go b/builtin/providers/aws/tagsGeneric_test.go deleted file mode 100644 index 2477f3aa5..000000000 --- a/builtin/providers/aws/tagsGeneric_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" -) - -// go test -v -run="TestDiffGenericTags" -func TestDiffGenericTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsGeneric(tc.Old, tc.New) - cm := tagsToMapGeneric(c) - rm := tagsToMapGeneric(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -// go test -v -run="TestIgnoringTagsGeneric" -func TestIgnoringTagsGeneric(t *testing.T) { - 
ignoredTags := map[string]*string{ - "aws:cloudformation:logical-id": aws.String("foo"), - "aws:foo:bar": aws.String("baz"), - } - for k, v := range ignoredTags { - if !tagIgnoredGeneric(k) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", k, *v) - } - } -} diff --git a/builtin/providers/aws/tagsInspector.go b/builtin/providers/aws/tagsInspector.go deleted file mode 100644 index ef18f33c2..000000000 --- a/builtin/providers/aws/tagsInspector.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/inspector" -) - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsInspector(oldTags, newTags []*inspector.ResourceGroupTag) ([]*inspector.ResourceGroupTag, []*inspector.ResourceGroupTag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*inspector.ResourceGroupTag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapInspector(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapInspector(m map[string]interface{}) []*inspector.ResourceGroupTag { - var result []*inspector.ResourceGroupTag - for k, v := range m { - t := &inspector.ResourceGroupTag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredInspector(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. 
-func tagsToMapInspector(ts []*inspector.ResourceGroupTag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredInspector(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredInspector(t *inspector.ResourceGroupTag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsKMS.go b/builtin/providers/aws/tagsKMS.go deleted file mode 100644 index 4e918414e..000000000 --- a/builtin/providers/aws/tagsKMS.go +++ /dev/null @@ -1,115 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsKMS(conn *kms.KMS, d *schema.ResourceData, keyId string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsKMS(tagsFromMapKMS(o), tagsFromMapKMS(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.TagKey - } - - _, err := conn.UntagResource(&kms.UntagResourceInput{ - KeyId: aws.String(keyId), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&kms.TagResourceInput{ - KeyId: aws.String(keyId), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsKMS(oldTags, newTags []*kms.Tag) ([]*kms.Tag, []*kms.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[aws.StringValue(t.TagKey)] = aws.StringValue(t.TagValue) - } - - // Build the list of what to remove - var remove []*kms.Tag - for _, t := range oldTags { - old, ok := create[aws.StringValue(t.TagKey)] - if !ok || old != aws.StringValue(t.TagValue) { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapKMS(create), remove -} - -// tagsFromMap returns the tags for the given map of data. 
-func tagsFromMapKMS(m map[string]interface{}) []*kms.Tag { - result := make([]*kms.Tag, 0, len(m)) - for k, v := range m { - t := &kms.Tag{ - TagKey: aws.String(k), - TagValue: aws.String(v.(string)), - } - if !tagIgnoredKMS(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapKMS(ts []*kms.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredKMS(t) { - result[aws.StringValue(t.TagKey)] = aws.StringValue(t.TagValue) - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredKMS(t *kms.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.TagKey) - if r, _ := regexp.MatchString(v, *t.TagKey); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.TagKey, *t.TagValue) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsKMS_test.go b/builtin/providers/aws/tagsKMS_test.go deleted file mode 100644 index a1d7a770e..000000000 --- a/builtin/providers/aws/tagsKMS_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// go test -v -run="TestDiffKMSTags" -func TestDiffKMSTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", 
- }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsKMS(tagsFromMapKMS(tc.Old), tagsFromMapKMS(tc.New)) - cm := tagsToMapKMS(c) - rm := tagsToMapKMS(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -// go test -v -run="TestIgnoringTagsKMS" -func TestIgnoringTagsKMS(t *testing.T) { - var ignoredTags []*kms.Tag - ignoredTags = append(ignoredTags, &kms.Tag{ - TagKey: aws.String("aws:cloudformation:logical-id"), - TagValue: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &kms.Tag{ - TagKey: aws.String("aws:foo:bar"), - TagValue: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredKMS(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.TagKey, *tag.TagValue) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckKMSTags( - ts []*kms.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapKMS(ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsLambda.go b/builtin/providers/aws/tagsLambda.go deleted file mode 100644 index 28aa25121..000000000 --- a/builtin/providers/aws/tagsLambda.go +++ /dev/null @@ -1,50 +0,0 @@ -package aws - -import ( - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsLambda(conn *lambda.Lambda, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsGeneric(o, n) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&lambda.UntagResourceInput{ - Resource: aws.String(arn), - TagKeys: keys, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - - _, err := conn.TagResource(&lambda.TagResourceInput{ - Resource: aws.String(arn), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/builtin/providers/aws/tagsRDS.go b/builtin/providers/aws/tagsRDS.go deleted file mode 100644 index 2d6411348..000000000 --- a/builtin/providers/aws/tagsRDS.go +++ /dev/null @@ -1,133 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsRDS(tagsFromMapRDS(o), tagsFromMapRDS(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %s", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.RemoveTagsFromResource(&rds.RemoveTagsFromResourceInput{ - ResourceName: aws.String(arn), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s", create) - _, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{ - ResourceName: aws.String(arn), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsRDS(oldTags, newTags []*rds.Tag) ([]*rds.Tag, []*rds.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*rds.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapRDS(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapRDS(m map[string]interface{}) []*rds.Tag { - result := make([]*rds.Tag, 0, len(m)) - for k, v := range m { - t := &rds.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredRDS(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. 
-func tagsToMapRDS(ts []*rds.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredRDS(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) - - if err != nil { - return fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn) - } - - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } - - return d.Set("tags", tagsToMapRDS(dt)) -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredRDS(t *rds.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsRDS_test.go b/builtin/providers/aws/tagsRDS_test.go deleted file mode 100644 index cc2887daa..000000000 --- a/builtin/providers/aws/tagsRDS_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffRDSTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: 
map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsRDS(tagsFromMapRDS(tc.Old), tagsFromMapRDS(tc.New)) - cm := tagsToMapRDS(c) - rm := tagsToMapRDS(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsRDS(t *testing.T) { - var ignoredTags []*rds.Tag - ignoredTags = append(ignoredTags, &rds.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &rds.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredRDS(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckRDSTags( - ts []*rds.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapRDS(ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tagsRedshift.go b/builtin/providers/aws/tagsRedshift.go deleted file mode 100644 index 715e82045..000000000 --- a/builtin/providers/aws/tagsRedshift.go +++ /dev/null @@ -1,108 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/redshift" - "github.com/hashicorp/terraform/helper/schema" -) - -func setTagsRedshift(conn *redshift.Redshift, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := 
oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsRedshift(tagsFromMapRedshift(o), tagsFromMapRedshift(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.DeleteTags(&redshift.DeleteTagsInput{ - ResourceName: aws.String(arn), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.CreateTags(&redshift.CreateTagsInput{ - ResourceName: aws.String(arn), - Tags: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -func diffTagsRedshift(oldTags, newTags []*redshift.Tag) ([]*redshift.Tag, []*redshift.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*redshift.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! 
- remove = append(remove, t) - } - } - - return tagsFromMapRedshift(create), remove -} - -func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag { - result := make([]*redshift.Tag, 0, len(m)) - for k, v := range m { - t := &redshift.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredRedshift(t) { - result = append(result, t) - } - } - - return result -} - -func tagsToMapRedshift(ts []*redshift.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredRedshift(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredRedshift(t *redshift.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tagsRedshift_test.go b/builtin/providers/aws/tagsRedshift_test.go deleted file mode 100644 index ec269e540..000000000 --- a/builtin/providers/aws/tagsRedshift_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/redshift" -) - -func TestDiffRedshiftTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - 
for i, tc := range cases { - c, r := diffTagsRedshift(tagsFromMapRedshift(tc.Old), tagsFromMapRedshift(tc.New)) - cm := tagsToMapRedshift(c) - rm := tagsToMapRedshift(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsRedshift(t *testing.T) { - var ignoredTags []*redshift.Tag - ignoredTags = append(ignoredTags, &redshift.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &redshift.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredRedshift(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} diff --git a/builtin/providers/aws/tags_dms.go b/builtin/providers/aws/tags_dms.go deleted file mode 100644 index c88050059..000000000 --- a/builtin/providers/aws/tags_dms.go +++ /dev/null @@ -1,91 +0,0 @@ -package aws - -import ( - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform/helper/schema" -) - -func dmsTagsToMap(tags []*dms.Tag) map[string]string { - result := make(map[string]string) - - for _, tag := range tags { - result[*tag.Key] = *tag.Value - } - - return result -} - -func dmsTagsFromMap(m map[string]interface{}) []*dms.Tag { - result := make([]*dms.Tag, 0, len(m)) - - for k, v := range m { - result = append(result, &dms.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - return result -} - -func dmsDiffTags(oldTags, newTags []*dms.Tag) ([]*dms.Tag, []*dms.Tag) { - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - remove := []*dms.Tag{} - for _, t := range oldTags { - v, ok := create[*t.Key] - if !ok || v != *t.Value { - remove = append(remove, t) - } - } - - 
return dmsTagsFromMap(create), remove -} - -func dmsGetTagKeys(tags []*dms.Tag) []*string { - keys := []*string{} - - for _, tag := range tags { - keys = append(keys, tag.Key) - } - - return keys -} - -func dmsSetTags(arn string, d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).dmsconn - - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - - add, remove := dmsDiffTags(dmsTagsFromMap(o), dmsTagsFromMap(n)) - - if len(remove) > 0 { - _, err := conn.RemoveTagsFromResource(&dms.RemoveTagsFromResourceInput{ - ResourceArn: aws.String(arn), - TagKeys: dmsGetTagKeys(remove), - }) - if err != nil { - return err - } - } - - if len(add) > 0 { - _, err := conn.AddTagsToResource(&dms.AddTagsToResourceInput{ - ResourceArn: aws.String(arn), - Tags: add, - }) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/builtin/providers/aws/tags_dms_test.go b/builtin/providers/aws/tags_dms_test.go deleted file mode 100644 index 630ace372..000000000 --- a/builtin/providers/aws/tags_dms_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "reflect" -) - -func TestDmsTagsToMap(t *testing.T) { - tags := []*dms.Tag{ - { - Key: aws.String("test-key-1"), - Value: aws.String("test-value-1"), - }, - { - Key: aws.String("test-key-2"), - Value: aws.String("test-value-2"), - }, - } - - result := dmsTagsToMap(tags) - - for _, tag := range tags { - if v, ok := result[*tag.Key]; ok { - if v != *tag.Value { - t.Fatalf("Key %s had value of %s. 
Expected %s.", *tag.Key, v, *tag.Value) - } - } else { - t.Fatalf("Key %s not in map.", *tag.Key) - } - } -} - -func TestDmsTagsFromMap(t *testing.T) { - tagMap := map[string]interface{}{ - "test-key-1": "test-value-1", - "test-key-2": "test-value-2", - } - - result := dmsTagsFromMap(tagMap) - - for k, v := range tagMap { - found := false - for _, tag := range result { - if k == *tag.Key { - if v != *tag.Value { - t.Fatalf("Key %s had value of %s. Expected %s.", k, v, *tag.Value) - } - found = true - break - } - } - if !found { - t.Fatalf("Key %s not in tags.", k) - } - } -} - -func TestDmsDiffTags(t *testing.T) { - cases := []struct { - o, n map[string]interface{} - a, r map[string]string - }{ - // basic add / remove - { - o: map[string]interface{}{"test-key-1": "test-value-1"}, - n: map[string]interface{}{"test-key-2": "test-value-2"}, - a: map[string]string{"test-key-2": "test-value-2"}, - r: map[string]string{"test-key-1": "test-value-1"}, - }, - // modify - { - o: map[string]interface{}{"test-key-1": "test-value-1"}, - n: map[string]interface{}{"test-key-1": "test-value-1-modified"}, - a: map[string]string{"test-key-1": "test-value-1-modified"}, - r: map[string]string{"test-key-1": "test-value-1"}, - }, - } - - for _, c := range cases { - ar, rr := dmsDiffTags(dmsTagsFromMap(c.o), dmsTagsFromMap(c.n)) - a := dmsTagsToMap(ar) - r := dmsTagsToMap(rr) - - if !reflect.DeepEqual(a, c.a) { - t.Fatalf("Add tags mismatch: Actual %#v; Expected %#v", a, c.a) - } - if !reflect.DeepEqual(r, c.r) { - t.Fatalf("Remove tags mismatch: Actual %#v; Expected %#v", r, c.r) - } - } -} - -func TestDmsGetTagKeys(t *testing.T) { - tags := []*dms.Tag{ - { - Key: aws.String("test-key-1"), - Value: aws.String("test-value-1"), - }, - { - Key: aws.String("test-key-2"), - Value: aws.String("test-value-2"), - }, - } - - result := dmsGetTagKeys(tags) - expected := []*string{aws.String("test-key-1"), aws.String("test-key-2")} - - if !reflect.DeepEqual(result, expected) { - t.Fatalf("Actual 
%s; Expected %s", aws.StringValueSlice(result), aws.StringValueSlice(expected)) - } -} diff --git a/builtin/providers/aws/tags_elasticsearchservice.go b/builtin/providers/aws/tags_elasticsearchservice.go deleted file mode 100644 index e585d1afa..000000000 --- a/builtin/providers/aws/tags_elasticsearchservice.go +++ /dev/null @@ -1,114 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsElasticsearchService(conn *elasticsearch.ElasticsearchService, d *schema.ResourceData, arn string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsElasticsearchService(tagsFromMapElasticsearchService(o), tagsFromMapElasticsearchService(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, 0, len(remove)) - for _, t := range remove { - k = append(k, t.Key) - } - _, err := conn.RemoveTags(&elasticsearch.RemoveTagsInput{ - ARN: aws.String(arn), - TagKeys: k, - }) - if err != nil { - return err - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.AddTags(&elasticsearch.AddTagsInput{ - ARN: aws.String(arn), - TagList: create, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. 
-func diffTagsElasticsearchService(oldTags, newTags []*elasticsearch.Tag) ([]*elasticsearch.Tag, []*elasticsearch.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*elasticsearch.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapElasticsearchService(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapElasticsearchService(m map[string]interface{}) []*elasticsearch.Tag { - var result []*elasticsearch.Tag - for k, v := range m { - t := &elasticsearch.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredElasticsearchService(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. -func tagsToMapElasticsearchService(ts []*elasticsearch.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredElasticsearchService(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredElasticsearchService(t *elasticsearch.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tags_elasticsearchservice_test.go b/builtin/providers/aws/tags_elasticsearchservice_test.go deleted file mode 100644 index 11c211d3c..000000000 --- a/builtin/providers/aws/tags_elasticsearchservice_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" 
- "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffElasticsearchServiceTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsElasticsearchService(tagsFromMapElasticsearchService(tc.Old), tagsFromMapElasticsearchService(tc.New)) - cm := tagsToMapElasticsearchService(c) - rm := tagsToMapElasticsearchService(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsElasticsearchService(t *testing.T) { - var ignoredTags []*elasticsearch.Tag - ignoredTags = append(ignoredTags, &elasticsearch.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &elasticsearch.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredElasticsearchService(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckElasticsearchServiceTags( - ts *[]*elasticsearch.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapElasticsearchService(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tags_kinesis.go b/builtin/providers/aws/tags_kinesis.go deleted file mode 100644 index a5622e95d..000000000 --- a/builtin/providers/aws/tags_kinesis.go +++ /dev/null @@ -1,125 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tags" -func setTagsKinesis(conn *kinesis.Kinesis, d *schema.ResourceData) error { - - sn := d.Get("name").(string) - - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsKinesis(tagsFromMapKinesis(o), tagsFromMapKinesis(n)) - - // Set tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - k := make([]*string, len(remove), len(remove)) - for i, t := range remove { - k[i] = t.Key - } - - _, err := conn.RemoveTagsFromStream(&kinesis.RemoveTagsFromStreamInput{ - StreamName: aws.String(sn), - TagKeys: k, - }) - if err != nil { - return err - } - } - - if len(create) > 0 { - - log.Printf("[DEBUG] Creating tags: %#v", create) - t := make(map[string]*string) - for _, tag := range create { - t[*tag.Key] = tag.Value - } - - _, err := conn.AddTagsToStream(&kinesis.AddTagsToStreamInput{ - StreamName: aws.String(sn), - Tags: t, - }) - if err != nil { - return 
err - } - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsKinesis(oldTags, newTags []*kinesis.Tag) ([]*kinesis.Tag, []*kinesis.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*kinesis.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapKinesis(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapKinesis(m map[string]interface{}) []*kinesis.Tag { - var result []*kinesis.Tag - for k, v := range m { - t := &kinesis.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredKinesis(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. 
-func tagsToMapKinesis(ts []*kinesis.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredKinesis(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredKinesis(t *kinesis.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tags_kinesis_test.go b/builtin/providers/aws/tags_kinesis_test.go deleted file mode 100644 index 63504b10b..000000000 --- a/builtin/providers/aws/tags_kinesis_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffTagsKinesis(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsKinesis(tagsFromMapKinesis(tc.Old), tagsFromMapKinesis(tc.New)) - cm := tagsToMapKinesis(c) - rm := tagsToMapKinesis(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if 
!reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsKinesis(t *testing.T) { - var ignoredTags []*kinesis.Tag - ignoredTags = append(ignoredTags, &kinesis.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &kinesis.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredKinesis(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckKinesisTags(ts []*kinesis.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapKinesis(ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tags_route53.go b/builtin/providers/aws/tags_route53.go deleted file mode 100644 index 372167291..000000000 --- a/builtin/providers/aws/tags_route53.go +++ /dev/null @@ -1,111 +0,0 @@ -package aws - -import ( - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTagsR53(conn *route53.Route53, d *schema.ResourceData, resourceType string) error { - if d.HasChange("tags") { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsR53(tagsFromMapR53(o), tagsFromMapR53(n)) - - // Set tags - r := make([]*string, len(remove)) - for i, t := range remove { - r[i] = t.Key - } - log.Printf("[DEBUG] Changing tags: \n\tadding: %#v\n\tremoving:%#v", create, remove) - req := &route53.ChangeTagsForResourceInput{ - ResourceId: aws.String(d.Id()), - ResourceType: aws.String(resourceType), - } - - if len(create) > 0 { - req.AddTags = create - } - if len(r) > 0 { - req.RemoveTagKeys = r - } - - _, err := conn.ChangeTagsForResource(req) - if err != nil { - return err - } - } - - return nil -} - -// diffTags takes our tags locally and the ones remotely and returns -// the set of tags that must be created, and the set of tags that must -// be destroyed. -func diffTagsR53(oldTags, newTags []*route53.Tag) ([]*route53.Tag, []*route53.Tag) { - // First, we're creating everything we have - create := make(map[string]interface{}) - for _, t := range newTags { - create[*t.Key] = *t.Value - } - - // Build the list of what to remove - var remove []*route53.Tag - for _, t := range oldTags { - old, ok := create[*t.Key] - if !ok || old != *t.Value { - // Delete it! - remove = append(remove, t) - } - } - - return tagsFromMapR53(create), remove -} - -// tagsFromMap returns the tags for the given map of data. -func tagsFromMapR53(m map[string]interface{}) []*route53.Tag { - result := make([]*route53.Tag, 0, len(m)) - for k, v := range m { - t := &route53.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - } - if !tagIgnoredRoute53(t) { - result = append(result, t) - } - } - - return result -} - -// tagsToMap turns the list of tags into a map. 
-func tagsToMapR53(ts []*route53.Tag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - if !tagIgnoredRoute53(t) { - result[*t.Key] = *t.Value - } - } - - return result -} - -// compare a tag against a list of strings and checks if it should -// be ignored or not -func tagIgnoredRoute53(t *route53.Tag) bool { - filter := []string{"^aws:"} - for _, v := range filter { - log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) - if r, _ := regexp.MatchString(v, *t.Key); r == true { - log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) - return true - } - } - return false -} diff --git a/builtin/providers/aws/tags_route53_test.go b/builtin/providers/aws/tags_route53_test.go deleted file mode 100644 index 4703dbf39..000000000 --- a/builtin/providers/aws/tags_route53_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/route53" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffTagsR53(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTagsR53(tagsFromMapR53(tc.Old), tagsFromMapR53(tc.New)) - cm := tagsToMapR53(c) - rm := tagsToMapR53(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - 
t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTagsRoute53(t *testing.T) { - var ignoredTags []*route53.Tag - ignoredTags = append(ignoredTags, &route53.Tag{ - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &route53.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnoredRoute53(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckTagsR53( - ts *[]*route53.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMapR53(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/tags_test.go b/builtin/providers/aws/tags_test.go deleted file mode 100644 index 1777c3764..000000000 --- a/builtin/providers/aws/tags_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": 
"baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - c, r := diffTags(tagsFromMap(tc.Old), tagsFromMap(tc.New)) - cm := tagsToMap(c) - rm := tagsToMap(r) - if !reflect.DeepEqual(cm, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, cm) - } - if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, rm) - } - } -} - -func TestIgnoringTags(t *testing.T) { - var ignoredTags []*ec2.Tag - ignoredTags = append(ignoredTags, &ec2.Tag{ - - Key: aws.String("aws:cloudformation:logical-id"), - Value: aws.String("foo"), - }) - ignoredTags = append(ignoredTags, &ec2.Tag{ - Key: aws.String("aws:foo:bar"), - Value: aws.String("baz"), - }) - for _, tag := range ignoredTags { - if !tagIgnored(tag) { - t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. -func testAccCheckTags( - ts *[]*ec2.Tag, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - m := tagsToMap(*ts) - v, ok := m[key] - if value != "" && !ok { - return fmt.Errorf("Missing tag: %s", key) - } else if value == "" && ok { - return fmt.Errorf("Extra tag: %s", key) - } - if value == "" { - return nil - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil - } -} diff --git a/builtin/providers/aws/test-fixtures/cloudformation-template.json b/builtin/providers/aws/test-fixtures/cloudformation-template.json deleted file mode 100644 index a01c4e5e0..000000000 --- a/builtin/providers/aws/test-fixtures/cloudformation-template.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Parameters" : { - "VpcCIDR" : { - "Description" : "CIDR to be used for the VPC", - "Type" : "String" - } - }, - "Resources" : { - "MyVPC": { - "Type" : "AWS::EC2::VPC", - "Properties" : { - "CidrBlock" : {"Ref": "VpcCIDR"}, - "Tags" : [ - {"Key": "Name", "Value": 
"Primary_CF_VPC"} - ] - } - } - } -} diff --git a/builtin/providers/aws/test-fixtures/cloudformation-template.yaml b/builtin/providers/aws/test-fixtures/cloudformation-template.yaml deleted file mode 100644 index c84e3fcdd..000000000 --- a/builtin/providers/aws/test-fixtures/cloudformation-template.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Parameters: - VpcCIDR: - Description: CIDR to be used for the VPC - Type: String - -Resources: - MyVPC: - Type: AWS::EC2::VPC - Properties: - CidrBlock: !Ref VpcCIDR - Tags: - - - Key: Name - Value: Primary_CF_VPC diff --git a/builtin/providers/aws/test-fixtures/emr_configurations.json b/builtin/providers/aws/test-fixtures/emr_configurations.json deleted file mode 100644 index 48b22d9d3..000000000 --- a/builtin/providers/aws/test-fixtures/emr_configurations.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "Classification": "hadoop-env", - "Configurations": [ - { - "Classification": "export", - "Configurations": [], - "Properties": { - "JAVA_HOME": "/usr/lib/jvm/java-1.8.0" - } - } - ], - "Properties": {} - }, - { - "Classification": "spark-env", - "Configurations": [ - { - "Classification": "export", - "Configurations": [], - "Properties": { - "JAVA_HOME": "/usr/lib/jvm/java-1.8.0" - } - } - ], - "Properties": {} - } -] \ No newline at end of file diff --git a/builtin/providers/aws/test-fixtures/iam-ssl-unix-line-endings.pem b/builtin/providers/aws/test-fixtures/iam-ssl-unix-line-endings.pem deleted file mode 100644 index 9ac6e6fdc..000000000 --- a/builtin/providers/aws/test-fixtures/iam-ssl-unix-line-endings.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDBjCCAe4CCQCGWwBmOiHQdTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTE2MDYyMTE2MzM0MVoXDTE3MDYyMTE2MzM0MVowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 
-AL+LFlsCJG5txZp4yuu+lQnuUrgBXRG+irQqcTXlV91Bp5hpmRIyhnGCtWxxDBUL -xrh4WN3VV/0jDzKT976oLgOy3hj56Cdqf+JlZ1qgMN5bHB3mm3aVWnrnsLbBsfwZ -SEbk3Kht/cE1nK2toNVW+rznS3m+eoV3Zn/DUNwGlZr42hGNs6ETn2jURY78ETqR -mW47xvjf86eIo7vULHJaY6xyarPqkL8DZazOmvY06hUGvGwGBny7gugfXqDG+I8n -cPBsGJGSAmHmVV8o0RCB9UjY+TvSMQRpEDoVlvyrGuglsD8to/4+7UcsuDGlRYN6 -jmIOC37mOi/jwRfWL1YUa4MCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAPDxTH0oQ -JjKXoJgkmQxurB81RfnK/NrswJVzWbOv6ejcbhwh+/ZgJTMc15BrYcxU6vUW1V/i -Z7APU0qJ0icECACML+a2fRI7YdLCTiPIOmY66HY8MZHAn3dGjU5TeiUflC0n0zkP -mxKJe43kcYLNDItbfvUDo/GoxTXrC3EFVZyU0RhFzoVJdODlTHXMVFCzcbQEBrBJ -xKdShCEc8nFMneZcGFeEU488ntZoWzzms8/QpYrKa5S0Sd7umEU2Kwu4HTkvUFg/ -CqDUFjhydXxYRsxXBBrEiLOE5BdtJR1sH/QHxIJe23C9iHI2nS1NbLziNEApLwC4 -GnSud83VUo9G9w== ------END CERTIFICATE----- diff --git a/builtin/providers/aws/test-fixtures/iam-ssl-windows-line-endings.pem b/builtin/providers/aws/test-fixtures/iam-ssl-windows-line-endings.pem deleted file mode 100644 index 101c239fb..000000000 --- a/builtin/providers/aws/test-fixtures/iam-ssl-windows-line-endings.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDBjCCAe4CCQCGWwBmOiHQdTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTE2MDYyMTE2MzM0MVoXDTE3MDYyMTE2MzM0MVowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AL+LFlsCJG5txZp4yuu+lQnuUrgBXRG+irQqcTXlV91Bp5hpmRIyhnGCtWxxDBUL -xrh4WN3VV/0jDzKT976oLgOy3hj56Cdqf+JlZ1qgMN5bHB3mm3aVWnrnsLbBsfwZ -SEbk3Kht/cE1nK2toNVW+rznS3m+eoV3Zn/DUNwGlZr42hGNs6ETn2jURY78ETqR -mW47xvjf86eIo7vULHJaY6xyarPqkL8DZazOmvY06hUGvGwGBny7gugfXqDG+I8n -cPBsGJGSAmHmVV8o0RCB9UjY+TvSMQRpEDoVlvyrGuglsD8to/4+7UcsuDGlRYN6 -jmIOC37mOi/jwRfWL1YUa4MCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAPDxTH0oQ -JjKXoJgkmQxurB81RfnK/NrswJVzWbOv6ejcbhwh+/ZgJTMc15BrYcxU6vUW1V/i -Z7APU0qJ0icECACML+a2fRI7YdLCTiPIOmY66HY8MZHAn3dGjU5TeiUflC0n0zkP 
-mxKJe43kcYLNDItbfvUDo/GoxTXrC3EFVZyU0RhFzoVJdODlTHXMVFCzcbQEBrBJ -xKdShCEc8nFMneZcGFeEU488ntZoWzzms8/QpYrKa5S0Sd7umEU2Kwu4HTkvUFg/ -CqDUFjhydXxYRsxXBBrEiLOE5BdtJR1sH/QHxIJe23C9iHI2nS1NbLziNEApLwC4 -GnSud83VUo9G9w== ------END CERTIFICATE----- diff --git a/builtin/providers/aws/test-fixtures/lambda_confirm_sns.zip b/builtin/providers/aws/test-fixtures/lambda_confirm_sns.zip deleted file mode 100644 index c88d2d400..000000000 Binary files a/builtin/providers/aws/test-fixtures/lambda_confirm_sns.zip and /dev/null differ diff --git a/builtin/providers/aws/test-fixtures/lambda_func.js b/builtin/providers/aws/test-fixtures/lambda_func.js deleted file mode 100644 index 556182a5c..000000000 --- a/builtin/providers/aws/test-fixtures/lambda_func.js +++ /dev/null @@ -1,9 +0,0 @@ -var http = require('http') - -exports.handler = function(event, context) { - http.get("http://requestb.in/10m32wg1", function(res) { - console.log("success", res.statusCode, res.body) - }).on('error', function(e) { - console.log("error", e) - }) -} diff --git a/builtin/providers/aws/test-fixtures/lambda_func_modified.js b/builtin/providers/aws/test-fixtures/lambda_func_modified.js deleted file mode 100644 index 9842040bb..000000000 --- a/builtin/providers/aws/test-fixtures/lambda_func_modified.js +++ /dev/null @@ -1,9 +0,0 @@ -var http = require('http') - -exports.handler = function(event, context) { - http.get("http://requestb.in/MODIFIED", function(res) { - console.log("success", res.statusCode, res.body) - }).on('error', function(e) { - console.log("error", e) - }) -} diff --git a/builtin/providers/aws/test-fixtures/lambdatest.zip b/builtin/providers/aws/test-fixtures/lambdatest.zip deleted file mode 100644 index 5c636e955..000000000 Binary files a/builtin/providers/aws/test-fixtures/lambdatest.zip and /dev/null differ diff --git a/builtin/providers/aws/test-fixtures/python-v1.zip b/builtin/providers/aws/test-fixtures/python-v1.zip deleted file mode 100644 index f7d68b1fc..000000000 Binary files 
a/builtin/providers/aws/test-fixtures/python-v1.zip and /dev/null differ diff --git a/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml deleted file mode 100644 index aaca7afc0..000000000 --- a/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj - - - - 
urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified - - - - diff --git a/builtin/providers/aws/test-fixtures/saml-metadata.xml b/builtin/providers/aws/test-fixtures/saml-metadata.xml deleted file mode 100644 index 69e353b77..000000000 --- a/builtin/providers/aws/test-fixtures/saml-metadata.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj - - - - 
urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified - - - - diff --git a/builtin/providers/aws/utils.go b/builtin/providers/aws/utils.go deleted file mode 100644 index bfca044cf..000000000 --- a/builtin/providers/aws/utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package aws - -import ( - "encoding/base64" - "encoding/json" - "reflect" - "regexp" -) - -// Base64Encode encodes data if the input isn't already encoded using base64.StdEncoding.EncodeToString. -// If the input is already base64 encoded, return the original input unchanged. -func base64Encode(data []byte) string { - // Check whether the data is already Base64 encoded; don't double-encode - if isBase64Encoded(data) { - return string(data) - } - // data has not been encoded encode and return - return base64.StdEncoding.EncodeToString(data) -} - -func isBase64Encoded(data []byte) bool { - _, err := base64.StdEncoding.DecodeString(string(data)) - return err == nil -} - -func looksLikeJsonString(s interface{}) bool { - return regexp.MustCompile(`^\s*{`).MatchString(s.(string)) -} - -func jsonBytesEqual(b1, b2 []byte) bool { - var o1 interface{} - if err := json.Unmarshal(b1, &o1); err != nil { - return false - } - - var o2 interface{} - if err := json.Unmarshal(b2, &o2); err != nil { - return false - } - - return reflect.DeepEqual(o1, o2) -} diff --git a/builtin/providers/aws/utils_test.go b/builtin/providers/aws/utils_test.go deleted file mode 100644 index 8248f4384..000000000 --- a/builtin/providers/aws/utils_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package aws - -import "testing" - -var base64encodingTests = []struct { - in []byte - out string -}{ - // normal encoding case - {[]byte("data should be encoded"), "ZGF0YSBzaG91bGQgYmUgZW5jb2RlZA=="}, - // base64 encoded input should result in no change of output - {[]byte("ZGF0YSBzaG91bGQgYmUgZW5jb2RlZA=="), "ZGF0YSBzaG91bGQgYmUgZW5jb2RlZA=="}, -} - -func TestBase64Encode(t *testing.T) { - for _, tt := range base64encodingTests { - out := base64Encode(tt.in) - if 
out != tt.out { - t.Errorf("base64Encode(%s) => %s, want %s", tt.in, out, tt.out) - } - } -} - -func TestLooksLikeJsonString(t *testing.T) { - looksLikeJson := ` {"abc":"1"} ` - doesNotLookLikeJson := `abc: 1` - - if !looksLikeJsonString(looksLikeJson) { - t.Errorf("Expected looksLikeJson to return true for %s", looksLikeJson) - } - if looksLikeJsonString(doesNotLookLikeJson) { - t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson) - } -} - -func TestJsonBytesEqualQuotedAndUnquoted(t *testing.T) { - unquoted := `{"test": "test"}` - quoted := "{\"test\": \"test\"}" - - if !jsonBytesEqual([]byte(unquoted), []byte(quoted)) { - t.Errorf("Expected jsonBytesEqual to return true for %s == %s", unquoted, quoted) - } - - unquotedDiff := `{"test": "test"}` - quotedDiff := "{\"test\": \"tested\"}" - - if jsonBytesEqual([]byte(unquotedDiff), []byte(quotedDiff)) { - t.Errorf("Expected jsonBytesEqual to return false for %s == %s", unquotedDiff, quotedDiff) - } -} - -func TestJsonBytesEqualWhitespaceAndNoWhitespace(t *testing.T) { - noWhitespace := `{"test":"test"}` - whitespace := ` -{ - "test": "test" -}` - - if !jsonBytesEqual([]byte(noWhitespace), []byte(whitespace)) { - t.Errorf("Expected jsonBytesEqual to return true for %s == %s", noWhitespace, whitespace) - } - - noWhitespaceDiff := `{"test":"test"}` - whitespaceDiff := ` -{ - "test": "tested" -}` - - if jsonBytesEqual([]byte(noWhitespaceDiff), []byte(whitespaceDiff)) { - t.Errorf("Expected jsonBytesEqual to return false for %s == %s", noWhitespaceDiff, whitespaceDiff) - } -} diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go deleted file mode 100644 index 8102b456a..000000000 --- a/builtin/providers/aws/validators.go +++ /dev/null @@ -1,1351 +0,0 @@ -package aws - -import ( - "fmt" - "net" - "net/url" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/aws/aws-sdk-go/service/s3" - 
"github.com/hashicorp/terraform/helper/schema" -) - -func validateRdsIdentifier(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return -} - -func validateRdsIdentifierPrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - return -} - -func validateElastiCacheClusterId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if (len(value) < 1) || (len(value) > 20) { - errors = append(errors, fmt.Errorf( - "%q (%q) must contain from 1 to 20 alphanumeric characters or hyphens", k, value)) - } - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q (%q)", k, value)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q (%q) must be a letter", k, value)) - } 
- if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) cannot contain two consecutive hyphens", k, value)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) cannot end with a hyphen", k, value)) - } - return -} - -func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as iso8601 Timestamp Format", value)) - } - - return -} - -// validateTagFilters confirms the "value" component of a tag filter is one of -// AWS's three allowed types. -func validateTagFilters(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" { - errors = append(errors, fmt.Errorf( - "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k)) - } - return -} - -func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 255 characters", k)) - } - return -} - -func validateDbParamGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := 
v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 226 characters", k)) - } - return -} - -func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - viewTypes := map[string]bool{ - "KEYS_ONLY": true, - "NEW_IMAGE": true, - "OLD_IMAGE": true, - "NEW_AND_OLD_IMAGES": true, - } - - if !viewTypes[value] { - errors = append(errors, fmt.Errorf("%q must be a valid DynamoDB StreamViewType", k)) - } - return -} - -func validateElbName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) == 0 { - return // short-circuit - } - if len(value) > 32 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 32 characters: %q", k, value)) - } - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q: %q", - k, value)) - } - if regexp.MustCompile(`^-`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot begin with a hyphen: %q", k, value)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen: %q", k, value)) - } - return -} - -func validateElbNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed 
in %q: %q", - k, value)) - } - if len(value) > 6 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 6 characters: %q", k, value)) - } - if regexp.MustCompile(`^-`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot begin with a hyphen: %q", k, value)) - } - return -} - -func validateEcrRepositoryName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 2 { - errors = append(errors, fmt.Errorf( - "%q must be at least 2 characters long: %q", k, value)) - } - if len(value) > 256 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 256 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_CreateRepository.html - pattern := `^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} - -func validateCloudWatchEventRuleName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_PutRule.html - pattern := `^[\.\-_A-Za-z0-9]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} - -func validateMaxLength(length int) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > length { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than %d characters: %q", k, length, value)) - } - return - } -} - -func validateIntegerInRange(min, max int) schema.SchemaValidateFunc { - return func(v interface{}, k string) 
(ws []string, errors []error) { - value := v.(int) - if value < min { - errors = append(errors, fmt.Errorf( - "%q cannot be lower than %d: %d", k, min, value)) - } - if value > max { - errors = append(errors, fmt.Errorf( - "%q cannot be higher than %d: %d", k, max, value)) - } - return - } -} - -func validateCloudWatchEventTargetId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 64 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 64 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_Target.html - pattern := `^[\.\-_A-Za-z0-9]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} - -func validateLambdaFunctionName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 140 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 140 characters: %q", k, value)) - } - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^(arn:[\w-]+:lambda:)?([a-z]{2}-[a-z]+-\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} - -func validateLambdaQualifier(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters: %q", k, value)) - } - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^[a-zA-Z0-9$_-]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} 
- -func validateLambdaPermissionAction(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^(lambda:[*]|lambda:[a-zA-Z]+|[*])$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't comply with restrictions (%q): %q", - k, pattern, value)) - } - - return -} - -func validateAwsAccountId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^\d{12}$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't look like AWS Account ID (exactly 12 digits): %q", - k, value)) - } - - return -} - -func validateArn(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if value == "" { - return - } - - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12})?:(.*)$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't look like a valid ARN (%q): %q", - k, pattern, value)) - } - - return -} - -func validatePolicyStatementId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html - pattern := `^[a-zA-Z0-9-_]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q doesn't look like a valid statement ID (%q): %q", - k, pattern, value)) - } - - return -} - -// validateCIDRNetworkAddress ensures that the string value is a valid CIDR that -// represents a network address - it adds an error otherwise -func 
validateCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, ipnet, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q must contain a valid CIDR, got error parsing: %s", k, err)) - return - } - - if ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf( - "%q must contain a valid network CIDR, got %q", k, value)) - } - - return -} - -func validateHTTPMethod(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - validMethods := map[string]bool{ - "ANY": true, - "DELETE": true, - "GET": true, - "HEAD": true, - "OPTIONS": true, - "PATCH": true, - "POST": true, - "PUT": true, - } - - if _, ok := validMethods[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q contains an invalid method %q. Valid methods are either %q, %q, %q, %q, %q, %q, %q, or %q.", - k, value, "ANY", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT")) - } - return -} - -func validateLogMetricFilterName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 512 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 512 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutMetricFilter.html - pattern := `^[^:*]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q isn't a valid log metric name (must not contain colon nor asterisk): %q", - k, value)) - } - - return -} - -func validateLogMetricFilterTransformationName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_MetricTransformation.html - pattern := `^[^:*$]*$` - if 
!regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q isn't a valid log metric transformation name (must not contain"+ - " colon, asterisk nor dollar sign): %q", - k, value)) - } - - return -} - -func validateLogGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 512 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 512 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html - pattern := `^[\.\-_/#A-Za-z0-9]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q isn't a valid log group name (alphanumeric characters, underscores,"+ - " hyphens, slashes, hash signs and dots are allowed): %q", - k, value)) - } - - return -} - -func validateLogGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 483 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 483 characters: %q", k, value)) - } - - // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html - pattern := `^[\.\-_/#A-Za-z0-9]+$` - if !regexp.MustCompile(pattern).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q isn't a valid log group name (alphanumeric characters, underscores,"+ - " hyphens, slashes, hash signs and dots are allowed): %q", - k, value)) - } - - return -} - -func validateS3BucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as RFC3339 Timestamp Format", value)) - } - - return -} - -func validateS3BucketLifecycleStorageClass(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != 
s3.TransitionStorageClassStandardIa && value != s3.TransitionStorageClassGlacier { - errors = append(errors, fmt.Errorf( - "%q must be one of '%q', '%q'", k, s3.TransitionStorageClassStandardIa, s3.TransitionStorageClassGlacier)) - } - - return -} - -func validateS3BucketReplicationRuleId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters: %q", k, value)) - } - - return -} - -func validateS3BucketReplicationRulePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 1024 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 1024 characters: %q", k, value)) - } - - return -} - -func validateS3BucketReplicationDestinationStorageClass(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != s3.StorageClassStandard && value != s3.StorageClassStandardIa && value != s3.StorageClassReducedRedundancy { - errors = append(errors, fmt.Errorf( - "%q must be one of '%q', '%q' or '%q'", k, s3.StorageClassStandard, s3.StorageClassStandardIa, s3.StorageClassReducedRedundancy)) - } - - return -} - -func validateS3BucketReplicationRuleStatus(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != s3.ReplicationRuleStatusEnabled && value != s3.ReplicationRuleStatusDisabled { - errors = append(errors, fmt.Errorf( - "%q must be one of '%q' or '%q'", k, s3.ReplicationRuleStatusEnabled, s3.ReplicationRuleStatusDisabled)) - } - - return -} - -func validateS3BucketLifecycleRuleId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot exceed 255 characters", k)) - } - return -} - -func validateDbEventSubscriptionName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if 
!regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return -} - -func validateApiGatewayIntegrationPassthroughBehavior(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "WHEN_NO_MATCH" && value != "WHEN_NO_TEMPLATES" && value != "NEVER" { - errors = append(errors, fmt.Errorf( - "%q must be one of 'WHEN_NO_MATCH', 'WHEN_NO_TEMPLATES', 'NEVER'", k)) - } - return -} - -func validateJsonString(v interface{}, k string) (ws []string, errors []error) { - if _, err := normalizeJsonString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) - } - return -} - -func validateIAMPolicyJson(v interface{}, k string) (ws []string, errors []error) { - // IAM Policy documents need to be valid JSON, and pass legacy parsing - value := v.(string) - if len(value) < 1 { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON policy", k)) - return - } - if value[:1] != "{" { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON policy", k)) - return - } - if _, err := normalizeJsonString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) - } - return -} - -func validateCloudFormationTemplate(v interface{}, k string) (ws []string, errors []error) { - if looksLikeJsonString(v) { - if _, err := normalizeJsonString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) - } - } else { - if _, err := checkYamlString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid YAML: %s", k, err)) - } - } - return -} - -func validateApiGatewayIntegrationType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - validTypes := 
map[string]bool{ - "AWS": true, - "AWS_PROXY": true, - "HTTP": true, - "HTTP_PROXY": true, - "MOCK": true, - } - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q contains an invalid integration type %q. Valid types are either %q, %q, %q, %q, or %q.", - k, value, "AWS", "AWS_PROXY", "HTTP", "HTTP_PROXY", "MOCK")) - } - return -} - -func validateApiGatewayIntegrationContentHandling(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - validTypes := map[string]bool{ - "CONVERT_TO_BINARY": true, - "CONVERT_TO_TEXT": true, - } - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q contains an invalid integration type %q. Valid types are either %q or %q.", - k, value, "CONVERT_TO_BINARY", "CONVERT_TO_TEXT")) - } - return -} - -func validateSQSQueueName(v interface{}, k string) (errors []error) { - value := v.(string) - if len(value) > 80 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) - } - - if !regexp.MustCompile(`^[0-9A-Za-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf("only alphanumeric characters and hyphens allowed in %q", k)) - } - return -} - -func validateSQSFifoQueueName(v interface{}, k string) (errors []error) { - value := v.(string) - - if len(value) > 80 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) - } - - if !regexp.MustCompile(`^[0-9A-Za-z-_.]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf("only alphanumeric characters and hyphens allowed in %q", k)) - } - - if regexp.MustCompile(`^[^a-zA-Z0-9-_]`).MatchString(value) { - errors = append(errors, fmt.Errorf("FIFO queue name must start with one of these characters [a-zA-Z0-9-_]: %v", value)) - } - - if !regexp.MustCompile(`\.fifo$`).MatchString(value) { - errors = append(errors, fmt.Errorf("FIFO queue name should ends with \".fifo\": %v", value)) - } - - return -} - -func validateSNSSubscriptionProtocol(v 
interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - forbidden := []string{"email", "sms"} - for _, f := range forbidden { - if strings.Contains(value, f) { - errors = append( - errors, - fmt.Errorf("Unsupported protocol (%s) for SNS Topic", value), - ) - } - } - return -} - -func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - - validTypes := map[string]bool{ - "ingress": true, - "egress": true, - } - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Security Group Rule type %q. Valid types are either %q or %q.", - k, value, "ingress", "egress")) - } - return -} - -func validateOnceAWeekWindowFormat(v interface{}, k string) (ws []string, errors []error) { - // valid time format is "ddd:hh24:mi" - validTimeFormat := "(sun|mon|tue|wed|thu|fri|sat):([0-1][0-9]|2[0-3]):([0-5][0-9])" - validTimeFormatConsolidated := "^(" + validTimeFormat + "-" + validTimeFormat + "|)$" - - value := strings.ToLower(v.(string)) - if !regexp.MustCompile(validTimeFormatConsolidated).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must satisfy the format of \"ddd:hh24:mi-ddd:hh24:mi\".", k)) - } - return -} - -func validateOnceADayWindowFormat(v interface{}, k string) (ws []string, errors []error) { - // valid time format is "hh24:mi" - validTimeFormat := "([0-1][0-9]|2[0-3]):([0-5][0-9])" - validTimeFormatConsolidated := "^(" + validTimeFormat + "-" + validTimeFormat + "|)$" - - value := v.(string) - if !regexp.MustCompile(validTimeFormatConsolidated).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must satisfy the format of \"hh24:mi-hh24:mi\".", k)) - } - return -} - -func validateRoute53RecordType(v interface{}, k string) (ws []string, errors []error) { - // Valid Record types - // SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA - validTypes := map[string]struct{}{ - "SOA": {}, 
- "A": {}, - "TXT": {}, - "NS": {}, - "CNAME": {}, - "MX": {}, - "NAPTR": {}, - "PTR": {}, - "SRV": {}, - "SPF": {}, - "AAAA": {}, - } - - value := v.(string) - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q must be one of [SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA]", k)) - } - return -} - -// Validates that ECS Placement Constraints are set correctly -// Takes type, and expression as strings -func validateAwsEcsPlacementConstraint(constType, constExpr string) error { - switch constType { - case "distinctInstance": - // Expression can be nil for distinctInstance - return nil - case "memberOf": - if constExpr == "" { - return fmt.Errorf("Expression cannot be nil for 'memberOf' type") - } - default: - return fmt.Errorf("Unknown type provided: %q", constType) - } - return nil -} - -// Validates that an Ecs placement strategy is set correctly -// Takes type, and field as strings -func validateAwsEcsPlacementStrategy(stratType, stratField string) error { - switch stratType { - case "random": - // random does not need the field attribute set, could error, but it isn't read at the API level - return nil - case "spread": - // For the spread placement strategy, valid values are instanceId - // (or host, which has the same effect), or any platform or custom attribute - // that is applied to a container instance - // stratField is already cased to a string - return nil - case "binpack": - if stratField != "cpu" && stratField != "memory" { - return fmt.Errorf("Binpack type requires the field attribute to be either 'cpu' or 'memory'. Got: %s", - stratField) - } - default: - return fmt.Errorf("Unknown type %s. 
Must be one of 'random', 'spread', or 'binpack'.", stratType) - } - return nil -} - -func validateAwsEmrEbsVolumeType(v interface{}, k string) (ws []string, errors []error) { - validTypes := map[string]struct{}{ - "gp2": {}, - "io1": {}, - "standard": {}, - } - - value := v.(string) - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf( - "%q must be one of ['gp2', 'io1', 'standard']", k)) - } - return -} - -func validateSfnActivityName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 80 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) - } - - return -} - -func validateSfnStateMachineDefinition(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 1048576 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 1048576 characters", k)) - } - return -} - -func validateSfnStateMachineName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 80 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) - } - - if !regexp.MustCompile(`^[a-zA-Z0-9-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must be composed with only these characters [a-zA-Z0-9-_]: %v", k, value)) - } - return -} - -func validateDmsCertificateId(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if len(val) > 255 { - es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) - } - if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { - es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) - } - if strings.Contains(val, "--") { - es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) - } - if strings.HasSuffix(val, "-") { - es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) - } - - return -} - -func 
validateDmsEndpointId(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if len(val) > 255 { - es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) - } - if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { - es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) - } - if strings.Contains(val, "--") { - es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) - } - if strings.HasSuffix(val, "-") { - es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) - } - - return -} - -func validateDmsReplicationInstanceId(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if len(val) > 63 { - es = append(es, fmt.Errorf("%q must not be longer than 63 characters", k)) - } - if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { - es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) - } - if strings.Contains(val, "--") { - es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) - } - if strings.HasSuffix(val, "-") { - es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) - } - - return -} - -func validateDmsReplicationSubnetGroupId(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if val == "default" { - es = append(es, fmt.Errorf("%q must not be default", k)) - } - if len(val) > 255 { - es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) - } - if !regexp.MustCompile(`^[a-zA-Z0-9. 
_-]+$`).MatchString(val) { - es = append(es, fmt.Errorf("%q must only contain alphanumeric characters, periods, spaces, underscores and hyphens", k)) - } - - return -} - -func validateDmsReplicationTaskId(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if len(val) > 255 { - es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) - } - if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { - es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) - } - if strings.Contains(val, "--") { - es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) - } - if strings.HasSuffix(val, "-") { - es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) - } - - return -} - -func validateAppautoscalingScalableDimension(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - dimensions := map[string]bool{ - "ecs:service:DesiredCount": true, - "ec2:spot-fleet-request:TargetCapacity": true, - "elasticmapreduce:instancegroup:InstanceCount": true, - } - - if !dimensions[value] { - errors = append(errors, fmt.Errorf("%q must be a valid scalable dimension value: %q", k, value)) - } - return -} - -func validateAppautoscalingServiceNamespace(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - namespaces := map[string]bool{ - "ecs": true, - "ec2": true, - "elasticmapreduce": true, - } - - if !namespaces[value] { - errors = append(errors, fmt.Errorf("%q must be a valid service namespace value: %q", k, value)) - } - return -} - -func validateConfigRuleSourceOwner(v interface{}, k string) (ws []string, errors []error) { - validOwners := []string{ - "CUSTOM_LAMBDA", - "AWS", - } - owner := v.(string) - for _, o := range validOwners { - if owner == o { - return - } - } - errors = append(errors, fmt.Errorf( - "%q contains an invalid owner %q. 
Valid owners are %q.", - k, owner, validOwners)) - return -} - -func validateConfigExecutionFrequency(v interface{}, k string) (ws []string, errors []error) { - validFrequencies := []string{ - "One_Hour", - "Three_Hours", - "Six_Hours", - "Twelve_Hours", - "TwentyFour_Hours", - } - frequency := v.(string) - for _, f := range validFrequencies { - if frequency == f { - return - } - } - errors = append(errors, fmt.Errorf( - "%q contains an invalid frequency %q. Valid frequencies are %q.", - k, frequency, validFrequencies)) - return -} - -func validateAccountAlias(v interface{}, k string) (ws []string, es []error) { - val := v.(string) - - if (len(val) < 3) || (len(val) > 63) { - es = append(es, fmt.Errorf("%q must contain from 3 to 63 alphanumeric characters or hyphens", k)) - } - if !regexp.MustCompile("^[a-z0-9][a-z0-9-]+$").MatchString(val) { - es = append(es, fmt.Errorf("%q must start with an alphanumeric character and only contain lowercase alphanumeric characters and hyphens", k)) - } - if strings.Contains(val, "--") { - es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) - } - if strings.HasSuffix(val, "-") { - es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) - } - return -} - -func validateApiGatewayApiKeyValue(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 30 { - errors = append(errors, fmt.Errorf( - "%q must be at least 30 characters long", k)) - } - if len(value) > 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - return -} - -func validateIamRolePolicyName(v interface{}, k string) (ws []string, errors []error) { - // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8291-L8296 - value := v.(string) - if len(value) > 128 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 128 characters", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { - errors = 
append(errors, fmt.Errorf("%q must match [\\w+=,.@-]", k)) - } - return -} - -func validateIamRolePolicyNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 100 characters", k)) - } - if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf("%q must match [\\w+=,.@-]", k)) - } - return -} - -func validateApiGatewayUsagePlanQuotaSettingsPeriod(v interface{}, k string) (ws []string, errors []error) { - validPeriods := []string{ - apigateway.QuotaPeriodTypeDay, - apigateway.QuotaPeriodTypeWeek, - apigateway.QuotaPeriodTypeMonth, - } - period := v.(string) - for _, f := range validPeriods { - if period == f { - return - } - } - errors = append(errors, fmt.Errorf( - "%q contains an invalid period %q. Valid period are %q.", - k, period, validPeriods)) - return -} - -func validateApiGatewayUsagePlanQuotaSettings(v map[string]interface{}) (errors []error) { - period := v["period"].(string) - offset := v["offset"].(int) - - if period == apigateway.QuotaPeriodTypeDay && offset != 0 { - errors = append(errors, fmt.Errorf("Usage Plan quota offset must be zero in the DAY period")) - } - - if period == apigateway.QuotaPeriodTypeWeek && (offset < 0 || offset > 6) { - errors = append(errors, fmt.Errorf("Usage Plan quota offset must be between 0 and 6 inclusive in the WEEK period")) - } - - if period == apigateway.QuotaPeriodTypeMonth && (offset < 0 || offset > 27) { - errors = append(errors, fmt.Errorf("Usage Plan quota offset must be between 0 and 27 inclusive in the MONTH period")) - } - - return -} - -func validateDbSubnetGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", 
k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - if regexp.MustCompile(`(?i)^default$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q is not allowed as %q", "Default", k)) - } - return -} - -func validateDbSubnetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k)) - } - if len(value) > 229 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 229 characters", k)) - } - return -} - -func validateDbOptionGroupName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 255 characters", k)) - } - return -} - -func validateDbOptionGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in 
%q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if len(value) > 229 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 229 characters", k)) - } - return -} - -func validateAwsAlbTargetGroupName(v interface{}, k string) (ws []string, errors []error) { - name := v.(string) - if len(name) > 32 { - errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '32' characters", k, name)) - } - return -} - -func validateAwsAlbTargetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { - name := v.(string) - if len(name) > 32 { - errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '6' characters", k, name)) - } - return -} - -func validateOpenIdURL(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - u, err := url.Parse(value) - if err != nil { - errors = append(errors, fmt.Errorf("%q has to be a valid URL", k)) - return - } - if u.Scheme != "https" { - errors = append(errors, fmt.Errorf("%q has to use HTTPS scheme (i.e. 
begin with https://)", k)) - } - if len(u.Query()) > 0 { - errors = append(errors, fmt.Errorf("%q cannot contain query parameters per the OIDC standard", k)) - } - return -} - -func validateAwsKmsName(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k)) - } - return -} - -func validateCognitoIdentityPoolName(v interface{}, k string) (ws []string, errors []error) { - val := v.(string) - if !regexp.MustCompile("^[\\w _]+$").MatchString(val) { - errors = append(errors, fmt.Errorf("%q must contain only alphanumeric caracters and spaces", k)) - } - - return -} - -func validateCognitoProviderDeveloperName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 100 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 100 caracters", k)) - } - - if !regexp.MustCompile("^[\\w._-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf("%q must contain only alphanumeric caracters, dots, underscores and hyphens", k)) - } - - return -} - -func validateCognitoSupportedLoginProviders(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 1 { - errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k)) - } - - if len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 caracters", k)) - } - - if !regexp.MustCompile("^[\\w.;_/-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf("%q must contain only alphanumeric caracters, dots, semicolons, underscores, slashes and hyphens", k)) - } - - return -} - -func validateCognitoIdentityProvidersClientId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 1 { - errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k)) 
- } - - if len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 caracters", k)) - } - - if !regexp.MustCompile("^[\\w_]+$").MatchString(value) { - errors = append(errors, fmt.Errorf("%q must contain only alphanumeric caracters and underscores", k)) - } - - return -} - -func validateCognitoIdentityProvidersProviderName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) < 1 { - errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k)) - } - - if len(value) > 128 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 128 caracters", k)) - } - - if !regexp.MustCompile("^[\\w._:/-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf("%q must contain only alphanumeric caracters, dots, underscores, colons, slashes and hyphens", k)) - } - - return -} - -func validateWafMetricName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "Only alphanumeric characters allowed in %q: %q", - k, value)) - } - return -} - -func validateIamRoleDescription(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 1000 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 1000 caracters", k)) - } - - if !regexp.MustCompile(`[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "Only alphanumeric & accented characters allowed in %q: %q (Must satisfy regular expression pattern: [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*)", - k, value)) - } - return -} - -func validateSsmParameterType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - types := map[string]bool{ - "String": true, - "StringList": true, - "SecureString": true, - } - - if !types[value] { - errors = append(errors, fmt.Errorf("Parameter type %s is invalid. 
Valid types are String, StringList or SecureString", value)) - } - return -} diff --git a/builtin/providers/aws/validators_test.go b/builtin/providers/aws/validators_test.go deleted file mode 100644 index c577fdbfd..000000000 --- a/builtin/providers/aws/validators_test.go +++ /dev/null @@ -1,2319 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/service/s3" -) - -func TestValidateEcrRepositoryName(t *testing.T) { - validNames := []string{ - "nginx-web-app", - "project-a/nginx-web-app", - "domain.ltd/nginx-web-app", - "3chosome-thing.com/01different-pattern", - "0123456789/999999999", - "double/forward/slash", - "000000000000000", - } - for _, v := range validNames { - _, errors := validateEcrRepositoryName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid ECR repository name: %q", v, errors) - } - } - - invalidNames := []string{ - // length > 256 - "3cho_some-thing.com/01different.-_pattern01different.-_pattern01diff" + - "erent.-_pattern01different.-_pattern01different.-_pattern01different" + - ".-_pattern01different.-_pattern01different.-_pattern01different.-_pa" + - "ttern01different.-_pattern01different.-_pattern234567", - // length < 2 - "i", - "special@character", - "different+special=character", - "double//slash", - "double..dot", - "/slash-at-the-beginning", - "slash-at-the-end/", - } - for _, v := range invalidNames { - _, errors := validateEcrRepositoryName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid ECR repository name", v) - } - } -} - -func TestValidateCloudWatchEventRuleName(t *testing.T) { - validNames := []string{ - "HelloWorl_d", - "hello-world", - "hello.World0125", - } - for _, v := range validNames { - _, errors := validateCloudWatchEventRuleName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid CW event rule name: %q", v, errors) - } - } - - invalidNames := []string{ - "special@character", - "slash/in-the-middle", - // Length > 64 - 
"TooLooooooooooooooooooooooooooooooooooooooooooooooooooooooongName", - } - for _, v := range invalidNames { - _, errors := validateCloudWatchEventRuleName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid CW event rule name", v) - } - } -} - -func TestValidateLambdaFunctionName(t *testing.T) { - validNames := []string{ - "arn:aws:lambda:us-west-2:123456789012:function:ThumbNail", - "arn:aws-us-gov:lambda:us-west-2:123456789012:function:ThumbNail", - "FunctionName", - "function-name", - } - for _, v := range validNames { - _, errors := validateLambdaFunctionName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Lambda function name: %q", v, errors) - } - } - - invalidNames := []string{ - "/FunctionNameWithSlash", - "function.name.with.dots", - // length > 140 - "arn:aws:lambda:us-west-2:123456789012:function:TooLoooooo" + - "ooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + - "ooooooooooooooooongFunctionName", - } - for _, v := range invalidNames { - _, errors := validateLambdaFunctionName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Lambda function name", v) - } - } -} - -func TestValidateLambdaQualifier(t *testing.T) { - validNames := []string{ - "123", - "prod", - "PROD", - "MyTestEnv", - "contains-dashes", - "contains_underscores", - "$LATEST", - } - for _, v := range validNames { - _, errors := validateLambdaQualifier(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Lambda function qualifier: %q", v, errors) - } - } - - invalidNames := []string{ - // No ARNs allowed - "arn:aws:lambda:us-west-2:123456789012:function:prod", - // length > 128 - "TooLooooooooooooooooooooooooooooooooooooooooooooooooooo" + - "ooooooooooooooooooooooooooooooooooooooooooooooooooo" + - "oooooooooooongQualifier", - } - for _, v := range invalidNames { - _, errors := validateLambdaQualifier(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Lambda function qualifier", v) - } - } 
-} - -func TestValidateLambdaPermissionAction(t *testing.T) { - validNames := []string{ - "lambda:*", - "lambda:InvokeFunction", - "*", - } - for _, v := range validNames { - _, errors := validateLambdaPermissionAction(v, "action") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Lambda permission action: %q", v, errors) - } - } - - invalidNames := []string{ - "yada", - "lambda:123", - "*:*", - "lambda:Invoke*", - } - for _, v := range invalidNames { - _, errors := validateLambdaPermissionAction(v, "action") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Lambda permission action", v) - } - } -} - -func TestValidateAwsAccountId(t *testing.T) { - validNames := []string{ - "123456789012", - "999999999999", - } - for _, v := range validNames { - _, errors := validateAwsAccountId(v, "account_id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid AWS Account ID: %q", v, errors) - } - } - - invalidNames := []string{ - "12345678901", // too short - "1234567890123", // too long - "invalid", - "x123456789012", - } - for _, v := range invalidNames { - _, errors := validateAwsAccountId(v, "account_id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid AWS Account ID", v) - } - } -} - -func TestValidateArn(t *testing.T) { - v := "" - _, errors := validateArn(v, "arn") - if len(errors) != 0 { - t.Fatalf("%q should not be validated as an ARN: %q", v, errors) - } - - validNames := []string{ - "arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment", // Beanstalk - "arn:aws:iam::123456789012:user/David", // IAM User - "arn:aws:rds:eu-west-1:123456789012:db:mysql-db", // RDS - "arn:aws:s3:::my_corporate_bucket/exampleobject.png", // S3 object - "arn:aws:events:us-east-1:319201112229:rule/rule_name", // CloudWatch Rule - "arn:aws:lambda:eu-west-1:319201112229:function:myCustomFunction", // Lambda function - "arn:aws:lambda:eu-west-1:319201112229:function:myCustomFunction:Qualifier", // Lambda func qualifier - 
"arn:aws-us-gov:s3:::corp_bucket/object.png", // GovCloud ARN - "arn:aws-us-gov:kms:us-gov-west-1:123456789012:key/some-uuid-abc123", // GovCloud KMS ARN - } - for _, v := range validNames { - _, errors := validateArn(v, "arn") - if len(errors) != 0 { - t.Fatalf("%q should be a valid ARN: %q", v, errors) - } - } - - invalidNames := []string{ - "arn", - "123456789012", - "arn:aws", - "arn:aws:logs", - "arn:aws:logs:region:*:*", - } - for _, v := range invalidNames { - _, errors := validateArn(v, "arn") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid ARN", v) - } - } -} - -func TestValidatePolicyStatementId(t *testing.T) { - validNames := []string{ - "YadaHereAndThere", - "Valid-5tatement_Id", - "1234", - } - for _, v := range validNames { - _, errors := validatePolicyStatementId(v, "statement_id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Statement ID: %q", v, errors) - } - } - - invalidNames := []string{ - "Invalid/StatementId/with/slashes", - "InvalidStatementId.with.dots", - // length > 100 - "TooooLoooooooooooooooooooooooooooooooooooooooooooo" + - "ooooooooooooooooooooooooooooooooooooooooStatementId", - } - for _, v := range invalidNames { - _, errors := validatePolicyStatementId(v, "statement_id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Statement ID", v) - } - } -} - -func TestValidateCIDRNetworkAddress(t *testing.T) { - cases := []struct { - CIDR string - ExpectedErrSubstr string - }{ - {"notacidr", `must contain a valid CIDR`}, - {"10.0.1.0/16", `must contain a valid network CIDR`}, - {"10.0.1.0/24", ``}, - } - - for i, tc := range cases { - _, errs := validateCIDRNetworkAddress(tc.CIDR, "foo") - if tc.ExpectedErrSubstr == "" { - if len(errs) != 0 { - t.Fatalf("%d/%d: Expected no error, got errs: %#v", - i+1, len(cases), errs) - } - } else { - if len(errs) != 1 { - t.Fatalf("%d/%d: Expected 1 err containing %q, got %d errs", - i+1, len(cases), tc.ExpectedErrSubstr, len(errs)) - } - if 
!strings.Contains(errs[0].Error(), tc.ExpectedErrSubstr) { - t.Fatalf("%d/%d: Expected err: %q, to include %q", - i+1, len(cases), errs[0], tc.ExpectedErrSubstr) - } - } - } -} - -func TestValidateHTTPMethod(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: "incorrect", - ErrCount: 1, - }, - { - Value: "delete", - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateHTTPMethod(tc.Value, "http_method") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: "ANY", - ErrCount: 0, - }, - { - Value: "DELETE", - ErrCount: 0, - }, - { - Value: "OPTIONS", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateHTTPMethod(tc.Value, "http_method") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func TestValidateLogMetricFilterName(t *testing.T) { - validNames := []string{ - "YadaHereAndThere", - "Valid-5Metric_Name", - "This . is also %% valid@!)+(", - "1234", - strings.Repeat("W", 512), - } - for _, v := range validNames { - _, errors := validateLogMetricFilterName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Log Metric Filter Name: %q", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "and here is another * invalid name", - "*", - // length > 512 - strings.Repeat("W", 513), - } - for _, v := range invalidNames { - _, errors := validateLogMetricFilterName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Log Metric Filter Name", v) - } - } -} - -func TestValidateLogMetricTransformationName(t *testing.T) { - validNames := []string{ - "YadaHereAndThere", - "Valid-5Metric_Name", - "This . 
is also %% valid@!)+(", - "1234", - "", - strings.Repeat("W", 255), - } - for _, v := range validNames { - _, errors := validateLogMetricFilterTransformationName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Log Metric Filter Transformation Name: %q", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "and here is another * invalid name", - "also $ invalid", - "*", - // length > 255 - strings.Repeat("W", 256), - } - for _, v := range invalidNames { - _, errors := validateLogMetricFilterTransformationName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Log Metric Filter Transformation Name", v) - } - } -} - -func TestValidateLogGroupName(t *testing.T) { - validNames := []string{ - "ValidLogGroupName", - "ValidLogGroup.Name", - "valid/Log-group", - "1234", - "YadaValid#0123", - "Also_valid-name", - strings.Repeat("W", 512), - } - for _, v := range validNames { - _, errors := validateLogGroupName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Log Group name: %q", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "and here is another * invalid name", - "also $ invalid", - "This . 
is also %% invalid@!)+(", - "*", - "", - // length > 512 - strings.Repeat("W", 513), - } - for _, v := range invalidNames { - _, errors := validateLogGroupName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Log Group name", v) - } - } -} - -func TestValidateLogGroupNamePrefix(t *testing.T) { - validNames := []string{ - "ValidLogGroupName", - "ValidLogGroup.Name", - "valid/Log-group", - "1234", - "YadaValid#0123", - "Also_valid-name", - strings.Repeat("W", 483), - } - for _, v := range validNames { - _, errors := validateLogGroupNamePrefix(v, "name_prefix") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Log Group name prefix: %q", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "and here is another * invalid name", - "also $ invalid", - "This . is also %% invalid@!)+(", - "*", - "", - // length > 483 - strings.Repeat("W", 484), - } - for _, v := range invalidNames { - _, errors := validateLogGroupNamePrefix(v, "name_prefix") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Log Group name prefix", v) - } - } -} - -func TestValidateS3BucketLifecycleTimestamp(t *testing.T) { - validDates := []string{ - "2016-01-01", - "2006-01-02", - } - - for _, v := range validDates { - _, errors := validateS3BucketLifecycleTimestamp(v, "date") - if len(errors) != 0 { - t.Fatalf("%q should be valid date: %q", v, errors) - } - } - - invalidDates := []string{ - "Jan 01 2016", - "20160101", - } - - for _, v := range invalidDates { - _, errors := validateS3BucketLifecycleTimestamp(v, "date") - if len(errors) == 0 { - t.Fatalf("%q should be invalid date", v) - } - } -} - -func TestValidateS3BucketLifecycleStorageClass(t *testing.T) { - validStorageClass := []string{ - "STANDARD_IA", - "GLACIER", - } - - for _, v := range validStorageClass { - _, errors := validateS3BucketLifecycleStorageClass(v, "storage_class") - if len(errors) != 0 { - t.Fatalf("%q should be valid storage class: %q", v, errors) - } - } - - 
invalidStorageClass := []string{ - "STANDARD", - "1234", - } - for _, v := range invalidStorageClass { - _, errors := validateS3BucketLifecycleStorageClass(v, "storage_class") - if len(errors) == 0 { - t.Fatalf("%q should be invalid storage class", v) - } - } -} - -func TestValidateS3BucketReplicationRuleId(t *testing.T) { - validId := []string{ - "YadaHereAndThere", - "Valid-5Rule_ID", - "This . is also %% valid@!)+*(:ID", - "1234", - strings.Repeat("W", 255), - } - for _, v := range validId { - _, errors := validateS3BucketReplicationRuleId(v, "id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid lifecycle rule id: %q", v, errors) - } - } - - invalidId := []string{ - // length > 255 - strings.Repeat("W", 256), - } - for _, v := range invalidId { - _, errors := validateS3BucketReplicationRuleId(v, "id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid replication configuration rule id", v) - } - } -} - -func TestValidateS3BucketReplicationRulePrefix(t *testing.T) { - validId := []string{ - "YadaHereAndThere", - "Valid-5Rule_ID", - "This . 
is also %% valid@!)+*(:ID", - "1234", - strings.Repeat("W", 1024), - } - for _, v := range validId { - _, errors := validateS3BucketReplicationRulePrefix(v, "id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid lifecycle rule id: %q", v, errors) - } - } - - invalidId := []string{ - // length > 1024 - strings.Repeat("W", 1025), - } - for _, v := range invalidId { - _, errors := validateS3BucketReplicationRulePrefix(v, "id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid replication configuration rule id", v) - } - } -} - -func TestValidateS3BucketReplicationDestinationStorageClass(t *testing.T) { - validStorageClass := []string{ - s3.StorageClassStandard, - s3.StorageClassStandardIa, - s3.StorageClassReducedRedundancy, - } - - for _, v := range validStorageClass { - _, errors := validateS3BucketReplicationDestinationStorageClass(v, "storage_class") - if len(errors) != 0 { - t.Fatalf("%q should be valid storage class: %q", v, errors) - } - } - - invalidStorageClass := []string{ - "FOO", - "1234", - } - for _, v := range invalidStorageClass { - _, errors := validateS3BucketReplicationDestinationStorageClass(v, "storage_class") - if len(errors) == 0 { - t.Fatalf("%q should be invalid storage class", v) - } - } -} - -func TestValidateS3BucketReplicationRuleStatus(t *testing.T) { - validRuleStatuses := []string{ - s3.ReplicationRuleStatusEnabled, - s3.ReplicationRuleStatusDisabled, - } - - for _, v := range validRuleStatuses { - _, errors := validateS3BucketReplicationRuleStatus(v, "status") - if len(errors) != 0 { - t.Fatalf("%q should be valid rule status: %q", v, errors) - } - } - - invalidRuleStatuses := []string{ - "FOO", - "1234", - } - for _, v := range invalidRuleStatuses { - _, errors := validateS3BucketReplicationRuleStatus(v, "status") - if len(errors) == 0 { - t.Fatalf("%q should be invalid rule status", v) - } - } -} - -func TestValidateS3BucketLifecycleRuleId(t *testing.T) { - validId := []string{ - "YadaHereAndThere", - 
"Valid-5Rule_ID", - "This . is also %% valid@!)+*(:ID", - "1234", - strings.Repeat("W", 255), - } - for _, v := range validId { - _, errors := validateS3BucketLifecycleRuleId(v, "id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid lifecycle rule id: %q", v, errors) - } - } - - invalidId := []string{ - // length > 255 - strings.Repeat("W", 256), - } - for _, v := range invalidId { - _, errors := validateS3BucketLifecycleRuleId(v, "id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid lifecycle rule id", v) - } - } -} - -func TestValidateIntegerInRange(t *testing.T) { - validIntegers := []int{-259, 0, 1, 5, 999} - min := -259 - max := 999 - for _, v := range validIntegers { - _, errors := validateIntegerInRange(min, max)(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be an integer in range (%d, %d): %q", v, min, max, errors) - } - } - - invalidIntegers := []int{-260, -99999, 1000, 25678} - for _, v := range invalidIntegers { - _, errors := validateIntegerInRange(min, max)(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an integer outside range (%d, %d)", v, min, max) - } - } -} - -func TestResourceAWSElastiCacheClusterIdValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting", - ErrCount: 1, - }, - { - Value: "t.sting", - ErrCount: 1, - }, - { - Value: "t--sting", - ErrCount: 1, - }, - { - Value: "1testing", - ErrCount: 1, - }, - { - Value: "testing-", - ErrCount: 1, - }, - { - Value: randomString(65), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateElastiCacheClusterId(tc.Value, "aws_elasticache_cluster_cluster_id") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the ElastiCache Cluster cluster_id to trigger a validation error") - } - } -} - -func TestValidateDbEventSubscriptionName(t *testing.T) { - validNames := []string{ - "valid-name", - "valid02-name", - "Valid-Name1", - } - for _, v := range validNames { - _, errors := 
validateDbEventSubscriptionName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid RDS Event Subscription Name: %q", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "and here is another * invalid name", - "also $ invalid", - "This . is also %% invalid@!)+(", - "*", - "", - " ", - "_", - // length > 255 - strings.Repeat("W", 256), - } - for _, v := range invalidNames { - _, errors := validateDbEventSubscriptionName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid RDS Event Subscription Name", v) - } - } -} - -func TestValidateJsonString(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: `{0:"1"}`, - ErrCount: 1, - }, - { - Value: `{'abc':1}`, - ErrCount: 1, - }, - { - Value: `{"def":}`, - ErrCount: 1, - }, - { - Value: `{"xyz":[}}`, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateJsonString(tc.Value, "json") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: ``, - ErrCount: 0, - }, - { - Value: `{}`, - ErrCount: 0, - }, - { - Value: `{"abc":["1","2"]}`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateJsonString(tc.Value, "json") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func TestValidateIAMPolicyJsonString(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: `{0:"1"}`, - ErrCount: 1, - }, - { - Value: `{'abc':1}`, - ErrCount: 1, - }, - { - Value: `{"def":}`, - ErrCount: 1, - }, - { - Value: `{"xyz":[}}`, - ErrCount: 1, - }, - { - Value: ``, - ErrCount: 1, - }, - { - Value: ` {"xyz": "foo"}`, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateIAMPolicyJson(tc.Value, "json") - 
if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: `{}`, - ErrCount: 0, - }, - { - Value: `{"abc":["1","2"]}`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateIAMPolicyJson(tc.Value, "json") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func TestValidateCloudFormationTemplate(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: `{"abc":"`, - ErrCount: 1, - }, - { - Value: "abc: [", - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateCloudFormationTemplate(tc.Value, "template") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: `{"abc":"1"}`, - ErrCount: 0, - }, - { - Value: `abc: 1`, - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateCloudFormationTemplate(tc.Value, "template") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } -} - -func TestValidateApiGatewayIntegrationType(t *testing.T) { - type testCases struct { - Value string - ErrCount int - } - - invalidCases := []testCases{ - { - Value: "incorrect", - ErrCount: 1, - }, - { - Value: "aws_proxy", - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := validateApiGatewayIntegrationType(tc.Value, "types") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } - - validCases := []testCases{ - { - Value: "MOCK", - ErrCount: 0, - }, - { - Value: "AWS_PROXY", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := validateApiGatewayIntegrationType(tc.Value, "types") - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q 
not to trigger a validation error.", tc.Value) - } - } -} - -func TestValidateSQSQueueName(t *testing.T) { - validNames := []string{ - "valid-name", - "valid02-name", - "Valid-Name1", - "_", - "-", - strings.Repeat("W", 80), - } - for _, v := range validNames { - if errors := validateSQSQueueName(v, "name"); len(errors) > 0 { - t.Fatalf("%q should be a valid SQS queue Name", v) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "another * invalid name", - "also $ invalid", - "This . is also %% invalid@!)+(", - "*", - "", - " ", - ".", - strings.Repeat("W", 81), // length > 80 - } - for _, v := range invalidNames { - if errors := validateSQSQueueName(v, "name"); len(errors) == 0 { - t.Fatalf("%q should be an invalid SQS queue Name", v) - } - } -} - -func TestValidateSQSFifoQueueName(t *testing.T) { - validNames := []string{ - "valid-name.fifo", - "valid02-name.fifo", - "Valid-Name1.fifo", - "_.fifo", - "a.fifo", - "A.fifo", - "9.fifo", - "-.fifo", - fmt.Sprintf("%s.fifo", strings.Repeat("W", 75)), - } - for _, v := range validNames { - if errors := validateSQSFifoQueueName(v, "name"); len(errors) > 0 { - t.Fatalf("%q should be a valid SQS FIFO queue Name: %v", v, errors) - } - } - - invalidNames := []string{ - "Here is a name with: colon", - "another * invalid name", - "also $ invalid", - "This . 
is also %% invalid@!)+(", - ".fifo", - "*", - "", - " ", - ".", - strings.Repeat("W", 81), // length > 80 - } - for _, v := range invalidNames { - if errors := validateSQSFifoQueueName(v, "name"); len(errors) == 0 { - t.Fatalf("%q should be an invalid SQS FIFO queue Name: %v", v, errors) - } - } -} - -func TestValidateSNSSubscriptionProtocol(t *testing.T) { - validProtocols := []string{ - "lambda", - "sqs", - "sqs", - "application", - "http", - "https", - } - for _, v := range validProtocols { - if _, errors := validateSNSSubscriptionProtocol(v, "protocol"); len(errors) > 0 { - t.Fatalf("%q should be a valid SNS Subscription protocol: %v", v, errors) - } - } - - invalidProtocols := []string{ - "Email", - "email", - "Email-JSON", - "email-json", - "SMS", - "sms", - } - for _, v := range invalidProtocols { - if _, errors := validateSNSSubscriptionProtocol(v, "protocol"); len(errors) == 0 { - t.Fatalf("%q should be an invalid SNS Subscription protocol: %v", v, errors) - } - } -} - -func TestValidateSecurityRuleType(t *testing.T) { - validTypes := []string{ - "ingress", - "egress", - } - for _, v := range validTypes { - if _, errors := validateSecurityRuleType(v, "type"); len(errors) > 0 { - t.Fatalf("%q should be a valid Security Group Rule type: %v", v, errors) - } - } - - invalidTypes := []string{ - "foo", - "ingresss", - } - for _, v := range invalidTypes { - if _, errors := validateSecurityRuleType(v, "type"); len(errors) == 0 { - t.Fatalf("%q should be an invalid Security Group Rule type: %v", v, errors) - } - } -} - -func TestValidateOnceAWeekWindowFormat(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - // once a day window format - Value: "04:00-05:00", - ErrCount: 1, - }, - { - // invalid day of week - Value: "san:04:00-san:05:00", - ErrCount: 1, - }, - { - // invalid hour - Value: "sun:24:00-san:25:00", - ErrCount: 1, - }, - { - // invalid min - Value: "sun:04:00-sun:04:60", - ErrCount: 1, - }, - { - // valid format - Value: 
"sun:04:00-sun:05:00", - ErrCount: 0, - }, - { - // "Sun" can also be used - Value: "Sun:04:00-Sun:05:00", - ErrCount: 0, - }, - { - // valid format - Value: "", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateOnceAWeekWindowFormat(tc.Value, "maintenance_window") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %d validation errors, But got %d errors for \"%s\"", tc.ErrCount, len(errors), tc.Value) - } - } -} - -func TestValidateOnceADayWindowFormat(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - // once a week window format - Value: "sun:04:00-sun:05:00", - ErrCount: 1, - }, - { - // invalid hour - Value: "24:00-25:00", - ErrCount: 1, - }, - { - // invalid min - Value: "04:00-04:60", - ErrCount: 1, - }, - { - // valid format - Value: "04:00-05:00", - ErrCount: 0, - }, - { - // valid format - Value: "", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateOnceADayWindowFormat(tc.Value, "backup_window") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %d validation errors, But got %d errors for \"%s\"", tc.ErrCount, len(errors), tc.Value) - } - } -} - -func TestValidateRoute53RecordType(t *testing.T) { - validTypes := []string{ - "AAAA", - "SOA", - "A", - "TXT", - "CNAME", - "MX", - "NAPTR", - "PTR", - "SPF", - "SRV", - "NS", - } - - invalidTypes := []string{ - "a", - "alias", - "SpF", - "Txt", - "AaAA", - } - - for _, v := range validTypes { - _, errors := validateRoute53RecordType(v, "route53_record") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Route53 record type: %v", v, errors) - } - } - - for _, v := range invalidTypes { - _, errors := validateRoute53RecordType(v, "route53_record") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Route53 record type", v) - } - } -} - -func TestValidateEcsPlacementConstraint(t *testing.T) { - cases := []struct { - constType string - constExpr string - Err bool - }{ - { - constType: "distinctInstance", - 
constExpr: "", - Err: false, - }, - { - constType: "memberOf", - constExpr: "", - Err: true, - }, - { - constType: "distinctInstance", - constExpr: "expression", - Err: false, - }, - { - constType: "memberOf", - constExpr: "expression", - Err: false, - }, - } - - for _, tc := range cases { - if err := validateAwsEcsPlacementConstraint(tc.constType, tc.constExpr); err != nil && !tc.Err { - t.Fatalf("Unexpected validation error for \"%s:%s\": %s", - tc.constType, tc.constExpr, err) - } - - } -} - -func TestValidateEcsPlacementStrategy(t *testing.T) { - cases := []struct { - stratType string - stratField string - Err bool - }{ - { - stratType: "random", - stratField: "", - Err: false, - }, - { - stratType: "spread", - stratField: "instanceID", - Err: false, - }, - { - stratType: "binpack", - stratField: "cpu", - Err: false, - }, - { - stratType: "binpack", - stratField: "memory", - Err: false, - }, - { - stratType: "binpack", - stratField: "disk", - Err: true, - }, - { - stratType: "fakeType", - stratField: "", - Err: true, - }, - } - - for _, tc := range cases { - if err := validateAwsEcsPlacementStrategy(tc.stratType, tc.stratField); err != nil && !tc.Err { - t.Fatalf("Unexpected validation error for \"%s:%s\": %s", - tc.stratType, tc.stratField, err) - } - } -} - -func TestValidateStepFunctionActivityName(t *testing.T) { - validTypes := []string{ - "foo", - "FooBar123", - } - - invalidTypes := []string{ - strings.Repeat("W", 81), // length > 80 - } - - for _, v := range validTypes { - _, errors := validateSfnActivityName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Step Function Activity name: %v", v, errors) - } - } - - for _, v := range invalidTypes { - _, errors := validateSfnActivityName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Step Function Activity name", v) - } - } -} - -func TestValidateStepFunctionStateMachineDefinition(t *testing.T) { - validDefinitions := []string{ - "foobar", - strings.Repeat("W", 
1048576), - } - - invalidDefinitions := []string{ - strings.Repeat("W", 1048577), // length > 1048576 - } - - for _, v := range validDefinitions { - _, errors := validateSfnStateMachineDefinition(v, "definition") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Step Function State Machine definition: %v", v, errors) - } - } - - for _, v := range invalidDefinitions { - _, errors := validateSfnStateMachineDefinition(v, "definition") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Step Function State Machine definition", v) - } - } -} - -func TestValidateStepFunctionStateMachineName(t *testing.T) { - validTypes := []string{ - "foo", - "BAR", - "FooBar123", - "FooBar123Baz-_", - } - - invalidTypes := []string{ - "foo bar", - "foo", - "foo{bar}", - "foo[bar]", - "foo*bar", - "foo?bar", - "foo#bar", - "foo%bar", - "foo\bar", - "foo^bar", - "foo|bar", - "foo~bar", - "foo$bar", - "foo&bar", - "foo,bar", - "foo:bar", - "foo;bar", - "foo/bar", - strings.Repeat("W", 81), // length > 80 - } - - for _, v := range validTypes { - _, errors := validateSfnStateMachineName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Step Function State Machine name: %v", v, errors) - } - } - - for _, v := range invalidTypes { - _, errors := validateSfnStateMachineName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Step Function State Machine name", v) - } - } -} - -func TestValidateEmrEbsVolumeType(t *testing.T) { - cases := []struct { - VolType string - ErrCount int - }{ - { - VolType: "gp2", - ErrCount: 0, - }, - { - VolType: "io1", - ErrCount: 0, - }, - { - VolType: "standard", - ErrCount: 0, - }, - { - VolType: "stand", - ErrCount: 1, - }, - { - VolType: "io", - ErrCount: 1, - }, - { - VolType: "gp1", - ErrCount: 1, - }, - { - VolType: "fast-disk", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAwsEmrEbsVolumeType(tc.VolType, "volume") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %d 
errors, got %d: %s", tc.ErrCount, len(errors), errors) - } - } -} - -func TestValidateAppautoscalingScalableDimension(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "ecs:service:DesiredCount", - ErrCount: 0, - }, - { - Value: "ec2:spot-fleet-request:TargetCapacity", - ErrCount: 0, - }, - { - Value: "ec2:service:DesiredCount", - ErrCount: 1, - }, - { - Value: "ecs:spot-fleet-request:TargetCapacity", - ErrCount: 1, - }, - { - Value: "", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAppautoscalingScalableDimension(tc.Value, "scalable_dimension") - if len(errors) != tc.ErrCount { - t.Fatalf("Scalable Dimension validation failed for value %q: %q", tc.Value, errors) - } - } -} - -func TestValidateAppautoscalingServiceNamespace(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "ecs", - ErrCount: 0, - }, - { - Value: "ec2", - ErrCount: 0, - }, - { - Value: "autoscaling", - ErrCount: 1, - }, - { - Value: "s3", - ErrCount: 1, - }, - { - Value: "es", - ErrCount: 1, - }, - { - Value: "", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAppautoscalingServiceNamespace(tc.Value, "service_namespace") - if len(errors) != tc.ErrCount { - t.Fatalf("Service Namespace validation failed for value %q: %q", tc.Value, errors) - } - } -} - -func TestValidateDmsEndpointId(t *testing.T) { - validIds := []string{ - "tf-test-endpoint-1", - "tfTestEndpoint", - } - - for _, s := range validIds { - _, errors := validateDmsEndpointId(s, "endpoint_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid endpoint id: %v", s, errors) - } - } - - invalidIds := []string{ - "tf_test_endpoint_1", - "tf.test.endpoint.1", - "tf test endpoint 1", - "tf-test-endpoint-1!", - "tf-test-endpoint-1-", - "tf-test-endpoint--1", - 
"tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1tf-test-endpoint-1", - } - - for _, s := range invalidIds { - _, errors := validateDmsEndpointId(s, "endpoint_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid endpoint id: %v", s, errors) - } - } -} - -func TestValidateDmsCertificateId(t *testing.T) { - validIds := []string{ - "tf-test-certificate-1", - "tfTestEndpoint", - } - - for _, s := range validIds { - _, errors := validateDmsCertificateId(s, "certificate_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid certificate id: %v", s, errors) - } - } - - invalidIds := []string{ - "tf_test_certificate_1", - "tf.test.certificate.1", - "tf test certificate 1", - "tf-test-certificate-1!", - "tf-test-certificate-1-", - "tf-test-certificate--1", - "tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1tf-test-certificate-1", - } - - for _, s := range invalidIds { - _, errors := validateDmsEndpointId(s, "certificate_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid certificate id: %v", s, errors) - } - } -} - -func TestValidateDmsReplicationInstanceId(t *testing.T) { - validIds := []string{ - "tf-test-replication-instance-1", - "tfTestReplicaitonInstance", - } - - for _, s := range validIds { - _, errors := validateDmsReplicationInstanceId(s, "replicaiton_instance_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid replication instance id: %v", s, errors) - } - } - - invalidIds := []string{ - "tf_test_replication-instance_1", - "tf.test.replication.instance.1", - "tf test replication 
instance 1", - "tf-test-replication-instance-1!", - "tf-test-replication-instance-1-", - "tf-test-replication-instance--1", - "tf-test-replication-instance-1tf-test-replication-instance-1tf-test-replication-instance-1", - } - - for _, s := range invalidIds { - _, errors := validateDmsReplicationInstanceId(s, "replication_instance_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid replication instance id: %v", s, errors) - } - } -} - -func TestValidateDmsReplicationSubnetGroupId(t *testing.T) { - validIds := []string{ - "tf-test-replication-subnet-group-1", - "tf_test_replication_subnet_group_1", - "tf.test.replication.subnet.group.1", - "tf test replication subnet group 1", - "tfTestReplicationSubnetGroup", - } - - for _, s := range validIds { - _, errors := validateDmsReplicationSubnetGroupId(s, "replication_subnet_group_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid replication subnet group id: %v", s, errors) - } - } - - invalidIds := []string{ - "default", - "tf-test-replication-subnet-group-1!", - "tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1tf-test-replication-subnet-group-1", - } - - for _, s := range invalidIds { - _, errors := validateDmsReplicationSubnetGroupId(s, "replication_subnet_group_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid replication subnet group id: %v", s, errors) - } - } -} - -func TestValidateDmsReplicationTaskId(t *testing.T) { - validIds := []string{ - "tf-test-replication-task-1", - "tfTestReplicationTask", - } - - for _, s := range validIds { - _, errors := validateDmsReplicationTaskId(s, "replication_task_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid replication task id: %v", s, errors) - } - } - - invalidIds := []string{ - "tf_test_replication_task_1", - 
"tf.test.replication.task.1", - "tf test replication task 1", - "tf-test-replication-task-1!", - "tf-test-replication-task-1-", - "tf-test-replication-task--1", - "tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1tf-test-replication-task-1", - } - - for _, s := range invalidIds { - _, errors := validateDmsReplicationTaskId(s, "replication_task_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid replication task id: %v", s, errors) - } - } -} - -func TestValidateAccountAlias(t *testing.T) { - validAliases := []string{ - "tf-alias", - "0tf-alias1", - } - - for _, s := range validAliases { - _, errors := validateAccountAlias(s, "account_alias") - if len(errors) > 0 { - t.Fatalf("%q should be a valid account alias: %v", s, errors) - } - } - - invalidAliases := []string{ - "tf", - "-tf", - "tf-", - "TF-Alias", - "tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias-tf-alias", - } - - for _, s := range invalidAliases { - _, errors := validateAccountAlias(s, "account_alias") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid account alias: %v", s, errors) - } - } -} - -func TestValidateIamRoleProfileName(t *testing.T) { - validNames := []string{ - "tf-test-role-profile-1", - } - - for _, s := range validNames { - _, errors := validateIamRolePolicyName(s, "name") - if len(errors) > 0 { - t.Fatalf("%q should be a valid IAM role policy name: %v", s, errors) - } - } - - invalidNames := []string{ - "invalid#name", - "this-is-a-very-long-role-policy-name-this-is-a-very-long-role-policy-name-this-is-a-very-long-role-policy-name-this-is-a-very-long", - } - - for _, s := range invalidNames { - _, errors := validateIamRolePolicyName(s, "name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid IAM role policy name: %v", s, errors) - } - } -} - -func 
TestValidateIamRoleProfileNamePrefix(t *testing.T) { - validNamePrefixes := []string{ - "tf-test-role-profile-", - } - - for _, s := range validNamePrefixes { - _, errors := validateIamRolePolicyNamePrefix(s, "name_prefix") - if len(errors) > 0 { - t.Fatalf("%q should be a valid IAM role policy name prefix: %v", s, errors) - } - } - - invalidNamePrefixes := []string{ - "invalid#name_prefix", - "this-is-a-very-long-role-policy-name-prefix-this-is-a-very-long-role-policy-name-prefix-this-is-a-very-", - } - - for _, s := range invalidNamePrefixes { - _, errors := validateIamRolePolicyNamePrefix(s, "name_prefix") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid IAM role policy name prefix: %v", s, errors) - } - } -} - -func TestValidateApiGatewayUsagePlanQuotaSettingsPeriod(t *testing.T) { - validEntries := []string{ - "DAY", - "WEEK", - "MONTH", - } - - invalidEntries := []string{ - "fooBAR", - "foobar45Baz", - "foobar45Baz@!", - } - - for _, v := range validEntries { - _, errors := validateApiGatewayUsagePlanQuotaSettingsPeriod(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid API Gateway Quota Settings Period: %v", v, errors) - } - } - - for _, v := range invalidEntries { - _, errors := validateApiGatewayUsagePlanQuotaSettingsPeriod(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should not be a API Gateway Quota Settings Period", v) - } - } -} - -func TestValidateApiGatewayUsagePlanQuotaSettings(t *testing.T) { - cases := []struct { - Offset int - Period string - ErrCount int - }{ - { - Offset: 0, - Period: "DAY", - ErrCount: 0, - }, - { - Offset: -1, - Period: "DAY", - ErrCount: 1, - }, - { - Offset: 1, - Period: "DAY", - ErrCount: 1, - }, - { - Offset: 0, - Period: "WEEK", - ErrCount: 0, - }, - { - Offset: 6, - Period: "WEEK", - ErrCount: 0, - }, - { - Offset: -1, - Period: "WEEK", - ErrCount: 1, - }, - { - Offset: 7, - Period: "WEEK", - ErrCount: 1, - }, - { - Offset: 0, - Period: "MONTH", - ErrCount: 0, - }, - { - Offset: 27, - 
Period: "MONTH", - ErrCount: 0, - }, - { - Offset: -1, - Period: "MONTH", - ErrCount: 1, - }, - { - Offset: 28, - Period: "MONTH", - ErrCount: 1, - }, - } - - for _, tc := range cases { - m := make(map[string]interface{}) - m["offset"] = tc.Offset - m["period"] = tc.Period - - errors := validateApiGatewayUsagePlanQuotaSettings(m) - if len(errors) != tc.ErrCount { - t.Fatalf("API Gateway Usage Plan Quota Settings validation failed: %v", errors) - } - } -} - -func TestValidateElbName(t *testing.T) { - validNames := []string{ - "tf-test-elb", - } - - for _, s := range validNames { - _, errors := validateElbName(s, "name") - if len(errors) > 0 { - t.Fatalf("%q should be a valid ELB name: %v", s, errors) - } - } - - invalidNames := []string{ - "tf.test.elb.1", - "tf-test-elb-tf-test-elb-tf-test-elb", - "-tf-test-elb", - "tf-test-elb-", - } - - for _, s := range invalidNames { - _, errors := validateElbName(s, "name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid ELB name: %v", s, errors) - } - } -} - -func TestValidateElbNamePrefix(t *testing.T) { - validNamePrefixes := []string{ - "test-", - } - - for _, s := range validNamePrefixes { - _, errors := validateElbNamePrefix(s, "name_prefix") - if len(errors) > 0 { - t.Fatalf("%q should be a valid ELB name prefix: %v", s, errors) - } - } - - invalidNamePrefixes := []string{ - "tf.test.elb.", - "tf-test", - "-test", - } - - for _, s := range invalidNamePrefixes { - _, errors := validateElbNamePrefix(s, "name_prefix") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid ELB name prefix: %v", s, errors) - } - } -} - -func TestValidateDbSubnetGroupName(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting", - ErrCount: 1, - }, - { - Value: "testing?", - ErrCount: 1, - }, - { - Value: "default", - ErrCount: 1, - }, - { - Value: randomString(300), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbSubnetGroupName(tc.Value, 
"aws_db_subnet_group") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DB Subnet Group name to trigger a validation error") - } - } -} - -func TestValidateDbSubnetGroupNamePrefix(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting", - ErrCount: 1, - }, - { - Value: "testing?", - ErrCount: 1, - }, - { - Value: randomString(230), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbSubnetGroupNamePrefix(tc.Value, "aws_db_subnet_group") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DB Subnet Group name prefix to trigger a validation error") - } - } -} - -func TestValidateDbOptionGroupName(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "1testing123", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: randomString(256), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbOptionGroupName(tc.Value, "aws_db_option_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DB Option Group Name to trigger a validation error") - } - } -} - -func TestValidateDbOptionGroupNamePrefix(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "1testing123", - ErrCount: 1, - }, - { - Value: "testing--123", - ErrCount: 1, - }, - { - Value: randomString(230), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateDbOptionGroupNamePrefix(tc.Value, "aws_db_option_group_name") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the DB Option Group name prefix to trigger a validation error") - } - } -} - -func TestValidateOpenIdURL(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "http://wrong.scheme.com", - ErrCount: 1, - }, - { - Value: 
"ftp://wrong.scheme.co.uk", - ErrCount: 1, - }, - { - Value: "%@invalidUrl", - ErrCount: 1, - }, - { - Value: "https://example.com/?query=param", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpenIdURL(tc.Value, "url") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %d of OpenID URL validation errors, got %d", tc.ErrCount, len(errors)) - } - } -} - -func TestValidateAwsKmsName(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "alias/aws/s3", - ErrCount: 0, - }, - { - Value: "alias/hashicorp", - ErrCount: 0, - }, - { - Value: "hashicorp", - ErrCount: 1, - }, - { - Value: "hashicorp/terraform", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAwsKmsName(tc.Value, "name") - if len(errors) != tc.ErrCount { - t.Fatalf("AWS KMS Alias Name validation failed: %v", errors) - } - } -} - -func TestValidateCognitoIdentityPoolName(t *testing.T) { - validValues := []string{ - "123", - "1 2 3", - "foo", - "foo bar", - "foo_bar", - "1foo 2bar 3", - } - - for _, s := range validValues { - _, errors := validateCognitoIdentityPoolName(s, "identity_pool_name") - if len(errors) > 0 { - t.Fatalf("%q should be a valid Cognito Identity Pool Name: %v", s, errors) - } - } - - invalidValues := []string{ - "1-2-3", - "foo!", - "foo-bar", - "foo-bar", - "foo1-bar2", - } - - for _, s := range invalidValues { - _, errors := validateCognitoIdentityPoolName(s, "identity_pool_name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Cognito Identity Pool Name: %v", s, errors) - } - } -} - -func TestValidateCognitoProviderDeveloperName(t *testing.T) { - validValues := []string{ - "1", - "foo", - "1.2", - "foo1-bar2-baz3", - "foo_bar", - } - - for _, s := range validValues { - _, errors := validateCognitoProviderDeveloperName(s, "developer_provider_name") - if len(errors) > 0 { - t.Fatalf("%q should be a valid Cognito Provider Developer Name: %v", s, errors) - } - } - - invalidValues := 
[]string{ - "foo!", - "foo:bar", - "foo/bar", - "foo;bar", - } - - for _, s := range invalidValues { - _, errors := validateCognitoProviderDeveloperName(s, "developer_provider_name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Cognito Provider Developer Name: %v", s, errors) - } - } -} - -func TestValidateCognitoSupportedLoginProviders(t *testing.T) { - validValues := []string{ - "foo", - "7346241598935552", - "123456789012.apps.googleusercontent.com", - "foo_bar", - "foo;bar", - "foo/bar", - "foo-bar", - "xvz1evFS4wEEPTGEFPHBog;kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw", - strings.Repeat("W", 128), - } - - for _, s := range validValues { - _, errors := validateCognitoSupportedLoginProviders(s, "supported_login_providers") - if len(errors) > 0 { - t.Fatalf("%q should be a valid Cognito Supported Login Providers: %v", s, errors) - } - } - - invalidValues := []string{ - "", - strings.Repeat("W", 129), // > 128 - "foo:bar_baz", - "foobar,foobaz", - "foobar=foobaz", - } - - for _, s := range invalidValues { - _, errors := validateCognitoSupportedLoginProviders(s, "supported_login_providers") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Cognito Supported Login Providers: %v", s, errors) - } - } -} - -func TestValidateCognitoIdentityProvidersClientId(t *testing.T) { - validValues := []string{ - "7lhlkkfbfb4q5kpp90urffao", - "12345678", - "foo_123", - strings.Repeat("W", 128), - } - - for _, s := range validValues { - _, errors := validateCognitoIdentityProvidersClientId(s, "client_id") - if len(errors) > 0 { - t.Fatalf("%q should be a valid Cognito Identity Provider Client ID: %v", s, errors) - } - } - - invalidValues := []string{ - "", - strings.Repeat("W", 129), // > 128 - "foo-bar", - "foo:bar", - "foo;bar", - } - - for _, s := range invalidValues { - _, errors := validateCognitoIdentityProvidersClientId(s, "client_id") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Cognito Identity Provider Client ID: %v", s, errors) - 
} - } -} - -func TestValidateCognitoIdentityProvidersProviderName(t *testing.T) { - validValues := []string{ - "foo", - "7346241598935552", - "foo_bar", - "foo:bar", - "foo/bar", - "foo-bar", - "cognito-idp.us-east-1.amazonaws.com/us-east-1_Zr231apJu", - strings.Repeat("W", 128), - } - - for _, s := range validValues { - _, errors := validateCognitoIdentityProvidersProviderName(s, "provider_name") - if len(errors) > 0 { - t.Fatalf("%q should be a valid Cognito Identity Provider Name: %v", s, errors) - } - } - - invalidValues := []string{ - "", - strings.Repeat("W", 129), // > 128 - "foo;bar_baz", - "foobar,foobaz", - "foobar=foobaz", - } - - for _, s := range invalidValues { - _, errors := validateCognitoIdentityProvidersProviderName(s, "provider_name") - if len(errors) == 0 { - t.Fatalf("%q should not be a valid Cognito Identity Provider Name: %v", s, errors) - } - } -} - -func TestValidateWafMetricName(t *testing.T) { - validNames := []string{ - "testrule", - "testRule", - "testRule123", - } - for _, v := range validNames { - _, errors := validateWafMetricName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid WAF metric name: %q", v, errors) - } - } - - invalidNames := []string{ - "!", - "/", - " ", - ":", - ";", - "white space", - "/slash-at-the-beginning", - "slash-at-the-end/", - } - for _, v := range invalidNames { - _, errors := validateWafMetricName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid WAF metric name", v) - } - } -} - -func TestValidateIamRoleDescription(t *testing.T) { - validNames := []string{ - "This 1s a D3scr!pti0n with weird content: @ #^ù£ê®æ ø]ŒîÏî~ÈÙ£÷=,ë", - strings.Repeat("W", 1000), - } - for _, v := range validNames { - _, errors := validateIamRoleDescription(v, "description") - if len(errors) != 0 { - t.Fatalf("%q should be a valid IAM Role Description: %q", v, errors) - } - } - - invalidNames := []string{ - strings.Repeat("W", 1001), // > 1000 - } - for _, v := range invalidNames { - _, 
errors := validateIamRoleDescription(v, "description") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid IAM Role Description", v) - } - } -} - -func TestValidateSsmParameterType(t *testing.T) { - validTypes := []string{ - "String", - "StringList", - "SecureString", - } - for _, v := range validTypes { - _, errors := validateSsmParameterType(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid SSM parameter type: %q", v, errors) - } - } - - invalidTypes := []string{ - "foo", - "string", - "Securestring", - } - for _, v := range invalidTypes { - _, errors := validateSsmParameterType(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid SSM parameter type", v) - } - } -} diff --git a/builtin/providers/aws/waf_token_handlers.go b/builtin/providers/aws/waf_token_handlers.go deleted file mode 100644 index ac99f0950..000000000 --- a/builtin/providers/aws/waf_token_handlers.go +++ /dev/null @@ -1,49 +0,0 @@ -package aws - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" -) - -type WafRetryer struct { - Connection *waf.WAF - Region string -} - -type withTokenFunc func(token *string) (interface{}, error) - -func (t *WafRetryer) RetryWithToken(f withTokenFunc) (interface{}, error) { - awsMutexKV.Lock(t.Region) - defer awsMutexKV.Unlock(t.Region) - - var out interface{} - err := resource.Retry(15*time.Minute, func() *resource.RetryError { - var err error - var tokenOut *waf.GetChangeTokenOutput - - tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) - if err != nil { - return resource.NonRetryableError(errwrap.Wrapf("Failed to acquire change token: {{err}}", err)) - } - - out, err = f(tokenOut.ChangeToken) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "WAFStaleDataException" { - return resource.RetryableError(err) - } - return 
resource.NonRetryableError(err) - } - return nil - }) - - return out, err -} - -func newWafRetryer(conn *waf.WAF, region string) *WafRetryer { - return &WafRetryer{Connection: conn, Region: region} -} diff --git a/builtin/providers/aws/wafregionl_token_handlers.go b/builtin/providers/aws/wafregionl_token_handlers.go deleted file mode 100644 index da3d8b58f..000000000 --- a/builtin/providers/aws/wafregionl_token_handlers.go +++ /dev/null @@ -1,50 +0,0 @@ -package aws - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/waf" - "github.com/aws/aws-sdk-go/service/wafregional" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" -) - -type WafRegionalRetryer struct { - Connection *wafregional.WAFRegional - Region string -} - -type withRegionalTokenFunc func(token *string) (interface{}, error) - -func (t *WafRegionalRetryer) RetryWithToken(f withRegionalTokenFunc) (interface{}, error) { - awsMutexKV.Lock(t.Region) - defer awsMutexKV.Unlock(t.Region) - - var out interface{} - err := resource.Retry(15*time.Minute, func() *resource.RetryError { - var err error - var tokenOut *waf.GetChangeTokenOutput - - tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) - if err != nil { - return resource.NonRetryableError(errwrap.Wrapf("Failed to acquire change token: {{err}}", err)) - } - - out, err = f(tokenOut.ChangeToken) - if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == "WAFStaleDataException" { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - - return out, err -} - -func newWafRegionalRetryer(conn *wafregional.WAFRegional, region string) *WafRegionalRetryer { - return &WafRegionalRetryer{Connection: conn, Region: region} -} diff --git a/builtin/providers/aws/website_endpoint_url_test.go b/builtin/providers/aws/website_endpoint_url_test.go deleted file mode 100644 index 67f6b3542..000000000 --- 
a/builtin/providers/aws/website_endpoint_url_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package aws - -import "testing" - -// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html -var websiteEndpoints = []struct { - in string - out string -}{ - {"", "bucket-name.s3-website-us-east-1.amazonaws.com"}, - {"us-west-2", "bucket-name.s3-website-us-west-2.amazonaws.com"}, - {"us-west-1", "bucket-name.s3-website-us-west-1.amazonaws.com"}, - {"eu-west-1", "bucket-name.s3-website-eu-west-1.amazonaws.com"}, - {"eu-central-1", "bucket-name.s3-website.eu-central-1.amazonaws.com"}, - {"ap-south-1", "bucket-name.s3-website.ap-south-1.amazonaws.com"}, - {"ap-southeast-1", "bucket-name.s3-website-ap-southeast-1.amazonaws.com"}, - {"ap-northeast-1", "bucket-name.s3-website-ap-northeast-1.amazonaws.com"}, - {"ap-southeast-2", "bucket-name.s3-website-ap-southeast-2.amazonaws.com"}, - {"ap-northeast-2", "bucket-name.s3-website.ap-northeast-2.amazonaws.com"}, - {"sa-east-1", "bucket-name.s3-website-sa-east-1.amazonaws.com"}, -} - -func TestWebsiteEndpointUrl(t *testing.T) { - for _, tt := range websiteEndpoints { - s := WebsiteEndpoint("bucket-name", tt.in) - if s.Endpoint != tt.out { - t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", tt.in, s.Endpoint, tt.out) - } - } -} diff --git a/builtin/providers/azure/config.go b/builtin/providers/azure/config.go deleted file mode 100644 index 0dd7b142e..000000000 --- a/builtin/providers/azure/config.go +++ /dev/null @@ -1,146 +0,0 @@ -package azure - -import ( - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/affinitygroup" - "github.com/Azure/azure-sdk-for-go/management/hostedservice" - "github.com/Azure/azure-sdk-for-go/management/networksecuritygroup" - "github.com/Azure/azure-sdk-for-go/management/osimage" - "github.com/Azure/azure-sdk-for-go/management/sql" - "github.com/Azure/azure-sdk-for-go/management/storageservice" - 
"github.com/Azure/azure-sdk-for-go/management/virtualmachine" - "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" - "github.com/Azure/azure-sdk-for-go/management/virtualmachineimage" - "github.com/Azure/azure-sdk-for-go/management/virtualnetwork" - "github.com/Azure/azure-sdk-for-go/storage" -) - -// Config is the configuration structure used to instantiate a -// new Azure management client. -type Config struct { - Settings []byte - SubscriptionID string - Certificate []byte - ManagementURL string -} - -// Client contains all the handles required for managing Azure services. -type Client struct { - mgmtClient management.Client - - affinityGroupClient affinitygroup.AffinityGroupClient - - hostedServiceClient hostedservice.HostedServiceClient - - osImageClient osimage.OSImageClient - - sqlClient sql.SQLDatabaseClient - - storageServiceClient storageservice.StorageServiceClient - - vmClient virtualmachine.VirtualMachineClient - - vmDiskClient virtualmachinedisk.DiskClient - - vmImageClient virtualmachineimage.Client - - // unfortunately; because of how Azure's network API works; doing networking operations - // concurrently is very hazardous, and we need a mutex to guard the VirtualNetworkClient. - vnetClient virtualnetwork.VirtualNetworkClient - vnetMutex *sync.Mutex - - // same as the above for security group rule operations: - secGroupClient networksecuritygroup.SecurityGroupClient - secGroupMutex *sync.Mutex -} - -// getStorageClientForStorageService is helper method which returns the -// storage.Client associated to the given storage service name. 
-func (c Client) getStorageClientForStorageService(serviceName string) (storage.Client, error) { - var storageClient storage.Client - - keys, err := c.storageServiceClient.GetStorageServiceKeys(serviceName) - if err != nil { - return storageClient, fmt.Errorf("Failed getting Storage Service keys for %s: %s", serviceName, err) - } - - storageClient, err = storage.NewBasicClient(serviceName, keys.PrimaryKey) - if err != nil { - return storageClient, fmt.Errorf("Failed creating Storage Service client for %s: %s", serviceName, err) - } - - return storageClient, err -} - -// getStorageServiceBlobClient is a helper method which returns the -// storage.BlobStorageClient associated to the given storage service name. -func (c Client) getStorageServiceBlobClient(serviceName string) (storage.BlobStorageClient, error) { - storageClient, err := c.getStorageClientForStorageService(serviceName) - if err != nil { - return storage.BlobStorageClient{}, err - } - - return storageClient.GetBlobService(), nil -} - -// getStorageServiceQueueClient is a helper method which returns the -// storage.QueueServiceClient associated to the given storage service name. 
-func (c Client) getStorageServiceQueueClient(serviceName string) (storage.QueueServiceClient, error) { - storageClient, err := c.getStorageClientForStorageService(serviceName) - if err != nil { - return storage.QueueServiceClient{}, err - } - - return storageClient.GetQueueService(), err -} - -func (c *Config) NewClientFromSettingsData() (*Client, error) { - mc, err := management.ClientFromPublishSettingsData(c.Settings, c.SubscriptionID) - if err != nil { - return nil, err - } - - return &Client{ - mgmtClient: mc, - affinityGroupClient: affinitygroup.NewClient(mc), - hostedServiceClient: hostedservice.NewClient(mc), - secGroupClient: networksecuritygroup.NewClient(mc), - secGroupMutex: &sync.Mutex{}, - osImageClient: osimage.NewClient(mc), - sqlClient: sql.NewClient(mc), - storageServiceClient: storageservice.NewClient(mc), - vmClient: virtualmachine.NewClient(mc), - vmDiskClient: virtualmachinedisk.NewClient(mc), - vmImageClient: virtualmachineimage.NewClient(mc), - vnetClient: virtualnetwork.NewClient(mc), - vnetMutex: &sync.Mutex{}, - }, nil -} - -// NewClient returns a new Azure management client created -// using a subscription ID and certificate. 
-func (c *Config) NewClient() (*Client, error) { - mc, err := management.NewClient(c.SubscriptionID, c.Certificate) - if err != nil { - return nil, nil - } - - return &Client{ - mgmtClient: mc, - affinityGroupClient: affinitygroup.NewClient(mc), - hostedServiceClient: hostedservice.NewClient(mc), - secGroupClient: networksecuritygroup.NewClient(mc), - secGroupMutex: &sync.Mutex{}, - osImageClient: osimage.NewClient(mc), - sqlClient: sql.NewClient(mc), - storageServiceClient: storageservice.NewClient(mc), - vmClient: virtualmachine.NewClient(mc), - vmDiskClient: virtualmachinedisk.NewClient(mc), - vmImageClient: virtualmachineimage.NewClient(mc), - vnetClient: virtualnetwork.NewClient(mc), - vnetMutex: &sync.Mutex{}, - }, nil -} diff --git a/builtin/providers/azure/constants.go b/builtin/providers/azure/constants.go deleted file mode 100644 index 55264f969..000000000 --- a/builtin/providers/azure/constants.go +++ /dev/null @@ -1,48 +0,0 @@ -package azure - -const ( - // terraformAzureLabel is used as the label for the hosted service created - // by Terraform on Azure. - terraformAzureLabel = "terraform-on-azure" - - // terraformAzureDescription is the description used for the hosted service - // created by Terraform on Azure. - terraformAzureDescription = "Hosted service automatically created by terraform." -) - -// parameterDescriptions holds a list of descriptions for all the available -// parameters of an Azure configuration. 
-var parameterDescriptions = map[string]string{ - // provider descriptions: - "management_url": "The URL of the management API all requests should be sent to.\n" + - "Defaults to 'https://management.core.windows.net/', which is the default Azure API URL.\n" + - "This should be filled in only if you have your own datacenter with its own hosted management API.", - "management_certificate": "The certificate for connecting to the management API specified with 'management_url'", - "subscription_id": "The subscription ID to be used when connecting to the management API.", - "publish_settings_file": "The publish settings file, either created by you or downloaded from 'https://manage.windowsazure.com/publishsettings'", - // general resource descriptions: - "name": "Name of the resource to be created as it will appear in the Azure dashboard.", - "service_name": "Name of the hosted service within Azure. Will have a DNS entry as dns-name.cloudapp.net", - "location": "The Azure location where the resource will be located.\n" + - "A list of Azure locations can be found here: http://azure.microsoft.com/en-us/regions/", - "reverse_dns_fqdn": "The reverse of the fully qualified domain name. Optional.", - "label": "Label by which the resource will be identified by. Optional.", - "description": "Brief description of the resource. Optional.", - // hosted service descriptions: - "ephemeral_contents": "Sets whether the associated contents of this resource should also be\n" + - "deleted upon this resource's deletion. Default is false.", - // instance descriptions: - "image": "The image the new VM will be booted from. Mandatory.", - "size": "The size in GB of the disk to be created. Mandatory.", - "os_type": "The OS type of the VM. Either Windows or Linux. Mandatory.", - "storage_account": "The storage account (pool) name. 
Mandatory.", - "storage_container": "The storage container name from the storage pool given with 'storage_pool'.", - "user_name": "The user name to be configured on the new VM.", - "user_password": "The user password to be configured on the new VM.", - "default_certificate_thumbprint": "The thumbprint of the WinRM Certificate to be used as a default.", - // local network descriptions: - "vpn_gateway_address": "The IP address of the VPN gateway bridged through this virtual network.", - "address_space_prefixes": "List of address space prefixes in the format '/netmask'", - // dns descriptions: - "dns_address": "Address of the DNS server. Required.", -} diff --git a/builtin/providers/azure/errors.go b/builtin/providers/azure/errors.go deleted file mode 100644 index 89fa6e4b3..000000000 --- a/builtin/providers/azure/errors.go +++ /dev/null @@ -1,5 +0,0 @@ -package azure - -import "errors" - -var PlatformStorageError = errors.New("When using a platform image, the 'storage' parameter is required") diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go deleted file mode 100644 index b897d2811..000000000 --- a/builtin/providers/azure/provider.go +++ /dev/null @@ -1,146 +0,0 @@ -package azure - -import ( - "encoding/xml" - "fmt" - - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "settings_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("AZURE_SETTINGS_FILE", nil), - ValidateFunc: validateSettingsFile, - Deprecated: "Use the publish_settings field instead", - }, - - "publish_settings": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("AZURE_PUBLISH_SETTINGS", nil), - ValidateFunc: validatePublishSettings, - }, - - "subscription_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("AZURE_SUBSCRIPTION_ID", ""), - }, - - "certificate": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("AZURE_CERTIFICATE", ""), - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "azure_instance": resourceAzureInstance(), - "azure_affinity_group": resourceAzureAffinityGroup(), - "azure_data_disk": resourceAzureDataDisk(), - "azure_sql_database_server": resourceAzureSqlDatabaseServer(), - "azure_sql_database_server_firewall_rule": resourceAzureSqlDatabaseServerFirewallRule(), - "azure_sql_database_service": resourceAzureSqlDatabaseService(), - "azure_hosted_service": resourceAzureHostedService(), - "azure_storage_service": resourceAzureStorageService(), - "azure_storage_container": resourceAzureStorageContainer(), - "azure_storage_blob": resourceAzureStorageBlob(), - "azure_storage_queue": resourceAzureStorageQueue(), - "azure_virtual_network": resourceAzureVirtualNetwork(), - "azure_dns_server": resourceAzureDnsServer(), - "azure_local_network_connection": resourceAzureLocalNetworkConnection(), - "azure_security_group": resourceAzureSecurityGroup(), - "azure_security_group_rule": resourceAzureSecurityGroupRule(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := 
Config{ - SubscriptionID: d.Get("subscription_id").(string), - Certificate: []byte(d.Get("certificate").(string)), - } - - publishSettings := d.Get("publish_settings").(string) - if publishSettings == "" { - publishSettings = d.Get("settings_file").(string) - } - if publishSettings != "" { - // any errors from readSettings would have been caught at the validate - // step, so we can avoid handling them now - settings, _, _ := readSettings(publishSettings) - config.Settings = settings - return config.NewClientFromSettingsData() - } - - if config.SubscriptionID != "" && len(config.Certificate) > 0 { - return config.NewClient() - } - - return nil, fmt.Errorf( - "Insufficient configuration data. Please specify either a 'settings_file'\n" + - "or both a 'subscription_id' and 'certificate'.") -} - -func validateSettingsFile(v interface{}, k string) ([]string, []error) { - value := v.(string) - if value == "" { - return nil, nil - } - - _, warnings, errors := readSettings(value) - return warnings, errors -} - -func validatePublishSettings(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if value == "" { - return - } - - var settings settingsData - if err := xml.Unmarshal([]byte(value), &settings); err != nil { - es = append(es, fmt.Errorf("error parsing publish_settings as XML: %s", err)) - } - - return -} - -const settingsPathWarnMsg = ` -settings_file was provided as a file path. This support -will be removed in the future. 
Please update your configuration -to use ${file("filename.publishsettings")} instead.` - -func readSettings(pathOrContents string) (s []byte, ws []string, es []error) { - contents, wasPath, err := pathorcontents.Read(pathOrContents) - if err != nil { - es = append(es, fmt.Errorf("error reading settings_file: %s", err)) - } - if wasPath { - ws = append(ws, settingsPathWarnMsg) - } - - var settings settingsData - if err := xml.Unmarshal([]byte(contents), &settings); err != nil { - es = append(es, fmt.Errorf("error parsing settings_file as XML: %s", err)) - } - - s = []byte(contents) - - return -} - -// settingsData is a private struct used to test the unmarshalling of the -// settingsFile contents, to determine if the contents are valid XML -type settingsData struct { - XMLName xml.Name `xml:"PublishData"` -} diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go deleted file mode 100644 index 8e88dea93..000000000 --- a/builtin/providers/azure/provider_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package azure - -import ( - "io" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/go-homedir" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -const ( - testAccSecurityGroupName = "terraform-security-group" - testAccHostedServiceName = "terraform-testing-service" -) - -// testAccStorageServiceName is used as the name for the Storage Service -// created in all storage-related tests. -// It is much more convenient to provide a Storage Service which -// has been created beforehand as the creation of one takes a lot -// and would greatly impede the multitude of tests which rely on one. -// NOTE: the storage container should be located in `West US`. 
-var testAccStorageServiceName = os.Getenv("AZURE_STORAGE") - -const testAccStorageContainerName = "terraform-testing-container" - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "azure": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - sf := os.Getenv("PUBLISH_SETTINGS_FILE") - if sf != "" { - publishSettings, err := ioutil.ReadFile(sf) - if err != nil { - t.Fatalf("Error reading AZURE_SETTINGS_FILE path: %s", err) - } - - os.Setenv("AZURE_PUBLISH_SETTINGS", string(publishSettings)) - } - - if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" { - subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") - certificate := os.Getenv("AZURE_CERTIFICATE") - - if subscriptionID == "" || certificate == "" { - t.Fatal("either AZURE_PUBLISH_SETTINGS, PUBLISH_SETTINGS_FILE, or AZURE_SUBSCRIPTION_ID " + - "and AZURE_CERTIFICATE must be set for acceptance tests") - } - } - - if v := os.Getenv("AZURE_STORAGE"); v == "" { - t.Fatal("AZURE_STORAGE must be set for acceptance tests") - } -} - -func TestAzure_validateSettingsFile(t *testing.T) { - f, err := ioutil.TempFile("", "tf-test") - if err != nil { - t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err) - } - defer os.Remove(f.Name()) - - fx, err := ioutil.TempFile("", "tf-test-xml") - if err != nil { - t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err) - } - defer os.Remove(fx.Name()) - _, err = io.WriteString(fx, "") - if err != nil { - t.Fatalf("Error writing XML File: %s", err) - } - fx.Close() - - home, err := homedir.Dir() - if err != nil { - t.Fatalf("Error fetching homedir: %s", err) - } - fh, err := 
ioutil.TempFile(home, "tf-test-home") - if err != nil { - t.Fatalf("Error creating homedir-based temporary file: %s", err) - } - defer os.Remove(fh.Name()) - _, err = io.WriteString(fh, "") - if err != nil { - t.Fatalf("Error writing XML File: %s", err) - } - fh.Close() - - r := strings.NewReplacer(home, "~") - homePath := r.Replace(fh.Name()) - - cases := []struct { - Input string // String of XML or a path to an XML file - W int // expected count of warnings - E int // expected count of errors - }{ - {"test", 0, 1}, - {f.Name(), 1, 1}, - {fx.Name(), 1, 0}, - {homePath, 1, 0}, - {"", 0, 0}, - } - - for _, tc := range cases { - w, e := validateSettingsFile(tc.Input, "") - - if len(w) != tc.W { - t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e) - } - if len(e) != tc.E { - t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e) - } - } -} - -func TestAzure_providerConfigure(t *testing.T) { - rp := Provider() - raw := map[string]interface{}{ - "publish_settings": testAzurePublishSettingsStr, - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - meta := rp.(*schema.Provider).Meta() - if meta == nil { - t.Fatalf("Expected metadata, got nil: err: %s", err) - } -} - -// testAzurePublishSettingsStr is a revoked publishsettings file -const testAzurePublishSettingsStr = ` - - - - - - -` diff --git a/builtin/providers/azure/resource_azure_affinity_group.go b/builtin/providers/azure/resource_azure_affinity_group.go deleted file mode 100644 index 5dd5bced4..000000000 --- a/builtin/providers/azure/resource_azure_affinity_group.go +++ /dev/null @@ -1,168 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/affinitygroup" - "github.com/hashicorp/terraform/helper/schema" -) 
- -// resourceAzureAffinityGroup returns the *schema.Resource associated to a -// resource affinity group on Azure. -func resourceAzureAffinityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureAffinityGroupCreate, - Read: resourceAzureAffinityGroupRead, - Update: resourceAzureAffinityGroupUpdate, - Exists: resourceAzureAffinityGroupExists, - Delete: resourceAzureAffinityGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "label": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -// resourceAzureAffinityGroupCreate does all the necessary API calls to -// create an affinity group on Azure. -func resourceAzureAffinityGroupCreate(d *schema.ResourceData, meta interface{}) error { - affinityGroupClient := meta.(*Client).affinityGroupClient - - log.Println("[INFO] Begun creating Azure Affinity Group creation request.") - name := d.Get("name").(string) - params := affinitygroup.CreateAffinityGroupParams{ - Name: name, - Label: d.Get("label").(string), - Location: d.Get("location").(string), - } - - if desc, ok := d.GetOk("description"); ok { - params.Description = desc.(string) - } - - log.Println("[INFO] Sending Affinity Group creation request to Azure.") - err := affinityGroupClient.CreateAffinityGroup(params) - if err != nil { - return fmt.Errorf("Error issuing Azure Affinity Group creation: %s", err) - } - - d.SetId(name) - return nil -} - -// resourceAzureAffinityGroupRead does all the necessary API calls to -// read the state of the affinity group off Azure. 
-func resourceAzureAffinityGroupRead(d *schema.ResourceData, meta interface{}) error { - affinityGroupClient := meta.(*Client).affinityGroupClient - - log.Println("[INFO] Issuing Azure Affinity Group list request.") - affinityGroups, err := affinityGroupClient.ListAffinityGroups() - if err != nil { - return fmt.Errorf("Error obtaining Affinity Group list off Azure: %s", err) - } - - var found bool - name := d.Get("name").(string) - for _, group := range affinityGroups.AffinityGroups { - if group.Name == name { - found = true - d.Set("location", group.Location) - d.Set("label", group.Label) - d.Set("description", group.Description) - break - } - } - - if !found { - // it means the affinity group has been deleted in the meantime, so we - // must stop tracking it: - d.SetId("") - } - - return nil -} - -// resourceAzureAffinityGroupUpdate does all the necessary API calls to -// update the state of the affinity group on Azure. -func resourceAzureAffinityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - affinityGroupClient := meta.(*Client).affinityGroupClient - - name := d.Get("name").(string) - clabel := d.HasChange("label") - cdesc := d.HasChange("description") - if clabel || cdesc { - log.Println("[INFO] Beginning Affinity Group update process.") - params := affinitygroup.UpdateAffinityGroupParams{} - - if clabel { - params.Label = d.Get("label").(string) - } - if cdesc { - params.Description = d.Get("description").(string) - } - - log.Println("[INFO] Sending Affinity Group update request to Azure.") - err := affinityGroupClient.UpdateAffinityGroup(name, params) - if err != nil { - return fmt.Errorf("Error updating Azure Affinity Group parameters: %s", err) - } - } - - return nil -} - -// resourceAzureAffinityGroupExists does all the necessary API calls to -// check for the existence of the affinity group on Azure. 
-func resourceAzureAffinityGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { - affinityGroupClient := meta.(*Client).affinityGroupClient - - log.Println("[INFO] Issuing Azure Affinity Group get request.") - name := d.Get("name").(string) - _, err := affinityGroupClient.GetAffinityGroup(name) - if err != nil { - if management.IsResourceNotFoundError(err) { - // it means that the affinity group has been deleted in the - // meantime, so we must untrack it from the schema: - d.SetId("") - return false, nil - } else { - return false, fmt.Errorf("Error getting Affinity Group off Azure: %s", err) - } - } - - return true, nil -} - -// resourceAzureAffinityGroupDelete does all the necessary API calls to -// delete the affinity group off Azure. -func resourceAzureAffinityGroupDelete(d *schema.ResourceData, meta interface{}) error { - affinityGroupClient := meta.(*Client).affinityGroupClient - - log.Println("[INFO] Sending Affinity Group deletion request to Azure.") - name := d.Get("name").(string) - err := affinityGroupClient.DeleteAffinityGroup(name) - if err != nil { - return fmt.Errorf("Error deleting Azure Affinity Group: %s", err) - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_affinity_group_test.go b/builtin/providers/azure/resource_azure_affinity_group_test.go deleted file mode 100644 index 1cc5211c4..000000000 --- a/builtin/providers/azure/resource_azure_affinity_group_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureAffinityGroupBasic(t *testing.T) { - name := "azure_affinity_group.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureAffinityGroupDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccAzureAffinityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureAffinityGroupExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-group"), - resource.TestCheckResourceAttr(name, "location", "West US"), - resource.TestCheckResourceAttr(name, "label", "A nice label."), - resource.TestCheckResourceAttr(name, "description", "A nice description."), - ), - }, - }, - }) -} - -func TestAccAzureAffinityGroupUpdate(t *testing.T) { - name := "azure_affinity_group.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureAffinityGroupDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureAffinityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureAffinityGroupExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-group"), - resource.TestCheckResourceAttr(name, "location", "West US"), - resource.TestCheckResourceAttr(name, "label", "A nice label."), - resource.TestCheckResourceAttr(name, "description", "A nice description."), - ), - }, - resource.TestStep{ - Config: testAccAzureAffinityGroupUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureAffinityGroupExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-group"), - resource.TestCheckResourceAttr(name, "location", "West US"), - resource.TestCheckResourceAttr(name, "label", "An even nicer label."), - resource.TestCheckResourceAttr(name, "description", "An even nicer description."), - ), - }, - }, - }) -} - -func testAccCheckAzureAffinityGroupExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Affinity Group resource %q doesn't exist.", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Affinity Group resource %q ID not set.", name) - 
} - - affinityGroupClient := testAccProvider.Meta().(*Client).affinityGroupClient - _, err := affinityGroupClient.GetAffinityGroup(resource.Primary.ID) - return err - } -} - -func testAccCheckAzureAffinityGroupDestroyed(s *terraform.State) error { - var err error - affinityGroupClient := testAccProvider.Meta().(*Client).affinityGroupClient - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_affinity_group" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Affinity Group resource ID not set.") - } - - _, err = affinityGroupClient.GetAffinityGroup(resource.Primary.ID) - if !management.IsResourceNotFoundError(err) { - return err - } - } - - return nil -} - -const testAccAzureAffinityGroupConfig = ` -resource "azure_affinity_group" "foo" { - name = "terraform-testing-group" - location = "West US" - label = "A nice label." - description = "A nice description." -} -` - -const testAccAzureAffinityGroupUpdateConfig = ` -resource "azure_affinity_group" "foo" { - name = "terraform-testing-group" - location = "West US" - label = "An even nicer label." - description = "An even nicer description." 
-} -` diff --git a/builtin/providers/azure/resource_azure_data_disk.go b/builtin/providers/azure/resource_azure_data_disk.go deleted file mode 100644 index 6e48b976a..000000000 --- a/builtin/providers/azure/resource_azure_data_disk.go +++ /dev/null @@ -1,343 +0,0 @@ -package azure - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" - "github.com/hashicorp/terraform/helper/schema" -) - -const dataDiskBlobStorageURL = "http://%s.blob.core.windows.net/disks/%s.vhd" - -func resourceAzureDataDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureDataDiskCreate, - Read: resourceAzureDataDiskRead, - Update: resourceAzureDataDiskUpdate, - Delete: resourceAzureDataDiskDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "lun": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "caching": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "None", - }, - - "storage_service_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "media_link": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "source_media_link": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "virtual_machine": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceAzureDataDiskCreate(d *schema.ResourceData, meta interface{}) error { - mc := meta.(*Client).mgmtClient - vmDiskClient := meta.(*Client).vmDiskClient - - if err := verifyDataDiskParameters(d); err != nil { - return err - } - 
- lun := d.Get("lun").(int) - vm := d.Get("virtual_machine").(string) - - label := d.Get("label").(string) - if label == "" { - label = fmt.Sprintf("%s-%d", vm, lun) - } - - p := virtualmachinedisk.CreateDataDiskParameters{ - DiskLabel: label, - Lun: lun, - LogicalDiskSizeInGB: d.Get("size").(int), - HostCaching: hostCaching(d), - MediaLink: mediaLink(d), - SourceMediaLink: d.Get("source_media_link").(string), - } - - if name, ok := d.GetOk("name"); ok { - p.DiskName = name.(string) - } - - log.Printf("[DEBUG] Adding data disk %d to instance: %s", lun, vm) - req, err := vmDiskClient.AddDataDisk(vm, vm, vm, p) - if err != nil { - return fmt.Errorf("Error adding data disk %d to instance %s: %s", lun, vm, err) - } - - // Wait until the data disk is added - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for data disk %d to be added to instance %s: %s", lun, vm, err) - } - - log.Printf("[DEBUG] Retrieving data disk %d from instance %s", lun, vm) - disk, err := vmDiskClient.GetDataDisk(vm, vm, vm, lun) - if err != nil { - return fmt.Errorf("Error retrieving data disk %d from instance %s: %s", lun, vm, err) - } - - d.SetId(disk.DiskName) - - return resourceAzureDataDiskRead(d, meta) -} - -func resourceAzureDataDiskRead(d *schema.ResourceData, meta interface{}) error { - vmDiskClient := meta.(*Client).vmDiskClient - - lun := d.Get("lun").(int) - vm := d.Get("virtual_machine").(string) - - log.Printf("[DEBUG] Retrieving data disk: %s", d.Id()) - datadisk, err := vmDiskClient.GetDataDisk(vm, vm, vm, lun) - if err != nil { - if management.IsResourceNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving data disk %s: %s", d.Id(), err) - } - - d.Set("name", datadisk.DiskName) - d.Set("label", datadisk.DiskLabel) - d.Set("lun", datadisk.Lun) - d.Set("size", datadisk.LogicalDiskSizeInGB) - d.Set("caching", datadisk.HostCaching) - d.Set("media_link", datadisk.MediaLink) - - log.Printf("[DEBUG] 
Retrieving disk: %s", d.Id()) - disk, err := vmDiskClient.GetDisk(d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving disk %s: %s", d.Id(), err) - } - - d.Set("virtual_machine", disk.AttachedTo.RoleName) - - return nil -} - -func resourceAzureDataDiskUpdate(d *schema.ResourceData, meta interface{}) error { - mc := meta.(*Client).mgmtClient - vmDiskClient := meta.(*Client).vmDiskClient - - lun := d.Get("lun").(int) - vm := d.Get("virtual_machine").(string) - - if d.HasChange("lun") || d.HasChange("size") || d.HasChange("virtual_machine") { - olun, _ := d.GetChange("lun") - ovm, _ := d.GetChange("virtual_machine") - - log.Printf("[DEBUG] Detaching data disk: %s", d.Id()) - req, err := vmDiskClient. - DeleteDataDisk(ovm.(string), ovm.(string), ovm.(string), olun.(int), false) - if err != nil { - return fmt.Errorf("Error detaching data disk %s: %s", d.Id(), err) - } - - // Wait until the data disk is detached - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for data disk %s to be detached: %s", d.Id(), err) - } - - log.Printf("[DEBUG] Verifying data disk %s is properly detached...", d.Id()) - for i := 0; i < 6; i++ { - disk, err := vmDiskClient.GetDisk(d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving disk %s: %s", d.Id(), err) - } - - // Check if the disk is really detached - if disk.AttachedTo.RoleName == "" { - break - } - - // If not, wait 30 seconds and try it again... 
- time.Sleep(time.Duration(30 * time.Second)) - } - - if d.HasChange("size") { - p := virtualmachinedisk.UpdateDiskParameters{ - Name: d.Id(), - Label: d.Get("label").(string), - ResizedSizeInGB: d.Get("size").(int), - } - - log.Printf("[DEBUG] Updating disk: %s", d.Id()) - req, err := vmDiskClient.UpdateDisk(d.Id(), p) - if err != nil { - return fmt.Errorf("Error updating disk %s: %s", d.Id(), err) - } - - // Wait until the disk is updated - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for disk %s to be updated: %s", d.Id(), err) - } - } - - p := virtualmachinedisk.CreateDataDiskParameters{ - DiskName: d.Id(), - Lun: lun, - HostCaching: hostCaching(d), - MediaLink: mediaLink(d), - } - - log.Printf("[DEBUG] Attaching data disk: %s", d.Id()) - req, err = vmDiskClient.AddDataDisk(vm, vm, vm, p) - if err != nil { - return fmt.Errorf("Error attaching data disk %s to instance %s: %s", d.Id(), vm, err) - } - - // Wait until the data disk is attached - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for data disk %s to be attached to instance %s: %s", d.Id(), vm, err) - } - - // Make sure we return here since all possible changes are - // already updated if we reach this point - return nil - } - - if d.HasChange("caching") { - p := virtualmachinedisk.UpdateDataDiskParameters{ - DiskName: d.Id(), - Lun: lun, - HostCaching: hostCaching(d), - MediaLink: mediaLink(d), - } - - log.Printf("[DEBUG] Updating data disk: %s", d.Id()) - req, err := vmDiskClient.UpdateDataDisk(vm, vm, vm, lun, p) - if err != nil { - return fmt.Errorf("Error updating data disk %s: %s", d.Id(), err) - } - - // Wait until the data disk is updated - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for data disk %s to be updated: %s", d.Id(), err) - } - } - - return resourceAzureDataDiskRead(d, meta) -} - -func resourceAzureDataDiskDelete(d *schema.ResourceData, meta 
interface{}) error { - mc := meta.(*Client).mgmtClient - vmDiskClient := meta.(*Client).vmDiskClient - - lun := d.Get("lun").(int) - vm := d.Get("virtual_machine").(string) - - // If a name was not supplied, it means we created a new emtpy disk and we now want to - // delete that disk again. Otherwise we only want to detach the disk and keep the blob. - _, removeBlob := d.GetOk("name") - - log.Printf("[DEBUG] Detaching data disk %s with removeBlob = %t", d.Id(), removeBlob) - req, err := vmDiskClient.DeleteDataDisk(vm, vm, vm, lun, removeBlob) - if err != nil { - return fmt.Errorf( - "Error detaching data disk %s with removeBlob = %t: %s", d.Id(), removeBlob, err) - } - - // Wait until the data disk is detached and optionally deleted - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for data disk %s to be detached with removeBlob = %t: %s", - d.Id(), removeBlob, err) - } - - d.SetId("") - - return nil -} - -func hostCaching(d *schema.ResourceData) virtualmachinedisk.HostCachingType { - switch d.Get("caching").(string) { - case "ReadOnly": - return virtualmachinedisk.HostCachingTypeReadOnly - case "ReadWrite": - return virtualmachinedisk.HostCachingTypeReadWrite - default: - return virtualmachinedisk.HostCachingTypeNone - } -} - -func mediaLink(d *schema.ResourceData) string { - mediaLink, ok := d.GetOk("media_link") - if ok { - return mediaLink.(string) - } - - name, ok := d.GetOk("name") - if !ok { - name = fmt.Sprintf("%s-%d", d.Get("virtual_machine").(string), d.Get("lun").(int)) - } - - return fmt.Sprintf(dataDiskBlobStorageURL, d.Get("storage_service_name").(string), name.(string)) -} - -func verifyDataDiskParameters(d *schema.ResourceData) error { - caching := d.Get("caching").(string) - if caching != "None" && caching != "ReadOnly" && caching != "ReadWrite" { - return fmt.Errorf( - "Invalid caching type %s! 
Valid options are 'None', 'ReadOnly' and 'ReadWrite'.", caching) - } - - if _, ok := d.GetOk("media_link"); !ok { - if _, ok := d.GetOk("storage_service_name"); !ok { - return fmt.Errorf("If not supplying 'media_link', you must supply 'storage'.") - } - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_data_disk_test.go b/builtin/providers/azure/resource_azure_data_disk_test.go deleted file mode 100644 index fbbfc0018..000000000 --- a/builtin/providers/azure/resource_azure_data_disk_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package azure - -import ( - "fmt" - "strconv" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureDataDisk_basic(t *testing.T) { - var disk virtualmachinedisk.DataDiskResponse - name := fmt.Sprintf("terraform-test%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureDataDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureDataDisk_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDataDiskExists( - "azure_data_disk.foo", &disk), - testAccCheckAzureDataDiskAttributes(&disk), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", fmt.Sprintf("%s-0", name)), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "size", "10"), - ), - }, - }, - }) -} - -func TestAccAzureDataDisk_update(t *testing.T) { - var disk virtualmachinedisk.DataDiskResponse - name := fmt.Sprintf("terraform-test%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureDataDiskDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccAzureDataDisk_advanced(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDataDiskExists( - "azure_data_disk.foo", &disk), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "lun", "1"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "size", "10"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "caching", "ReadOnly"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "virtual_machine", name), - ), - }, - - resource.TestStep{ - Config: testAccAzureDataDisk_update(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDataDiskExists( - "azure_data_disk.foo", &disk), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "lun", "2"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "size", "20"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "caching", "ReadWrite"), - resource.TestCheckResourceAttr( - "azure_data_disk.foo", "virtual_machine", "terraform-test2"), - ), - }, - }, - }) -} - -func testAccCheckAzureDataDiskExists( - n string, - disk *virtualmachinedisk.DataDiskResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Data Disk ID is set") - } - - vm := rs.Primary.Attributes["virtual_machine"] - lun, err := strconv.Atoi(rs.Primary.Attributes["lun"]) - if err != nil { - return err - } - - vmDiskClient := testAccProvider.Meta().(*Client).vmDiskClient - d, err := vmDiskClient.GetDataDisk(vm, vm, vm, lun) - if err != nil { - return err - } - - if d.DiskName != rs.Primary.ID { - return fmt.Errorf("Data Disk not found") - } - - *disk = d - - return nil - } -} - -func 
testAccCheckAzureDataDiskAttributes( - disk *virtualmachinedisk.DataDiskResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if disk.Lun != 0 { - return fmt.Errorf("Bad lun: %d", disk.Lun) - } - - if disk.LogicalDiskSizeInGB != 10 { - return fmt.Errorf("Bad size: %d", disk.LogicalDiskSizeInGB) - } - - if disk.HostCaching != "None" { - return fmt.Errorf("Bad caching: %s", disk.HostCaching) - } - - return nil - } -} - -func testAccCheckAzureDataDiskDestroy(s *terraform.State) error { - vmDiskClient := testAccProvider.Meta().(*Client).vmDiskClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azure_data_disk" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Disk ID is set") - } - - vm := rs.Primary.Attributes["virtual_machine"] - lun, err := strconv.Atoi(rs.Primary.Attributes["lun"]) - if err != nil { - return err - } - - _, err = vmDiskClient.GetDataDisk(vm, vm, vm, lun) - if err == nil { - return fmt.Errorf("Data disk %s still exists", rs.Primary.ID) - } - - if !management.IsResourceNotFoundError(err) { - return err - } - } - - return nil -} - -func testAccAzureDataDisk_basic(name string) string { - return fmt.Sprintf(` - resource "azure_instance" "foo" { - name = "%s" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" - } - - resource "azure_data_disk" "foo" { - lun = 0 - size = 10 - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" - }`, name, testAccStorageServiceName) -} - -func testAccAzureDataDisk_advanced(name string) string { - return fmt.Sprintf(` - resource "azure_instance" "foo" { - name = "%s" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" - } - - resource "azure_data_disk" "foo" { - lun = 1 - size 
= 10 - caching = "ReadOnly" - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" - }`, name, testAccStorageServiceName) -} - -func testAccAzureDataDisk_update(name string) string { - return fmt.Sprintf(` - resource "azure_instance" "foo" { - name = "%s" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" - } - - resource "azure_instance" "bar" { - name = "terraform-test2" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "${azure_instance.foo.storage_service_name}" - location = "West US" - username = "terraform" - password = "Pass!admin123" - } - - resource "azure_data_disk" "foo" { - lun = 2 - size = 20 - caching = "ReadWrite" - storage_service_name = "${azure_instance.bar.storage_service_name}" - virtual_machine = "${azure_instance.bar.id}" - }`, name, testAccStorageServiceName) -} diff --git a/builtin/providers/azure/resource_azure_dns_server.go b/builtin/providers/azure/resource_azure_dns_server.go deleted file mode 100644 index ffd306f7b..000000000 --- a/builtin/providers/azure/resource_azure_dns_server.go +++ /dev/null @@ -1,240 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualnetwork" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureDnsServer returns the *schema.Resource associated -// to an Azure hosted service. 
-func resourceAzureDnsServer() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureDnsServerCreate, - Read: resourceAzureDnsServerRead, - Update: resourceAzureDnsServerUpdate, - Exists: resourceAzureDnsServerExists, - Delete: resourceAzureDnsServerDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - Description: parameterDescriptions["name"], - }, - "dns_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["dns_address"], - }, - }, - } -} - -// resourceAzureDnsServerCreate does all the necessary API calls -// to create a new DNS server definition on Azure. -func resourceAzureDnsServerCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - if management.IsResourceNotFoundError(err) { - // if no network configuration exists yet; create one now: - netConf = virtualnetwork.NetworkConfiguration{} - } else { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - } - - log.Println("[DEBUG] Adding new DNS server definition to Azure.") - name := d.Get("name").(string) - address := d.Get("dns_address").(string) - netConf.Configuration.DNS.DNSServers = append( - netConf.Configuration.DNS.DNSServers, - virtualnetwork.DNSServer{ - Name: name, - IPAddress: address, - }) - - // send the configuration back to Azure: - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed issuing update to network configuration: %s", err) 
- } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error setting network configuration: %s", err) - } - - d.SetId(name) - return nil -} - -// resourceAzureDnsServerRead does all the necessary API calls to read -// the state of the DNS server off Azure. -func resourceAzureDnsServerRead(d *schema.ResourceData, meta interface{}) error { - vnetClient := meta.(*Client).vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - var found bool - name := d.Get("name").(string) - - // search for our DNS and update it if the IP has been changed: - for _, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == name { - found = true - d.Set("dns_address", dns.IPAddress) - break - } - } - - // remove the resource from the state if it has been deleted in the meantime: - if !found { - d.SetId("") - } - - return nil -} - -// resourceAzureDnsServerUpdate does all the necessary API calls -// to update the DNS definition on Azure. 
-func resourceAzureDnsServerUpdate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - var found bool - name := d.Get("name").(string) - - if d.HasChange("dns_address") { - log.Println("[DEBUG] DNS server address has changes; updating it on Azure.") - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - // search for our DNS and update its address value: - for i, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == name { - found = true - netConf.Configuration.DNS.DNSServers[i].IPAddress = d.Get("dns_address").(string) - break - } - } - - // if the config has changes, send the configuration back to Azure: - if found { - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed issuing update to network configuration: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error setting network configuration: %s", err) - } - - return nil - } - } - - // remove the resource from the state if it has been deleted in the meantime: - if !found { - d.SetId("") - } - - return nil -} - -// resourceAzureDnsServerExists does all the necessary API calls to -// check if the DNS server definition already exists on Azure. 
-func resourceAzureDnsServerExists(d *schema.ResourceData, meta interface{}) (bool, error) { - azureClient := meta.(*Client) - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return false, fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - name := d.Get("name").(string) - - // search for the DNS server's definition: - for _, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == name { - return true, nil - } - } - - // if we reached this point; the resource must have been deleted; and we must untrack it: - d.SetId("") - return false, nil -} - -// resourceAzureDnsServerDelete does all the necessary API calls -// to delete the DNS server definition from Azure. -func resourceAzureDnsServerDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - name := d.Get("name").(string) - - // search for the DNS server's definition and remove it: - var found bool - for i, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == name { - found = true - netConf.Configuration.DNS.DNSServers = append( - netConf.Configuration.DNS.DNSServers[:i], - netConf.Configuration.DNS.DNSServers[i+1:]..., - ) - break - } - } - - // if not found; don't bother re-sending the natwork config: - if !found { - return nil - } - - // send the configuration back to Azure: - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := 
vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed issuing update to network configuration: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error setting network configuration: %s", err) - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_dns_server_test.go b/builtin/providers/azure/resource_azure_dns_server_test.go deleted file mode 100644 index ef5188ecb..000000000 --- a/builtin/providers/azure/resource_azure_dns_server_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureDnsServerBasic(t *testing.T) { - name := "azure_dns_server.foo" - - random := acctest.RandInt() - config := testAccAzureDnsServerBasic(random) - serverName := fmt.Sprintf("tf-dns-server-%d", random) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureDnsServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDnsServerExists(name), - resource.TestCheckResourceAttr(name, "name", serverName), - resource.TestCheckResourceAttr(name, "dns_address", "8.8.8.8"), - ), - }, - }, - }) -} - -func TestAccAzureDnsServerUpdate(t *testing.T) { - name := "azure_dns_server.foo" - - random := acctest.RandInt() - basicConfig := testAccAzureDnsServerBasic(random) - updateConfig := testAccAzureDnsServerUpdate(random) - serverName := fmt.Sprintf("tf-dns-server-%d", random) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureDnsServerDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: basicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDnsServerExists(name), - resource.TestCheckResourceAttr(name, "name", serverName), - resource.TestCheckResourceAttr(name, "dns_address", "8.8.8.8"), - ), - }, - - resource.TestStep{ - Config: updateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureDnsServerExists(name), - resource.TestCheckResourceAttr(name, "name", serverName), - resource.TestCheckResourceAttr(name, "dns_address", "8.8.4.4"), - ), - }, - }, - }) -} - -func testAccCheckAzureDnsServerExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Resource not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("No DNS Server ID set.") - } - - vnetClient := testAccProvider.Meta().(*Client).vnetClient - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed fetching networking configuration: %s", err) - } - - for _, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == resource.Primary.ID { - return nil - } - } - - return fmt.Errorf("Azure DNS Server not found.") - } -} - -func testAccCheckAzureDnsServerDestroy(s *terraform.State) error { - vnetClient := testAccProvider.Meta().(*Client).vnetClient - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_dns_server" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("No DNS Server ID is set.") - } - - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - // This is desirable - if there is no network config there can't be any DNS Servers - if management.IsResourceNotFoundError(err) { - continue - } - return fmt.Errorf("Error retrieving networking configuration from Azure: %s", err) - } - - for _, dns := range netConf.Configuration.DNS.DNSServers { - if dns.Name == 
resource.Primary.ID { - return fmt.Errorf("Azure DNS Server still exists.") - } - } - } - - return nil -} - -func testAccAzureDnsServerBasic(random int) string { - return fmt.Sprintf(` -resource "azure_dns_server" "foo" { - name = "tf-dns-server-%d" - dns_address = "8.8.8.8" -} -`, random) -} - -func testAccAzureDnsServerUpdate(random int) string { - return fmt.Sprintf(` -resource "azure_dns_server" "foo" { - name = "tf-dns-server-%d" - dns_address = "8.8.4.4" -} -`, random) -} diff --git a/builtin/providers/azure/resource_azure_hosted_service.go b/builtin/providers/azure/resource_azure_hosted_service.go deleted file mode 100644 index 3d7a638a6..000000000 --- a/builtin/providers/azure/resource_azure_hosted_service.go +++ /dev/null @@ -1,168 +0,0 @@ -package azure - -import ( - "encoding/base64" - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/hostedservice" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureHostedService returns the schema.Resource associated to an -// Azure hosted service. 
-func resourceAzureHostedService() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureHostedServiceCreate, - Read: resourceAzureHostedServiceRead, - Update: resourceAzureHostedServiceUpdate, - Delete: resourceAzureHostedServiceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["location"], - }, - "ephemeral_contents": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - Description: parameterDescriptions["ephemeral_contents"], - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "reverse_dns_fqdn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: parameterDescriptions["reverse_dns_fqdn"], - }, - "label": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: "Made by Terraform.", - Description: parameterDescriptions["label"], - }, - "description": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: parameterDescriptions["description"], - }, - "default_certificate_thumbprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: parameterDescriptions["default_certificate_thumbprint"], - }, - }, - } -} - -// resourceAzureHostedServiceCreate does all the necessary API calls -// to create a hosted service on Azure. 
-func resourceAzureHostedServiceCreate(d *schema.ResourceData, meta interface{}) error { - hostedServiceClient := meta.(*Client).hostedServiceClient - - serviceName := d.Get("name").(string) - location := d.Get("location").(string) - reverseDNS := d.Get("reverse_dns_fqdn").(string) - description := d.Get("description").(string) - label := base64.StdEncoding.EncodeToString([]byte(d.Get("label").(string))) - - err := hostedServiceClient.CreateHostedService( - hostedservice.CreateHostedServiceParameters{ - ServiceName: serviceName, - Location: location, - Label: label, - Description: description, - ReverseDNSFqdn: reverseDNS, - }, - ) - if err != nil { - return fmt.Errorf("Failed defining new Azure hosted service: %s", err) - } - - d.SetId(serviceName) - return nil -} - -// resourceAzureHostedServiceRead does all the necessary API calls -// to read the state of a hosted service from Azure. -func resourceAzureHostedServiceRead(d *schema.ResourceData, meta interface{}) error { - hostedServiceClient := meta.(*Client).hostedServiceClient - - log.Println("[INFO] Querying for hosted service info.") - serviceName := d.Get("name").(string) - hostedService, err := hostedServiceClient.GetHostedService(serviceName) - if err != nil { - if management.IsResourceNotFoundError(err) { - // it means the hosted service was deleted in the meantime, - // so we must remove it here: - d.SetId("") - return nil - } else { - return fmt.Errorf("Failed to get hosted service: %s", err) - } - } - - log.Println("[DEBUG] Reading hosted service query result data.") - d.Set("name", hostedService.ServiceName) - d.Set("url", hostedService.URL) - d.Set("location", hostedService.Location) - d.Set("description", hostedService.Description) - d.Set("label", hostedService.Label) - d.Set("status", hostedService.Status) - d.Set("reverse_dns_fqdn", hostedService.ReverseDNSFqdn) - d.Set("default_certificate_thumbprint", hostedService.DefaultWinRmCertificateThumbprint) - - return nil -} - -// 
resourceAzureHostedServiceUpdate does all the necessary API calls to -// update some settings of a hosted service on Azure. -func resourceAzureHostedServiceUpdate(d *schema.ResourceData, meta interface{}) error { - // NOTE: although no-op; this is still required in order for updates to - // ephemeral_contents to be possible. - - // check if the service still exists: - return resourceAzureHostedServiceRead(d, meta) -} - -// resourceAzureHostedServiceDelete does all the necessary API calls to -// delete a hosted service from Azure. -func resourceAzureHostedServiceDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - hostedServiceClient := azureClient.hostedServiceClient - - log.Println("[INFO] Issuing hosted service deletion.") - serviceName := d.Get("name").(string) - ephemeral := d.Get("ephemeral_contents").(bool) - reqID, err := hostedServiceClient.DeleteHostedService(serviceName, ephemeral) - if err != nil { - return fmt.Errorf("Failed issuing hosted service deletion request: %s", err) - } - - log.Println("[DEBUG] Awaiting confirmation on hosted service deletion.") - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error on hosted service deletion: %s", err) - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_hosted_service_test.go b/builtin/providers/azure/resource_azure_hosted_service_test.go deleted file mode 100644 index fb1a71934..000000000 --- a/builtin/providers/azure/resource_azure_hosted_service_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureHostedServiceBasic(t *testing.T) { - name := "azure_hosted_service.foo" - - hostedServiceName := fmt.Sprintf("terraform-testing-service%d", acctest.RandInt()) - config := 
fmt.Sprintf(testAccAzureHostedServiceBasic, hostedServiceName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureHostedServiceDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureHostedServiceExists(name), - resource.TestCheckResourceAttr(name, "name", hostedServiceName), - resource.TestCheckResourceAttr(name, "location", "North Europe"), - resource.TestCheckResourceAttr(name, "ephemeral_contents", "false"), - resource.TestCheckResourceAttr(name, "description", "very discriptive"), - resource.TestCheckResourceAttr(name, "label", "very identifiable"), - ), - }, - }, - }) -} - -func TestAccAzureHostedServiceUpdate(t *testing.T) { - name := "azure_hosted_service.foo" - - hostedServiceName := fmt.Sprintf("terraform-testing-service%d", acctest.RandInt()) - - basicConfig := fmt.Sprintf(testAccAzureHostedServiceBasic, hostedServiceName) - updateConfig := fmt.Sprintf(testAccAzureHostedServiceUpdate, hostedServiceName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureHostedServiceDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: basicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureHostedServiceExists(name), - resource.TestCheckResourceAttr(name, "name", hostedServiceName), - resource.TestCheckResourceAttr(name, "location", "North Europe"), - resource.TestCheckResourceAttr(name, "ephemeral_contents", "false"), - resource.TestCheckResourceAttr(name, "description", "very discriptive"), - resource.TestCheckResourceAttr(name, "label", "very identifiable"), - ), - }, - - resource.TestStep{ - Config: updateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureHostedServiceExists(name), - resource.TestCheckResourceAttr(name, "name", hostedServiceName), 
- resource.TestCheckResourceAttr(name, "location", "North Europe"), - resource.TestCheckResourceAttr(name, "ephemeral_contents", "true"), - resource.TestCheckResourceAttr(name, "description", "very discriptive"), - resource.TestCheckResourceAttr(name, "label", "very identifiable"), - ), - }, - }, - }) -} - -func testAccCheckAzureHostedServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Hosted Service resource not found.") - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Resource's ID is not set.") - } - - hostedServiceClient := testAccProvider.Meta().(*Client).hostedServiceClient - _, err := hostedServiceClient.GetHostedService(resource.Primary.ID) - return err - } -} - -func testAccCheckAzureHostedServiceDestroyed(s *terraform.State) error { - hostedServiceClient := testAccProvider.Meta().(*Client).hostedServiceClient - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_hosted_service" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("No Azure Hosted Service Resource found.") - } - - _, err := hostedServiceClient.GetHostedService(resource.Primary.ID) - - return testAccResourceDestroyedErrorFilter("Hosted Service", err) - } - - return nil -} - -const testAccAzureHostedServiceBasic = ` -resource "azure_hosted_service" "foo" { - name = "%s" - location = "North Europe" - ephemeral_contents = false - description = "very discriptive" - label = "very identifiable" -} -` -const testAccAzureHostedServiceUpdate = ` -resource "azure_hosted_service" "foo" { - name = "%s" - location = "North Europe" - ephemeral_contents = true - description = "very discriptive" - label = "very identifiable" -} -` diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go deleted file mode 100644 index d270514e2..000000000 --- 
a/builtin/providers/azure/resource_azure_instance.go +++ /dev/null @@ -1,847 +0,0 @@ -package azure - -import ( - "bytes" - "crypto/sha1" - "encoding/base64" - "encoding/hex" - "fmt" - "log" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/hostedservice" - "github.com/Azure/azure-sdk-for-go/management/osimage" - "github.com/Azure/azure-sdk-for-go/management/virtualmachine" - "github.com/Azure/azure-sdk-for-go/management/virtualmachineimage" - "github.com/Azure/azure-sdk-for-go/management/vmutils" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - linux = "Linux" - windows = "Windows" - storageContainterName = "vhds" - osDiskBlobNameFormat = "%s.vhd" - osDiskBlobStorageURL = "http://%s.blob.core.windows.net/" + storageContainterName + "/" + osDiskBlobNameFormat -) - -func resourceAzureInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureInstanceCreate, - Read: resourceAzureInstanceRead, - Update: resourceAzureInstanceUpdate, - Delete: resourceAzureInstanceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "hosted_service_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - // in order to prevent an unintentional delete of a containing - // hosted service in the case the same name are given to both the - // service and the instance despite their being created separately, - // we must maintain a flag to definitively denote whether this - // instance had a hosted service created for it or not: - "has_dedicated_service": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - 
"image": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "subnet": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "virtual_network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "storage_service_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "reverse_dns": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "automatic_updates": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "time_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ssh_key_thumbprint": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "tcp", - }, - - "public_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "private_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - Set: resourceAzureEndpointHash, - }, - - "security_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "vip_address": 
&schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "domain_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_username": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_ou": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "custom_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: func(v interface{}) string { - if s, ok := v.(string); ok && s != "" { - hash := sha1.Sum([]byte(s)) - return hex.EncodeToString(hash[:]) - } - return "" - }, - }, - }, - } -} - -func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err error) { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - hostedServiceClient := azureClient.hostedServiceClient - vmClient := azureClient.vmClient - - name := d.Get("name").(string) - - // Compute/set the description - description := d.Get("description").(string) - if description == "" { - description = name - } - - // Retrieve the needed details of the image - configureForImage, osType, err := retrieveImageDetails( - meta, - d.Get("image").(string), - name, - d.Get("storage_service_name").(string), - ) - if err != nil { - return err - } - - // Verify if we have all required parameters - if err := verifyInstanceParameters(d, osType); err != nil { - return err - } - - var hostedServiceName string - // check if hosted service name parameter was given: - if serviceName, ok := d.GetOk("hosted_service_name"); !ok { - // if not provided; just use the name of the instance to create a new one: - hostedServiceName = name - d.Set("hosted_service_name", hostedServiceName) - d.Set("has_dedicated_service", true) - - p := hostedservice.CreateHostedServiceParameters{ - ServiceName: hostedServiceName, - Label: 
base64.StdEncoding.EncodeToString([]byte(name)), - Description: fmt.Sprintf("Cloud Service created automatically for instance %s", name), - Location: d.Get("location").(string), - ReverseDNSFqdn: d.Get("reverse_dns").(string), - } - - log.Printf("[DEBUG] Creating Cloud Service for instance: %s", name) - err = hostedServiceClient.CreateHostedService(p) - if err != nil { - return fmt.Errorf("Error creating Cloud Service for instance %s: %s", name, err) - } - } else { - // else; use the provided hosted service name: - hostedServiceName = serviceName.(string) - } - - // Create a new role for the instance - role := vmutils.NewVMConfiguration(name, d.Get("size").(string)) - - log.Printf("[DEBUG] Configuring deployment from image...") - err = configureForImage(&role) - if err != nil { - return fmt.Errorf("Error configuring the deployment for %s: %s", name, err) - } - - var customData string - if data, ok := d.GetOk("custom_data"); ok { - data := data.(string) - - // Ensure the custom_data is not double-encoded. - if _, err := base64.StdEncoding.DecodeString(data); err != nil { - customData = base64.StdEncoding.EncodeToString([]byte(data)) - } else { - customData = data - } - } - - if osType == linux { - // This is pretty ugly, but the Azure SDK leaves me no other choice... 
- if tp, ok := d.GetOk("ssh_key_thumbprint"); ok { - err = vmutils.ConfigureForLinux( - &role, - name, - d.Get("username").(string), - d.Get("password").(string), - tp.(string), - ) - } else { - err = vmutils.ConfigureForLinux( - &role, - name, - d.Get("username").(string), - d.Get("password").(string), - ) - } - if err != nil { - return fmt.Errorf("Error configuring %s for Linux: %s", name, err) - } - - if customData != "" { - err = vmutils.ConfigureWithCustomDataForLinux(&role, customData) - if err != nil { - return fmt.Errorf("Error configuring custom data for %s: %s", name, err) - } - } - } - - if osType == windows { - err = vmutils.ConfigureForWindows( - &role, - name, - d.Get("username").(string), - d.Get("password").(string), - d.Get("automatic_updates").(bool), - d.Get("time_zone").(string), - ) - if err != nil { - return fmt.Errorf("Error configuring %s for Windows: %s", name, err) - } - - if domain_name, ok := d.GetOk("domain_name"); ok { - err = vmutils.ConfigureWindowsToJoinDomain( - &role, - d.Get("domain_username").(string), - d.Get("domain_password").(string), - domain_name.(string), - d.Get("domain_ou").(string), - ) - if err != nil { - return fmt.Errorf("Error configuring %s for WindowsToJoinDomain: %s", name, err) - } - } - - if customData != "" { - err = vmutils.ConfigureWithCustomDataForWindows(&role, customData) - if err != nil { - return fmt.Errorf("Error configuring custom data for %s: %s", name, err) - } - } - } - - if s := d.Get("endpoint").(*schema.Set); s.Len() > 0 { - for _, v := range s.List() { - m := v.(map[string]interface{}) - err := vmutils.ConfigureWithExternalPort( - &role, - m["name"].(string), - m["private_port"].(int), - m["public_port"].(int), - endpointProtocol(m["protocol"].(string)), - ) - if err != nil { - return fmt.Errorf( - "Error adding endpoint %s for instance %s: %s", m["name"].(string), name, err) - } - } - } - - if subnet, ok := d.GetOk("subnet"); ok { - err = vmutils.ConfigureWithSubnet(&role, subnet.(string)) - 
if err != nil { - return fmt.Errorf( - "Error associating subnet %s with instance %s: %s", d.Get("subnet").(string), name, err) - } - } - - if sg, ok := d.GetOk("security_group"); ok { - err = vmutils.ConfigureWithSecurityGroup(&role, sg.(string)) - if err != nil { - return fmt.Errorf( - "Error associating security group %s with instance %s: %s", sg.(string), name, err) - } - } - - options := virtualmachine.CreateDeploymentOptions{ - VirtualNetworkName: d.Get("virtual_network").(string), - } - - log.Printf("[DEBUG] Creating the new instance...") - req, err := vmClient.CreateDeployment(role, hostedServiceName, options) - if err != nil { - return fmt.Errorf("Error creating instance %s: %s", name, err) - } - - log.Printf("[DEBUG] Waiting for the new instance to be created...") - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for instance %s to be created: %s", name, err) - } - - d.SetId(name) - - return resourceAzureInstanceRead(d, meta) -} - -func resourceAzureInstanceRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - hostedServiceClient := azureClient.hostedServiceClient - vmClient := azureClient.vmClient - - name := d.Get("name").(string) - - // check if the instance belongs to an independent hosted service - // or it had one created for it. 
- var hostedServiceName string - if serviceName, ok := d.GetOk("hosted_service_name"); ok { - // if independent; use that hosted service name: - hostedServiceName = serviceName.(string) - } else { - // else; suppose it's the instance's name: - hostedServiceName = name - } - - log.Printf("[DEBUG] Retrieving Cloud Service for instance: %s", name) - cs, err := hostedServiceClient.GetHostedService(hostedServiceName) - if err != nil { - return fmt.Errorf("Error retrieving Cloud Service of instance %s (%q): %s", name, hostedServiceName, err) - } - - d.Set("reverse_dns", cs.ReverseDNSFqdn) - d.Set("location", cs.Location) - - log.Printf("[DEBUG] Retrieving instance: %s", name) - dpmt, err := vmClient.GetDeployment(hostedServiceName, name) - if err != nil { - if management.IsResourceNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving instance %s: %s", name, err) - } - - if len(dpmt.RoleList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of roles: %d", name, len(dpmt.RoleList)) - } - - d.Set("size", dpmt.RoleList[0].RoleSize) - - if len(dpmt.RoleInstanceList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of role instances: %d", - name, len(dpmt.RoleInstanceList)) - } - d.Set("ip_address", dpmt.RoleInstanceList[0].IPAddress) - - if len(dpmt.RoleInstanceList[0].InstanceEndpoints) > 0 { - d.Set("vip_address", dpmt.RoleInstanceList[0].InstanceEndpoints[0].Vip) - } - - // Find the network configuration set - for _, c := range dpmt.RoleList[0].ConfigurationSets { - if c.ConfigurationSetType == virtualmachine.ConfigurationSetTypeNetwork { - // Create a new set to hold all configured endpoints - endpoints := &schema.Set{ - F: resourceAzureEndpointHash, - } - - // Loop through all endpoints - for _, ep := range c.InputEndpoints { - endpoint := map[string]interface{}{} - - // Update the values - endpoint["name"] = ep.Name - endpoint["protocol"] = string(ep.Protocol) - endpoint["public_port"] = ep.Port - 
endpoint["private_port"] = ep.LocalPort - endpoints.Add(endpoint) - } - d.Set("endpoint", endpoints) - - // Update the subnet - switch len(c.SubnetNames) { - case 1: - d.Set("subnet", c.SubnetNames[0]) - case 0: - d.Set("subnet", "") - default: - return fmt.Errorf( - "Instance %s has an unexpected number of associated subnets %d", - name, len(dpmt.RoleInstanceList)) - } - - // Update the security group - d.Set("security_group", c.NetworkSecurityGroup) - } - } - - connType := "ssh" - if dpmt.RoleList[0].OSVirtualHardDisk.OS == windows { - connType = "winrm" - } - - // Set the connection info for any configured provisioners - d.SetConnInfo(map[string]string{ - "type": connType, - "host": dpmt.VirtualIPs[0].Address, - "user": d.Get("username").(string), - "password": d.Get("password").(string), - }) - - return nil -} - -func resourceAzureInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - vmClient := azureClient.vmClient - - // First check if anything we can update changed, and if not just return - if !d.HasChange("size") && !d.HasChange("endpoint") && !d.HasChange("security_group") { - return nil - } - - name := d.Get("name").(string) - hostedServiceName := d.Get("hosted_service_name").(string) - - // Get the current role - role, err := vmClient.GetRole(hostedServiceName, name, name) - if err != nil { - return fmt.Errorf("Error retrieving role of instance %s: %s", name, err) - } - - // Verify if we have all required parameters - if err := verifyInstanceParameters(d, role.OSVirtualHardDisk.OS); err != nil { - return err - } - - if d.HasChange("size") { - role.RoleSize = d.Get("size").(string) - } - - if d.HasChange("endpoint") { - _, n := d.GetChange("endpoint") - - // Delete the existing endpoints - for i, c := range role.ConfigurationSets { - if c.ConfigurationSetType == virtualmachine.ConfigurationSetTypeNetwork { - c.InputEndpoints = nil - role.ConfigurationSets[i] = c - } - } - - // And 
add the ones we still want - if s := n.(*schema.Set); s.Len() > 0 { - for _, v := range s.List() { - m := v.(map[string]interface{}) - err := vmutils.ConfigureWithExternalPort( - role, - m["name"].(string), - m["private_port"].(int), - m["public_port"].(int), - endpointProtocol(m["protocol"].(string)), - ) - if err != nil { - return fmt.Errorf( - "Error adding endpoint %s for instance %s: %s", m["name"].(string), name, err) - } - } - } - } - - if d.HasChange("security_group") { - sg := d.Get("security_group").(string) - err := vmutils.ConfigureWithSecurityGroup(role, sg) - if err != nil { - return fmt.Errorf( - "Error associating security group %s with instance %s: %s", sg, name, err) - } - } - - // Update the adjusted role - req, err := vmClient.UpdateRole(hostedServiceName, name, name, *role) - if err != nil { - return fmt.Errorf("Error updating role of instance %s: %s", name, err) - } - - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for role of instance %s to be updated: %s", name, err) - } - - return resourceAzureInstanceRead(d, meta) -} - -func resourceAzureInstanceDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - vmClient := azureClient.vmClient - - name := d.Get("name").(string) - hostedServiceName := d.Get("hosted_service_name").(string) - - log.Printf("[DEBUG] Deleting instance: %s", name) - - // check if the instance had a hosted service created especially for it: - if d.Get("has_dedicated_service").(bool) { - // if so; we must delete the associated hosted service as well: - hostedServiceClient := azureClient.hostedServiceClient - req, err := hostedServiceClient.DeleteHostedService(name, true) - if err != nil { - return fmt.Errorf("Error deleting instance and hosted service %s: %s", name, err) - } - - // Wait until the hosted service and the instance it contains is deleted: - if err := mc.WaitForOperation(req, nil); err != nil { - return 
fmt.Errorf( - "Error waiting for instance %s to be deleted: %s", name, err) - } - } else { - // else; just delete the instance: - reqID, err := vmClient.DeleteDeployment(hostedServiceName, name) - if err != nil { - return fmt.Errorf("Error deleting instance %s off hosted service %s: %s", name, hostedServiceName, err) - } - - // and wait for the deletion: - if err := mc.WaitForOperation(reqID, nil); err != nil { - return fmt.Errorf("Error waiting for intance %s to be deleted off the hosted service %s: %s", - name, hostedServiceName, err) - } - } - - log.Printf("[INFO] Waiting for the deletion of instance '%s''s disk blob.", name) - - // in order to avoid `terraform taint`-like scenarios in which the instance - // is deleted and re-created so fast the previous storage blob which held - // the image doesn't manage to get deleted (despite it being in a - // 'deleting' state) and a lease conflict occurs over it, we must ensure - // the blob got completely deleted as well: - storName := d.Get("storage_service_name").(string) - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - err = resource.Retry(15*time.Minute, func() *resource.RetryError { - container := blobClient.GetContainerReference(storageContainterName) - blobName := fmt.Sprintf(osDiskBlobNameFormat, name) - blob := container.GetBlobReference(blobName) - exists, err := blob.Exists() - if err != nil { - return resource.NonRetryableError(err) - } - - if exists { - return resource.RetryableError( - fmt.Errorf("Instance '%s''s disk storage blob still exists.", name)) - } - - return nil - }) - - return err -} - -func resourceAzureEndpointHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["public_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["private_port"].(int))) - 
- return hashcode.String(buf.String()) -} - -func retrieveImageDetails( - meta interface{}, - label string, - name string, - storage string) (func(*virtualmachine.Role) error, string, error) { - - azureClient := meta.(*Client) - vmImageClient := azureClient.vmImageClient - osImageClient := azureClient.osImageClient - - configureForImage, osType, VMLabels, err := retrieveVMImageDetails(vmImageClient, label) - if err == nil { - return configureForImage, osType, nil - } - - configureForImage, osType, OSLabels, err := retrieveOSImageDetails(osImageClient, label, name, storage) - if err == nil { - return configureForImage, osType, nil - } - - if err == PlatformStorageError { - return nil, "", err - } - - return nil, "", fmt.Errorf("Could not find image with label '%s'. Available images are: %s", - label, strings.Join(append(VMLabels, OSLabels...), ", ")) -} - -func retrieveVMImageDetails( - vmImageClient virtualmachineimage.Client, - label string) (func(*virtualmachine.Role) error, string, []string, error) { - imgs, err := vmImageClient.ListVirtualMachineImages(virtualmachineimage.ListParameters{}) - if err != nil { - return nil, "", nil, fmt.Errorf("Error retrieving image details: %s", err) - } - - var labels []string - for _, img := range imgs.VMImages { - if img.Label == label { - if img.OSDiskConfiguration.OS != linux && img.OSDiskConfiguration.OS != windows { - return nil, "", nil, fmt.Errorf("Unsupported image OS: %s", img.OSDiskConfiguration.OS) - } - - configureForImage := func(role *virtualmachine.Role) error { - return vmutils.ConfigureDeploymentFromPublishedVMImage( - role, - img.Name, - "", - true, - ) - } - - return configureForImage, img.OSDiskConfiguration.OS, nil, nil - } - - labels = append(labels, img.Label) - } - - return nil, "", labels, fmt.Errorf("Could not find image with label '%s'", label) -} - -func retrieveOSImageDetails( - osImageClient osimage.OSImageClient, - label string, - name string, - storage string) (func(*virtualmachine.Role) error, 
string, []string, error) { - - imgs, err := osImageClient.ListOSImages() - if err != nil { - return nil, "", nil, fmt.Errorf("Error retrieving image details: %s", err) - } - - var labels []string - for _, img := range imgs.OSImages { - if img.Label == label { - if img.OS != linux && img.OS != windows { - return nil, "", nil, fmt.Errorf("Unsupported image OS: %s", img.OS) - } - if img.MediaLink == "" { - if storage == "" { - return nil, "", nil, PlatformStorageError - } - img.MediaLink = fmt.Sprintf(osDiskBlobStorageURL, storage, name) - } - - configureForImage := func(role *virtualmachine.Role) error { - return vmutils.ConfigureDeploymentFromPlatformImage( - role, - img.Name, - img.MediaLink, - label, - ) - } - - return configureForImage, img.OS, nil, nil - } - - labels = append(labels, img.Label) - } - - return nil, "", labels, fmt.Errorf("Could not find image with label '%s'", label) -} - -func endpointProtocol(p string) virtualmachine.InputEndpointProtocol { - if p == "tcp" { - return virtualmachine.InputEndpointProtocolTCP - } - - return virtualmachine.InputEndpointProtocolUDP -} - -func verifyInstanceParameters(d *schema.ResourceData, osType string) error { - if osType == linux { - _, pass := d.GetOk("password") - _, key := d.GetOk("ssh_key_thumbprint") - - if !pass && !key { - return fmt.Errorf( - "You must supply a 'password' and/or a 'ssh_key_thumbprint' when using a Linux image") - } - } - - if osType == windows { - if _, ok := d.GetOk("password"); !ok { - return fmt.Errorf("You must supply a 'password' when using a Windows image") - } - - if _, ok := d.GetOk("time_zone"); !ok { - return fmt.Errorf("You must supply a 'time_zone' when using a Windows image") - } - } - - if _, ok := d.GetOk("subnet"); ok { - if _, ok := d.GetOk("virtual_network"); !ok { - return fmt.Errorf("You must also supply a 'virtual_network' when supplying a 'subnet'") - } - } - - if s := d.Get("endpoint").(*schema.Set); s.Len() > 0 { - for _, v := range s.List() { - protocol := 
v.(map[string]interface{})["protocol"].(string) - - if protocol != "tcp" && protocol != "udp" { - return fmt.Errorf( - "Invalid endpoint protocol %s! Valid options are 'tcp' and 'udp'.", protocol) - } - } - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_instance_test.go b/builtin/providers/azure/resource_azure_instance_test.go deleted file mode 100644 index fbe58ad66..000000000 --- a/builtin/providers/azure/resource_azure_instance_test.go +++ /dev/null @@ -1,615 +0,0 @@ -package azure - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualmachine" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() -var instanceName = fmt.Sprintf("terraform-test-%d", randInt) - -func TestAccAzureInstance_basic(t *testing.T) { - var dpmt virtualmachine.DeploymentResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureInstanceDestroyed(""), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureInstance_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureInstanceExists( - "azure_instance.foo", "", &dpmt), - testAccCheckAzureInstanceBasicAttributes(&dpmt), - resource.TestCheckResourceAttr( - "azure_instance.foo", "name", instanceName), - resource.TestCheckResourceAttr( - "azure_instance.foo", "hosted_service_name", instanceName), - resource.TestCheckResourceAttr( - "azure_instance.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.2462817782.public_port", "22"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "custom_data", "0ea0f28b0c42d6bef7d0c7ab4886324feaa8b5e1"), - ), - }, - }, - }) -} - -func 
TestAccAzureInstance_separateHostedService(t *testing.T) { - var dpmt virtualmachine.DeploymentResponse - - hostedServiceName := fmt.Sprintf("terraform-testing-service%d", acctest.RandInt()) - - config := fmt.Sprintf(testAccAzureInstance_separateHostedService, hostedServiceName, instanceName, testAccStorageServiceName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureInstanceDestroyed(hostedServiceName), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureInstanceExists( - "azure_instance.foo", hostedServiceName, &dpmt), - testAccCheckAzureInstanceBasicAttributes(&dpmt), - resource.TestCheckResourceAttr( - "azure_instance.foo", "name", instanceName), - resource.TestCheckResourceAttr( - "azure_instance.foo", "hosted_service_name", hostedServiceName), - resource.TestCheckResourceAttr( - "azure_instance.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.2462817782.public_port", "22"), - ), - }, - }, - }) -} - -func TestAccAzureInstance_advanced(t *testing.T) { - var dpmt virtualmachine.DeploymentResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureInstanceDestroyed(""), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureInstance_advanced, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureInstanceExists( - "azure_instance.foo", "", &dpmt), - testAccCheckAzureInstanceAdvancedAttributes(&dpmt), - resource.TestCheckResourceAttr( - "azure_instance.foo", "name", "terraform-test1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "hosted_service_name", "terraform-test1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "size", "Basic_A1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "subnet", 
"subnet1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "virtual_network", "terraform-vnet-advanced-test"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "security_group", "terraform-security-group1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.1814039778.public_port", "3389"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "custom_data", "04c589e0edaa5ffe185d1e5532e77d1b2ac4b948"), - ), - }, - }, - }) -} - -func TestAccAzureInstance_update(t *testing.T) { - var dpmt virtualmachine.DeploymentResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureInstanceDestroyed(""), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureInstance_advanced, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureInstanceExists( - "azure_instance.foo", "", &dpmt), - testAccCheckAzureInstanceAdvancedAttributes(&dpmt), - resource.TestCheckResourceAttr( - "azure_instance.foo", "name", "terraform-test1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "hosted_service_name", "terraform-test1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "size", "Basic_A1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "subnet", "subnet1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "virtual_network", "terraform-vnet-advanced-test"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "security_group", "terraform-security-group1"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.1814039778.public_port", "3389"), - ), - }, - - resource.TestStep{ - Config: testAccAzureInstance_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureInstanceExists( - "azure_instance.foo", "", &dpmt), - testAccCheckAzureInstanceUpdatedAttributes(&dpmt), - resource.TestCheckResourceAttr( - "azure_instance.foo", "size", "Basic_A2"), - 
resource.TestCheckResourceAttr( - "azure_instance.foo", "security_group", "terraform-security-update-group2"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.1814039778.public_port", "3389"), - resource.TestCheckResourceAttr( - "azure_instance.foo", "endpoint.3713350066.public_port", "5985"), - ), - }, - }, - }) -} - -func testAccCheckAzureInstanceExists( - n string, - hostedServiceName string, - dpmt *virtualmachine.DeploymentResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - // if not hosted service was provided; it means that we expect it - // to be identical with the name of the instance; which is in the ID. - var serviceName string - if hostedServiceName == "" { - serviceName = rs.Primary.ID - } else { - serviceName = hostedServiceName - } - - vmClient := testAccProvider.Meta().(*Client).vmClient - vm, err := vmClient.GetDeployment(serviceName, rs.Primary.ID) - if err != nil { - return err - } - - if vm.Name != rs.Primary.ID { - return fmt.Errorf("Instance not found") - } - - *dpmt = vm - - return nil - } -} - -func testAccCheckAzureInstanceBasicAttributes( - dpmt *virtualmachine.DeploymentResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if dpmt.Name != instanceName { - return fmt.Errorf("Bad name: %s", dpmt.Name) - } - - if len(dpmt.RoleList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of roles: %d", dpmt.Name, len(dpmt.RoleList)) - } - - if dpmt.RoleList[0].RoleSize != "Basic_A1" { - return fmt.Errorf("Bad size: %s", dpmt.RoleList[0].RoleSize) - } - - return nil - } -} - -func testAccCheckAzureInstanceAdvancedAttributes( - dpmt *virtualmachine.DeploymentResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if dpmt.Name != "terraform-test1" { - return 
fmt.Errorf("Bad name: %s", dpmt.Name) - } - - if dpmt.VirtualNetworkName != "terraform-vnet-advanced-test" { - return fmt.Errorf("Bad virtual network: %s", dpmt.VirtualNetworkName) - } - - if len(dpmt.RoleList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of roles: %d", dpmt.Name, len(dpmt.RoleList)) - } - - if dpmt.RoleList[0].RoleSize != "Basic_A1" { - return fmt.Errorf("Bad size: %s", dpmt.RoleList[0].RoleSize) - } - - for _, c := range dpmt.RoleList[0].ConfigurationSets { - if c.ConfigurationSetType == virtualmachine.ConfigurationSetTypeNetwork { - if len(c.InputEndpoints) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of endpoints %d", - dpmt.Name, len(c.InputEndpoints)) - } - - if c.InputEndpoints[0].Name != "RDP" { - return fmt.Errorf("Bad endpoint name: %s", c.InputEndpoints[0].Name) - } - - if c.InputEndpoints[0].Port != 3389 { - return fmt.Errorf("Bad endpoint port: %d", c.InputEndpoints[0].Port) - } - - if len(c.SubnetNames) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of associated subnets %d", - dpmt.Name, len(c.SubnetNames)) - } - - if c.SubnetNames[0] != "subnet1" { - return fmt.Errorf("Bad subnet: %s", c.SubnetNames[0]) - } - - if c.NetworkSecurityGroup != "terraform-security-group1" { - return fmt.Errorf("Bad security group: %s", c.NetworkSecurityGroup) - } - } - } - - return nil - } -} - -func testAccCheckAzureInstanceAdvancedUpdatedAttributes( - dpmt *virtualmachine.DeploymentResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if dpmt.Name != "terraform-test1" { - return fmt.Errorf("Bad name: %s", dpmt.Name) - } - - if dpmt.VirtualNetworkName != "terraform-vnet-update-test" { - return fmt.Errorf("Bad virtual network: %s", dpmt.VirtualNetworkName) - } - - if len(dpmt.RoleList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of roles: %d", dpmt.Name, len(dpmt.RoleList)) - } - - if dpmt.RoleList[0].RoleSize != "Basic_A1" { - return 
fmt.Errorf("Bad size: %s", dpmt.RoleList[0].RoleSize) - } - - for _, c := range dpmt.RoleList[0].ConfigurationSets { - if c.ConfigurationSetType == virtualmachine.ConfigurationSetTypeNetwork { - if len(c.InputEndpoints) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of endpoints %d", - dpmt.Name, len(c.InputEndpoints)) - } - - if c.InputEndpoints[0].Name != "RDP" { - return fmt.Errorf("Bad endpoint name: %s", c.InputEndpoints[0].Name) - } - - if c.InputEndpoints[0].Port != 3389 { - return fmt.Errorf("Bad endpoint port: %d", c.InputEndpoints[0].Port) - } - - if len(c.SubnetNames) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of associated subnets %d", - dpmt.Name, len(c.SubnetNames)) - } - - if c.SubnetNames[0] != "subnet1" { - return fmt.Errorf("Bad subnet: %s", c.SubnetNames[0]) - } - - if c.NetworkSecurityGroup != "terraform-security-group1" { - return fmt.Errorf("Bad security group: %s", c.NetworkSecurityGroup) - } - } - } - - return nil - } -} - -func testAccCheckAzureInstanceUpdatedAttributes( - dpmt *virtualmachine.DeploymentResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if dpmt.Name != "terraform-test1" { - return fmt.Errorf("Bad name: %s", dpmt.Name) - } - - if dpmt.VirtualNetworkName != "terraform-vnet-update-test" { - return fmt.Errorf("Bad virtual network: %s", dpmt.VirtualNetworkName) - } - - if len(dpmt.RoleList) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of roles: %d", dpmt.Name, len(dpmt.RoleList)) - } - - if dpmt.RoleList[0].RoleSize != "Basic_A2" { - return fmt.Errorf("Bad size: %s", dpmt.RoleList[0].RoleSize) - } - - for _, c := range dpmt.RoleList[0].ConfigurationSets { - if c.ConfigurationSetType == virtualmachine.ConfigurationSetTypeNetwork { - if len(c.InputEndpoints) != 2 { - return fmt.Errorf( - "Instance %s has an unexpected number of endpoints %d", - dpmt.Name, len(c.InputEndpoints)) - } - - if c.InputEndpoints[1].Name != "WINRM" { - 
return fmt.Errorf("Bad endpoint name: %s", c.InputEndpoints[1].Name) - } - - if c.InputEndpoints[1].Port != 5985 { - return fmt.Errorf("Bad endpoint port: %d", c.InputEndpoints[1].Port) - } - - if len(c.SubnetNames) != 1 { - return fmt.Errorf( - "Instance %s has an unexpected number of associated subnets %d", - dpmt.Name, len(c.SubnetNames)) - } - - if c.SubnetNames[0] != "subnet1" { - return fmt.Errorf("Bad subnet: %s", c.SubnetNames[0]) - } - - if c.NetworkSecurityGroup != "terraform-security-update-group2" { - return fmt.Errorf("Bad security group: %s", c.NetworkSecurityGroup) - } - } - } - - return nil - } -} - -func testAccCheckAzureInstanceDestroyed(hostedServiceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - hostedServiceClient := testAccProvider.Meta().(*Client).hostedServiceClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azure_instance" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - // if not hosted service was provided; it means that we expect it - // to be identical with the name of the instance; which is in the ID. 
- var serviceName string - if hostedServiceName == "" { - serviceName = rs.Primary.ID - } else { - serviceName = hostedServiceName - } - - _, err := hostedServiceClient.GetHostedService(serviceName) - if err == nil { - return fmt.Errorf("Instance %s still exists", rs.Primary.ID) - } - - if !management.IsResourceNotFoundError(err) { - return err - } - } - - return nil - } -} - -var testAccAzureInstance_basic = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "%s" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" - custom_data = "# Hello world" - - endpoint { - name = "SSH" - protocol = "tcp" - public_port = 22 - private_port = 22 - } -}`, instanceName, testAccStorageServiceName) - -var testAccAzureInstance_separateHostedService = ` -resource "azure_hosted_service" "foo" { - name = "%s" - location = "West US" - ephemeral_contents = true -} - -resource "azure_instance" "foo" { - name = "%s" - hosted_service_name = "${azure_hosted_service.foo.name}" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" - - endpoint { - name = "SSH" - protocol = "tcp" - public_port = 22 - private_port = 22 - } -}` - -var testAccAzureInstance_advanced = fmt.Sprintf(` -resource "azure_virtual_network" "foo" { - name = "terraform-vnet-advanced-test" - address_space = ["10.1.2.0/24"] - location = "West US" - - subnet { - name = "subnet1" - address_prefix = "10.1.2.0/25" - } - - subnet { - name = "subnet2" - address_prefix = "10.1.2.128/25" - } -} - -resource "azure_security_group" "foo" { - name = "terraform-security-group1" - location = "West US" -} - -resource "azure_security_group_rule" "foo" { - name = "rdp" - security_group_names = ["${azure_security_group.foo.name}"] - priority = 101 - source_address_prefix = "*" - source_port_range = "*" - 
destination_address_prefix = "*" - destination_port_range = "3389" - action = "Deny" - type = "Inbound" - protocol = "TCP" -} - -resource "azure_instance" "foo" { - name = "terraform-test1" - image = "Windows Server 2012 R2 Datacenter, April 2017" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - time_zone = "America/Los_Angeles" - subnet = "subnet1" - virtual_network = "${azure_virtual_network.foo.name}" - security_group = "${azure_security_group.foo.name}" - username = "terraform" - password = "Pass!admin123" - custom_data = "IyBIZWxsbyB3b3JsZA==" - - endpoint { - name = "RDP" - protocol = "tcp" - public_port = 3389 - private_port = 3389 - } -}`, testAccStorageServiceName) - -var testAccAzureInstance_update = fmt.Sprintf(` -resource "azure_virtual_network" "foo" { - name = "terraform-vnet-update-test" - address_space = ["10.1.2.0/24"] - location = "West US" - - subnet { - name = "subnet1" - address_prefix = "10.1.2.0/25" - } - - subnet { - name = "subnet2" - address_prefix = "10.1.2.128/25" - } -} - -resource "azure_security_group" "foo" { - name = "terraform-security-group1" - location = "West US" -} - -resource "azure_security_group_rule" "foo" { - name = "rdp" - security_group_names = ["${azure_security_group.foo.name}"] - priority = 101 - source_address_prefix = "*" - source_port_range = "*" - destination_address_prefix = "*" - destination_port_range = "3389" - type = "Inbound" - action = "Deny" - protocol = "TCP" -} - -resource "azure_security_group" "bar" { - name = "terraform-security-update-group2" - location = "West US" -} - -resource "azure_security_group_rule" "bar" { - name = "rdp" - security_group_names = ["${azure_security_group.bar.name}"] - priority = 101 - source_address_prefix = "192.168.0.0/24" - source_port_range = "*" - destination_address_prefix = "*" - destination_port_range = "3389" - type = "Inbound" - action = "Deny" - protocol = "TCP" -} - -resource "azure_instance" "foo" { - name = "terraform-test1" - image = 
"Windows Server 2012 R2 Datacenter, April 2017" - size = "Basic_A2" - storage_service_name = "%s" - location = "West US" - time_zone = "America/Los_Angeles" - subnet = "subnet1" - virtual_network = "${azure_virtual_network.foo.name}" - security_group = "${azure_security_group.bar.name}" - username = "terraform" - password = "Pass!admin123" - - endpoint { - name = "RDP" - protocol = "tcp" - public_port = 3389 - private_port = 3389 - } - - endpoint { - name = "WINRM" - protocol = "tcp" - public_port = 5985 - private_port = 5985 - } -}`, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_local_network.go b/builtin/providers/azure/resource_azure_local_network.go deleted file mode 100644 index 82b451739..000000000 --- a/builtin/providers/azure/resource_azure_local_network.go +++ /dev/null @@ -1,253 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualnetwork" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureLocalNetworkConnetion returns the schema.Resource associated to an -// Azure hosted service. 
-func resourceAzureLocalNetworkConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureLocalNetworkConnectionCreate, - Read: resourceAzureLocalNetworkConnectionRead, - Update: resourceAzureLocalNetworkConnectionUpdate, - Exists: resourceAzureLocalNetworkConnectionExists, - Delete: resourceAzureLocalNetworkConnectionDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "vpn_gateway_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["vpn_gateway_address"], - }, - "address_space_prefixes": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: parameterDescriptions["address_space_prefixes"], - }, - }, - } -} - -// sourceAzureLocalNetworkConnectionCreate issues all the necessary API calls -// to create a virtual network on Azure. 
-func resourceAzureLocalNetworkConnectionCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - if management.IsResourceNotFoundError(err) { - // if no network config exists yet; create a new one now: - netConf = virtualnetwork.NetworkConfiguration{} - } else { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - } - - // get provided configuration: - name := d.Get("name").(string) - vpnGateway := d.Get("vpn_gateway_address").(string) - var prefixes []string - for _, prefix := range d.Get("address_space_prefixes").([]interface{}) { - prefixes = append(prefixes, prefix.(string)) - } - - // add configuration to network config: - netConf.Configuration.LocalNetworkSites = append(netConf.Configuration.LocalNetworkSites, - virtualnetwork.LocalNetworkSite{ - Name: name, - VPNGatewayAddress: vpnGateway, - AddressSpace: virtualnetwork.AddressSpace{ - AddressPrefix: prefixes, - }, - }) - - // send the configuration back to Azure: - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed setting updated network configuration: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Failed updating the network configuration: %s", err) - } - - d.SetId(name) - return nil -} - -// resourceAzureLocalNetworkConnectionRead does all the necessary API calls to -// read the state of our local natwork from Azure. 
-func resourceAzureLocalNetworkConnectionRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - var found bool - name := d.Get("name").(string) - - // browsing for our network config: - for _, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == name { - found = true - d.Set("vpn_gateway_address", lnet.VPNGatewayAddress) - d.Set("address_space_prefixes", lnet.AddressSpace.AddressPrefix) - break - } - } - - // remove the resource from the state of it has been deleted in the meantime: - if !found { - log.Println(fmt.Printf("[INFO] Azure local network '%s' has been deleted remotely. Removimg from Terraform.", name)) - d.SetId("") - } - - return nil -} - -// resourceAzureLocalNetworkConnectionUpdate does all the necessary API calls -// update the settings of our Local Network on Azure. 
-func resourceAzureLocalNetworkConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - name := d.Get("name").(string) - cvpn := d.HasChange("vpn_gateway_address") - cprefixes := d.HasChange("address_space_prefixes") - - var found bool - for i, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == name { - found = true - if cvpn { - netConf.Configuration.LocalNetworkSites[i].VPNGatewayAddress = d.Get("vpn_gateway_address").(string) - } - if cprefixes { - var prefixes []string - for _, prefix := range d.Get("address_space_prefixes").([]interface{}) { - prefixes = append(prefixes, prefix.(string)) - } - netConf.Configuration.LocalNetworkSites[i].AddressSpace.AddressPrefix = prefixes - } - break - } - } - - // remove the resource from the state of it has been deleted in the meantime: - if !found { - log.Println(fmt.Printf("[INFO] Azure local network '%s' has been deleted remotely. 
Removimg from Terraform.", name)) - d.SetId("") - } else if cvpn || cprefixes { - // else, send the configuration back to Azure: - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed setting updated network configuration: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Failed updating the network configuration: %s", err) - } - } - - return nil -} - -// resourceAzureLocalNetworkConnectionExists does all the necessary API calls -// to check if the local network already exists on Azure. -func resourceAzureLocalNetworkConnectionExists(d *schema.ResourceData, meta interface{}) (bool, error) { - vnetClient := meta.(*Client).vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return false, fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - name := d.Get("name") - - for _, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == name { - return true, nil - } - } - - return false, nil -} - -// resourceAzureLocalNetworkConnectionDelete does all the necessary API calls -// to delete a local network off Azure. 
-func resourceAzureLocalNetworkConnectionDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - vnetClient := azureClient.vnetClient - - log.Println("[INFO] Fetching current network configuration from Azure.") - azureClient.vnetMutex.Lock() - defer azureClient.vnetMutex.Unlock() - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err) - } - - name := d.Get("name").(string) - - // search for our local network and remove it if found: - for i, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == name { - netConf.Configuration.LocalNetworkSites = append( - netConf.Configuration.LocalNetworkSites[:i], - netConf.Configuration.LocalNetworkSites[i+1:]..., - ) - break - } - } - - // send the configuration back to Azure: - log.Println("[INFO] Sending updated network configuration back to Azure.") - reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf) - if err != nil { - return fmt.Errorf("Failed setting updated network configuration: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Failed updating the network configuration: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azure/resource_azure_local_network_test.go b/builtin/providers/azure/resource_azure_local_network_test.go deleted file mode 100644 index 18e09de34..000000000 --- a/builtin/providers/azure/resource_azure_local_network_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) { - name := "azure_local_network_connection.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureLocalNetworkConnectionBasic, - Check: resource.ComposeTestCheckFunc( - testAccAzureLocalNetworkConnectionExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"), - resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"), - ), - }, - }, - }) -} - -func TestAccAzureLocalNetworkConnectionUpdate(t *testing.T) { - name := "azure_local_network_connection.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureLocalNetworkConnectionBasic, - Check: resource.ComposeTestCheckFunc( - testAccAzureLocalNetworkConnectionExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"), - resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"), - ), - }, - - resource.TestStep{ - Config: testAccAzureLocalNetworkConnectionUpdate, - Check: resource.ComposeTestCheckFunc( - testAccAzureLocalNetworkConnectionExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"), - resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.14"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.2/30"), - resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.3/30"), - ), 
- }, - }, - }) -} - -// testAccAzureLocalNetworkConnectionExists checks whether the given local network -// connection exists on Azure. -func testAccAzureLocalNetworkConnectionExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Local Network Connection not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Local Network Connection ID not set.") - } - - vnetClient := testAccProvider.Meta().(*Client).vnetClient - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return err - } - - for _, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == resource.Primary.ID { - return nil - } - break - } - - return fmt.Errorf("Local Network Connection not found: %s", name) - } -} - -// testAccAzureLocalNetworkConnectionDestroyed checks whether the local network -// connection has been destroyed on Azure or not. 
-func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error { - vnetClient := testAccProvider.Meta().(*Client).vnetClient - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_local_network_connection" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Local Network Connection ID not set.") - } - - netConf, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - // This is desirable - if there is no network config there can be no gateways - if management.IsResourceNotFoundError(err) { - continue - } - return err - } - - for _, lnet := range netConf.Configuration.LocalNetworkSites { - if lnet.Name == resource.Primary.ID { - return fmt.Errorf("Azure Local Network Connection still exists.") - } - } - } - - return nil -} - -const testAccAzureLocalNetworkConnectionBasic = ` -resource "azure_local_network_connection" "foo" { - name = "terraform-local-network-connection" - vpn_gateway_address = "10.11.12.13" - address_space_prefixes = ["10.10.10.0/31", "10.10.10.1/31"] -} -` - -const testAccAzureLocalNetworkConnectionUpdate = ` -resource "azure_local_network_connection" "foo" { - name = "terraform-local-network-connection" - vpn_gateway_address = "10.11.12.14" - address_space_prefixes = ["10.10.10.2/30", "10.10.10.3/30"] -} -` diff --git a/builtin/providers/azure/resource_azure_security_group.go b/builtin/providers/azure/resource_azure_security_group.go deleted file mode 100644 index 1d066d031..000000000 --- a/builtin/providers/azure/resource_azure_security_group.go +++ /dev/null @@ -1,110 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceAzureSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureSecurityGroupCreate, - Read: resourceAzureSecurityGroupRead, - Delete: resourceAzureSecurityGroupDelete, - - Schema: 
map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceAzureSecurityGroupCreate(d *schema.ResourceData, meta interface{}) (err error) { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - name := d.Get("name").(string) - - // Compute/set the label - label := d.Get("label").(string) - if label == "" { - label = name - } - - req, err := secGroupClient.CreateNetworkSecurityGroup( - name, - label, - d.Get("location").(string), - ) - if err != nil { - return fmt.Errorf("Error creating Network Security Group %s: %s", name, err) - } - - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for Network Security Group %s to be created: %s", name, err) - } - - d.SetId(name) - - return resourceAzureSecurityGroupRead(d, meta) -} - -func resourceAzureSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - secGroupClient := meta.(*Client).secGroupClient - - sg, err := secGroupClient.GetNetworkSecurityGroup(d.Id()) - if err != nil { - if management.IsResourceNotFoundError(err) { - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving Network Security Group %s: %s", d.Id(), err) - } - - d.Set("label", sg.Label) - d.Set("location", sg.Location) - - return nil -} - -func resourceAzureSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - log.Printf("[DEBUG] Deleting Network Security Group: %s", d.Id()) - req, err := secGroupClient.DeleteNetworkSecurityGroup(d.Id()) - if err != nil { - return fmt.Errorf("Error deleting Network 
Security Group %s: %s", d.Id(), err) - } - - // Wait until the network security group is deleted - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for Network Security Group %s to be deleted: %s", d.Id(), err) - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/azure/resource_azure_security_group_rule.go b/builtin/providers/azure/resource_azure_security_group_rule.go deleted file mode 100644 index d404d020b..000000000 --- a/builtin/providers/azure/resource_azure_security_group_rule.go +++ /dev/null @@ -1,310 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - netsecgroup "github.com/Azure/azure-sdk-for-go/management/networksecuritygroup" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureSecurityGroupRule returns the *schema.Resource for -// a network security group rule on Azure. -func resourceAzureSecurityGroupRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureSecurityGroupRuleCreate, - Read: resourceAzureSecurityGroupRuleRead, - Update: resourceAzureSecurityGroupRuleUpdate, - Delete: resourceAzureSecurityGroupRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "security_group_names": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Description: parameterDescriptions["netsecgroup_secgroup_names"], - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_type"], - }, - "priority": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - Description: parameterDescriptions["netsecgroup_priority"], - }, - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: 
parameterDescriptions["netsecgroup_action"], - }, - "source_address_prefix": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_src_addr_prefix"], - }, - "source_port_range": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_src_port_range"], - }, - "destination_address_prefix": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_dest_addr_prefix"], - }, - "destination_port_range": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_dest_port_range"], - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: parameterDescriptions["netsecgroup_protocol"], - }, - }, - } -} - -// resourceAzureSecurityGroupRuleCreate does all the necessary API calls to -// create a new network security group rule on Azure. -func resourceAzureSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - azureClient.secGroupMutex.Lock() - defer azureClient.secGroupMutex.Unlock() - - // create and configure the RuleResponse: - name := d.Get("name").(string) - rule := netsecgroup.RuleRequest{ - Name: name, - Type: netsecgroup.RuleType(d.Get("type").(string)), - Priority: d.Get("priority").(int), - Action: netsecgroup.RuleAction(d.Get("action").(string)), - SourceAddressPrefix: d.Get("source_address_prefix").(string), - SourcePortRange: d.Get("source_port_range").(string), - DestinationAddressPrefix: d.Get("destination_address_prefix").(string), - DestinationPortRange: d.Get("destination_port_range").(string), - Protocol: netsecgroup.RuleProtocol(d.Get("protocol").(string)), - } - - // apply the rule to all the necessary network security groups: - secGroups := 
d.Get("security_group_names").(*schema.Set).List() - for _, sg := range secGroups { - secGroup := sg.(string) - - // send the create request to Azure: - log.Printf("[INFO] Sending Azure security group rule addition request for security group %q.", secGroup) - reqID, err := secGroupClient.SetNetworkSecurityGroupRule( - secGroup, - rule, - ) - if err != nil { - return fmt.Errorf("Error sending Azure network security group rule creation request for security group %q: %s", secGroup, err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error creating Azure network security group rule for security group %q: %s", secGroup, err) - } - } - - d.SetId(name) - return nil -} - -// resourceAzureSecurityGroupRuleRead does all the necessary API calls to -// read the state of a network security group ruke off Azure. -func resourceAzureSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - secGroupClient := azureClient.secGroupClient - - var found bool - name := d.Get("name").(string) - - secGroups := d.Get("security_group_names").(*schema.Set).List() - remaining := schema.NewSet(schema.HashString, nil) - - // for each of our security groups; check for our rule: - for _, sg := range secGroups { - secGroupName := sg.(string) - - // get info on the network security group and check its rules for this one: - log.Printf("[INFO] Sending Azure network security group rule query for security group %s.", secGroupName) - secgroup, err := secGroupClient.GetNetworkSecurityGroup(secGroupName) - if err != nil { - if !management.IsResourceNotFoundError(err) { - return fmt.Errorf("Error issuing network security group rules query for security group %q: %s", secGroupName, err) - } else { - // it meants that the network security group this rule belonged to has - // been deleted; so we skip this iteration: - continue - } - } - - // find our security rule: - for _, rule := range secgroup.Rules { - if rule.Name 
== name { - // note the fact that this rule still apllies to this security group: - found = true - remaining.Add(secGroupName) - - break - } - } - } - - // check to see if there is any security group still having this rule: - if !found { - d.SetId("") - return nil - } - - // now; we must update the set of security groups still having this rule: - d.Set("security_group_names", remaining) - return nil -} - -// resourceAzureSecurityGroupRuleUpdate does all the necessary API calls to -// update the state of a network security group rule off Azure. -func resourceAzureSecurityGroupRuleUpdate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - azureClient.secGroupMutex.Lock() - defer azureClient.secGroupMutex.Unlock() - - var found bool - name := d.Get("name").(string) - newRule := netsecgroup.RuleRequest{ - Name: d.Get("name").(string), - Type: netsecgroup.RuleType(d.Get("type").(string)), - Priority: d.Get("priority").(int), - Action: netsecgroup.RuleAction(d.Get("action").(string)), - SourceAddressPrefix: d.Get("source_address_prefix").(string), - SourcePortRange: d.Get("source_port_range").(string), - DestinationAddressPrefix: d.Get("destination_address_prefix").(string), - DestinationPortRange: d.Get("destination_port_range").(string), - Protocol: netsecgroup.RuleProtocol(d.Get("protocol").(string)), - } - - // iterate over all the security groups that should have this rule and - // update it per security group: - remaining := schema.NewSet(schema.HashString, nil) - secGroupNames := d.Get("security_group_names").(*schema.Set).List() - for _, sg := range secGroupNames { - secGroupName := sg.(string) - - // get info on the network security group and check its rules for this one: - log.Printf("[INFO] Sending Azure network security group rule query for security group %q.", secGroupName) - secgroup, err := 
secGroupClient.GetNetworkSecurityGroup(secGroupName) - if err != nil { - if !management.IsResourceNotFoundError(err) { - return fmt.Errorf("Error issuing network security group rules query: %s", err) - } else { - // it meants that the network security group this rule belonged to has - // been deleted; so we skip this iteration: - continue - } - } - - // try and find our security group rule: - for _, rule := range secgroup.Rules { - if rule.Name == name { - // note the fact that this rule still applies to this security group: - found = true - remaining.Add(secGroupName) - - // and go ahead and update it: - log.Printf("[INFO] Sending Azure network security group rule update request for security group %q.", secGroupName) - reqID, err := secGroupClient.SetNetworkSecurityGroupRule( - secGroupName, - newRule, - ) - if err != nil { - return fmt.Errorf("Error sending Azure network security group rule update request for security group %q: %s", secGroupName, err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error updating Azure network security group rule for security group %q: %s", secGroupName, err) - } - - break - } - } - } - - // check to see if there is any security group still having this rule: - if !found { - d.SetId("") - return nil - } - - // here; we must update the set of security groups still having this rule: - d.Set("security_group_names", remaining) - - return nil -} - -// resourceAzureSecurityGroupRuleDelete does all the necessary API calls to -// delete a network security group rule off Azure. 
-func resourceAzureSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - azureClient.secGroupMutex.Lock() - defer azureClient.secGroupMutex.Unlock() - - name := d.Get("name").(string) - secGroupNames := d.Get("security_group_names").(*schema.Set).List() - for _, sg := range secGroupNames { - secGroupName := sg.(string) - - // get info on the network security group and search for our rule: - log.Printf("[INFO] Sending network security group rule query for security group %q.", secGroupName) - secgroup, err := secGroupClient.GetNetworkSecurityGroup(secGroupName) - if err != nil { - if management.IsResourceNotFoundError(err) { - // it means that this network security group this rule belonged to has - // been deleted; so we need not do anything more here: - continue - } else { - return fmt.Errorf("Error issuing Azure network security group rules query for security group %q: %s", secGroupName, err) - } - } - - // check if the rule has been deleted in the meantime: - for _, rule := range secgroup.Rules { - if rule.Name == name { - // if not; we shall issue the delete: - reqID, err := secGroupClient.DeleteNetworkSecurityGroupRule(secGroupName, name) - if err != nil { - return fmt.Errorf("Error sending network security group rule delete request to Azure: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error deleting network security group rule off Azure: %s", err) - } - } - break - } - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_security_group_rule_test.go b/builtin/providers/azure/resource_azure_security_group_rule_test.go deleted file mode 100644 index ec430f098..000000000 --- a/builtin/providers/azure/resource_azure_security_group_rule_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - 
"github.com/Azure/azure-sdk-for-go/management" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var ( - testAcctestingSecurityGroup1 = fmt.Sprintf("%s-%d", testAccSecurityGroupName, 1) - testAccTestingSecurityGroupHash1 = fmt.Sprintf("%d", schema.HashString(testAcctestingSecurityGroup1)) - - testAcctestingSecurityGroup2 = fmt.Sprintf("%s-%d", testAccSecurityGroupName, 2) - testAccTestingSecurityGroupHash2 = fmt.Sprintf("%d", schema.HashString(testAcctestingSecurityGroup2)) -) - -func TestAccAzureSecurityGroupRuleBasic(t *testing.T) { - name := "azure_security_group_rule.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSecurityGroupRuleDeleted([]string{testAccSecurityGroupName}), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSecurityGroupRuleBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureSecurityGroupRuleExists(name, testAccSecurityGroupName), - resource.TestCheckResourceAttr(name, "name", "terraform-secgroup-rule"), - resource.TestCheckResourceAttr(name, - fmt.Sprintf("security_group_names.%d", schema.HashString(testAccSecurityGroupName)), - testAccSecurityGroupName), - resource.TestCheckResourceAttr(name, "type", "Inbound"), - resource.TestCheckResourceAttr(name, "action", "Deny"), - resource.TestCheckResourceAttr(name, "priority", "200"), - resource.TestCheckResourceAttr(name, "source_address_prefix", "100.0.0.0/32"), - resource.TestCheckResourceAttr(name, "source_port_range", "1000"), - resource.TestCheckResourceAttr(name, "destination_address_prefix", "10.0.0.0/32"), - resource.TestCheckResourceAttr(name, "destination_port_range", "1000"), - resource.TestCheckResourceAttr(name, "protocol", "TCP"), - ), - }, - }, - }) -} - -func TestAccAzureSecurityGroupRuleAdvanced(t *testing.T) { - name := 
"azure_security_group_rule.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSecurityGroupRuleDeleted( - []string{ - testAcctestingSecurityGroup1, - testAcctestingSecurityGroup2, - }, - ), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSecurityGroupRuleAdvancedConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureSecurityGroupRuleExists(name, testAcctestingSecurityGroup1), - testAccCheckAzureSecurityGroupRuleExists(name, testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "name", "terraform-secgroup-rule"), - resource.TestCheckResourceAttr(name, fmt.Sprintf("security_group_names.%s", - testAccTestingSecurityGroupHash1), testAcctestingSecurityGroup1), - resource.TestCheckResourceAttr(name, fmt.Sprintf("security_group_names.%s", - testAccTestingSecurityGroupHash2), testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "type", "Inbound"), - resource.TestCheckResourceAttr(name, "action", "Deny"), - resource.TestCheckResourceAttr(name, "priority", "200"), - resource.TestCheckResourceAttr(name, "source_address_prefix", "100.0.0.0/32"), - resource.TestCheckResourceAttr(name, "source_port_range", "1000"), - resource.TestCheckResourceAttr(name, "destination_address_prefix", "10.0.0.0/32"), - resource.TestCheckResourceAttr(name, "destination_port_range", "1000"), - resource.TestCheckResourceAttr(name, "protocol", "TCP"), - ), - }, - }, - }) -} - -func TestAccAzureSecurityGroupRuleUpdate(t *testing.T) { - name := "azure_security_group_rule.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSecurityGroupRuleDeleted( - []string{ - testAcctestingSecurityGroup1, - testAcctestingSecurityGroup2, - }, - ), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSecurityGroupRuleAdvancedConfig, - 
Check: resource.ComposeTestCheckFunc( - testAccCheckAzureSecurityGroupRuleExists(name, testAcctestingSecurityGroup1), - testAccCheckAzureSecurityGroupRuleExists(name, testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "name", "terraform-secgroup-rule"), - resource.TestCheckResourceAttr(name, fmt.Sprintf("security_group_names.%s", - testAccTestingSecurityGroupHash1), testAcctestingSecurityGroup1), - resource.TestCheckResourceAttr(name, fmt.Sprintf("security_group_names.%s", - testAccTestingSecurityGroupHash2), testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "type", "Inbound"), - resource.TestCheckResourceAttr(name, "action", "Deny"), - resource.TestCheckResourceAttr(name, "priority", "200"), - resource.TestCheckResourceAttr(name, "source_address_prefix", "100.0.0.0/32"), - resource.TestCheckResourceAttr(name, "source_port_range", "1000"), - resource.TestCheckResourceAttr(name, "destination_address_prefix", "10.0.0.0/32"), - resource.TestCheckResourceAttr(name, "destination_port_range", "1000"), - resource.TestCheckResourceAttr(name, "protocol", "TCP"), - ), - }, - - resource.TestStep{ - Config: testAccAzureSecurityGroupRuleUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureSecurityGroupRuleExists(name, testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "name", "terraform-secgroup-rule"), - resource.TestCheckResourceAttr(name, fmt.Sprintf("security_group_names.%s", - testAccTestingSecurityGroupHash2), testAcctestingSecurityGroup2), - resource.TestCheckResourceAttr(name, "type", "Outbound"), - resource.TestCheckResourceAttr(name, "action", "Allow"), - resource.TestCheckResourceAttr(name, "priority", "100"), - resource.TestCheckResourceAttr(name, "source_address_prefix", "101.0.0.0/32"), - resource.TestCheckResourceAttr(name, "source_port_range", "1000"), - resource.TestCheckResourceAttr(name, "destination_address_prefix", "10.0.0.0/32"), - resource.TestCheckResourceAttr(name, 
"destination_port_range", "1001"), - resource.TestCheckResourceAttr(name, "protocol", "UDP"), - ), - }, - }, - }) -} - -func testAccCheckAzureSecurityGroupRuleExists(name, groupName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure security group rule not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure network security group rule ID not set: %s", name) - } - - secGroupClient := testAccProvider.Meta().(*Client).secGroupClient - - secGroup, err := secGroupClient.GetNetworkSecurityGroup(groupName) - if err != nil { - return fmt.Errorf("Failed getting network security group details for %q: %s", groupName, err) - } - - for _, rule := range secGroup.Rules { - if rule.Name == resource.Primary.ID { - return nil - } - } - - return fmt.Errorf("Azure security group rule doesn't exist: %s", name) - } -} - -func testAccCheckAzureSecurityGroupRuleDeleted(groups []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_security_group_rule" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure network security group ID not set.") - } - - secGroupClient := testAccProvider.Meta().(*Client).secGroupClient - - for _, groupName := range groups { - secGroup, err := secGroupClient.GetNetworkSecurityGroup(groupName) - if err != nil { - if !management.IsResourceNotFoundError(err) { - return fmt.Errorf("Failed getting network security group details for %q: %s", groupName, err) - } - } - - for _, rule := range secGroup.Rules { - if rule.Name == resource.Primary.ID { - return fmt.Errorf("Azure network security group rule still exists!") - } - } - } - } - - return nil - } -} - -var testAccAzureSecurityGroupRuleBasicConfig = testAccAzureSecurityGroupConfig + ` -resource "azure_security_group_rule" "foo" { - name = 
"terraform-secgroup-rule" - security_group_names = ["${azure_security_group.foo.name}"] - type = "Inbound" - action = "Deny" - priority = 200 - source_address_prefix = "100.0.0.0/32" - source_port_range = "1000" - destination_address_prefix = "10.0.0.0/32" - destination_port_range = "1000" - protocol = "TCP" -} -` -var testAccAzureSecurityGroupRuleAdvancedConfig = fmt.Sprintf(testAccAzureSecurityGroupConfigTemplate, "foo", testAcctestingSecurityGroup1) + - fmt.Sprintf(testAccAzureSecurityGroupConfigTemplate, "bar", testAcctestingSecurityGroup2) + ` -resource "azure_security_group_rule" "foo" { - name = "terraform-secgroup-rule" - security_group_names = ["${azure_security_group.foo.name}", "${azure_security_group.bar.name}"] - type = "Inbound" - action = "Deny" - priority = 200 - source_address_prefix = "100.0.0.0/32" - source_port_range = "1000" - destination_address_prefix = "10.0.0.0/32" - destination_port_range = "1000" - protocol = "TCP" -} -` - -var testAccAzureSecurityGroupRuleUpdateConfig = fmt.Sprintf(testAccAzureSecurityGroupConfigTemplate, "foo", testAcctestingSecurityGroup1) + - fmt.Sprintf(testAccAzureSecurityGroupConfigTemplate, "bar", testAcctestingSecurityGroup2) + ` -resource "azure_security_group_rule" "foo" { - name = "terraform-secgroup-rule" - security_group_names = ["${azure_security_group.bar.name}"] - type = "Outbound" - action = "Allow" - priority = 100 - source_address_prefix = "101.0.0.0/32" - source_port_range = "1000" - destination_address_prefix = "10.0.0.0/32" - destination_port_range = "1001" - protocol = "UDP" -} -` diff --git a/builtin/providers/azure/resource_azure_security_group_test.go b/builtin/providers/azure/resource_azure_security_group_test.go deleted file mode 100644 index 3d86d0873..000000000 --- a/builtin/providers/azure/resource_azure_security_group_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - 
"github.com/Azure/azure-sdk-for-go/management/networksecuritygroup" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureSecurityGroup_basic(t *testing.T) { - var group networksecuritygroup.SecurityGroupResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureSecurityGroupExists( - "azure_security_group.foo", &group), - resource.TestCheckResourceAttr( - "azure_security_group.foo", "name", "terraform-security-group"), - resource.TestCheckResourceAttr( - "azure_security_group.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_security_group.foo", "label", "terraform testing security group"), - ), - }, - }, - }) -} - -func testAccCheckAzureSecurityGroupExists( - n string, - group *networksecuritygroup.SecurityGroupResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Network Security Group ID is set") - } - - secGroupClient := testAccProvider.Meta().(*Client).secGroupClient - sg, err := secGroupClient.GetNetworkSecurityGroup(rs.Primary.ID) - if err != nil { - return err - } - - if sg.Name != rs.Primary.ID { - return fmt.Errorf("Security Group not found") - } - - *group = sg - - return nil - } -} - -func testAccCheckAzureSecurityGroupDestroy(s *terraform.State) error { - secGroupClient := testAccProvider.Meta().(*Client).secGroupClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azure_security_group" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Network Security Group ID is set") - } - - _, err := 
secGroupClient.GetNetworkSecurityGroup(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Network Security Group %s still exists", rs.Primary.ID) - } - - if !management.IsResourceNotFoundError(err) { - return err - } - } - - return nil -} - -const testAccAzureSecurityGroupConfigTemplate = ` -resource "azure_security_group" "%s" { - name = "%s" - location = "West US" - label = "terraform testing security group" -}` - -var testAccAzureSecurityGroupConfig = fmt.Sprintf( - testAccAzureSecurityGroupConfigTemplate, - "foo", "terraform-security-group", -) diff --git a/builtin/providers/azure/resource_azure_sql_database_server.go b/builtin/providers/azure/resource_azure_sql_database_server.go deleted file mode 100644 index b3412b7b3..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_server.go +++ /dev/null @@ -1,118 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management/sql" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureDatabaseServer returns the *schema.Resource associated -// to a database server on Azure. 
-func resourceAzureSqlDatabaseServer() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureSqlDatabaseServerCreate, - Read: resourceAzureSqlDatabaseServerRead, - Delete: resourceAzureSqlDatabaseServerDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: true, - }, - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "12.0", - ForceNew: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// resourceAzureSqlDatabaseServerCreate does all the necessary API calls to -// create an SQL database server off Azure. -func resourceAzureSqlDatabaseServerCreate(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Began constructing SQL Server creation request.") - params := sql.DatabaseServerCreateParams{ - Location: d.Get("location").(string), - AdministratorLogin: d.Get("username").(string), - AdministratorLoginPassword: d.Get("password").(string), - Version: d.Get("version").(string), - } - - log.Println("[INFO] Issuing SQL Server creation request to Azure.") - name, err := sqlClient.CreateServer(params) - if err != nil { - return fmt.Errorf("Error creating SQL Server on Azure: %s", err) - } - - d.Set("name", name) - - d.SetId(name) - return resourceAzureSqlDatabaseServerRead(d, meta) -} - -// resourceAzureSqlDatabaseServerRead does all the necessary API calls to -// read the state of the SQL database server off Azure. 
-func resourceAzureSqlDatabaseServerRead(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Sending SQL Servers list query to Azure.") - srvList, err := sqlClient.ListServers() - if err != nil { - return fmt.Errorf("Error issuing SQL Servers list query to Azure: %s", err) - } - - // search for our particular server: - name := d.Get("name") - for _, srv := range srvList.DatabaseServers { - if srv.Name == name { - d.Set("url", srv.FullyQualifiedDomainName) - d.Set("state", srv.State) - return nil - } - } - - // if reached here; it means out server doesn't exist, so we must untrack it: - d.SetId("") - return nil -} - -// resourceAzureSqlDatabaseServerDelete does all the necessary API calls to -// delete the SQL database server off Azure. -func resourceAzureSqlDatabaseServerDelete(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Sending SQL Server deletion request to Azure.") - name := d.Get("name").(string) - err := sqlClient.DeleteServer(name) - if err != nil { - return fmt.Errorf("Error while issuing SQL Server deletion request to Azure: %s", err) - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go deleted file mode 100644 index 06df80ce1..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go +++ /dev/null @@ -1,225 +0,0 @@ -package azure - -import ( - "fmt" - "log" - "strings" - - "github.com/Azure/azure-sdk-for-go/management/sql" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureSqlDatabaseServerFirewallRule returns the *schema.Resource -// associated to a firewall rule of a database server in Azure. 
-func resourceAzureSqlDatabaseServerFirewallRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureSqlDatabaseServerFirewallRuleCreate, - Read: resourceAzureSqlDatabaseServerFirewallRuleRead, - Update: resourceAzureSqlDatabaseServerFirewallRuleUpdate, - Delete: resourceAzureSqlDatabaseServerFirewallRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "database_server_names": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "start_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "end_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -// resourceAzureSqlDatabaseServerFirewallRuleCreate does all the necessary API -// calls to create the SQL Database Server Firewall Rule on Azure. -func resourceAzureSqlDatabaseServerFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - name := d.Get("name").(string) - params := sql.FirewallRuleCreateParams{ - Name: name, - StartIPAddress: d.Get("start_ip").(string), - EndIPAddress: d.Get("end_ip").(string), - } - - // loop over all the database servers and apply the firewall rule to each: - serverNames := d.Get("database_server_names").(*schema.Set).List() - for _, srv := range serverNames { - serverName := srv.(string) - - log.Printf("[INFO] Sending Azure Database Server Firewall Rule %q creation request for Server %q.", name, serverName) - if err := sqlClient.CreateFirewallRule(serverName, params); err != nil { - return fmt.Errorf("Error creating Azure Database Server Firewall Rule %q for Server %q: %s", name, serverName, err) - } - } - - d.SetId(name) - return nil -} - -// resourceAzureSqlDatabaseServerFirewallRuleRead does all the necessary API -// calls to read the state of the SQL 
Database Server Firewall Rule on Azure. -func resourceAzureSqlDatabaseServerFirewallRuleRead(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - name := d.Get("name").(string) - remaining := schema.NewSet(schema.HashString, nil) - - // for each of our servers; check to see if the rule is still present: - var found bool - for _, srv := range d.Get("database_server_names").(*schema.Set).List() { - serverName := srv.(string) - - log.Printf("[INFO] Sending Azure Database Server Firewall Rule list query for server %q.", serverName) - rules, err := sqlClient.ListFirewallRules(serverName) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - // it means that the database server this rule belonged to has - // been deleted in the meantime. - continue - } else { - return fmt.Errorf("Error getting Azure Firewall Rules for Database Server %q: %s", serverName, err) - } - - } - - // look for our rule: - for _, rule := range rules.FirewallRules { - if rule.Name == name { - found = true - remaining.Add(serverName) - - break - } - } - } - - // check to see if there is still any Database Server still having this rule: - if !found { - d.SetId("") - return nil - } - - // else; update the list of Database Servers still having this rule: - d.Set("database_server_names", remaining) - return nil -} - -// resourceAzureSqlDatabaseServerFirewallRuleUpdate does all the necessary API -// calls to update the state of the SQL Database Server Firewall Rule on Azure. 
-func resourceAzureSqlDatabaseServerFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - var found bool - name := d.Get("name").(string) - updateParams := sql.FirewallRuleUpdateParams{ - Name: name, - StartIPAddress: d.Get("start_ip").(string), - EndIPAddress: d.Get("end_ip").(string), - } - - // for each of the Database Servers our rules concerns; issue the update: - remaining := schema.NewSet(schema.HashString, nil) - for _, srv := range d.Get("database_server_names").(*schema.Set).List() { - serverName := srv.(string) - - log.Printf("[INFO] Issuing Azure Database Server Firewall Rule list for Database Server %q: %s.", name, serverName) - rules, err := sqlClient.ListFirewallRules(serverName) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - // it means that the database server this rule belonged to has - // been deleted in the meantime. - continue - } else { - return fmt.Errorf("Error getting Azure Firewall Rules for Database Server %q: %s", serverName, err) - } - - } - - // look for our rule: - for _, rule := range rules.FirewallRules { - if rule.Name == name { - // take note of the fact that this Database Server still has - // this rule: - found = true - remaining.Add(serverName) - - // go ahead and update the rule: - log.Printf("[INFO] Issuing update of Azure Database Server Firewall Rule %q in Server %q.", name, serverName) - if err := sqlClient.UpdateFirewallRule(serverName, name, updateParams); err != nil { - return fmt.Errorf("Error updating Azure Database Server Firewall Rule %q for Server %q: %s", name, serverName, err) - } - - break - } - } - } - - // check to see if the rule is still exists on any of the servers: - if !found { - d.SetId("") - return nil - } - - // else; update the list with the remaining Servers: - d.Set("database_server_names", remaining) - return nil -} - -// resourceAzureSqlDatabaseServerFirewallRuleDelete does all the necessary API -// calls to 
delete the SQL Database Server Firewall Rule on Azure. -func resourceAzureSqlDatabaseServerFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - name := d.Get("name").(string) - for _, srv := range d.Get("database_server_names").(*schema.Set).List() { - serverName := srv.(string) - - log.Printf("[INFO] Sending Azure Database Server Firewall Rule list query for Server %q.", serverName) - rules, err := sqlClient.ListFirewallRules(serverName) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - // it means that the database server this rule belonged to has - // been deleted in the meantime. - continue - } else { - return fmt.Errorf("Error getting Azure Firewall Rules for Database Server %q: %s", serverName, err) - } - - } - - // look for our rule: - for _, rule := range rules.FirewallRules { - if rule.Name == name { - // go ahead and delete the rule: - log.Printf("[INFO] Issuing deletion of Azure Database Server Firewall Rule %q in Server %q.", name, serverName) - if err := sqlClient.DeleteFirewallRule(serverName, name); err != nil { - if strings.Contains(err.Error(), "Cannot open server") { - break - } - return fmt.Errorf("Error deleting Azure Database Server Firewall Rule %q for Server %q: %s", name, serverName, err) - } - - break - } - } - - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go deleted file mode 100644 index c2bcbebbf..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package azure - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/management/sql" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureSqlDatabaseServerFirewallRuleBasic(t 
*testing.T) { - name := "azure_sql_database_server_firewall_rule.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureDatabaseServerFirewallRuleDeleted(testAccAzureSqlServerNames), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureDatabaseServerFirewallRuleBasicConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetNames, - testAccAzureSqlDatabaseServersNumber(1), - testAccAzureDatabaseServerFirewallRuleExists(name, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-rule"), - resource.TestCheckResourceAttr(name, "start_ip", "10.0.0.0"), - resource.TestCheckResourceAttr(name, "end_ip", "10.0.0.255"), - ), - }, - }, - }) -} - -func TestAccAzureSqlDatabaseServerFirewallRuleAdvanced(t *testing.T) { - name1 := "azure_sql_database_server_firewall_rule.foo" - name2 := "azure_sql_database_server_firewall_rule.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureDatabaseServerFirewallRuleDeleted(testAccAzureSqlServerNames), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureDatabaseServerFirewallRuleAdvancedConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetNames, - testAccAzureSqlDatabaseServersNumber(2), - //testAccAzureDatabaseServerFirewallRuleExists(name1, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name1, "name", "terraform-testing-rule1"), - resource.TestCheckResourceAttr(name1, "start_ip", "10.0.0.0"), - resource.TestCheckResourceAttr(name1, "end_ip", "10.0.0.255"), - //testAccAzureDatabaseServerFirewallRuleExists(name2, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name2, "name", "terraform-testing-rule2"), - resource.TestCheckResourceAttr(name2, "start_ip", "200.0.0.0"), - 
resource.TestCheckResourceAttr(name2, "end_ip", "200.255.255.255"), - ), - }, - }, - }) -} - -func TestAccAzureSqlDatabaseServerFirewallRuleUpdate(t *testing.T) { - name1 := "azure_sql_database_server_firewall_rule.foo" - name2 := "azure_sql_database_server_firewall_rule.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureDatabaseServerFirewallRuleDeleted(testAccAzureSqlServerNames), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureDatabaseServerFirewallRuleAdvancedConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetNames, - testAccAzureSqlDatabaseServersNumber(2), - //testAccAzureDatabaseServerFirewallRuleExists(name1, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name1, "name", "terraform-testing-rule1"), - resource.TestCheckResourceAttr(name1, "start_ip", "10.0.0.0"), - resource.TestCheckResourceAttr(name1, "end_ip", "10.0.0.255"), - //testAccAzureDatabaseServerFirewallRuleExists(name2, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name2, "name", "terraform-testing-rule2"), - resource.TestCheckResourceAttr(name2, "start_ip", "200.0.0.0"), - resource.TestCheckResourceAttr(name2, "end_ip", "200.255.255.255"), - ), - }, - resource.TestStep{ - Config: testAccAzureDatabaseServerFirewallRuleUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetNames, - testAccAzureSqlDatabaseServersNumber(2), - //testAccAzureDatabaseServerFirewallRuleExists(name1, testAccAzureSqlServerNames), - resource.TestCheckResourceAttr(name1, "name", "terraform-testing-rule1"), - resource.TestCheckResourceAttr(name1, "start_ip", "11.0.0.0"), - resource.TestCheckResourceAttr(name1, "end_ip", "11.0.0.255"), - ), - }, - }, - }) -} - -func testAccAzureDatabaseServerFirewallRuleExists(name string, servers []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - res, 
ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exist.", name) - } - - if res.Primary.ID == "" { - return fmt.Errorf("Azure Database Server Firewall Rule %q res ID not set.", name) - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - - for _, server := range servers { - var rules sql.ListFirewallRulesResponse - - err := resource.Retry(15*time.Minute, func() *resource.RetryError { - var erri error - rules, erri = sqlClient.ListFirewallRules(server) - if erri != nil { - return resource.RetryableError( - fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, erri)) - } - - return nil - }) - if err != nil { - return err - } - - var found bool - for _, rule := range rules.FirewallRules { - if rule.Name == res.Primary.ID { - found = true - break - } - } - if !found { - return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exists on server %q.", res.Primary.ID, server) - } - } - - return nil - } -} - -func testAccAzureDatabaseServerFirewallRuleDeleted(servers []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_sql_database_server_firewall_rule" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Database Server Firewall Rule resource ID not set.") - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - - for _, server := range servers { - rules, err := sqlClient.ListFirewallRules(server) - if err != nil { - // ¯\_(ツ)_/¯ - if strings.Contains(err.Error(), "Cannot open server") { - return nil - } - return fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, err) - } - - for _, rule := range rules.FirewallRules { - if rule.Name == resource.Primary.ID { - return fmt.Errorf("Azure Database Server Firewall Rule %q still exists on Server %q.", resource.Primary.ID, err) - } 
- } - } - } - - return nil - } -} - -var testAccAzureDatabaseServerFirewallRuleBasicConfig = ` -resource "azure_sql_database_server" "foo" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} - -resource "azure_sql_database_server_firewall_rule" "foo" { - name = "terraform-testing-rule" - depends_on = ["azure_sql_database_server.foo"] - start_ip = "10.0.0.0" - end_ip = "10.0.0.255" - database_server_names = ["${azure_sql_database_server.foo.name}"] -} -` - -var testAccAzureDatabaseServerFirewallRuleAdvancedConfig = ` -resource "azure_sql_database_server" "foo" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} - -resource "azure_sql_database_server" "bar" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} - -resource "azure_sql_database_server_firewall_rule" "foo" { - name = "terraform-testing-rule1" - start_ip = "10.0.0.0" - end_ip = "10.0.0.255" - database_server_names = ["${azure_sql_database_server.foo.name}", "${azure_sql_database_server.bar.name}"] -} - -resource "azure_sql_database_server_firewall_rule" "bar" { - name = "terraform-testing-rule2" - start_ip = "200.0.0.0" - end_ip = "200.255.255.255" - database_server_names = ["${azure_sql_database_server.foo.name}", "${azure_sql_database_server.bar.name}"] -} -` - -var testAccAzureDatabaseServerFirewallRuleUpdateConfig = ` -resource "azure_sql_database_server" "foo" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} - -resource "azure_sql_database_server" "bar" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} - -resource "azure_sql_database_server_firewall_rule" "foo" { - name = "terraform-testing-rule1" - start_ip = "11.0.0.0" - end_ip = "11.0.0.255" - database_server_names = ["${azure_sql_database_server.foo.name}"] -} -` diff --git 
a/builtin/providers/azure/resource_azure_sql_database_server_test.go b/builtin/providers/azure/resource_azure_sql_database_server_test.go deleted file mode 100644 index d2789712b..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_server_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// testAccAzureSqlServerName is a helper variable in which to store -// the randomly-generated name of the SQL Server after it is created. -// The anonymous function is there because go is too good to &"" directly. -var testAccAzureSqlServerName *string = func(s string) *string { return &s }("") -var testAccAzureSqlServerNames []string = []string{} - -func TestAccAzureSqlDatabaseServer(t *testing.T) { - name := "azure_sql_database_server.foo" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSqlDatabaseServerDeleted, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSqlDatabaseServerConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetName, - testAccCheckAzureSqlDatabaseServerExists(name), - resource.TestCheckResourceAttrPtr(name, "name", testAccAzureSqlServerName), - resource.TestCheckResourceAttr(name, "username", "SuperUser"), - resource.TestCheckResourceAttr(name, "password", "SuperSEKR3T"), - resource.TestCheckResourceAttr(name, "version", "12.0"), - ), - }, - }, - }) -} - -func testAccCheckAzureSqlDatabaseServerExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("SQL Server %s doesn't exist.", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("SQL Server %s resource ID not set.", name) - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - 
servers, err := sqlClient.ListServers() - if err != nil { - return fmt.Errorf("Error issuing Azure SQL Server list request: %s", err) - } - - for _, srv := range servers.DatabaseServers { - if srv.Name == resource.Primary.ID { - return nil - } - } - - return fmt.Errorf("SQL Server %s doesn't exist.", name) - } -} - -func testAccCheckAzureSqlDatabaseServerDeleted(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_sql_database_server" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("SQL Server resource ID not set.") - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - servers, err := sqlClient.ListServers() - if err != nil { - return fmt.Errorf("Error issuing Azure SQL Server list request: %s", err) - } - - for _, srv := range servers.DatabaseServers { - if srv.Name == resource.Primary.ID { - return fmt.Errorf("SQL Server %s still exists.", resource.Primary.ID) - } - } - } - return nil -} - -// testAccAzureSqlDatabaseServerGetName is ahelper function which reads the current -// state form Terraform and sets the testAccAzureSqlServerName variable -// to the ID (which is actually the name) of the newly created server. -// It is modeled as a resource.TestCheckFunc so as to be easily-embeddable in -// test cases and run live. -func testAccAzureSqlDatabaseServerGetName(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_sql_database_server" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure SQL Server resource ID not set.") - } - - *testAccAzureSqlServerName = resource.Primary.ID - return nil - } - - return fmt.Errorf("No Azure SQL Servers found.") -} - -// testAccAzureSqlDatabaseServerGetNames is the same as the above; only it gets -// all the servers' names. 
-func testAccAzureSqlDatabaseServerGetNames(s *terraform.State) error { - testAccAzureSqlServerNames = []string{} - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_sql_database_server" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure SQL Server resource ID not set.") - } - - testAccAzureSqlServerNames = append(testAccAzureSqlServerNames, resource.Primary.ID) - } - - if len(testAccAzureSqlServerNames) == 0 { - return fmt.Errorf("No Azure SQL Servers found.") - } - - return nil -} - -// testAccAzureSqlDatabaseServersNumber checks if the numbers of servers is -// exactly equal to the given number. It is modeled as a resource.TestCheckFunc -// to be easily embeddable in test checks. -func testAccAzureSqlDatabaseServersNumber(n int) resource.TestCheckFunc { - return func(_ *terraform.State) error { - if len(testAccAzureSqlServerNames) != n { - return fmt.Errorf("Erroneous number of Azure Sql Database Servers. Expected %d; have %d.", n, - len(testAccAzureSqlServerNames)) - } - - return nil - } -} - -const testAccAzureSqlDatabaseServerConfig = ` -resource "azure_sql_database_server" "foo" { - location = "West US" - username = "SuperUser" - password = "SuperSEKR3T" - version = "12.0" -} -` diff --git a/builtin/providers/azure/resource_azure_sql_database_service.go b/builtin/providers/azure/resource_azure_sql_database_service.go deleted file mode 100644 index c3c617048..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_service.go +++ /dev/null @@ -1,234 +0,0 @@ -package azure - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/Azure/azure-sdk-for-go/management/sql" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureSqlDatabaseService returns the *schema.Resource -// associated to an SQL Database Service on Azure. 
-func resourceAzureSqlDatabaseService() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureSqlDatabaseServiceCreate, - Read: resourceAzureSqlDatabaseServiceRead, - Update: resourceAzureSqlDatabaseServiceUpdate, - Exists: resourceAzureSqlDatabaseServiceExists, - Delete: resourceAzureSqlDatabaseServiceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "database_server_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "collation": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "edition": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "max_size_bytes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "service_level_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -// resourceAzureSqlDatabaseServiceCreate does all the necessary API calls to -// create an SQL Database Service on Azure. 
-func resourceAzureSqlDatabaseServiceCreate(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Creating Azure SQL Database service creation request.") - name := d.Get("name").(string) - serverName := d.Get("database_server_name").(string) - params := sql.DatabaseCreateParams{ - Name: name, - Edition: d.Get("edition").(string), - CollationName: d.Get("collation").(string), - ServiceObjectiveID: d.Get("service_level_id").(string), - } - - if maxSize, ok := d.GetOk("max_size_bytes"); ok { - val, err := strconv.ParseInt(maxSize.(string), 10, 64) - if err != nil { - return fmt.Errorf("Provided max_size_bytes is not an integer: %s", err) - } - params.MaxSizeBytes = val - } - - log.Println("[INFO] Sending SQL Database Service creation request to Azure.") - err := sqlClient.CreateDatabase(serverName, params) - if err != nil { - return fmt.Errorf("Error issuing Azure SQL Database Service creation: %s", err) - } - - log.Println("[INFO] Beginning wait for Azure SQL Database Service creation.") - err = sqlClient.WaitForDatabaseCreation(serverName, name, nil) - if err != nil { - return fmt.Errorf("Error whilst waiting for Azure SQL Database Service creation: %s", err) - } - - d.SetId(name) - - return resourceAzureSqlDatabaseServiceRead(d, meta) -} - -// resourceAzureSqlDatabaseServiceRead does all the necessary API calls to -// read the state of the SQL Database Service off Azure. 
-func resourceAzureSqlDatabaseServiceRead(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Issuing Azure SQL Database Services list operation.") - serverName := d.Get("database_server_name").(string) - dbs, err := sqlClient.ListDatabases(serverName) - if err != nil { - return fmt.Errorf("Error whilst listing Database Services off Azure: %s", err) - } - - // search for our database: - var found bool - name := d.Get("name").(string) - for _, db := range dbs.ServiceResources { - if db.Name == name { - found = true - d.Set("edition", db.Edition) - d.Set("collation", db.CollationName) - d.Set("max_size_bytes", strconv.FormatInt(db.MaxSizeBytes, 10)) - d.Set("service_level_id", db.ServiceObjectiveID) - break - } - } - - // if not found; we must untrack the resource: - if !found { - d.SetId("") - } - - return nil -} - -// resourceAzureSqlDatabaseServiceUpdate does all the necessary API calls to -// update the state of the SQL Database Service off Azure. 
-func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - sqlClient := azureClient.sqlClient - serverName := d.Get("database_server_name").(string) - - // changes to the name must occur separately from changes to the attributes: - if d.HasChange("name") { - oldv, newv := d.GetChange("name") - - // issue the update request: - log.Println("[INFO] Issuing Azure Database Service name change.") - reqID, err := sqlClient.UpdateDatabase(serverName, oldv.(string), - sql.ServiceResourceUpdateParams{ - Name: newv.(string), - }) - - // wait for the update to occur: - log.Println("[INFO] Waiting for Azure SQL Database Service name change.") - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error waiting for Azure SQL Database Service name update: %s", err) - } - - // set the new name as the ID: - d.SetId(newv.(string)) - } - - name := d.Get("name").(string) - cedition := d.HasChange("edition") - cmaxsize := d.HasChange("max_size_bytes") - clevel := d.HasChange("service_level_id") - if cedition || cmaxsize || clevel { - updateParams := sql.ServiceResourceUpdateParams{ - // we still have to stick the name in here for good measure: - Name: name, - } - - // build the update request: - if cedition { - updateParams.Edition = d.Get("edition").(string) - } - if maxSize, ok := d.GetOk("max_size_bytes"); cmaxsize && ok && maxSize.(string) != "" { - val, err := strconv.ParseInt(maxSize.(string), 10, 64) - if err != nil { - return fmt.Errorf("Provided max_size_bytes is not an integer: %s", err) - } - updateParams.MaxSizeBytes = val - } - if clevel { - updateParams.ServiceObjectiveID = d.Get("service_level_id").(string) - } - - // issue the update: - log.Println("[INFO] Issuing Azure Database Service parameter update.") - reqID, err := sqlClient.UpdateDatabase(serverName, name, updateParams) - if err != nil { - return fmt.Errorf("Failed issuing 
Azure SQL Service parameter update: %s", err) - } - - log.Println("[INFO] Waiting for Azure SQL Database Service parameter update.") - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error waiting for Azure SQL Database Service parameter update: %s", err) - } - } - - return nil -} - -// resourceAzureSqlDatabaseServiceExists does all the necessary API calls to -// check for the existence of the SQL Database Service off Azure. -func resourceAzureSqlDatabaseServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Issuing Azure SQL Database Service get request.") - name := d.Get("name").(string) - serverName := d.Get("database_server_name").(string) - _, err := sqlClient.GetDatabase(serverName, name) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - d.SetId("") - return false, nil - } else { - return false, fmt.Errorf("Error whilst getting Azure SQL Database Service info: %s", err) - } - } - - return true, nil -} - -// resourceAzureSqlDatabaseServiceDelete does all the necessary API calls to -// delete the SQL Database Service off Azure. 
-func resourceAzureSqlDatabaseServiceDelete(d *schema.ResourceData, meta interface{}) error { - sqlClient := meta.(*Client).sqlClient - - log.Println("[INFO] Issuing Azure SQL Database deletion request.") - name := d.Get("name").(string) - serverName := d.Get("database_server_name").(string) - return sqlClient.DeleteDatabase(serverName, name) -} diff --git a/builtin/providers/azure/resource_azure_sql_database_service_test.go b/builtin/providers/azure/resource_azure_sql_database_service_test.go deleted file mode 100644 index f368abf68..000000000 --- a/builtin/providers/azure/resource_azure_sql_database_service_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package azure - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureSqlDatabaseServiceBasic(t *testing.T) { - name := "azure_sql_database_service.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSqlDatabaseServiceConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetName, - testAccCheckAzureSqlDatabaseServiceExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"), - resource.TestCheckResourceAttrPtr(name, "database_server_name", - testAccAzureSqlServerName), - resource.TestCheckResourceAttr(name, "collation", - "SQL_Latin1_General_CP1_CI_AS"), - resource.TestCheckResourceAttr(name, "edition", "Standard"), - ), - }, - }, - }) -} - -func TestAccAzureSqlDatabaseServiceAdvanced(t *testing.T) { - name := "azure_sql_database_service.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccAzureSqlDatabaseServiceConfigAdvanced, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetName, - testAccCheckAzureSqlDatabaseServiceExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"), - resource.TestCheckResourceAttrPtr(name, "database_server_name", - testAccAzureSqlServerName), - resource.TestCheckResourceAttr(name, "edition", "Premium"), - resource.TestCheckResourceAttr(name, "collation", - "Arabic_BIN"), - resource.TestCheckResourceAttr(name, "max_size_bytes", "10737418240"), - resource.TestCheckResourceAttr(name, "service_level_id", - "7203483a-c4fb-4304-9e9f-17c71c904f5d"), - ), - }, - }, - }) -} - -func TestAccAzureSqlDatabaseServiceUpdate(t *testing.T) { - name := "azure_sql_database_service.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureSqlDatabaseServiceConfigAdvanced, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetName, - testAccCheckAzureSqlDatabaseServiceExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"), - resource.TestCheckResourceAttrPtr(name, "database_server_name", - testAccAzureSqlServerName), - resource.TestCheckResourceAttr(name, "edition", "Premium"), - resource.TestCheckResourceAttr(name, "collation", - "Arabic_BIN"), - resource.TestCheckResourceAttr(name, "max_size_bytes", "10737418240"), - resource.TestCheckResourceAttr(name, "service_level_id", - "7203483a-c4fb-4304-9e9f-17c71c904f5d"), - ), - }, - resource.TestStep{ - Config: testAccAzureSqlDatabaseServiceConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccAzureSqlDatabaseServerGetName, - testAccCheckAzureSqlDatabaseServiceExists(name), - resource.TestCheckResourceAttr(name, "name", - "terraform-testing-db-renamed"), - 
resource.TestCheckResourceAttrPtr(name, "database_server_name", - testAccAzureSqlServerName), - resource.TestCheckResourceAttr(name, "edition", "Standard"), - resource.TestCheckResourceAttr(name, "collation", - "SQL_Latin1_General_CP1_CI_AS"), - resource.TestCheckResourceAttr(name, "max_size_bytes", "5368709120"), - resource.TestCheckResourceAttr(name, "service_level_id", - "f1173c43-91bd-4aaa-973c-54e79e15235b"), - ), - }, - }, - }) -} - -func testAccCheckAzureSqlDatabaseServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("SQL Service %s doesn't exist.", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("SQL Service %s resource ID not set.", name) - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - dbs, err := sqlClient.ListDatabases(*testAccAzureSqlServerName) - if err != nil { - return fmt.Errorf("Error issuing Azure SQL Service list request: %s", err) - } - - for _, srv := range dbs.ServiceResources { - if srv.Name == resource.Primary.ID { - return nil - } - } - - return fmt.Errorf("SQL Service %s doesn't exist.", name) - } -} - -func testAccCheckAzureSqlDatabaseServiceDeleted(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_sql_database_service" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("SQL Service resource ID not set.") - } - - sqlClient := testAccProvider.Meta().(*Client).sqlClient - dbs, err := sqlClient.ListDatabases(*testAccAzureSqlServerName) - if err != nil { - // ¯\_(ツ)_/¯ - if strings.Contains(err.Error(), "Cannot open server") { - return nil - } - return fmt.Errorf("Error issuing Azure SQL Service list request: %s", err) - } - - for _, srv := range dbs.ServiceResources { - if srv.Name == resource.Primary.ID { - return fmt.Errorf("SQL Service %s still exists.", resource.Primary.ID) - } - } - } - - return nil -} 
- -const testAccAzureSqlDatabaseServiceConfigBasic = testAccAzureSqlDatabaseServerConfig + ` -resource "azure_sql_database_service" "foo" { - name = "terraform-testing-db" - database_server_name = "${azure_sql_database_server.foo.name}" - edition = "Standard" -} -` - -const testAccAzureSqlDatabaseServiceConfigAdvanced = testAccAzureSqlDatabaseServerConfig + ` -resource "azure_sql_database_service" "foo" { - name = "terraform-testing-db" - database_server_name = "${azure_sql_database_server.foo.name}" - edition = "Premium" - collation = "Arabic_BIN" - max_size_bytes = "10737418240" - service_level_id = "7203483a-c4fb-4304-9e9f-17c71c904f5d" -} -` - -const testAccAzureSqlDatabaseServiceConfigUpdate = testAccAzureSqlDatabaseServerConfig + ` -resource "azure_sql_database_service" "foo" { - name = "terraform-testing-db-renamed" - database_server_name = "${azure_sql_database_server.foo.name}" - edition = "Standard" - collation = "SQL_Latin1_General_CP1_CI_AS" - max_size_bytes = "5368709120" - service_level_id = "f1173c43-91bd-4aaa-973c-54e79e15235b" -} -` diff --git a/builtin/providers/azure/resource_azure_storage_blob.go b/builtin/providers/azure/resource_azure_storage_blob.go deleted file mode 100644 index 1aeee7216..000000000 --- a/builtin/providers/azure/resource_azure_storage_blob.go +++ /dev/null @@ -1,190 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureStorageBlob returns the *schema.Resource associated -// with a storage blob on Azure. 
-func resourceAzureStorageBlob() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureStorageBlobCreate, - Read: resourceAzureStorageBlobRead, - Exists: resourceAzureStorageBlobExists, - Delete: resourceAzureStorageBlobDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["type"], - }, - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - DefaultFunc: func() (interface{}, error) { - return int64(0), nil - }, - }, - "storage_container_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["storage_container_name"], - }, - "storage_service_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["storage_service_name"], - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: parameterDescriptions["url"], - }, - }, - } -} - -// resourceAzureStorageBlobCreate does all the necessary API calls to -// create the storage blob on Azure. 
-func resourceAzureStorageBlobCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - log.Println("[INFO] Issuing create on Azure storage blob.") - name := d.Get("name").(string) - blobType := d.Get("type").(string) - cont := d.Get("storage_container_name").(string) - - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - - switch blobType { - case "BlockBlob": - options := &storage.PutBlobOptions{} - err = blob.CreateBlockBlob(options) - case "PageBlob": - size := int64(d.Get("size").(int)) - options := &storage.PutBlobOptions{} - blob.Properties.ContentLength = size - err = blob.PutPageBlob(options) - default: - err = fmt.Errorf("Invalid blob type specified; see parameter desciptions for more info.") - } - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - - d.SetId(name) - return resourceAzureStorageBlobRead(d, meta) -} - -// resourceAzureStorageBlobRead does all the necessary API calls to -// read the status of the storage blob off Azure. 
-func resourceAzureStorageBlobRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - - // check for it's existence: - exists, err := resourceAzureStorageBlobExists(d, meta) - if err != nil { - return err - } - - // if it exists; read relevant information: - if exists { - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - name := d.Get("name").(string) - cont := d.Get("storage_container_name").(string) - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - url := blob.GetURL() - d.Set("url", url) - } - - // NOTE: no need to unset the ID here, as resourceAzureStorageBlobExists - // already should have done so if it were required. - return nil -} - -// resourceAzureStorageBlobExists does all the necessary API calls to -// check for the existence of the blob on Azure. -func resourceAzureStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return false, err - } - - log.Println("[INFO] Querying Azure for storage blob's existence.") - name := d.Get("name").(string) - cont := d.Get("storage_container_name").(string) - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - exists, err := blob.Exists() - if err != nil { - return false, fmt.Errorf("Error whilst checking for Azure storage blob's existence: %s", err) - } - - // if not found; it means it was deleted in the meantime and - // we must remove it from the schema. - if !exists { - d.SetId("") - } - - return exists, nil -} - -// resourceAzureStorageBlobDelete does all the necessary API calls to -// delete the blob off Azure. 
-func resourceAzureStorageBlobDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - log.Println("[INFO] Issuing storage blob delete command off Azure.") - name := d.Get("name").(string) - cont := d.Get("storage_container_name").(string) - - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - - options := &storage.DeleteBlobOptions{} - _, err = blob.DeleteIfExists(options) - if err != nil { - return fmt.Errorf("Error whilst deleting storage blob: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azure/resource_azure_storage_blob_test.go b/builtin/providers/azure/resource_azure_storage_blob_test.go deleted file mode 100644 index dad87ea16..000000000 --- a/builtin/providers/azure/resource_azure_storage_blob_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureStorageBlockBlob(t *testing.T) { - name := "azure_storage_blob.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureStorageBlobDeleted("block"), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureStorageBlockBlobConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureStorageBlobExists(name, "block"), - resource.TestCheckResourceAttr(name, "name", "tftesting-blob"), - resource.TestCheckResourceAttr(name, "type", "BlockBlob"), - resource.TestCheckResourceAttr(name, "storage_container_name", - fmt.Sprintf("%s-block", testAccStorageContainerName)), - resource.TestCheckResourceAttr(name, "storage_service_name", testAccStorageServiceName), - ), - }, - }, - }) -} - -func 
TestAccAzureStoragePageBlob(t *testing.T) { - name := "azure_storage_blob.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureStorageBlobDeleted("page"), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureStoragePageBlobConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureStorageBlobExists(name, "page"), - resource.TestCheckResourceAttr(name, "name", "tftesting-blob"), - resource.TestCheckResourceAttr(name, "type", "PageBlob"), - resource.TestCheckResourceAttr(name, "size", "512"), - resource.TestCheckResourceAttr(name, "storage_container_name", - fmt.Sprintf("%s-page", testAccStorageContainerName)), - resource.TestCheckResourceAttr(name, "storage_service_name", testAccStorageServiceName), - ), - }, - }, - }) -} - -func testAccCheckAzureStorageBlobExists(name, typ string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Storage Container resource not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Container ID not set: %s", name) - } - - azureClient := testAccProvider.Meta().(*Client) - blobClient, err := azureClient.getStorageServiceBlobClient(testAccStorageServiceName) - if err != nil { - return err - } - - containerName := fmt.Sprintf("%s-%s", testAccStorageContainerName, typ) - container := blobClient.GetContainerReference(containerName) - blob := container.GetBlobReference(resource.Primary.ID) - exists, err := blob.Exists() - if err != nil { - return err - } - if !exists { - return fmt.Errorf("Azure Storage Blob %s doesn't exist.", name) - } - - return nil - } -} - -func testAccCheckAzureStorageBlobDeleted(typ string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_storage_blob" { 
- continue - } - - azureClient := testAccProvider.Meta().(*Client) - blobClient, err := azureClient.getStorageServiceBlobClient(testAccStorageServiceName) - if err != nil { - return err - } - - containerName := fmt.Sprintf("%s-%s", testAccStorageContainerName, typ) - container := blobClient.GetContainerReference(containerName) - blob := container.GetBlobReference(resource.Primary.ID) - exists, err := blob.Exists() - if err != nil { - return err - } - if exists { - return fmt.Errorf("Azure Storage Blob still exists.") - } - } - - return nil - } -} - -var testAccAzureStorageBlockBlobConfig = fmt.Sprintf(` -resource "azure_storage_container" "foo" { - name = "%s-block" - container_access_type = "blob" - # NOTE: A pre-existing Storage Service is used here so as to avoid - # the huge wait for creation of one. - storage_service_name = "%s" -} - -resource "azure_storage_blob" "foo" { - name = "tftesting-blob" - type = "BlockBlob" - # NOTE: A pre-existing Storage Service is used here so as to avoid - # the huge wait for creation of one. - storage_service_name = "${azure_storage_container.foo.storage_service_name}" - storage_container_name = "${azure_storage_container.foo.name}" -} -`, testAccStorageContainerName, testAccStorageServiceName) - -var testAccAzureStoragePageBlobConfig = fmt.Sprintf(` -resource "azure_storage_container" "foo" { - name = "%s-page" - container_access_type = "blob" - # NOTE: A pre-existing Storage Service is used here so as to avoid - # the huge wait for creation of one. - storage_service_name = "%s" -} - -resource "azure_storage_blob" "foo" { - name = "tftesting-blob" - type = "PageBlob" - # NOTE: A pre-existing Storage Service is used here so as to avoid - # the huge wait for creation of one. 
- storage_service_name = "${azure_storage_container.foo.storage_service_name}" - storage_container_name = "${azure_storage_container.foo.name}" - # NOTE: must be a multiple of 512: - size = 512 -} -`, testAccStorageContainerName, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_storage_container.go b/builtin/providers/azure/resource_azure_storage_container.go deleted file mode 100644 index e4f3d2886..000000000 --- a/builtin/providers/azure/resource_azure_storage_container.go +++ /dev/null @@ -1,171 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureStorageContainer returns the *schema.Resource associated -// to a storage container on Azure. -func resourceAzureStorageContainer() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureStorageContainerCreate, - Read: resourceAzureStorageContainerRead, - Exists: resourceAzureStorageContainerExists, - Delete: resourceAzureStorageContainerDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "storage_service_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["storage_service_name"], - }, - "container_access_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["container_access_type"], - }, - "properties": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Elem: schema.TypeString, - Description: parameterDescriptions["properties"], - }, - }, - } -} - -// resourceAzureStorageContainerCreate does all the necessary API calls to -// create the storage container on Azure. 
-func resourceAzureStorageContainerCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - log.Println("[INFO] Creating storage container on Azure.") - name := d.Get("name").(string) - accessType := storage.ContainerAccessType(d.Get("container_access_type").(string)) - container := blobClient.GetContainerReference(name) - options := &storage.CreateContainerOptions{ - Access: accessType, - } - err = container.Create(options) - if err != nil { - return fmt.Errorf("Failed to create storage container on Azure: %s", err) - } - - d.SetId(name) - return resourceAzureStorageContainerRead(d, meta) -} - -// resourceAzureStorageContainerRead does all the necessary API calls to -// read the status of the storage container off Azure. -func resourceAzureStorageContainerRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - log.Println("[INFO] Querying Azure for storage containers.") - name := d.Get("name").(string) - containers, err := blobClient.ListContainers(storage.ListContainersParameters{ - Prefix: name, - Timeout: 90, - }) - if err != nil { - return fmt.Errorf("Failed to query Azure for its storage containers: %s", err) - } - - // search for our storage container and update its stats: - var found bool - // loop just to make sure we got the right container: - for _, cont := range containers.Containers { - if cont.Name == name { - found = true - - props := make(map[string]interface{}) - props["last_modified"] = cont.Properties.LastModified - props["lease_status"] = cont.Properties.LeaseStatus - props["lease_state"] = cont.Properties.LeaseState - props["lease_duration"] = 
cont.Properties.LeaseDuration - - d.Set("properties", props) - } - } - - // if not found; it means the resource has been deleted - // in the meantime; so we must untrack it: - if !found { - d.SetId("") - } - - return nil -} - -// resourceAzureStorageContainerExists does all the necessary API calls to -// check if the storage container already exists on Azure. -func resourceAzureStorageContainerExists(d *schema.ResourceData, meta interface{}) (bool, error) { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return false, err - } - - log.Println("[INFO] Checking existence of storage container on Azure.") - name := d.Get("name").(string) - container := blobClient.GetContainerReference(name) - exists, err := container.Exists() - if err != nil { - return false, fmt.Errorf("Failed to query for Azure storage container existence: %s", err) - } - - // if it does not exist; untrack the resource: - if !exists { - d.SetId("") - } - return exists, nil -} - -// resourceAzureStorageContainerDelete does all the necessary API calls to -// delete a storage container off Azure. 
-func resourceAzureStorageContainerDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storName := d.Get("storage_service_name").(string) - - blobClient, err := azureClient.getStorageServiceBlobClient(storName) - if err != nil { - return err - } - - log.Println("[INFO] Issuing Azure storage container deletion call.") - name := d.Get("name").(string) - container := blobClient.GetContainerReference(name) - options := &storage.DeleteContainerOptions{} - _, err = container.DeleteIfExists(options) - if err != nil { - return fmt.Errorf("Failed deleting storage container off Azure: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azure/resource_azure_storage_container_test.go b/builtin/providers/azure/resource_azure_storage_container_test.go deleted file mode 100644 index 7fce5463f..000000000 --- a/builtin/providers/azure/resource_azure_storage_container_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureStorageContainer(t *testing.T) { - name := "azure_storage_container.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureStorageContainerDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureStorageContainerConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureStorageContainerExists(name), - resource.TestCheckResourceAttr(name, "name", testAccStorageContainerName), - resource.TestCheckResourceAttr(name, "storage_service_name", testAccStorageServiceName), - resource.TestCheckResourceAttr(name, "container_access_type", "blob"), - ), - }, - }, - }) - - // because containers take a while to get deleted, sleep for one minute: - time.Sleep(3 * time.Minute) -} - -func testAccCheckAzureStorageContainerExists(name 
string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Storage Container resource not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Container ID not set: %s", name) - } - - azureClient := testAccProvider.Meta().(*Client) - blobClient, err := azureClient.getStorageServiceBlobClient(testAccStorageServiceName) - if err != nil { - return err - } - - container := blobClient.GetContainerReference(resource.Primary.ID) - exists, err := container.Exists() - if err != nil { - return err - } - if !exists { - return fmt.Errorf("Azure Storage Container %s doesn't exist.", name) - } - - return nil - } -} - -func testAccCheckAzureStorageContainerDestroyed(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_storage_container" { - continue - } - - azureClient := testAccProvider.Meta().(*Client) - blobClient, err := azureClient.getStorageServiceBlobClient(testAccStorageServiceName) - if err != nil { - return err - } - - container := blobClient.GetContainerReference(resource.Primary.ID) - exists, err := container.Exists() - if err != nil { - return err - } - if exists { - return fmt.Errorf("Azure Storage Container still exists.") - } - } - - return nil -} - -var testAccAzureStorageContainerConfig = fmt.Sprintf(` -resource "azure_storage_container" "foo" { - name = "%s" - container_access_type = "blob" - # NOTE: A pre-existing Storage Service is used here so as to avoid - # the huge wait for creation of one. 
- storage_service_name = "%s" -} -`, testAccStorageContainerName, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_storage_queue.go b/builtin/providers/azure/resource_azure_storage_queue.go deleted file mode 100644 index ecbb71559..000000000 --- a/builtin/providers/azure/resource_azure_storage_queue.go +++ /dev/null @@ -1,109 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureStorageQueue returns the *schema.Resource associated -// to a storage queue on Azure. -func resourceAzureStorageQueue() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureStorageQueueCreate, - Read: resourceAzureStorageQueueRead, - Delete: resourceAzureStorageQueueDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["name"], - }, - "storage_service_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["storage_service_name"], - }, - }, - } -} - -// resourceAzureStorageQueueCreate does all the necessary API calls to -// create a storage queue on Azure. 
-func resourceAzureStorageQueueCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storServName := d.Get("storage_service_name").(string) - queueClient, err := azureClient.getStorageServiceQueueClient(storServName) - if err != nil { - return err - } - - // create the queue: - log.Println("Sending Storage Queue creation request to Azure.") - name := d.Get("name").(string) - queue := queueClient.GetQueueReference(name) - options := &storage.QueueServiceOptions{} - err = queue.Create(options) - if err != nil { - return fmt.Errorf("Error creation Storage Queue on Azure: %s", err) - } - - d.SetId(name) - return nil -} - -// resourceAzureStorageQueueRead does all the necessary API calls to -// read the state of the storage queue off Azure. -func resourceAzureStorageQueueRead(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storServName := d.Get("storage_service_name").(string) - queueClient, err := azureClient.getStorageServiceQueueClient(storServName) - if err != nil { - return err - } - - // check for queue's existence: - log.Println("[INFO] Sending Storage Queue existence query to Azure.") - name := d.Get("name").(string) - queue := queueClient.GetQueueReference(name) - exists, err := queue.Exists() - if err != nil { - return fmt.Errorf("Error checking for Storage Queue existence: %s", err) - } - - // If the queue has been deleted in the meantime; - // untrack the resource from the schema. - if !exists { - d.SetId("") - } - - return nil -} - -// resourceAzureStorageQueueDelete does all the necessary API calls to -// delete the storage queue off Azure. 
-func resourceAzureStorageQueueDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - storServName := d.Get("storage_service_name").(string) - queueClient, err := azureClient.getStorageServiceQueueClient(storServName) - if err != nil { - return err - } - - // issue the deletion of the storage queue: - log.Println("[INFO] Sending Storage Queue deletion request to Azure.") - name := d.Get("name").(string) - queue := queueClient.GetQueueReference(name) - options := &storage.QueueServiceOptions{} - err = queue.Delete(options) - if err != nil { - return fmt.Errorf("Error deleting Storage queue off Azure: %s", err) - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_storage_queue_test.go b/builtin/providers/azure/resource_azure_storage_queue_test.go deleted file mode 100644 index 7c3cb1fd8..000000000 --- a/builtin/providers/azure/resource_azure_storage_queue_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureStorageQueue(t *testing.T) { - name := "azure_storage_queue.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureStorageQueueDeleted, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureStorageQueueConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureStorageQueueExists(name), - resource.TestCheckResourceAttr(name, "name", "terraform-queue"), - resource.TestCheckResourceAttr(name, "storage_service_name", testAccStorageServiceName), - ), - }, - }, - }) -} - -func testAccCheckAzureStorageQueueExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Storage Queue resource '%s' is missing.", name) - } - - if 
resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Service Queue ID %s is missing.", name) - } - - azureClient := testAccProvider.Meta().(*Client) - queueClient, err := azureClient.getStorageServiceQueueClient(testAccStorageServiceName) - if err != nil { - return err - } - - queue := queueClient.GetQueueReference(resource.Primary.ID) - exists, err := queue.Exists() - if err != nil { - return fmt.Errorf("Error querying Azure for Storage Queue existence: %s", err) - } - if !exists { - return fmt.Errorf("Azure Storage Queue %s doesn't exist!", resource.Primary.ID) - } - - return nil - } -} - -func testAccCheckAzureStorageQueueDeleted(s *terraform.State) error { - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_storage_queue" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Service Queue ID %s is missing.", resource.Primary.ID) - } - - azureClient := testAccProvider.Meta().(*Client) - queueClient, err := azureClient.getStorageServiceQueueClient(testAccStorageServiceName) - if err != nil { - return err - } - - queue := queueClient.GetQueueReference(resource.Primary.ID) - exists, err := queue.Exists() - if err != nil { - return fmt.Errorf("Error querying Azure for Storage Queue existence: %s", err) - } - if exists { - return fmt.Errorf("Azure Storage Queue %s still exists!", resource.Primary.ID) - } - } - - return nil -} - -var testAccAzureStorageQueueConfig = fmt.Sprintf(` -resource "azure_storage_queue" "foo" { - name = "terraform-queue" - storage_service_name = "%s" -} -`, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_storage_service.go b/builtin/providers/azure/resource_azure_storage_service.go deleted file mode 100644 index 1d3a0c251..000000000 --- a/builtin/providers/azure/resource_azure_storage_service.go +++ /dev/null @@ -1,217 +0,0 @@ -package azure - -import ( - "encoding/base64" - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - 
"github.com/Azure/azure-sdk-for-go/management/storageservice" - "github.com/hashicorp/terraform/helper/schema" -) - -// resourceAzureStorageService returns the *schema.Resource associated -// to an Azure hosted service. -func resourceAzureStorageService() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureStorageServiceCreate, - Read: resourceAzureStorageServiceRead, - Exists: resourceAzureStorageServiceExists, - Delete: resourceAzureStorageServiceDelete, - - Schema: map[string]*schema.Schema{ - // General attributes: - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - // TODO(aznashwan): constrain name in description - Description: parameterDescriptions["name"], - }, - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["location"], - }, - "label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Made by Terraform.", - Description: parameterDescriptions["label"], - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: parameterDescriptions["description"], - }, - // Functional attributes: - "account_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: parameterDescriptions["account_type"], - }, - "affinity_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: parameterDescriptions["affinity_group"], - }, - "properties": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: schema.TypeString, - }, - // Computed attributes: - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "primary_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "secondary_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// resourceAzureStorageServiceCreate 
does all the necessary API calls to -// create a new Azure storage service. -func resourceAzureStorageServiceCreate(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - storageServiceClient := azureClient.storageServiceClient - - // get all the values: - log.Println("[INFO] Creating Azure Storage Service creation parameters.") - name := d.Get("name").(string) - location := d.Get("location").(string) - accountType := storageservice.AccountType(d.Get("account_type").(string)) - affinityGroup := d.Get("affinity_group").(string) - description := d.Get("description").(string) - label := base64.StdEncoding.EncodeToString([]byte(d.Get("label").(string))) - var props []storageservice.ExtendedProperty - if given := d.Get("properties").(map[string]interface{}); len(given) > 0 { - props = []storageservice.ExtendedProperty{} - for k, v := range given { - props = append(props, storageservice.ExtendedProperty{ - Name: k, - Value: v.(string), - }) - } - } - - // create parameters and send request: - log.Println("[INFO] Sending Storage Service creation request to Azure.") - reqID, err := storageServiceClient.CreateStorageService( - storageservice.StorageAccountCreateParameters{ - ServiceName: name, - Location: location, - Description: description, - Label: label, - AffinityGroup: affinityGroup, - AccountType: accountType, - ExtendedProperties: storageservice.ExtendedPropertyList{ - ExtendedProperty: props, - }, - }) - if err != nil { - return fmt.Errorf("Failed to create Azure storage service %s: %s", name, err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Failed creating storage service %s: %s", name, err) - } - - d.SetId(name) - return resourceAzureStorageServiceRead(d, meta) -} - -// resourceAzureStorageServiceRead does all the necessary API calls to -// read the state of the storage service off Azure. 
-func resourceAzureStorageServiceRead(d *schema.ResourceData, meta interface{}) error { - storageServiceClient := meta.(*Client).storageServiceClient - - // get our storage service: - log.Println("[INFO] Sending query about storage service to Azure.") - name := d.Get("name").(string) - storsvc, err := storageServiceClient.GetStorageService(name) - if err != nil { - if !management.IsResourceNotFoundError(err) { - return fmt.Errorf("Failed to query about Azure about storage service: %s", err) - } else { - // it means that the resource has been deleted from Azure - // in the meantime and we must remove its associated Resource. - d.SetId("") - return nil - - } - } - - // read values: - d.Set("url", storsvc.URL) - log.Println("[INFO] Querying keys of Azure storage service.") - keys, err := storageServiceClient.GetStorageServiceKeys(name) - if err != nil { - return fmt.Errorf("Failed querying keys for Azure storage service: %s", err) - } - d.Set("primary_key", keys.PrimaryKey) - d.Set("secondary_key", keys.SecondaryKey) - - return nil -} - -// resourceAzureStorageServiceExists does all the necessary API calls to -// check if the storage service exists on Azure. -func resourceAzureStorageServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { - storageServiceClient := meta.(*Client).storageServiceClient - - // get our storage service: - log.Println("[INFO] Sending query about storage service to Azure.") - name := d.Get("name").(string) - _, err := storageServiceClient.GetStorageService(name) - if err != nil { - if !management.IsResourceNotFoundError(err) { - return false, fmt.Errorf("Failed to query about Azure about storage service: %s", err) - } else { - // it means that the resource has been deleted from Azure - // in the meantime and we must remove its associated Resource. 
- d.SetId("") - return false, nil - - } - } - - return true, nil -} - -// resourceAzureStorageServiceDelete does all the necessary API calls to -// delete the storage service off Azure. -func resourceAzureStorageServiceDelete(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mgmtClient := azureClient.mgmtClient - storageServiceClient := azureClient.storageServiceClient - - // issue the deletion: - name := d.Get("name").(string) - log.Println("[INFO] Issuing delete of storage service off Azure.") - reqID, err := storageServiceClient.DeleteStorageService(name) - if err != nil { - return fmt.Errorf("Error whilst issuing deletion of storage service off Azure: %s", err) - } - err = mgmtClient.WaitForOperation(reqID, nil) - if err != nil { - return fmt.Errorf("Error whilst deleting storage service off Azure: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azure/resource_azure_storage_service_test.go b/builtin/providers/azure/resource_azure_storage_service_test.go deleted file mode 100644 index 4067e2a94..000000000 --- a/builtin/providers/azure/resource_azure_storage_service_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureStorageService(t *testing.T) { - name := "azure_storage_service.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccAzureStorageServiceDestroyed, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureStorageServiceConfig, - Check: resource.ComposeTestCheckFunc( - testAccAzureStorageServiceExists(name), - resource.TestCheckResourceAttr(name, "name", "tftestingdis"), - resource.TestCheckResourceAttr(name, "location", "West US"), - resource.TestCheckResourceAttr(name, "description", "very descriptive"), - 
resource.TestCheckResourceAttr(name, "account_type", "Standard_LRS"), - ), - }, - }, - }) -} - -func testAccAzureStorageServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Azure Storage Service Resource not found: %s", name) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Service ID not set.") - } - - storageServiceClient := testAccProvider.Meta().(*Client).storageServiceClient - _, err := storageServiceClient.GetStorageService(resource.Primary.ID) - - return err - } -} - -func testAccAzureStorageServiceDestroyed(s *terraform.State) error { - storageServiceClient := testAccProvider.Meta().(*Client).storageServiceClient - - for _, resource := range s.RootModule().Resources { - if resource.Type != "azure_storage_service" { - continue - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Azure Storage Service ID not set.") - } - - _, err := storageServiceClient.GetStorageService(resource.Primary.ID) - return testAccResourceDestroyedErrorFilter("Storage Service", err) - } - - return nil -} - -var testAccAzureStorageServiceConfig = ` -resource "azure_storage_service" "foo" { - # NOTE: storage service names constrained to lowercase letters only. 
- name = "tftestingdis" - location = "West US" - description = "very descriptive" - account_type = "Standard_LRS" -} -` diff --git a/builtin/providers/azure/resource_azure_virtual_network.go b/builtin/providers/azure/resource_azure_virtual_network.go deleted file mode 100644 index f41f9955e..000000000 --- a/builtin/providers/azure/resource_azure_virtual_network.go +++ /dev/null @@ -1,367 +0,0 @@ -package azure - -import ( - "fmt" - "log" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualnetwork" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - virtualNetworkRetrievalError = "Error retrieving Virtual Network Configuration: %s" -) - -func resourceAzureVirtualNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceAzureVirtualNetworkCreate, - Read: resourceAzureVirtualNetworkRead, - Update: resourceAzureVirtualNetworkUpdate, - Delete: resourceAzureVirtualNetworkDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address_space": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dns_servers_names": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "subnet": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "address_prefix": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "security_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAzureSubnetHash, - }, - - "location": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func 
resourceAzureVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) error { - ac := meta.(*Client) - mc := ac.mgmtClient - vnetClient := ac.vnetClient - - name := d.Get("name").(string) - - // Lock the client just before we get the virtual network configuration and immediately - // set an defer to unlock the client again whenever this function exits - ac.vnetMutex.Lock() - defer ac.vnetMutex.Unlock() - - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - if management.IsResourceNotFoundError(err) { - // if no network config exists yet; create a new one now: - nc = virtualnetwork.NetworkConfiguration{} - } else { - return fmt.Errorf(virtualNetworkRetrievalError, err) - } - } - - for _, n := range nc.Configuration.VirtualNetworkSites { - if n.Name == name { - return fmt.Errorf("Virtual Network %s already exists!", name) - } - } - - network := createVirtualNetwork(d) - nc.Configuration.VirtualNetworkSites = append(nc.Configuration.VirtualNetworkSites, network) - - req, err := vnetClient.SetVirtualNetworkConfiguration(nc) - if err != nil { - return fmt.Errorf("Error creating Virtual Network %s: %s", name, err) - } - - // Wait until the virtual network is created - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf("Error waiting for Virtual Network %s to be created: %s", name, err) - } - - d.SetId(name) - - if err := associateSecurityGroups(d, meta); err != nil { - return err - } - - return resourceAzureVirtualNetworkRead(d, meta) -} - -func resourceAzureVirtualNetworkRead(d *schema.ResourceData, meta interface{}) error { - ac := meta.(*Client) - vnetClient := ac.vnetClient - secGroupClient := ac.secGroupClient - - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf(virtualNetworkRetrievalError, err) - } - - for _, n := range nc.Configuration.VirtualNetworkSites { - if n.Name == d.Id() { - d.Set("address_space", n.AddressSpace.AddressPrefix) - d.Set("location", n.Location) - 
- // Create a new set to hold all configured subnets - subnets := &schema.Set{ - F: resourceAzureSubnetHash, - } - - // Loop through all endpoints - for _, s := range n.Subnets { - subnet := map[string]interface{}{} - - // Get the associated (if any) security group - sg, err := secGroupClient.GetNetworkSecurityGroupForSubnet(s.Name, d.Id()) - if err != nil && !management.IsResourceNotFoundError(err) { - return fmt.Errorf( - "Error retrieving Network Security Group associations of subnet %s: %s", s.Name, err) - } - - // Update the values - subnet["name"] = s.Name - subnet["address_prefix"] = s.AddressPrefix - subnet["security_group"] = sg.Name - - subnets.Add(subnet) - } - - d.Set("subnet", subnets) - - return nil - } - } - - log.Printf("[DEBUG] Virtual Network %s does no longer exist", d.Id()) - d.SetId("") - - return nil -} - -func resourceAzureVirtualNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - ac := meta.(*Client) - mc := ac.mgmtClient - vnetClient := ac.vnetClient - - // Lock the client just before we get the virtual network configuration and immediately - // set an defer to unlock the client again whenever this function exits - ac.vnetMutex.Lock() - defer ac.vnetMutex.Unlock() - - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf(virtualNetworkRetrievalError, err) - } - - found := false - for i, n := range nc.Configuration.VirtualNetworkSites { - if n.Name == d.Id() { - network := createVirtualNetwork(d) - nc.Configuration.VirtualNetworkSites[i] = network - - found = true - } - } - - if !found { - return fmt.Errorf("Virtual Network %s does not exists!", d.Id()) - } - - req, err := vnetClient.SetVirtualNetworkConfiguration(nc) - if err != nil { - return fmt.Errorf("Error updating Virtual Network %s: %s", d.Id(), err) - } - - // Wait until the virtual network is updated - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf("Error waiting for Virtual Network %s to be updated: 
%s", d.Id(), err) - } - - if err := associateSecurityGroups(d, meta); err != nil { - return err - } - - return resourceAzureVirtualNetworkRead(d, meta) -} - -func resourceAzureVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) error { - ac := meta.(*Client) - mc := ac.mgmtClient - vnetClient := ac.vnetClient - - // Lock the client just before we get the virtual network configuration and immediately - // set an defer to unlock the client again whenever this function exits - ac.vnetMutex.Lock() - defer ac.vnetMutex.Unlock() - - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return fmt.Errorf(virtualNetworkRetrievalError, err) - } - - filtered := nc.Configuration.VirtualNetworkSites[:0] - for _, n := range nc.Configuration.VirtualNetworkSites { - if n.Name != d.Id() { - filtered = append(filtered, n) - } - } - - nc.Configuration.VirtualNetworkSites = filtered - - req, err := vnetClient.SetVirtualNetworkConfiguration(nc) - if err != nil { - return fmt.Errorf("Error deleting Virtual Network %s: %s", d.Id(), err) - } - - // Wait until the virtual network is deleted - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf("Error waiting for Virtual Network %s to be deleted: %s", d.Id(), err) - } - - d.SetId("") - - return nil -} - -func resourceAzureSubnetHash(v interface{}) int { - m := v.(map[string]interface{}) - subnet := m["name"].(string) + m["address_prefix"].(string) + m["security_group"].(string) - return hashcode.String(subnet) -} - -func createVirtualNetwork(d *schema.ResourceData) virtualnetwork.VirtualNetworkSite { - // fetch address spaces: - var prefixes []string - for _, prefix := range d.Get("address_space").([]interface{}) { - prefixes = append(prefixes, prefix.(string)) - } - - // fetch DNS references: - var dnsRefs []virtualnetwork.DNSServerRef - for _, dns := range d.Get("dns_servers_names").([]interface{}) { - dnsRefs = append(dnsRefs, virtualnetwork.DNSServerRef{ - Name: dns.(string), - }) - 
} - - // Add all subnets that are configured - var subnets []virtualnetwork.Subnet - if rs := d.Get("subnet").(*schema.Set); rs.Len() > 0 { - for _, subnet := range rs.List() { - subnet := subnet.(map[string]interface{}) - subnets = append(subnets, virtualnetwork.Subnet{ - Name: subnet["name"].(string), - AddressPrefix: subnet["address_prefix"].(string), - }) - } - } - - return virtualnetwork.VirtualNetworkSite{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - AddressSpace: virtualnetwork.AddressSpace{ - AddressPrefix: prefixes, - }, - DNSServersRef: dnsRefs, - Subnets: subnets, - } -} - -func associateSecurityGroups(d *schema.ResourceData, meta interface{}) error { - azureClient := meta.(*Client) - mc := azureClient.mgmtClient - secGroupClient := azureClient.secGroupClient - - virtualNetwork := d.Get("name").(string) - - if rs := d.Get("subnet").(*schema.Set); rs.Len() > 0 { - for _, subnet := range rs.List() { - subnet := subnet.(map[string]interface{}) - securityGroup := subnet["security_group"].(string) - subnetName := subnet["name"].(string) - - // Get the associated (if any) security group - sg, err := secGroupClient.GetNetworkSecurityGroupForSubnet(subnetName, d.Id()) - if err != nil && !management.IsResourceNotFoundError(err) { - return fmt.Errorf( - "Error retrieving Network Security Group associations of subnet %s: %s", subnetName, err) - } - - // If the desired and actual security group are the same, were done so can just continue - if sg.Name == securityGroup { - continue - } - - // If there is an associated security group, make sure we first remove it from the subnet - if sg.Name != "" { - req, err := secGroupClient.RemoveNetworkSecurityGroupFromSubnet(sg.Name, subnetName, virtualNetwork) - if err != nil { - return fmt.Errorf("Error removing Network Security Group %s from subnet %s: %s", - securityGroup, subnetName, err) - } - - // Wait until the security group is associated - if err := mc.WaitForOperation(req, nil); err != nil 
{ - return fmt.Errorf( - "Error waiting for Network Security Group %s to be removed from subnet %s: %s", - securityGroup, subnetName, err) - } - } - - // If the desired security group is not empty, assign the security group to the subnet - if securityGroup != "" { - req, err := secGroupClient.AddNetworkSecurityToSubnet(securityGroup, subnetName, virtualNetwork) - if err != nil { - return fmt.Errorf("Error associating Network Security Group %s to subnet %s: %s", - securityGroup, subnetName, err) - } - - // Wait until the security group is associated - if err := mc.WaitForOperation(req, nil); err != nil { - return fmt.Errorf( - "Error waiting for Network Security Group %s to be associated with subnet %s: %s", - securityGroup, subnetName, err) - } - } - - } - } - - return nil -} diff --git a/builtin/providers/azure/resource_azure_virtual_network_test.go b/builtin/providers/azure/resource_azure_virtual_network_test.go deleted file mode 100644 index 716556bbd..000000000 --- a/builtin/providers/azure/resource_azure_virtual_network_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package azure - -import ( - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/virtualnetwork" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureVirtualNetwork_basic(t *testing.T) { - var network virtualnetwork.VirtualNetworkSite - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureVirtualNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureVirtualNetwork_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureVirtualNetworkExists( - "azure_virtual_network.foo", &network), - testAccCheckAzureVirtualNetworkAttributes(&network), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "name", "terraform-vnet"), - 
resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.1787288781.name", "subnet1"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.1787288781.address_prefix", "10.1.2.0/25"), - ), - }, - }, - }) -} - -func TestAccAzureVirtualNetwork_advanced(t *testing.T) { - var network virtualnetwork.VirtualNetworkSite - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureVirtualNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureVirtualNetwork_advanced, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureVirtualNetworkExists( - "azure_virtual_network.foo", &network), - testAccCheckAzureVirtualNetworkAttributes(&network), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "name", "terraform-vnet"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.name", "subnet1"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.address_prefix", "10.1.2.0/25"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.security_group", "terraform-security-group1"), - ), - }, - }, - }) -} - -func TestAccAzureVirtualNetwork_update(t *testing.T) { - var network virtualnetwork.VirtualNetworkSite - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAzureVirtualNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccAzureVirtualNetwork_advanced, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureVirtualNetworkExists( - "azure_virtual_network.foo", &network), - testAccCheckAzureVirtualNetworkAttributes(&network), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "name", "terraform-vnet"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.name", "subnet1"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.address_prefix", "10.1.2.0/25"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.33778499.security_group", "terraform-security-group1"), - ), - }, - - resource.TestStep{ - Config: testAccAzureVirtualNetwork_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckAzureVirtualNetworkExists( - "azure_virtual_network.foo", &network), - testAccCheckAzureVirtualNetworkAttributes(&network), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "name", "terraform-vnet"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "location", "West US"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "address_space.0", "10.1.3.0/24"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.514595123.name", "subnet1"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.514595123.address_prefix", "10.1.3.128/25"), - resource.TestCheckResourceAttr( - "azure_virtual_network.foo", "subnet.514595123.security_group", "terraform-security-group2"), - ), - }, - }, - }) -} - -func testAccCheckAzureVirtualNetworkExists( - n string, - network *virtualnetwork.VirtualNetworkSite) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not 
found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Virtual Network ID is set") - } - - vnetClient := testAccProvider.Meta().(*Client).vnetClient - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - return err - } - - for _, n := range nc.Configuration.VirtualNetworkSites { - if n.Name == rs.Primary.ID { - *network = n - - return nil - } - } - - return fmt.Errorf("Virtual Network not found") - } -} - -func testAccCheckAzureVirtualNetworkAttributes( - network *virtualnetwork.VirtualNetworkSite) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if network.Name != "terraform-vnet" { - return fmt.Errorf("Bad name: %s", network.Name) - } - - if network.Location != "West US" { - return fmt.Errorf("Bad location: %s", network.Location) - } - - return nil - } -} - -func testAccCheckAzureVirtualNetworkDestroy(s *terraform.State) error { - vnetClient := testAccProvider.Meta().(*Client).vnetClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azure_virtual_network" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Virtual Network ID is set") - } - - nc, err := vnetClient.GetVirtualNetworkConfiguration() - if err != nil { - if management.IsResourceNotFoundError(err) { - // This is desirable - no configuration = no networks - continue - } - return fmt.Errorf("Error retrieving Virtual Network Configuration: %s", err) - } - - for _, n := range nc.Configuration.VirtualNetworkSites { - if n.Name == rs.Primary.ID { - return fmt.Errorf("Virtual Network %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -const testAccAzureVirtualNetwork_basic = ` -resource "azure_virtual_network" "foo" { - name = "terraform-vnet" - address_space = ["10.1.2.0/24"] - location = "West US" - - subnet { - name = "subnet1" - address_prefix = "10.1.2.0/25" - } -}` - -const testAccAzureVirtualNetwork_advanced = ` -resource "azure_security_group" "foo" { - name = "terraform-security-group1" - 
location = "West US" -} - -resource "azure_security_group_rule" "foo" { - name = "terraform-secgroup-rule" - security_group_names = ["${azure_security_group.foo.name}"] - type = "Inbound" - action = "Deny" - priority = 200 - source_address_prefix = "100.0.0.0/32" - source_port_range = "1000" - destination_address_prefix = "10.0.0.0/32" - destination_port_range = "1000" - protocol = "TCP" -} - -resource "azure_virtual_network" "foo" { - name = "terraform-vnet" - address_space = ["10.1.2.0/24"] - location = "West US" - - subnet { - name = "subnet1" - address_prefix = "10.1.2.0/25" - security_group = "${azure_security_group.foo.name}" - } -}` - -const testAccAzureVirtualNetwork_update = ` -resource "azure_security_group" "foo" { - name = "terraform-security-group1" - location = "West US" -} - -resource "azure_security_group_rule" "foo" { - name = "terraform-secgroup-rule" - security_group_names = ["${azure_security_group.foo.name}"] - type = "Inbound" - action = "Deny" - priority = 200 - source_address_prefix = "100.0.0.0/32" - source_port_range = "1000" - destination_address_prefix = "10.0.0.0/32" - destination_port_range = "1000" - protocol = "TCP" -} - -resource "azure_security_group" "bar" { - name = "terraform-security-group2" - location = "West US" -} - -resource "azure_virtual_network" "foo" { - name = "terraform-vnet" - address_space = ["10.1.3.0/24"] - location = "West US" - - subnet { - name = "subnet1" - address_prefix = "10.1.3.128/25" - security_group = "${azure_security_group.bar.name}" - } -}` diff --git a/builtin/providers/azure/resources.go b/builtin/providers/azure/resources.go deleted file mode 100644 index 6512f735e..000000000 --- a/builtin/providers/azure/resources.go +++ /dev/null @@ -1 +0,0 @@ -package azure diff --git a/builtin/providers/azure/utils_test.go b/builtin/providers/azure/utils_test.go deleted file mode 100644 index 3a3212263..000000000 --- a/builtin/providers/azure/utils_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package azure - 
-import ( - "fmt" - - "github.com/Azure/azure-sdk-for-go/management" -) - -// testAccResourceDestroyedErrorFilter tests whether the given error is an azure ResourceNotFound -// error and properly annotates it if otherwise: -func testAccResourceDestroyedErrorFilter(resource string, err error) error { - switch { - case err == nil: - return fmt.Errorf("Azure %s still exists.", resource) - case err != nil && management.IsResourceNotFoundError(err): - return nil - default: - return err - } -} diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go deleted file mode 100644 index 8fdb95d19..000000000 --- a/builtin/providers/azurerm/config.go +++ /dev/null @@ -1,575 +0,0 @@ -package azurerm - -import ( - "context" - "fmt" - "log" - "net/http" - "net/http/httputil" - - "github.com/Azure/azure-sdk-for-go/arm/cdn" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/containerregistry" - "github.com/Azure/azure-sdk-for-go/arm/containerservice" - "github.com/Azure/azure-sdk-for-go/arm/disk" - "github.com/Azure/azure-sdk-for-go/arm/eventhub" - "github.com/Azure/azure-sdk-for-go/arm/keyvault" - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/redis" - "github.com/Azure/azure-sdk-for-go/arm/resources/resources" - "github.com/Azure/azure-sdk-for-go/arm/scheduler" - "github.com/Azure/azure-sdk-for-go/arm/servicebus" - "github.com/Azure/azure-sdk-for-go/arm/sql" - "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/azure-sdk-for-go/arm/trafficmanager" - mainStorage "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/hashicorp/terraform/terraform" - riviera "github.com/jen20/riviera/azure" -) - -// ArmClient contains the handles to all the specific Azure Resource Manager -// resource classes' respective clients. 
-type ArmClient struct { - clientId string - tenantId string - subscriptionId string - environment azure.Environment - - StopContext context.Context - - rivieraClient *riviera.Client - - availSetClient compute.AvailabilitySetsClient - usageOpsClient compute.UsageClient - vmExtensionImageClient compute.VirtualMachineExtensionImagesClient - vmExtensionClient compute.VirtualMachineExtensionsClient - vmScaleSetClient compute.VirtualMachineScaleSetsClient - vmImageClient compute.VirtualMachineImagesClient - vmClient compute.VirtualMachinesClient - - diskClient disk.DisksClient - - appGatewayClient network.ApplicationGatewaysClient - ifaceClient network.InterfacesClient - expressRouteCircuitClient network.ExpressRouteCircuitsClient - loadBalancerClient network.LoadBalancersClient - localNetConnClient network.LocalNetworkGatewaysClient - publicIPClient network.PublicIPAddressesClient - secGroupClient network.SecurityGroupsClient - secRuleClient network.SecurityRulesClient - subnetClient network.SubnetsClient - netUsageClient network.UsagesClient - vnetGatewayConnectionsClient network.VirtualNetworkGatewayConnectionsClient - vnetGatewayClient network.VirtualNetworkGatewaysClient - vnetClient network.VirtualNetworksClient - vnetPeeringsClient network.VirtualNetworkPeeringsClient - routeTablesClient network.RouteTablesClient - routesClient network.RoutesClient - - cdnProfilesClient cdn.ProfilesClient - cdnEndpointsClient cdn.EndpointsClient - - containerRegistryClient containerregistry.RegistriesClient - containerServicesClient containerservice.ContainerServicesClient - - eventHubClient eventhub.EventHubsClient - eventHubConsumerGroupClient eventhub.ConsumerGroupsClient - eventHubNamespacesClient eventhub.NamespacesClient - - providers resources.ProvidersClient - resourceGroupClient resources.GroupsClient - tagsClient resources.TagsClient - resourceFindClient resources.GroupClient - - jobsClient scheduler.JobsClient - jobsCollectionsClient scheduler.JobCollectionsClient - - 
storageServiceClient storage.AccountsClient - storageUsageClient storage.UsageClient - - deploymentsClient resources.DeploymentsClient - - redisClient redis.GroupClient - - trafficManagerProfilesClient trafficmanager.ProfilesClient - trafficManagerEndpointsClient trafficmanager.EndpointsClient - - serviceBusNamespacesClient servicebus.NamespacesClient - serviceBusTopicsClient servicebus.TopicsClient - serviceBusSubscriptionsClient servicebus.SubscriptionsClient - - keyVaultClient keyvault.VaultsClient - - sqlElasticPoolsClient sql.ElasticPoolsClient -} - -func withRequestLogging() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - // dump request to wire format - if dump, err := httputil.DumpRequestOut(r, true); err == nil { - log.Printf("[DEBUG] AzureRM Request: \n%s\n", dump) - } else { - // fallback to basic message - log.Printf("[DEBUG] AzureRM Request: %s to %s\n", r.Method, r.URL) - } - - resp, err := s.Do(r) - if resp != nil { - // dump response to wire format - if dump, err := httputil.DumpResponse(resp, true); err == nil { - log.Printf("[DEBUG] AzureRM Response for %s: \n%s\n", r.URL, dump) - } else { - // fallback to basic message - log.Printf("[DEBUG] AzureRM Response: %s for %s\n", resp.Status, r.URL) - } - } else { - log.Printf("[DEBUG] Request to %s completed with no response", r.URL) - } - return resp, err - }) - } -} - -func setUserAgent(client *autorest.Client) { - version := terraform.VersionString() - client.UserAgent = fmt.Sprintf("HashiCorp-Terraform-v%s", version) -} - -// getArmClient is a helper method which returns a fully instantiated -// *ArmClient based on the Config's current settings. 
-func (c *Config) getArmClient() (*ArmClient, error) { - // detect cloud from environment - env, envErr := azure.EnvironmentFromName(c.Environment) - if envErr != nil { - // try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD - wrapped := fmt.Sprintf("AZURE%sCLOUD", c.Environment) - var innerErr error - if env, innerErr = azure.EnvironmentFromName(wrapped); innerErr != nil { - return nil, envErr - } - } - - // client declarations: - client := ArmClient{ - clientId: c.ClientID, - tenantId: c.TenantID, - subscriptionId: c.SubscriptionID, - environment: env, - } - - rivieraClient, err := riviera.NewClient(&riviera.AzureResourceManagerCredentials{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - TenantID: c.TenantID, - SubscriptionID: c.SubscriptionID, - ResourceManagerEndpoint: env.ResourceManagerEndpoint, - ActiveDirectoryEndpoint: env.ActiveDirectoryEndpoint, - }) - if err != nil { - return nil, fmt.Errorf("Error creating Riviera client: %s", err) - } - client.rivieraClient = rivieraClient - - oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, c.TenantID) - if err != nil { - return nil, err - } - - // OAuthConfigForTenant returns a pointer, which can be nil. - if oauthConfig == nil { - return nil, fmt.Errorf("Unable to configure OAuthConfig for tenant %s", c.TenantID) - } - - spt, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, env.ResourceManagerEndpoint) - if err != nil { - return nil, err - } - - endpoint := env.ResourceManagerEndpoint - auth := autorest.NewBearerAuthorizer(spt) - - // NOTE: these declarations should be left separate for clarity should the - // clients be wished to be configured with custom Responders/PollingModess etc... 
- asc := compute.NewAvailabilitySetsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&asc.Client) - asc.Authorizer = auth - asc.Sender = autorest.CreateSender(withRequestLogging()) - client.availSetClient = asc - - uoc := compute.NewUsageClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&uoc.Client) - uoc.Authorizer = auth - uoc.Sender = autorest.CreateSender(withRequestLogging()) - client.usageOpsClient = uoc - - vmeic := compute.NewVirtualMachineExtensionImagesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vmeic.Client) - vmeic.Authorizer = auth - vmeic.Sender = autorest.CreateSender(withRequestLogging()) - client.vmExtensionImageClient = vmeic - - vmec := compute.NewVirtualMachineExtensionsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vmec.Client) - vmec.Authorizer = auth - vmec.Sender = autorest.CreateSender(withRequestLogging()) - client.vmExtensionClient = vmec - - vmic := compute.NewVirtualMachineImagesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vmic.Client) - vmic.Authorizer = auth - vmic.Sender = autorest.CreateSender(withRequestLogging()) - client.vmImageClient = vmic - - vmssc := compute.NewVirtualMachineScaleSetsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vmssc.Client) - vmssc.Authorizer = auth - vmssc.Sender = autorest.CreateSender(withRequestLogging()) - client.vmScaleSetClient = vmssc - - vmc := compute.NewVirtualMachinesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vmc.Client) - vmc.Authorizer = auth - vmc.Sender = autorest.CreateSender(withRequestLogging()) - client.vmClient = vmc - - agc := network.NewApplicationGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&agc.Client) - agc.Authorizer = auth - agc.Sender = autorest.CreateSender(withRequestLogging()) - client.appGatewayClient = agc - - crc := containerregistry.NewRegistriesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&crc.Client) - crc.Authorizer = auth 
- crc.Sender = autorest.CreateSender(withRequestLogging()) - client.containerRegistryClient = crc - - csc := containerservice.NewContainerServicesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&csc.Client) - csc.Authorizer = auth - csc.Sender = autorest.CreateSender(withRequestLogging()) - client.containerServicesClient = csc - - dkc := disk.NewDisksClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&dkc.Client) - dkc.Authorizer = auth - dkc.Sender = autorest.CreateSender(withRequestLogging()) - client.diskClient = dkc - - ehc := eventhub.NewEventHubsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&ehc.Client) - ehc.Authorizer = auth - ehc.Sender = autorest.CreateSender(withRequestLogging()) - client.eventHubClient = ehc - - chcgc := eventhub.NewConsumerGroupsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&chcgc.Client) - chcgc.Authorizer = auth - chcgc.Sender = autorest.CreateSender(withRequestLogging()) - client.eventHubConsumerGroupClient = chcgc - - ehnc := eventhub.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&ehnc.Client) - ehnc.Authorizer = auth - ehnc.Sender = autorest.CreateSender(withRequestLogging()) - client.eventHubNamespacesClient = ehnc - - ifc := network.NewInterfacesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&ifc.Client) - ifc.Authorizer = auth - ifc.Sender = autorest.CreateSender(withRequestLogging()) - client.ifaceClient = ifc - - erc := network.NewExpressRouteCircuitsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&erc.Client) - erc.Authorizer = auth - erc.Sender = autorest.CreateSender(withRequestLogging()) - client.expressRouteCircuitClient = erc - - lbc := network.NewLoadBalancersClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&lbc.Client) - lbc.Authorizer = auth - lbc.Sender = autorest.CreateSender(withRequestLogging()) - client.loadBalancerClient = lbc - - lgc := 
network.NewLocalNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&lgc.Client) - lgc.Authorizer = auth - lgc.Sender = autorest.CreateSender(withRequestLogging()) - client.localNetConnClient = lgc - - pipc := network.NewPublicIPAddressesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&pipc.Client) - pipc.Authorizer = auth - pipc.Sender = autorest.CreateSender(withRequestLogging()) - client.publicIPClient = pipc - - sgc := network.NewSecurityGroupsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&sgc.Client) - sgc.Authorizer = auth - sgc.Sender = autorest.CreateSender(withRequestLogging()) - client.secGroupClient = sgc - - src := network.NewSecurityRulesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&src.Client) - src.Authorizer = auth - src.Sender = autorest.CreateSender(withRequestLogging()) - client.secRuleClient = src - - snc := network.NewSubnetsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&snc.Client) - snc.Authorizer = auth - snc.Sender = autorest.CreateSender(withRequestLogging()) - client.subnetClient = snc - - vgcc := network.NewVirtualNetworkGatewayConnectionsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vgcc.Client) - vgcc.Authorizer = auth - vgcc.Sender = autorest.CreateSender(withRequestLogging()) - client.vnetGatewayConnectionsClient = vgcc - - vgc := network.NewVirtualNetworkGatewaysClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vgc.Client) - vgc.Authorizer = auth - vgc.Sender = autorest.CreateSender(withRequestLogging()) - client.vnetGatewayClient = vgc - - vnc := network.NewVirtualNetworksClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vnc.Client) - vnc.Authorizer = auth - vnc.Sender = autorest.CreateSender(withRequestLogging()) - client.vnetClient = vnc - - vnpc := network.NewVirtualNetworkPeeringsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&vnpc.Client) - vnpc.Authorizer = auth - vnpc.Sender = 
autorest.CreateSender(withRequestLogging()) - client.vnetPeeringsClient = vnpc - - rtc := network.NewRouteTablesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&rtc.Client) - rtc.Authorizer = auth - rtc.Sender = autorest.CreateSender(withRequestLogging()) - client.routeTablesClient = rtc - - rc := network.NewRoutesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&rc.Client) - rc.Authorizer = auth - rc.Sender = autorest.CreateSender(withRequestLogging()) - client.routesClient = rc - - rgc := resources.NewGroupsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&rgc.Client) - rgc.Authorizer = auth - rgc.Sender = autorest.CreateSender(withRequestLogging()) - client.resourceGroupClient = rgc - - pc := resources.NewProvidersClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&pc.Client) - pc.Authorizer = auth - pc.Sender = autorest.CreateSender(withRequestLogging()) - client.providers = pc - - tc := resources.NewTagsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&tc.Client) - tc.Authorizer = auth - tc.Sender = autorest.CreateSender(withRequestLogging()) - client.tagsClient = tc - - rf := resources.NewGroupClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&rf.Client) - rf.Authorizer = auth - rf.Sender = autorest.CreateSender(withRequestLogging()) - client.resourceFindClient = rf - - jc := scheduler.NewJobsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&jc.Client) - jc.Authorizer = auth - jc.Sender = autorest.CreateSender(withRequestLogging()) - client.jobsClient = jc - - jcc := scheduler.NewJobCollectionsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&jcc.Client) - jcc.Authorizer = auth - jcc.Sender = autorest.CreateSender(withRequestLogging()) - client.jobsCollectionsClient = jcc - - ssc := storage.NewAccountsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&ssc.Client) - ssc.Authorizer = auth - ssc.Sender = autorest.CreateSender(withRequestLogging()) - 
client.storageServiceClient = ssc - - suc := storage.NewUsageClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&suc.Client) - suc.Authorizer = auth - suc.Sender = autorest.CreateSender(withRequestLogging()) - client.storageUsageClient = suc - - cpc := cdn.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&cpc.Client) - cpc.Authorizer = auth - cpc.Sender = autorest.CreateSender(withRequestLogging()) - client.cdnProfilesClient = cpc - - cec := cdn.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&cec.Client) - cec.Authorizer = auth - cec.Sender = autorest.CreateSender(withRequestLogging()) - client.cdnEndpointsClient = cec - - dc := resources.NewDeploymentsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&dc.Client) - dc.Authorizer = auth - dc.Sender = autorest.CreateSender(withRequestLogging()) - client.deploymentsClient = dc - - tmpc := trafficmanager.NewProfilesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&tmpc.Client) - tmpc.Authorizer = auth - tmpc.Sender = autorest.CreateSender(withRequestLogging()) - client.trafficManagerProfilesClient = tmpc - - tmec := trafficmanager.NewEndpointsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&tmec.Client) - tmec.Authorizer = auth - tmec.Sender = autorest.CreateSender(withRequestLogging()) - client.trafficManagerEndpointsClient = tmec - - rdc := redis.NewGroupClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&rdc.Client) - rdc.Authorizer = auth - rdc.Sender = autorest.CreateSender(withRequestLogging()) - client.redisClient = rdc - - sbnc := servicebus.NewNamespacesClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&sbnc.Client) - sbnc.Authorizer = auth - sbnc.Sender = autorest.CreateSender(withRequestLogging()) - client.serviceBusNamespacesClient = sbnc - - sbtc := servicebus.NewTopicsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&sbtc.Client) - sbtc.Authorizer = auth - sbtc.Sender = 
autorest.CreateSender(withRequestLogging()) - client.serviceBusTopicsClient = sbtc - - sbsc := servicebus.NewSubscriptionsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&sbsc.Client) - sbsc.Authorizer = auth - sbsc.Sender = autorest.CreateSender(withRequestLogging()) - client.serviceBusSubscriptionsClient = sbsc - - kvc := keyvault.NewVaultsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&kvc.Client) - kvc.Authorizer = auth - kvc.Sender = autorest.CreateSender(withRequestLogging()) - client.keyVaultClient = kvc - - sqlepc := sql.NewElasticPoolsClientWithBaseURI(endpoint, c.SubscriptionID) - setUserAgent(&sqlepc.Client) - sqlepc.Authorizer = auth - sqlepc.Sender = autorest.CreateSender(withRequestLogging()) - client.sqlElasticPoolsClient = sqlepc - - return &client, nil -} - -func (armClient *ArmClient) getKeyForStorageAccount(resourceGroupName, storageAccountName string) (string, bool, error) { - accountKeys, err := armClient.storageServiceClient.ListKeys(resourceGroupName, storageAccountName) - if accountKeys.StatusCode == http.StatusNotFound { - return "", false, nil - } - if err != nil { - // We assume this is a transient error rather than a 404 (which is caught above), so assume the - // account still exists. 
- return "", true, fmt.Errorf("Error retrieving keys for storage account %q: %s", storageAccountName, err) - } - - if accountKeys.Keys == nil { - return "", false, fmt.Errorf("Nil key returned for storage account %q", storageAccountName) - } - - keys := *accountKeys.Keys - return *keys[0].Value, true, nil -} - -func (armClient *ArmClient) getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.BlobStorageClient, bool, error) { - key, accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return nil, accountExists, err - } - if accountExists == false { - return nil, false, nil - } - - storageClient, err := mainStorage.NewClient(storageAccountName, key, armClient.environment.StorageEndpointSuffix, - mainStorage.DefaultAPIVersion, true) - if err != nil { - return nil, true, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) - } - - blobClient := storageClient.GetBlobService() - return &blobClient, true, nil -} - -func (armClient *ArmClient) getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.FileServiceClient, bool, error) { - key, accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return nil, accountExists, err - } - if accountExists == false { - return nil, false, nil - } - - storageClient, err := mainStorage.NewClient(storageAccountName, key, armClient.environment.StorageEndpointSuffix, - mainStorage.DefaultAPIVersion, true) - if err != nil { - return nil, true, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) - } - - fileClient := storageClient.GetFileService() - return &fileClient, true, nil -} - -func (armClient *ArmClient) getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.TableServiceClient, bool, error) { - key, 
accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return nil, accountExists, err - } - if accountExists == false { - return nil, false, nil - } - - storageClient, err := mainStorage.NewClient(storageAccountName, key, armClient.environment.StorageEndpointSuffix, - mainStorage.DefaultAPIVersion, true) - if err != nil { - return nil, true, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) - } - - tableClient := storageClient.GetTableService() - return &tableClient, true, nil -} - -func (armClient *ArmClient) getQueueServiceClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.QueueServiceClient, bool, error) { - key, accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return nil, accountExists, err - } - if accountExists == false { - return nil, false, nil - } - - storageClient, err := mainStorage.NewClient(storageAccountName, key, armClient.environment.StorageEndpointSuffix, - mainStorage.DefaultAPIVersion, true) - if err != nil { - return nil, true, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) - } - - queueClient := storageClient.GetQueueService() - return &queueClient, true, nil -} diff --git a/builtin/providers/azurerm/data_source_arm_client_config.go b/builtin/providers/azurerm/data_source_arm_client_config.go deleted file mode 100644 index 42756b893..000000000 --- a/builtin/providers/azurerm/data_source_arm_client_config.go +++ /dev/null @@ -1,39 +0,0 @@ -package azurerm - -import ( - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceArmClientConfig() *schema.Resource { - return &schema.Resource{ - Read: dataSourceArmClientConfigRead, - - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Computed: true, - }, - "tenant_id": { - Type: schema.TypeString, - 
Computed: true, - }, - "subscription_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceArmClientConfigRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - - d.SetId(time.Now().UTC().String()) - d.Set("client_id", client.clientId) - d.Set("tenant_id", client.tenantId) - d.Set("subscription_id", client.subscriptionId) - - return nil -} diff --git a/builtin/providers/azurerm/data_source_arm_client_config_test.go b/builtin/providers/azurerm/data_source_arm_client_config_test.go deleted file mode 100644 index 8962b2d3b..000000000 --- a/builtin/providers/azurerm/data_source_arm_client_config_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package azurerm - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMClientConfig_basic(t *testing.T) { - clientId := os.Getenv("ARM_CLIENT_ID") - tenantId := os.Getenv("ARM_TENANT_ID") - subscriptionId := os.Getenv("ARM_SUBSCRIPTION_ID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckArmClientConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAzureRMClientConfigAttr("data.azurerm_client_config.current", "client_id", clientId), - testAzureRMClientConfigAttr("data.azurerm_client_config.current", "tenant_id", tenantId), - testAzureRMClientConfigAttr("data.azurerm_client_config.current", "subscription_id", subscriptionId), - ), - }, - }, - }) -} - -// Wraps resource.TestCheckResourceAttr to prevent leaking values to console -// in case of mismatch -func testAzureRMClientConfigAttr(name, key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - err := resource.TestCheckResourceAttr(name, key, value)(s) - if err != nil { - // return fmt.Errorf("%s: Attribute '%s', failed check (values hidden)", name, key) - return err - } 
- - return nil - } -} - -const testAccCheckArmClientConfig_basic = ` -data "azurerm_client_config" "current" { } -` diff --git a/builtin/providers/azurerm/data_source_arm_public_ip.go b/builtin/providers/azurerm/data_source_arm_public_ip.go deleted file mode 100644 index bc71f9c48..000000000 --- a/builtin/providers/azurerm/data_source_arm_public_ip.go +++ /dev/null @@ -1,86 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceArmPublicIP() *schema.Resource { - return &schema.Resource{ - Read: dataSourceArmPublicIPRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - }, - - "domain_name_label": { - Type: schema.TypeString, - Computed: true, - }, - - "idle_timeout_in_minutes": { - Type: schema.TypeInt, - Computed: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - - "ip_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func dataSourceArmPublicIPRead(d *schema.ResourceData, meta interface{}) error { - publicIPClient := meta.(*ArmClient).publicIPClient - - resGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - - resp, err := publicIPClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - } - return fmt.Errorf("Error making Read request on Azure public ip %s: %s", name, err) - } - - d.SetId(*resp.ID) - - if resp.PublicIPAddressPropertiesFormat.DNSSettings != nil { - - if resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn != nil && *resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn != "" { - d.Set("fqdn", resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn) - } - - if resp.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel != nil && 
*resp.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel != "" { - d.Set("domain_name_label", resp.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel) - } - } - - if resp.PublicIPAddressPropertiesFormat.IPAddress != nil && *resp.PublicIPAddressPropertiesFormat.IPAddress != "" { - d.Set("ip_address", resp.PublicIPAddressPropertiesFormat.IPAddress) - } - - if resp.PublicIPAddressPropertiesFormat.IdleTimeoutInMinutes != nil { - d.Set("idle_timeout_in_minutes", *resp.PublicIPAddressPropertiesFormat.IdleTimeoutInMinutes) - } - - flattenAndSetTags(d, resp.Tags) - return nil -} diff --git a/builtin/providers/azurerm/data_source_arm_public_ip_test.go b/builtin/providers/azurerm/data_source_arm_public_ip_test.go deleted file mode 100644 index f9956b602..000000000 --- a/builtin/providers/azurerm/data_source_arm_public_ip_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAzureRMPublicIP_basic(t *testing.T) { - ri := acctest.RandInt() - - name := fmt.Sprintf("acctestpublicip-%d", ri) - resourceGroupName := fmt.Sprintf("acctestRG-%d", ri) - - config := testAccDatSourceAzureRMPublicIPBasic(name, resourceGroupName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "name", name), - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "resource_group_name", resourceGroupName), - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "domain_name_label", "mylabel01"), - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "idle_timeout_in_minutes", "30"), - 
resource.TestCheckResourceAttrSet("data.azurerm_public_ip.test", "fqdn"), - resource.TestCheckResourceAttrSet("data.azurerm_public_ip.test", "ip_address"), - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "tags.%", "1"), - resource.TestCheckResourceAttr("data.azurerm_public_ip.test", "tags.environment", "test"), - ), - }, - }, - }) -} - -func testAccDatSourceAzureRMPublicIPBasic(name string, resourceGroupName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "%s" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "%s" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" - domain_name_label = "mylabel01" - idle_timeout_in_minutes = 30 - - tags { - environment = "test" - } -} - -data "azurerm_public_ip" "test" { - name = "${azurerm_public_ip.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} -`, resourceGroupName, name) -} diff --git a/builtin/providers/azurerm/data_source_arm_resource_group.go b/builtin/providers/azurerm/data_source_arm_resource_group.go deleted file mode 100644 index 21248a389..000000000 --- a/builtin/providers/azurerm/data_source_arm_resource_group.go +++ /dev/null @@ -1,44 +0,0 @@ -package azurerm - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceArmResourceGroup() *schema.Resource { - return &schema.Resource{ - Read: dataSourceArmResourceGroupRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "location": locationForDataSourceSchema(), - "tags": tagsForDataSourceSchema(), - }, - } -} - -func dataSourceArmResourceGroupRead(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("name").(string) - resourceId := &ResourceID{ - SubscriptionID: armClient.subscriptionId, - ResourceGroup: resourceGroupName, - } - 
resourceIdString, err := composeAzureResourceID(resourceId) - - if err != nil { - return err - } - - d.SetId(resourceIdString) - - if err := resourceArmResourceGroupRead(d, meta); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/azurerm/data_source_arm_resource_group_test.go b/builtin/providers/azurerm/data_source_arm_resource_group_test.go deleted file mode 100644 index c7c22149e..000000000 --- a/builtin/providers/azurerm/data_source_arm_resource_group_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataSourceAzureRMResourceGroup_basic(t *testing.T) { - ri := acctest.RandInt() - name := fmt.Sprintf("acctestRg_%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAzureRMResourceGroupBasic(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.azurerm_resource_group.test", "name", name), - resource.TestCheckResourceAttr("data.azurerm_resource_group.test", "location", "westus2"), - resource.TestCheckResourceAttr("data.azurerm_resource_group.test", "tags.%", "1"), - resource.TestCheckResourceAttr("data.azurerm_resource_group.test", "tags.env", "test"), - ), - }, - }, - }) -} - -func testAccDataSourceAzureRMResourceGroupBasic(name string) string { - return fmt.Sprintf(`resource "azurerm_resource_group" "test" { - name = "%s" - location = "West US 2" - tags { - env = "test" - } - } - - data "azurerm_resource_group" "test" { - name = "${azurerm_resource_group.test.name}" - } - `, name) -} diff --git a/builtin/providers/azurerm/express_route_circuit.go b/builtin/providers/azurerm/express_route_circuit.go deleted file mode 100644 index 297b55f56..000000000 --- a/builtin/providers/azurerm/express_route_circuit.go +++ /dev/null 
@@ -1,40 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" -) - -func extractResourceGroupAndErcName(resourceId string) (resourceGroup string, name string, err error) { - id, err := parseAzureResourceID(resourceId) - - if err != nil { - return "", "", err - } - resourceGroup = id.ResourceGroup - name = id.Path["expressRouteCircuits"] - - return -} - -func retrieveErcByResourceId(resourceId string, meta interface{}) (erc *network.ExpressRouteCircuit, resourceGroup string, e error) { - ercClient := meta.(*ArmClient).expressRouteCircuitClient - - resGroup, name, err := extractResourceGroupAndErcName(resourceId) - if err != nil { - return nil, "", errwrap.Wrapf("Error Parsing Azure Resource ID - {{err}}", err) - } - - resp, err := ercClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil, "", nil - } - return nil, "", errwrap.Wrapf(fmt.Sprintf("Error making Read request on Express Route Circuit %s: {{err}}", name), err) - } - - return &resp, resGroup, nil -} diff --git a/builtin/providers/azurerm/import_arm_availability_set_test.go b/builtin/providers/azurerm/import_arm_availability_set_test.go deleted file mode 100644 index edeb3e0f4..000000000 --- a/builtin/providers/azurerm/import_arm_availability_set_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMAvailabilitySet_importBasic(t *testing.T) { - resourceName := "azurerm_availability_set.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_cdn_endpoint_test.go b/builtin/providers/azurerm/import_arm_cdn_endpoint_test.go deleted file mode 100644 index ee5c8a4bd..000000000 --- a/builtin/providers/azurerm/import_arm_cdn_endpoint_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMCdnEndpoint_importWithTags(t *testing.T) { - resourceName := "azurerm_cdn_endpoint.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTags, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_cdn_profile_test.go b/builtin/providers/azurerm/import_arm_cdn_profile_test.go deleted file mode 100644 index 1db618a9e..000000000 --- a/builtin/providers/azurerm/import_arm_cdn_profile_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMCdnProfile_importWithTags(t *testing.T) { - resourceName := "azurerm_cdn_profile.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMCdnProfile_withTags, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnProfileDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_container_registry_test.go b/builtin/providers/azurerm/import_arm_container_registry_test.go deleted file mode 100644 index 00cb54695..000000000 --- a/builtin/providers/azurerm/import_arm_container_registry_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMContainerRegistry_importBasic(t *testing.T) { - resourceName := "azurerm_container_registry.test" - - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := testAccAzureRMContainerRegistry_basic(ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerRegistryDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"storage_account"}, - }, - }, - }) -} - -func TestAccAzureRMContainerRegistry_importComplete(t *testing.T) { - resourceName := "azurerm_container_registry.test" - - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := testAccAzureRMContainerRegistry_complete(ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerRegistryDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"storage_account"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_a_record_test.go b/builtin/providers/azurerm/import_arm_dns_a_record_test.go deleted file mode 100644 index 
373239e56..000000000 --- a/builtin/providers/azurerm/import_arm_dns_a_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsARecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_a_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsARecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_aaaa_record_test.go b/builtin/providers/azurerm/import_arm_dns_aaaa_record_test.go deleted file mode 100644 index dbf385110..000000000 --- a/builtin/providers/azurerm/import_arm_dns_aaaa_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsAAAARecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_aaaa_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsAAAARecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsAAAARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_cname_record_test.go b/builtin/providers/azurerm/import_arm_dns_cname_record_test.go deleted file 
mode 100644 index a4e376060..000000000 --- a/builtin/providers/azurerm/import_arm_dns_cname_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsCNameRecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_cname_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsCNameRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsCNameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_mx_record_test.go b/builtin/providers/azurerm/import_arm_dns_mx_record_test.go deleted file mode 100644 index ef87fffd1..000000000 --- a/builtin/providers/azurerm/import_arm_dns_mx_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsMxRecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_mx_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsMxRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsMxRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_ns_record_test.go 
b/builtin/providers/azurerm/import_arm_dns_ns_record_test.go deleted file mode 100644 index ea2a1a611..000000000 --- a/builtin/providers/azurerm/import_arm_dns_ns_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsNsRecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_ns_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsNsRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsNsRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_srv_record_test.go b/builtin/providers/azurerm/import_arm_dns_srv_record_test.go deleted file mode 100644 index 0d4de6bae..000000000 --- a/builtin/providers/azurerm/import_arm_dns_srv_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsSrvRecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_srv_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsSrvRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsSrvRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/azurerm/import_arm_dns_txt_record_test.go b/builtin/providers/azurerm/import_arm_dns_txt_record_test.go deleted file mode 100644 index 30abf25a6..000000000 --- a/builtin/providers/azurerm/import_arm_dns_txt_record_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsTxtRecord_importBasic(t *testing.T) { - resourceName := "azurerm_dns_txt_record.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsTxtRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsTxtRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_dns_zone_test.go b/builtin/providers/azurerm/import_arm_dns_zone_test.go deleted file mode 100644 index 0cee95522..000000000 --- a/builtin/providers/azurerm/import_arm_dns_zone_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMDnsZone_importBasic(t *testing.T) { - resourceName := "azurerm_dns_zone.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsZone_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/azurerm/import_arm_eventhub_authorization_rule_test.go b/builtin/providers/azurerm/import_arm_eventhub_authorization_rule_test.go deleted file mode 100644 index b79b296c8..000000000 --- a/builtin/providers/azurerm/import_arm_eventhub_authorization_rule_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMEventHubAuthorizationRule_importListen(t *testing.T) { - resourceName := "azurerm_eventhub_authorization_rule.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_listen, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_importSend(t *testing.T) { - resourceName := "azurerm_eventhub_authorization_rule.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_send, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_importReadWrite(t *testing.T) { - resourceName := "azurerm_eventhub_authorization_rule.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_readwrite, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_importManage(t *testing.T) { - resourceName := "azurerm_eventhub_authorization_rule.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_manage, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_eventhub_consumer_group_test.go b/builtin/providers/azurerm/import_arm_eventhub_consumer_group_test.go deleted file mode 100644 index eb40cb76f..000000000 --- a/builtin/providers/azurerm/import_arm_eventhub_consumer_group_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMEventHubConsumerGroup_importBasic(t *testing.T) { - resourceName := "azurerm_eventhub_consumer_group.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubConsumerGroup_basic, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubConsumerGroupDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMEventHubConsumerGroup_importComplete(t *testing.T) { - resourceName := 
"azurerm_eventhub_consumer_group.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubConsumerGroup_complete, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubConsumerGroupDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_eventhub_namespace_test.go b/builtin/providers/azurerm/import_arm_eventhub_namespace_test.go deleted file mode 100644 index fb1d16a2e..000000000 --- a/builtin/providers/azurerm/import_arm_eventhub_namespace_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMEventHubNamespace_importBasic(t *testing.T) { - resourceName := "azurerm_eventhub_namespace.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubNamespace_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_eventhub_test.go b/builtin/providers/azurerm/import_arm_eventhub_test.go deleted file mode 100644 index 108af408a..000000000 --- a/builtin/providers/azurerm/import_arm_eventhub_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMEventHub_importBasic(t 
*testing.T) { - resourceName := "azurerm_eventhub.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHub_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_express_route_circuit_test.go b/builtin/providers/azurerm/import_arm_express_route_circuit_test.go deleted file mode 100644 index 3e887c44f..000000000 --- a/builtin/providers/azurerm/import_arm_express_route_circuit_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMExpressRouteCircuit_importBasic(t *testing.T) { - resourceName := "azurerm_express_route_circuit.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMExpressRouteCircuit_basic(acctest.RandInt()), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_key_vault_test.go b/builtin/providers/azurerm/import_arm_key_vault_test.go deleted file mode 100644 index 087902e02..000000000 --- a/builtin/providers/azurerm/import_arm_key_vault_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMKeyVault_importBasic(t *testing.T) { - resourceName := "azurerm_key_vault.test" - - 
ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMKeyVault_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKeyVaultDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_backend_address_pool_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_backend_address_pool_test.go deleted file mode 100644 index aadab6fda..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_backend_address_pool_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancerBackEndAddressPool_importBasic(t *testing.T) { - resourceName := "azurerm_lb_backend_address_pool.test" - - ri := acctest.RandInt() - addressPoolName := fmt.Sprintf("%d-address-pool", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // location is deprecated and was never actually used - ImportStateVerifyIgnore: []string{"location"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_nat_pool_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_nat_pool_test.go deleted file mode 100644 index 47e33a3c6..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_nat_pool_test.go +++ /dev/null @@ -1,35 +0,0 @@ 
-package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancerNatPool_importBasic(t *testing.T) { - resourceName := "azurerm_lb_nat_pool.test" - - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // location is deprecated and was never actually used - ImportStateVerifyIgnore: []string{"location"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_nat_rule_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_nat_rule_test.go deleted file mode 100644 index e7e0d89d9..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_nat_rule_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancerNatRule_importBasic(t *testing.T) { - resourceName := "azurerm_lb_nat_rule.test" - - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // location is deprecated and was never actually used - ImportStateVerifyIgnore: 
[]string{"location"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_probe_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_probe_test.go deleted file mode 100644 index 86447dd01..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_probe_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancerProbe_importBasic(t *testing.T) { - resourceName := "azurerm_lb_probe.test" - - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // location is deprecated and was never actually used - ImportStateVerifyIgnore: []string{"location"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_rule_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_rule_test.go deleted file mode 100644 index 3f0af6fc8..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_rule_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancerRule_importBasic(t *testing.T) { - resourceName := "azurerm_lb_rule.test" - - ri := acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - // location is deprecated and was never actually used - ImportStateVerifyIgnore: []string{"location"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_loadbalancer_test.go b/builtin/providers/azurerm/import_arm_loadbalancer_test.go deleted file mode 100644 index 6f82c60e9..000000000 --- a/builtin/providers/azurerm/import_arm_loadbalancer_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLoadBalancer_importBasic(t *testing.T) { - resourceName := "azurerm_lb.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMLoadBalancer_basic(acctest.RandInt()), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_local_network_gateway_test.go b/builtin/providers/azurerm/import_arm_local_network_gateway_test.go deleted file mode 100644 index 6da4e96a5..000000000 --- a/builtin/providers/azurerm/import_arm_local_network_gateway_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMLocalNetworkGateway_importBasic(t *testing.T) { - resourceName := "azurerm_local_network_gateway.test" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLocalNetworkGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLocalNetworkGatewayConfig_basic(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_managed_disk_test.go b/builtin/providers/azurerm/import_arm_managed_disk_test.go deleted file mode 100644 index 51eaa6abd..000000000 --- a/builtin/providers/azurerm/import_arm_managed_disk_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMManagedDisk_importEmpty(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMManagedDisk_empty, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - { - ResourceName: "azurerm_managed_disk.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_network_security_group_test.go b/builtin/providers/azurerm/import_arm_network_security_group_test.go deleted file mode 100644 index 7a13fc134..000000000 --- a/builtin/providers/azurerm/import_arm_network_security_group_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMNetworkSecurityGroup_importBasic(t *testing.T) { - resourceName := "azurerm_network_security_group.test" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMNetworkSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMNetworkSecurityGroup_basic(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_network_security_rule_test.go b/builtin/providers/azurerm/import_arm_network_security_rule_test.go deleted file mode 100644 index a09bd1e65..000000000 --- a/builtin/providers/azurerm/import_arm_network_security_rule_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMNetworkSecurityRule_importBasic(t *testing.T) { - rInt := acctest.RandInt() - resourceName := "azurerm_network_security_rule.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAzureRMNetworkSecurityRule_basic(rInt), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"network_security_group_name"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_public_ip_test.go b/builtin/providers/azurerm/import_arm_public_ip_test.go deleted file mode 100644 index 6cd116687..000000000 --- a/builtin/providers/azurerm/import_arm_public_ip_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMPublicIpStatic_importBasic(t *testing.T) { - resourceName := "azurerm_public_ip.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVPublicIpStatic_basic, ri, ri) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_resource_group_test.go b/builtin/providers/azurerm/import_arm_resource_group_test.go deleted file mode 100644 index aadf4817e..000000000 --- a/builtin/providers/azurerm/import_arm_resource_group_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMResourceGroup_importBasic(t *testing.T) { - resourceName := "azurerm_resource_group.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMResourceGroup_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMResourceGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_route_table_test.go b/builtin/providers/azurerm/import_arm_route_table_test.go deleted file mode 100644 index 727cc878d..000000000 --- a/builtin/providers/azurerm/import_arm_route_table_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMRouteTable_importBasic(t *testing.T) { - resourceName := "azurerm_route_table.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRouteTable_basic, ri, ri) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteTableDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_route_test.go b/builtin/providers/azurerm/import_arm_route_test.go deleted file mode 100644 index a39220e6d..000000000 --- a/builtin/providers/azurerm/import_arm_route_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMRoute_importBasic(t *testing.T) { - resourceName := "azurerm_route.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRoute_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_servicebus_namespace_test.go b/builtin/providers/azurerm/import_arm_servicebus_namespace_test.go deleted file mode 100644 index 2fa623ed3..000000000 --- a/builtin/providers/azurerm/import_arm_servicebus_namespace_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMServiceBusNamespace_importBasic(t *testing.T) { - resourceName := "azurerm_servicebus_namespace.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_servicebus_subscription_test.go b/builtin/providers/azurerm/import_arm_servicebus_subscription_test.go deleted file mode 100644 index 7b2ae9516..000000000 --- a/builtin/providers/azurerm/import_arm_servicebus_subscription_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMServiceBusSubscription_importBasic(t *testing.T) { - resourceName := "azurerm_servicebus_subscription.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusSubscription_basic, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusSubscriptionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_servicebus_topic_test.go b/builtin/providers/azurerm/import_arm_servicebus_topic_test.go deleted file mode 100644 index b5a933c65..000000000 --- a/builtin/providers/azurerm/import_arm_servicebus_topic_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMServiceBusTopic_importBasic(t *testing.T) { - resourceName := 
"azurerm_servicebus_topic.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_sql_elasticpool_test.go b/builtin/providers/azurerm/import_arm_sql_elasticpool_test.go deleted file mode 100644 index 1657f5c15..000000000 --- a/builtin/providers/azurerm/import_arm_sql_elasticpool_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package azurerm - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccAzureRMSqlElasticPool_importBasic(t *testing.T) { - resourceName := "azurerm_sql_elasticpool.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_sql_firewall_rule_test.go b/builtin/providers/azurerm/import_arm_sql_firewall_rule_test.go deleted file mode 100644 index fde1203a6..000000000 --- a/builtin/providers/azurerm/import_arm_sql_firewall_rule_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccAzureRMSqlFirewallRule_importBasic(t *testing.T) { - resourceName := "azurerm_sql_firewall_rule.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlFirewallRule_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlFirewallRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_sql_server_test.go b/builtin/providers/azurerm/import_arm_sql_server_test.go deleted file mode 100644 index f14e47592..000000000 --- a/builtin/providers/azurerm/import_arm_sql_server_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMSqlServer_importBasic(t *testing.T) { - resourceName := "azurerm_sql_server.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlServer_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"administrator_login_password"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_storage_account_test.go b/builtin/providers/azurerm/import_arm_storage_account_test.go deleted file mode 100644 index 89a4e9599..000000000 --- a/builtin/providers/azurerm/import_arm_storage_account_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package azurerm - -import ( - "testing" - - "fmt" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMStorageAccount_importBasic(t *testing.T) { - resourceName := "azurerm_storage_account.testsa" - - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_subnet_test.go b/builtin/providers/azurerm/import_arm_subnet_test.go deleted file mode 100644 index 3b16097db..000000000 --- a/builtin/providers/azurerm/import_arm_subnet_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMSubnet_importBasic(t *testing.T) { - resourceName := "azurerm_subnet.test" - - ri := acctest.RandInt() - config := testAccAzureRMSubnet_basic(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMSubnet_importWithRouteTable(t *testing.T) { - resourceName := "azurerm_subnet.test" - - ri := acctest.RandInt() - config := testAccAzureRMSubnet_routeTable(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go b/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go deleted file mode 100644 index 533ed11f8..000000000 --- a/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMTrafficManagerEndpoint_importBasic(t *testing.T) { - resourceName := "azurerm_traffic_manager_endpoint.testExternal" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_basic, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go b/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go deleted file mode 100644 index 6e55347a9..000000000 --- a/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMTrafficManagerProfile_importBasic(t *testing.T) { - resourceName := "azurerm_traffic_manager_profile.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_performance, ri, ri, ri) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_virtual_machine_extension_test.go b/builtin/providers/azurerm/import_arm_virtual_machine_extension_test.go deleted file mode 100644 index 839c2c9f8..000000000 --- a/builtin/providers/azurerm/import_arm_virtual_machine_extension_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMVirtualMachineExtension_importBasic(t *testing.T) { - resourceName := "azurerm_virtual_machine_extension.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineExtension_basic, ri, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineExtensionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"protected_settings"}, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go b/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go deleted file mode 100644 index a8c6602ab..000000000 --- a/builtin/providers/azurerm/import_arm_virtual_machine_scale_set_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" 
-) - -func TestAccAzureRMVirtualMachineScaleSet_importBasic(t *testing.T) { - resourceName := "azurerm_virtual_machine_scale_set.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importBasic_managedDisk(t *testing.T) { - resourceName := "azurerm_virtual_machine_scale_set.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basicLinux_managedDisk, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importLinux(t *testing.T) { - resourceName := "azurerm_virtual_machine_scale_set.test" - - ri := acctest.RandInt() - config := testAccAzureRMVirtualMachineScaleSet_linux(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importLoadBalancer(t *testing.T) { - resourceName := "azurerm_virtual_machine_scale_set.test" - - ri := acctest.RandInt() - config := 
fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importOverProvision(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetOverprovisionTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetOverprovision("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importExtension(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetExtensionTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_importMultipleExtensions(t *testing.T) { - ri := acctest.RandInt() - config := 
fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetMultipleExtensionsTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_virtual_machine_test.go b/builtin/providers/azurerm/import_arm_virtual_machine_test.go deleted file mode 100644 index f6b72e208..000000000 --- a/builtin/providers/azurerm/import_arm_virtual_machine_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMVirtualMachine_importBasic(t *testing.T) { - resourceName := "azurerm_virtual_machine.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "delete_data_disks_on_termination", - "delete_os_disk_on_termination", - }, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachine_importBasic_managedDisk(t *testing.T) { - resourceName := "azurerm_virtual_machine.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "delete_data_disks_on_termination", - "delete_os_disk_on_termination", - }, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go b/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go deleted file mode 100644 index ab2561d2f..000000000 --- a/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMVirtualNetworkPeering_importBasic(t *testing.T) { - resourceName := "azurerm_virtual_network_peering.test1" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/import_arm_virtual_network_test.go b/builtin/providers/azurerm/import_arm_virtual_network_test.go deleted file mode 100644 index 1f06ca05d..000000000 --- a/builtin/providers/azurerm/import_arm_virtual_network_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAzureRMVirtualNetwork_importBasic(t 
*testing.T) { - resourceName := "azurerm_virtual_network.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetwork_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/azurerm/loadbalancer.go b/builtin/providers/azurerm/loadbalancer.go deleted file mode 100644 index ac3af979a..000000000 --- a/builtin/providers/azurerm/loadbalancer.go +++ /dev/null @@ -1,167 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGroupAndLBNameFromId(loadBalancerId string) (string, string, error) { - id, err := parseAzureResourceID(loadBalancerId) - if err != nil { - return "", "", err - } - name := id.Path["loadBalancers"] - resGroup := id.ResourceGroup - - return resGroup, name, nil -} - -func retrieveLoadBalancerById(loadBalancerId string, meta interface{}) (*network.LoadBalancer, bool, error) { - loadBalancerClient := meta.(*ArmClient).loadBalancerClient - - resGroup, name, err := resourceGroupAndLBNameFromId(loadBalancerId) - if err != nil { - return nil, false, errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - resp, err := loadBalancerClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil, false, nil - } - return nil, false, fmt.Errorf("Error making Read request on Azure LoadBalancer %s: %s", name, err) - } - - return &resp, true, nil -} - -func findLoadBalancerBackEndAddressPoolByName(lb 
*network.LoadBalancer, name string) (*network.BackendAddressPool, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.BackendAddressPools == nil { - return nil, -1, false - } - - for i, apc := range *lb.LoadBalancerPropertiesFormat.BackendAddressPools { - if apc.Name != nil && *apc.Name == name { - return &apc, i, true - } - } - - return nil, -1, false -} - -func findLoadBalancerFrontEndIpConfigurationByName(lb *network.LoadBalancer, name string) (*network.FrontendIPConfiguration, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations == nil { - return nil, -1, false - } - - for i, feip := range *lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations { - if feip.Name != nil && *feip.Name == name { - return &feip, i, true - } - } - - return nil, -1, false -} - -func findLoadBalancerRuleByName(lb *network.LoadBalancer, name string) (*network.LoadBalancingRule, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.LoadBalancingRules == nil { - return nil, -1, false - } - - for i, lbr := range *lb.LoadBalancerPropertiesFormat.LoadBalancingRules { - if lbr.Name != nil && *lbr.Name == name { - return &lbr, i, true - } - } - - return nil, -1, false -} - -func findLoadBalancerNatRuleByName(lb *network.LoadBalancer, name string) (*network.InboundNatRule, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.InboundNatRules == nil { - return nil, -1, false - } - - for i, nr := range *lb.LoadBalancerPropertiesFormat.InboundNatRules { - if nr.Name != nil && *nr.Name == name { - return &nr, i, true - } - } - - return nil, -1, false -} - -func findLoadBalancerNatPoolByName(lb *network.LoadBalancer, name string) (*network.InboundNatPool, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || 
lb.LoadBalancerPropertiesFormat.InboundNatPools == nil { - return nil, -1, false - } - - for i, np := range *lb.LoadBalancerPropertiesFormat.InboundNatPools { - if np.Name != nil && *np.Name == name { - return &np, i, true - } - } - - return nil, -1, false -} - -func findLoadBalancerProbeByName(lb *network.LoadBalancer, name string) (*network.Probe, int, bool) { - if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.LoadBalancerPropertiesFormat.Probes == nil { - return nil, -1, false - } - - for i, p := range *lb.LoadBalancerPropertiesFormat.Probes { - if p.Name != nil && *p.Name == name { - return &p, i, true - } - } - - return nil, -1, false -} - -func loadbalancerStateRefreshFunc(client *ArmClient, resourceGroupName string, loadbalancer string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.loadBalancerClient.Get(resourceGroupName, loadbalancer, "") - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in loadbalancerStateRefreshFunc to Azure ARM for LoadBalancer '%s' (RG: '%s'): %s", loadbalancer, resourceGroupName, err) - } - - return res, *res.LoadBalancerPropertiesFormat.ProvisioningState, nil - } -} - -func validateLoadBalancerPrivateIpAddressAllocation(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - if value != "static" && value != "dynamic" { - errors = append(errors, fmt.Errorf("LoadBalancer Allocations can only be Static or Dynamic")) - } - return -} - -// sets the loadbalancer_id in the ResourceData from the sub resources full id -func loadBalancerSubResourceStateImporter(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - r, err := regexp.Compile(`.+\/loadBalancers\/.+?\/`) - if err != nil { - return nil, err - } - - lbID := strings.TrimSuffix(r.FindString(d.Id()), "/") - parsed, err := parseAzureResourceID(lbID) - if err != nil { - return nil, fmt.Errorf("unable to parse loadbalancer id from %s", 
d.Id()) - } - - if parsed.Path["loadBalancers"] == "" { - return nil, fmt.Errorf("parsed ID is invalid") - } - - d.Set("loadbalancer_id", lbID) - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/azurerm/location.go b/builtin/providers/azurerm/location.go deleted file mode 100644 index e866ff20f..000000000 --- a/builtin/providers/azurerm/location.go +++ /dev/null @@ -1,48 +0,0 @@ -package azurerm - -import ( - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -func locationSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: azureRMNormalizeLocation, - DiffSuppressFunc: azureRMSuppressLocationDiff, - } -} - -func locationForDataSourceSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } -} - -func deprecatedLocationSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - StateFunc: azureRMNormalizeLocation, - DiffSuppressFunc: azureRMSuppressLocationDiff, - Deprecated: "location is no longer used", - } -} - -// azureRMNormalizeLocation is a function which normalises human-readable region/location -// names (e.g. "West US") to the values used and returned by the Azure API (e.g. "westus"). -// In state we track the API internal version as it is easier to go from the human form -// to the canonical form than the other way around. 
-func azureRMNormalizeLocation(location interface{}) string { - input := location.(string) - return strings.Replace(strings.ToLower(input), " ", "", -1) -} - -func azureRMSuppressLocationDiff(k, old, new string, d *schema.ResourceData) bool { - return azureRMNormalizeLocation(old) == azureRMNormalizeLocation(new) -} diff --git a/builtin/providers/azurerm/location_test.go b/builtin/providers/azurerm/location_test.go deleted file mode 100644 index aa0e4dea9..000000000 --- a/builtin/providers/azurerm/location_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package azurerm - -import "testing" - -func TestAzureRMNormalizeLocation(t *testing.T) { - s := azureRMNormalizeLocation("West US") - if s != "westus" { - t.Fatalf("expected location to equal westus, actual %s", s) - } -} diff --git a/builtin/providers/azurerm/locks.go b/builtin/providers/azurerm/locks.go deleted file mode 100644 index ee6774af6..000000000 --- a/builtin/providers/azurerm/locks.go +++ /dev/null @@ -1,12 +0,0 @@ -package azurerm - -func azureRMUnlockMultiple(names *[]string) { - for _, name := range *names { - armMutexKV.Unlock(name) - } -} -func azureRMLockMultiple(names *[]string) { - for _, name := range *names { - armMutexKV.Lock(name) - } -} diff --git a/builtin/providers/azurerm/network_security_rule.go b/builtin/providers/azurerm/network_security_rule.go deleted file mode 100644 index f7b41d559..000000000 --- a/builtin/providers/azurerm/network_security_rule.go +++ /dev/null @@ -1,46 +0,0 @@ -package azurerm - -import ( - "fmt" - "strings" -) - -func validateNetworkSecurityRuleProtocol(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - protocols := map[string]bool{ - "tcp": true, - "udp": true, - "*": true, - } - - if !protocols[value] { - errors = append(errors, fmt.Errorf("Network Security Rule Protocol can only be Tcp, Udp or *")) - } - return -} - -func validateNetworkSecurityRuleAccess(v interface{}, k string) (ws []string, errors []error) { - value 
:= strings.ToLower(v.(string)) - accessTypes := map[string]bool{ - "allow": true, - "deny": true, - } - - if !accessTypes[value] { - errors = append(errors, fmt.Errorf("Network Security Rule Access can only be Allow or Deny")) - } - return -} - -func validateNetworkSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - directions := map[string]bool{ - "inbound": true, - "outbound": true, - } - - if !directions[value] { - errors = append(errors, fmt.Errorf("Network Security Rule Directions can only be Inbound or Outbound")) - } - return -} diff --git a/builtin/providers/azurerm/network_security_rule_test.go b/builtin/providers/azurerm/network_security_rule_test.go deleted file mode 100644 index f1f71e8f2..000000000 --- a/builtin/providers/azurerm/network_security_rule_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package azurerm - -import "testing" - -func TestResourceAzureRMNetworkSecurityRuleProtocol_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "tcp", - ErrCount: 0, - }, - { - Value: "TCP", - ErrCount: 0, - }, - { - Value: "*", - ErrCount: 0, - }, - { - Value: "Udp", - ErrCount: 0, - }, - { - Value: "Tcp", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateNetworkSecurityRuleProtocol(tc.Value, "azurerm_network_security_rule") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Network Security Rule protocol to trigger a validation error") - } - } -} - -func TestResourceAzureRMNetworkSecurityRuleAccess_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Allow", - ErrCount: 0, - }, - { - Value: "Deny", - ErrCount: 0, - }, - { - Value: "ALLOW", - ErrCount: 0, - }, - { - Value: "deny", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := 
validateNetworkSecurityRuleAccess(tc.Value, "azurerm_network_security_rule") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Network Security Rule access to trigger a validation error") - } - } -} - -func TestResourceAzureRMNetworkSecurityRuleDirection_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Inbound", - ErrCount: 0, - }, - { - Value: "Outbound", - ErrCount: 0, - }, - { - Value: "INBOUND", - ErrCount: 0, - }, - { - Value: "Inbound", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateNetworkSecurityRuleDirection(tc.Value, "azurerm_network_security_rule") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Network Security Rule direction to trigger a validation error") - } - } -} diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go deleted file mode 100644 index 05c10505c..000000000 --- a/builtin/providers/azurerm/provider.go +++ /dev/null @@ -1,369 +0,0 @@ -package azurerm - -import ( - "crypto/sha1" - "encoding/base64" - "encoding/hex" - "fmt" - "log" - "reflect" - "strings" - "sync" - - "github.com/Azure/azure-sdk-for-go/arm/resources/resources" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - riviera "github.com/jen20/riviera/azure" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - var p *schema.Provider - p = &schema.Provider{ - Schema: map[string]*schema.Schema{ - "subscription_id": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), - }, - - "client_id": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), - }, - - "client_secret": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), - }, - - "tenant_id": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), - }, - - "environment": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"), - }, - - "skip_provider_registration": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false), - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "azurerm_client_config": dataSourceArmClientConfig(), - "azurerm_resource_group": dataSourceArmResourceGroup(), - "azurerm_public_ip": dataSourceArmPublicIP(), - }, - - ResourcesMap: map[string]*schema.Resource{ - // These resources use the Azure ARM SDK - "azurerm_availability_set": resourceArmAvailabilitySet(), - "azurerm_cdn_endpoint": resourceArmCdnEndpoint(), - "azurerm_cdn_profile": resourceArmCdnProfile(), - "azurerm_container_registry": resourceArmContainerRegistry(), - "azurerm_container_service": resourceArmContainerService(), - - "azurerm_eventhub": resourceArmEventHub(), - "azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(), - "azurerm_eventhub_consumer_group": resourceArmEventHubConsumerGroup(), - "azurerm_eventhub_namespace": resourceArmEventHubNamespace(), - - "azurerm_express_route_circuit": resourceArmExpressRouteCircuit(), - - "azurerm_lb": resourceArmLoadBalancer(), - "azurerm_lb_backend_address_pool": 
resourceArmLoadBalancerBackendAddressPool(), - "azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(), - "azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(), - "azurerm_lb_probe": resourceArmLoadBalancerProbe(), - "azurerm_lb_rule": resourceArmLoadBalancerRule(), - - "azurerm_managed_disk": resourceArmManagedDisk(), - - "azurerm_key_vault": resourceArmKeyVault(), - "azurerm_local_network_gateway": resourceArmLocalNetworkGateway(), - "azurerm_network_interface": resourceArmNetworkInterface(), - "azurerm_network_security_group": resourceArmNetworkSecurityGroup(), - "azurerm_network_security_rule": resourceArmNetworkSecurityRule(), - "azurerm_public_ip": resourceArmPublicIp(), - "azurerm_redis_cache": resourceArmRedisCache(), - "azurerm_route": resourceArmRoute(), - "azurerm_route_table": resourceArmRouteTable(), - "azurerm_servicebus_namespace": resourceArmServiceBusNamespace(), - "azurerm_servicebus_subscription": resourceArmServiceBusSubscription(), - "azurerm_servicebus_topic": resourceArmServiceBusTopic(), - "azurerm_sql_elasticpool": resourceArmSqlElasticPool(), - "azurerm_storage_account": resourceArmStorageAccount(), - "azurerm_storage_blob": resourceArmStorageBlob(), - "azurerm_storage_container": resourceArmStorageContainer(), - "azurerm_storage_share": resourceArmStorageShare(), - "azurerm_storage_queue": resourceArmStorageQueue(), - "azurerm_storage_table": resourceArmStorageTable(), - "azurerm_subnet": resourceArmSubnet(), - "azurerm_template_deployment": resourceArmTemplateDeployment(), - "azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(), - "azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(), - "azurerm_virtual_machine_extension": resourceArmVirtualMachineExtensions(), - "azurerm_virtual_machine": resourceArmVirtualMachine(), - "azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(), - "azurerm_virtual_network": resourceArmVirtualNetwork(), - "azurerm_virtual_network_peering": 
resourceArmVirtualNetworkPeering(), - - // These resources use the Riviera SDK - "azurerm_dns_a_record": resourceArmDnsARecord(), - "azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(), - "azurerm_dns_cname_record": resourceArmDnsCNameRecord(), - "azurerm_dns_mx_record": resourceArmDnsMxRecord(), - "azurerm_dns_ns_record": resourceArmDnsNsRecord(), - "azurerm_dns_srv_record": resourceArmDnsSrvRecord(), - "azurerm_dns_txt_record": resourceArmDnsTxtRecord(), - "azurerm_dns_zone": resourceArmDnsZone(), - "azurerm_resource_group": resourceArmResourceGroup(), - "azurerm_search_service": resourceArmSearchService(), - "azurerm_sql_database": resourceArmSqlDatabase(), - "azurerm_sql_firewall_rule": resourceArmSqlFirewallRule(), - "azurerm_sql_server": resourceArmSqlServer(), - }, - } - - p.ConfigureFunc = providerConfigure(p) - - return p -} - -// Config is the configuration structure used to instantiate a -// new Azure management client. -type Config struct { - ManagementURL string - - SubscriptionID string - ClientID string - ClientSecret string - TenantID string - Environment string - SkipProviderRegistration bool - - validateCredentialsOnce sync.Once -} - -func (c *Config) validate() error { - var err *multierror.Error - - if c.SubscriptionID == "" { - err = multierror.Append(err, fmt.Errorf("Subscription ID must be configured for the AzureRM provider")) - } - if c.ClientID == "" { - err = multierror.Append(err, fmt.Errorf("Client ID must be configured for the AzureRM provider")) - } - if c.ClientSecret == "" { - err = multierror.Append(err, fmt.Errorf("Client Secret must be configured for the AzureRM provider")) - } - if c.TenantID == "" { - err = multierror.Append(err, fmt.Errorf("Tenant ID must be configured for the AzureRM provider")) - } - if c.Environment == "" { - err = multierror.Append(err, fmt.Errorf("Environment must be configured for the AzureRM provider")) - } - - return err.ErrorOrNil() -} - -func providerConfigure(p *schema.Provider) 
schema.ConfigureFunc { - return func(d *schema.ResourceData) (interface{}, error) { - config := &Config{ - SubscriptionID: d.Get("subscription_id").(string), - ClientID: d.Get("client_id").(string), - ClientSecret: d.Get("client_secret").(string), - TenantID: d.Get("tenant_id").(string), - Environment: d.Get("environment").(string), - SkipProviderRegistration: d.Get("skip_provider_registration").(bool), - } - - if err := config.validate(); err != nil { - return nil, err - } - - client, err := config.getArmClient() - if err != nil { - return nil, err - } - - client.StopContext = p.StopContext() - - // replaces the context between tests - p.MetaReset = func() error { - client.StopContext = p.StopContext() - return nil - } - - // List all the available providers and their registration state to avoid unnecessary - // requests. This also lets us check if the provider credentials are correct. - providerList, err := client.providers.List(nil, "") - if err != nil { - return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+ - "credentials or the service principal does not have permission to use the Resource Manager API, Azure "+ - "error: %s", err) - } - - if !config.SkipProviderRegistration { - err = registerAzureResourceProvidersWithSubscription(*providerList.Value, client.providers) - if err != nil { - return nil, err - } - } - - return client, nil - } -} - -func registerProviderWithSubscription(providerName string, client resources.ProvidersClient) error { - _, err := client.Register(providerName) - if err != nil { - return fmt.Errorf("Cannot register provider %s with Azure Resource Manager: %s.", providerName, err) - } - - return nil -} - -var providerRegistrationOnce sync.Once - -// registerAzureResourceProvidersWithSubscription uses the providers client to register -// all Azure resource providers which the Terraform provider may require (regardless of -// whether they are actually used by the configuration or 
not). It was confirmed by Microsoft -// that this is the approach their own internal tools also take. -func registerAzureResourceProvidersWithSubscription(providerList []resources.Provider, client resources.ProvidersClient) error { - var err error - providerRegistrationOnce.Do(func() { - providers := map[string]struct{}{ - "Microsoft.Compute": struct{}{}, - "Microsoft.Cache": struct{}{}, - "Microsoft.ContainerRegistry": struct{}{}, - "Microsoft.ContainerService": struct{}{}, - "Microsoft.Network": struct{}{}, - "Microsoft.Cdn": struct{}{}, - "Microsoft.Storage": struct{}{}, - "Microsoft.Sql": struct{}{}, - "Microsoft.Search": struct{}{}, - "Microsoft.Resources": struct{}{}, - "Microsoft.ServiceBus": struct{}{}, - "Microsoft.KeyVault": struct{}{}, - "Microsoft.EventHub": struct{}{}, - } - - // filter out any providers already registered - for _, p := range providerList { - if _, ok := providers[*p.Namespace]; !ok { - continue - } - - if strings.ToLower(*p.RegistrationState) == "registered" { - log.Printf("[DEBUG] Skipping provider registration for namespace %s\n", *p.Namespace) - delete(providers, *p.Namespace) - } - } - - var wg sync.WaitGroup - wg.Add(len(providers)) - for providerName := range providers { - go func(p string) { - defer wg.Done() - log.Printf("[DEBUG] Registering provider with namespace %s\n", p) - if innerErr := registerProviderWithSubscription(p, client); err != nil { - err = innerErr - } - }(providerName) - } - wg.Wait() - }) - - return err -} - -// armMutexKV is the instance of MutexKV for ARM resources -var armMutexKV = mutexkv.NewMutexKV() - -func azureStateRefreshFunc(resourceURI string, client *ArmClient, command riviera.APICall) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - req := client.rivieraClient.NewRequestForURI(resourceURI) - req.Command = command - - res, err := req.Execute() - if err != nil { - return nil, "", fmt.Errorf("Error executing %T command in azureStateRefreshFunc", req.Command) - } - - var 
value reflect.Value - if reflect.ValueOf(res.Parsed).Kind() == reflect.Ptr { - value = reflect.ValueOf(res.Parsed).Elem() - } else { - value = reflect.ValueOf(res.Parsed) - } - - for i := 0; i < value.NumField(); i++ { // iterates through every struct type field - tag := value.Type().Field(i).Tag // returns the tag string - tagValue := tag.Get("mapstructure") - if tagValue == "provisioningState" { - return res.Parsed, value.Field(i).Elem().String(), nil - } - } - - panic(fmt.Errorf("azureStateRefreshFunc called on structure %T with no mapstructure:provisioningState tag. This is a bug", res.Parsed)) - } -} - -// Resource group names can be capitalised, but we store them in lowercase. -// Use a custom diff function to avoid creation of new resources. -func resourceAzurermResourceGroupNameDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return strings.ToLower(old) == strings.ToLower(new) -} - -// ignoreCaseDiffSuppressFunc is a DiffSuppressFunc from helper/schema that is -// used to ignore any case-changes in a return value. -func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool { - return strings.ToLower(old) == strings.ToLower(new) -} - -// ignoreCaseStateFunc is a StateFunc from helper/schema that converts the -// supplied value to lower before saving to state for consistency. -func ignoreCaseStateFunc(val interface{}) string { - return strings.ToLower(val.(string)) -} - -func userDataStateFunc(v interface{}) string { - switch s := v.(type) { - case string: - s = base64Encode(s) - hash := sha1.Sum([]byte(s)) - return hex.EncodeToString(hash[:]) - default: - return "" - } -} - -// base64Encode encodes data if the input isn't already encoded using -// base64.StdEncoding.EncodeToString. If the input is already base64 encoded, -// return the original input unchanged. 
-func base64Encode(data string) string { - // Check whether the data is already Base64 encoded; don't double-encode - if isBase64Encoded(data) { - return data - } - // data has not been encoded encode and return - return base64.StdEncoding.EncodeToString([]byte(data)) -} - -func isBase64Encoded(data string) bool { - _, err := base64.StdEncoding.DecodeString(data) - return err == nil -} diff --git a/builtin/providers/azurerm/provider_test.go b/builtin/providers/azurerm/provider_test.go deleted file mode 100644 index a26249f58..000000000 --- a/builtin/providers/azurerm/provider_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package azurerm - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "azurerm": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - clientID := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - tenantID := os.Getenv("ARM_TENANT_ID") - - if subscriptionID == "" || clientID == "" || clientSecret == "" || tenantID == "" { - t.Fatal("ARM_SUBSCRIPTION_ID, ARM_CLIENT_ID, ARM_CLIENT_SECRET and ARM_TENANT_ID must be set for acceptance tests") - } -} diff --git a/builtin/providers/azurerm/resource_arm_availability_set.go b/builtin/providers/azurerm/resource_arm_availability_set.go deleted file mode 100644 index a51ff0446..000000000 --- a/builtin/providers/azurerm/resource_arm_availability_set.go +++ /dev/null @@ -1,170 +0,0 @@ -package 
azurerm - -import ( - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmAvailabilitySet() *schema.Resource { - return &schema.Resource{ - Create: resourceArmAvailabilitySetCreate, - Read: resourceArmAvailabilitySetRead, - Update: resourceArmAvailabilitySetCreate, - Delete: resourceArmAvailabilitySetDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "platform_update_domain_count": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value > 20 { - errors = append(errors, fmt.Errorf( - "Maximum value for `platform_update_domain_count` is 20")) - } - return - }, - }, - - "platform_fault_domain_count": { - Type: schema.TypeInt, - Optional: true, - Default: 3, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value > 3 { - errors = append(errors, fmt.Errorf( - "Maximum value for (%s) is 3", k)) - } - return - }, - }, - - "managed": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmAvailabilitySetCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - availSetClient := client.availSetClient - - log.Printf("[INFO] preparing arguments for Azure ARM Availability Set creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - updateDomainCount := 
d.Get("platform_update_domain_count").(int) - faultDomainCount := d.Get("platform_fault_domain_count").(int) - tags := d.Get("tags").(map[string]interface{}) - managed := d.Get("managed").(bool) - - availSet := compute.AvailabilitySet{ - Name: &name, - Location: &location, - AvailabilitySetProperties: &compute.AvailabilitySetProperties{ - PlatformFaultDomainCount: azure.Int32(int32(faultDomainCount)), - PlatformUpdateDomainCount: azure.Int32(int32(updateDomainCount)), - }, - Tags: expandTags(tags), - } - - if managed == true { - n := "Aligned" - availSet.Sku = &compute.Sku{ - Name: &n, - } - } - - resp, err := availSetClient.CreateOrUpdate(resGroup, name, availSet) - if err != nil { - return err - } - - d.SetId(*resp.ID) - - return resourceArmAvailabilitySetRead(d, meta) -} - -func resourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error { - availSetClient := meta.(*ArmClient).availSetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["availabilitySets"] - - resp, err := availSetClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Availability Set %s: %s", name, err) - } - - availSet := *resp.AvailabilitySetProperties - d.Set("resource_group_name", resGroup) - d.Set("platform_update_domain_count", availSet.PlatformUpdateDomainCount) - d.Set("platform_fault_domain_count", availSet.PlatformFaultDomainCount) - d.Set("name", resp.Name) - d.Set("location", resp.Location) - - if resp.Sku != nil && resp.Sku.Name != nil { - d.Set("managed", strings.EqualFold(*resp.Sku.Name, "Aligned")) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmAvailabilitySetDelete(d *schema.ResourceData, meta interface{}) error { - availSetClient := meta.(*ArmClient).availSetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - 
return err - } - resGroup := id.ResourceGroup - name := id.Path["availabilitySets"] - - _, err = availSetClient.Delete(resGroup, name) - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_availability_set_test.go b/builtin/providers/azurerm/resource_arm_availability_set_test.go deleted file mode 100644 index 5e1b77912..000000000 --- a/builtin/providers/azurerm/resource_arm_availability_set_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMAvailabilitySet_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "platform_update_domain_count", "5"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "platform_fault_domain_count", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMAvailabilitySet_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", 
"platform_update_domain_count", "5"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "platform_fault_domain_count", "3"), - testCheckAzureRMAvailabilitySetDisappears("azurerm_availability_set.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMAvailabilitySet_withTags(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVAvailabilitySet_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVAvailabilitySet_withUpdatedTags, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "tags.cost_center", "MSFT"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMAvailabilitySet_withDomainCounts(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_withDomainCounts, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: 
resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "platform_update_domain_count", "10"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "platform_fault_domain_count", "1"), - ), - }, - }, - }) -} - -func TestAccAzureRMAvailabilitySet_managed(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_managed, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"), - resource.TestCheckResourceAttr( - "azurerm_availability_set.test", "managed", "true"), - ), - }, - }, - }) -} - -func testCheckAzureRMAvailabilitySetExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - availSetName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for availability set: %s", availSetName) - } - - conn := testAccProvider.Meta().(*ArmClient).availSetClient - - resp, err := conn.Get(resourceGroup, availSetName) - if err != nil { - return fmt.Errorf("Bad: Get on availSetClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Availability Set %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMAvailabilitySetDisappears(name string) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - availSetName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for availability set: %s", availSetName) - } - - conn := testAccProvider.Meta().(*ArmClient).availSetClient - - _, err := conn.Delete(resourceGroup, availSetName) - if err != nil { - return fmt.Errorf("Bad: Delete on availSetClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMAvailabilitySetDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).availSetClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_availability_set" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Availability Set still exists:\n%#v", resp.AvailabilitySetProperties) - } - } - - return nil -} - -var testAccAzureRMVAvailabilitySet_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_availability_set" "test" { - name = "acctestavset-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} -` - -var testAccAzureRMVAvailabilitySet_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_availability_set" "test" { - name = "acctestavset-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var 
testAccAzureRMVAvailabilitySet_withUpdatedTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_availability_set" "test" { - name = "acctestavset-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags { - environment = "staging" - } -} -` - -var testAccAzureRMVAvailabilitySet_withDomainCounts = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_availability_set" "test" { - name = "acctestavset-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - platform_update_domain_count = 10 - platform_fault_domain_count = 1 -} -` - -var testAccAzureRMVAvailabilitySet_managed = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_availability_set" "test" { - name = "acctestavset-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - platform_update_domain_count = 10 - platform_fault_domain_count = 1 - managed = true -} -` diff --git a/builtin/providers/azurerm/resource_arm_cdn_endpoint.go b/builtin/providers/azurerm/resource_arm_cdn_endpoint.go deleted file mode 100644 index a06a24e60..000000000 --- a/builtin/providers/azurerm/resource_arm_cdn_endpoint.go +++ /dev/null @@ -1,430 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/cdn" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmCdnEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceArmCdnEndpointCreate, - Read: resourceArmCdnEndpointRead, - Update: resourceArmCdnEndpointUpdate, - Delete: resourceArmCdnEndpointDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": 
{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "profile_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "origin_host_header": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "is_http_allowed": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "is_https_allowed": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - - "origin": { - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "host_name": { - Type: schema.TypeString, - Required: true, - }, - - "http_port": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "https_port": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - }, - Set: resourceArmCdnEndpointOriginHash, - }, - - "origin_path": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "querystring_caching_behaviour": { - Type: schema.TypeString, - Optional: true, - Default: "IgnoreQueryString", - ValidateFunc: validateCdnEndpointQuerystringCachingBehaviour, - }, - - "content_types_to_compress": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - - "is_compression_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "host_name": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmCdnEndpointCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - cdnEndpointsClient := client.cdnEndpointsClient - - log.Printf("[INFO] preparing arguments for Azure ARM CDN EndPoint creation.") - - name := 
d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - profileName := d.Get("profile_name").(string) - http_allowed := d.Get("is_http_allowed").(bool) - https_allowed := d.Get("is_https_allowed").(bool) - compression_enabled := d.Get("is_compression_enabled").(bool) - caching_behaviour := d.Get("querystring_caching_behaviour").(string) - tags := d.Get("tags").(map[string]interface{}) - - properties := cdn.EndpointProperties{ - IsHTTPAllowed: &http_allowed, - IsHTTPSAllowed: &https_allowed, - IsCompressionEnabled: &compression_enabled, - QueryStringCachingBehavior: cdn.QueryStringCachingBehavior(caching_behaviour), - } - - origins, originsErr := expandAzureRmCdnEndpointOrigins(d) - if originsErr != nil { - return fmt.Errorf("Error Building list of CDN Endpoint Origins: %s", originsErr) - } - if len(origins) > 0 { - properties.Origins = &origins - } - - if v, ok := d.GetOk("origin_host_header"); ok { - host_header := v.(string) - properties.OriginHostHeader = &host_header - } - - if v, ok := d.GetOk("origin_path"); ok { - origin_path := v.(string) - properties.OriginPath = &origin_path - } - - if v, ok := d.GetOk("content_types_to_compress"); ok { - var content_types []string - ctypes := v.(*schema.Set).List() - for _, ct := range ctypes { - str := ct.(string) - content_types = append(content_types, str) - } - - properties.ContentTypesToCompress = &content_types - } - - cdnEndpoint := cdn.Endpoint{ - Location: &location, - EndpointProperties: &properties, - Tags: expandTags(tags), - } - - _, error := cdnEndpointsClient.Create(resGroup, profileName, name, cdnEndpoint, make(<-chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := cdnEndpointsClient.Get(resGroup, profileName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read CND Endpoint %s/%s (resource group %s) ID", profileName, name, resGroup) - } - - d.SetId(*read.ID) - - return 
resourceArmCdnEndpointRead(d, meta) -} - -func resourceArmCdnEndpointRead(d *schema.ResourceData, meta interface{}) error { - cdnEndpointsClient := meta.(*ArmClient).cdnEndpointsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["endpoints"] - profileName := id.Path["profiles"] - if profileName == "" { - profileName = id.Path["Profiles"] - } - log.Printf("[INFO] Trying to find the AzureRM CDN Endpoint %s (Profile: %s, RG: %s)", name, profileName, resGroup) - resp, err := cdnEndpointsClient.Get(resGroup, profileName, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure CDN Endpoint %s: %s", name, err) - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("profile_name", profileName) - d.Set("host_name", resp.EndpointProperties.HostName) - d.Set("is_compression_enabled", resp.EndpointProperties.IsCompressionEnabled) - d.Set("is_http_allowed", resp.EndpointProperties.IsHTTPAllowed) - d.Set("is_https_allowed", resp.EndpointProperties.IsHTTPSAllowed) - d.Set("querystring_caching_behaviour", resp.EndpointProperties.QueryStringCachingBehavior) - if resp.EndpointProperties.OriginHostHeader != nil && *resp.EndpointProperties.OriginHostHeader != "" { - d.Set("origin_host_header", resp.EndpointProperties.OriginHostHeader) - } - if resp.EndpointProperties.OriginPath != nil && *resp.EndpointProperties.OriginPath != "" { - d.Set("origin_path", resp.EndpointProperties.OriginPath) - } - if resp.EndpointProperties.ContentTypesToCompress != nil { - d.Set("content_types_to_compress", flattenAzureRMCdnEndpointContentTypes(resp.EndpointProperties.ContentTypesToCompress)) - } - d.Set("origin", flattenAzureRMCdnEndpointOrigin(resp.EndpointProperties.Origins)) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - 
-func resourceArmCdnEndpointUpdate(d *schema.ResourceData, meta interface{}) error { - cdnEndpointsClient := meta.(*ArmClient).cdnEndpointsClient - - if !d.HasChange("tags") { - return nil - } - - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - profileName := d.Get("profile_name").(string) - http_allowed := d.Get("is_http_allowed").(bool) - https_allowed := d.Get("is_https_allowed").(bool) - compression_enabled := d.Get("is_compression_enabled").(bool) - caching_behaviour := d.Get("querystring_caching_behaviour").(string) - newTags := d.Get("tags").(map[string]interface{}) - - properties := cdn.EndpointPropertiesUpdateParameters{ - IsHTTPAllowed: &http_allowed, - IsHTTPSAllowed: &https_allowed, - IsCompressionEnabled: &compression_enabled, - QueryStringCachingBehavior: cdn.QueryStringCachingBehavior(caching_behaviour), - } - - if d.HasChange("origin_host_header") { - host_header := d.Get("origin_host_header").(string) - properties.OriginHostHeader = &host_header - } - - if d.HasChange("origin_path") { - origin_path := d.Get("origin_path").(string) - properties.OriginPath = &origin_path - } - - if d.HasChange("content_types_to_compress") { - var content_types []string - ctypes := d.Get("content_types_to_compress").(*schema.Set).List() - for _, ct := range ctypes { - str := ct.(string) - content_types = append(content_types, str) - } - - properties.ContentTypesToCompress = &content_types - } - - updateProps := cdn.EndpointUpdateParameters{ - Tags: expandTags(newTags), - EndpointPropertiesUpdateParameters: &properties, - } - - _, error := cdnEndpointsClient.Update(resGroup, profileName, name, updateProps, make(<-chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Error issuing Azure ARM update request to update CDN Endpoint %q: %s", name, err) - } - - return resourceArmCdnEndpointRead(d, meta) -} - -func resourceArmCdnEndpointDelete(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*ArmClient).cdnEndpointsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - profileName := id.Path["profiles"] - if profileName == "" { - profileName = id.Path["Profiles"] - } - name := id.Path["endpoints"] - - accResp, error := client.Delete(resGroup, profileName, name, make(<-chan struct{})) - resp := <-accResp - err = <-error - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil - } - return fmt.Errorf("Error issuing AzureRM delete request for CDN Endpoint %q: %s", name, err) - } - - return nil -} - -func validateCdnEndpointQuerystringCachingBehaviour(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - cachingTypes := map[string]bool{ - "ignorequerystring": true, - "bypasscaching": true, - "usequerystring": true, - } - - if !cachingTypes[value] { - errors = append(errors, fmt.Errorf("CDN Endpoint querystringCachingBehaviours can only be IgnoreQueryString, BypassCaching or UseQueryString")) - } - return -} - -func resourceArmCdnEndpointOriginHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["host_name"].(string))) - - return hashcode.String(buf.String()) -} - -func expandAzureRmCdnEndpointOrigins(d *schema.ResourceData) ([]cdn.DeepCreatedOrigin, error) { - configs := d.Get("origin").(*schema.Set).List() - origins := make([]cdn.DeepCreatedOrigin, 0, len(configs)) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - host_name := data["host_name"].(string) - - properties := cdn.DeepCreatedOriginProperties{ - HostName: &host_name, - } - - if v, ok := data["https_port"]; ok { - https_port := int32(v.(int)) - properties.HTTPSPort = &https_port - - } - - if v, ok := data["http_port"]; ok { - http_port := int32(v.(int)) - properties.HTTPPort = &http_port - } - - name 
:= data["name"].(string) - - origin := cdn.DeepCreatedOrigin{ - Name: &name, - DeepCreatedOriginProperties: &properties, - } - - origins = append(origins, origin) - } - - return origins, nil -} - -func flattenAzureRMCdnEndpointOrigin(list *[]cdn.DeepCreatedOrigin) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(*list)) - for _, i := range *list { - l := map[string]interface{}{ - "name": *i.Name, - "host_name": *i.DeepCreatedOriginProperties.HostName, - } - - if i.DeepCreatedOriginProperties.HTTPPort != nil { - l["http_port"] = *i.DeepCreatedOriginProperties.HTTPPort - } - if i.DeepCreatedOriginProperties.HTTPSPort != nil { - l["https_port"] = *i.DeepCreatedOriginProperties.HTTPSPort - } - result = append(result, l) - } - return result -} - -func flattenAzureRMCdnEndpointContentTypes(list *[]string) []interface{} { - vs := make([]interface{}, 0, len(*list)) - for _, v := range *list { - vs = append(vs, v) - } - return vs -} diff --git a/builtin/providers/azurerm/resource_arm_cdn_endpoint_test.go b/builtin/providers/azurerm/resource_arm_cdn_endpoint_test.go deleted file mode 100644 index 1c4928079..000000000 --- a/builtin/providers/azurerm/resource_arm_cdn_endpoint_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMCdnEndpoint_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMCdnEndpoint_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnEndpointDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"), - ), - }, - }, - }) -} - -func 
TestAccAzureRMCdnEndpoint_disappears(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMCdnEndpoint_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnEndpointDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"), - testCheckAzureRMCdnEndpointDisappears("azurerm_cdn_endpoint.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMCdnEndpoint_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnEndpointDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"), - resource.TestCheckResourceAttr( - "azurerm_cdn_endpoint.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_cdn_endpoint.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_cdn_endpoint.test", "tags.cost_center", "MSFT"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"), - resource.TestCheckResourceAttr( - "azurerm_cdn_endpoint.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_cdn_endpoint.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func testCheckAzureRMCdnEndpointExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := 
s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - profileName := rs.Primary.Attributes["profile_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for cdn endpoint: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).cdnEndpointsClient - - resp, err := conn.Get(resourceGroup, profileName, name) - if err != nil { - return fmt.Errorf("Bad: Get on cdnEndpointsClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: CDN Endpoint %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMCdnEndpointDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - profileName := rs.Primary.Attributes["profile_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for cdn endpoint: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).cdnEndpointsClient - - _, error := conn.Delete(resourceGroup, profileName, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on cdnEndpointsClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMCdnEndpointDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).cdnEndpointsClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_cdn_endpoint" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - profileName 
:= rs.Primary.Attributes["profile_name"] - - resp, err := conn.Get(resourceGroup, profileName, name) - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("CDN Endpoint still exists:\n%#v", resp.EndpointProperties) - } - } - - return nil -} - -var testAccAzureRMCdnEndpoint_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" -} - -resource "azurerm_cdn_endpoint" "test" { - name = "acctestcdnend%d" - profile_name = "${azurerm_cdn_profile.test.name}" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - origin { - name = "acceptanceTestCdnOrigin1" - host_name = "www.example.com" - https_port = 443 - http_port = 80 - } -} -` - -var testAccAzureRMCdnEndpoint_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" -} - -resource "azurerm_cdn_endpoint" "test" { - name = "acctestcdnend%d" - profile_name = "${azurerm_cdn_profile.test.name}" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - origin { - name = "acceptanceTestCdnOrigin2" - host_name = "www.example.com" - https_port = 443 - http_port = 80 - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMCdnEndpoint_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" 
-} - -resource "azurerm_cdn_endpoint" "test" { - name = "acctestcdnend%d" - profile_name = "${azurerm_cdn_profile.test.name}" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - origin { - name = "acceptanceTestCdnOrigin2" - host_name = "www.example.com" - https_port = 443 - http_port = 80 - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_cdn_profile.go b/builtin/providers/azurerm/resource_arm_cdn_profile.go deleted file mode 100644 index 801b1e6dd..000000000 --- a/builtin/providers/azurerm/resource_arm_cdn_profile.go +++ /dev/null @@ -1,176 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/cdn" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmCdnProfile() *schema.Resource { - return &schema.Resource{ - Create: resourceArmCdnProfileCreate, - Read: resourceArmCdnProfileRead, - Update: resourceArmCdnProfileUpdate, - Delete: resourceArmCdnProfileDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sku": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateCdnProfileSku, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmCdnProfileCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - cdnProfilesClient := client.cdnProfilesClient - - log.Printf("[INFO] preparing arguments for Azure ARM CDN Profile creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - sku := d.Get("sku").(string) - tags 
:= d.Get("tags").(map[string]interface{}) - - cdnProfile := cdn.Profile{ - Location: &location, - Tags: expandTags(tags), - Sku: &cdn.Sku{ - Name: cdn.SkuName(sku), - }, - } - - _, error := cdnProfilesClient.Create(resGroup, name, cdnProfile, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := cdnProfilesClient.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read CDN Profile %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmCdnProfileRead(d, meta) -} - -func resourceArmCdnProfileRead(d *schema.ResourceData, meta interface{}) error { - cdnProfilesClient := meta.(*ArmClient).cdnProfilesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["profiles"] - - resp, err := cdnProfilesClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure CDN Profile %s: %s", name, err) - } - - d.Set("name", name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - - if resp.Sku != nil { - d.Set("sku", string(resp.Sku.Name)) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmCdnProfileUpdate(d *schema.ResourceData, meta interface{}) error { - cdnProfilesClient := meta.(*ArmClient).cdnProfilesClient - - if !d.HasChange("tags") { - return nil - } - - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - newTags := d.Get("tags").(map[string]interface{}) - - props := cdn.ProfileUpdateParameters{ - Tags: expandTags(newTags), - } - - _, error := cdnProfilesClient.Update(resGroup, name, props, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Error issuing Azure ARM update request to update CDN Profile %q: %s", name, err) - } - 
- return resourceArmCdnProfileRead(d, meta) -} - -func resourceArmCdnProfileDelete(d *schema.ResourceData, meta interface{}) error { - cdnProfilesClient := meta.(*ArmClient).cdnProfilesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["profiles"] - - _, error := cdnProfilesClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - // TODO: check the status code - - return err -} - -func validateCdnProfileSku(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - skus := map[string]bool{ - "standard_akamai": true, - "premium_verizon": true, - "standard_verizon": true, - } - - if !skus[value] { - errors = append(errors, fmt.Errorf("CDN Profile SKU can only be Premium_Verizon, Standard_Verizon or Standard_Akamai")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_cdn_profile_test.go b/builtin/providers/azurerm/resource_arm_cdn_profile_test.go deleted file mode 100644 index fbbd5e8fa..000000000 --- a/builtin/providers/azurerm/resource_arm_cdn_profile_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMCdnProfileSKU_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Standard_Verizon", - ErrCount: 0, - }, - { - Value: "Premium_Verizon", - ErrCount: 0, - }, - { - Value: "Standard_Akamai", - ErrCount: 0, - }, - { - Value: "STANDARD_AKAMAI", - ErrCount: 0, - }, - { - Value: "standard_akamai", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateCdnProfileSku(tc.Value, "azurerm_cdn_profile") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM CDN Profile SKU to 
trigger a validation error") - } - } -} - -func TestAccAzureRMCdnProfile_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMCdnProfile_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnProfileDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnProfileExists("azurerm_cdn_profile.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMCdnProfile_withTags(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMCdnProfile_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMCdnProfile_withTagsUpdate, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMCdnProfileDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnProfileExists("azurerm_cdn_profile.test"), - resource.TestCheckResourceAttr( - "azurerm_cdn_profile.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_cdn_profile.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_cdn_profile.test", "tags.cost_center", "MSFT"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnProfileExists("azurerm_cdn_profile.test"), - resource.TestCheckResourceAttr( - "azurerm_cdn_profile.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_cdn_profile.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMCdnProfile_NonStandardCasing(t *testing.T) { - - ri := acctest.RandInt() - config := testAccAzureRMCdnProfileNonStandardCasing(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMCdnProfileDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMCdnProfileExists("azurerm_cdn_profile.test"), - ), - }, - - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMCdnProfileExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for cdn profile: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).cdnProfilesClient - - resp, err := conn.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Bad: Get on cdnProfilesClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: CDN Profile %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMCdnProfileDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).cdnProfilesClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_cdn_profile" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("CDN Profile still exists:\n%#v", resp.ProfileProperties) - } - } - - return nil -} - -var testAccAzureRMCdnProfile_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = 
"acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" -} -` - -var testAccAzureRMCdnProfile_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMCdnProfile_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard_Verizon" - - tags { - environment = "staging" - } -} -` - -func testAccAzureRMCdnProfileNonStandardCasing(ri int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_cdn_profile" "test" { - name = "acctestcdnprof%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard_verizon" -} -`, ri, ri) -} diff --git a/builtin/providers/azurerm/resource_arm_container_registry.go b/builtin/providers/azurerm/resource_arm_container_registry.go deleted file mode 100644 index efd03cd75..000000000 --- a/builtin/providers/azurerm/resource_arm_container_registry.go +++ /dev/null @@ -1,308 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - - "regexp" - - "github.com/Azure/azure-sdk-for-go/arm/containerregistry" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "github.com/jen20/riviera/azure" -) - -func resourceArmContainerRegistry() *schema.Resource { - return &schema.Resource{ - Create: 
resourceArmContainerRegistryCreate, - Read: resourceArmContainerRegistryRead, - Update: resourceArmContainerRegistryUpdate, - Delete: resourceArmContainerRegistryDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - MigrateState: resourceAzureRMContainerRegistryMigrateState, - SchemaVersion: 1, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAzureRMContainerRegistryName, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "sku": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: string(containerregistry.Basic), - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - ValidateFunc: validation.StringInSlice([]string{ - string(containerregistry.Basic), - }, true), - }, - - "admin_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "storage_account": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "access_key": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - }, - }, - }, - - "login_server": { - Type: schema.TypeString, - Computed: true, - }, - - "admin_username": { - Type: schema.TypeString, - Computed: true, - }, - - "admin_password": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmContainerRegistryCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).containerRegistryClient - log.Printf("[INFO] preparing arguments for AzureRM Container Registry creation.") - - resourceGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - location := d.Get("location").(string) - sku := d.Get("sku").(string) - - adminUserEnabled := 
d.Get("admin_enabled").(bool) - tags := d.Get("tags").(map[string]interface{}) - - parameters := containerregistry.RegistryCreateParameters{ - Location: &location, - Sku: &containerregistry.Sku{ - Name: &sku, - Tier: containerregistry.SkuTier(sku), - }, - RegistryPropertiesCreateParameters: &containerregistry.RegistryPropertiesCreateParameters{ - AdminUserEnabled: &adminUserEnabled, - }, - Tags: expandTags(tags), - } - - accounts := d.Get("storage_account").(*schema.Set).List() - account := accounts[0].(map[string]interface{}) - storageAccountName := account["name"].(string) - storageAccountAccessKey := account["access_key"].(string) - parameters.RegistryPropertiesCreateParameters.StorageAccount = &containerregistry.StorageAccountParameters{ - Name: azure.String(storageAccountName), - AccessKey: azure.String(storageAccountAccessKey), - } - - _, error := client.Create(resourceGroup, name, parameters, make(<-chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := client.Get(resourceGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read Container Registry %s (resource group %s) ID", name, resourceGroup) - } - - d.SetId(*read.ID) - - return resourceArmContainerRegistryRead(d, meta) -} - -func resourceArmContainerRegistryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).containerRegistryClient - log.Printf("[INFO] preparing arguments for AzureRM Container Registry update.") - - resourceGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - - accounts := d.Get("storage_account").(*schema.Set).List() - account := accounts[0].(map[string]interface{}) - storageAccountName := account["name"].(string) - storageAccountAccessKey := account["access_key"].(string) - - adminUserEnabled := d.Get("admin_enabled").(bool) - tags := d.Get("tags").(map[string]interface{}) - - parameters := containerregistry.RegistryUpdateParameters{ - 
RegistryPropertiesUpdateParameters: &containerregistry.RegistryPropertiesUpdateParameters{ - AdminUserEnabled: &adminUserEnabled, - StorageAccount: &containerregistry.StorageAccountParameters{ - Name: azure.String(storageAccountName), - AccessKey: azure.String(storageAccountAccessKey), - }, - }, - Tags: expandTags(tags), - } - - _, err := client.Update(resourceGroup, name, parameters) - if err != nil { - return err - } - - read, err := client.Get(resourceGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read Container Registry %s (resource group %s) ID", name, resourceGroup) - } - - d.SetId(*read.ID) - - return resourceArmContainerRegistryRead(d, meta) -} - -func resourceArmContainerRegistryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).containerRegistryClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resourceGroup := id.ResourceGroup - name := id.Path["registries"] - - resp, err := client.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure Container Registry %s: %s", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resourceGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("admin_enabled", resp.AdminUserEnabled) - d.Set("login_server", resp.LoginServer) - - if resp.Sku != nil { - d.Set("sku", string(resp.Sku.Tier)) - } - - if resp.StorageAccount != nil { - flattenArmContainerRegistryStorageAccount(d, resp.StorageAccount) - } - - if *resp.AdminUserEnabled { - credsResp, err := client.ListCredentials(resourceGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure Container Registry %s for Credentials: %s", name, err) - } - - d.Set("admin_username", credsResp.Username) - for _, v := range *credsResp.Passwords { - d.Set("admin_password", 
v.Value) - break - } - } else { - d.Set("admin_username", "") - d.Set("admin_password", "") - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmContainerRegistryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).containerRegistryClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resourceGroup := id.ResourceGroup - name := id.Path["registries"] - - resp, err := client.Delete(resourceGroup, name) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of Container Registry '%s': %s", name, err) - } - - return nil -} - -func flattenArmContainerRegistryStorageAccount(d *schema.ResourceData, properties *containerregistry.StorageAccountProperties) { - storageAccounts := schema.Set{ - F: resourceAzureRMContainerRegistryStorageAccountHash, - } - - storageAccount := map[string]interface{}{} - storageAccount["name"] = properties.Name - storageAccounts.Add(storageAccount) - - d.Set("storage_account", &storageAccounts) -} - -func resourceAzureRMContainerRegistryStorageAccountHash(v interface{}) int { - m := v.(map[string]interface{}) - name := m["name"].(*string) - return hashcode.String(*name) -} - -func validateAzureRMContainerRegistryName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "alpha numeric characters only are allowed in %q: %q", k, value)) - } - - if 5 > len(value) { - errors = append(errors, fmt.Errorf("%q cannot be less than 5 characters: %q", k, value)) - } - - if len(value) >= 50 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 50 characters: %q %d", k, value, len(value))) - } - - return -} diff --git a/builtin/providers/azurerm/resource_arm_container_registry_migrate.go b/builtin/providers/azurerm/resource_arm_container_registry_migrate.go deleted file mode 100644 
index 6c6ff58f3..000000000 --- a/builtin/providers/azurerm/resource_arm_container_registry_migrate.go +++ /dev/null @@ -1,34 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceAzureRMContainerRegistryMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found AzureRM Container Registry State v0; migrating to v1") - return migrateAzureRMContainerRegistryStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateAzureRMContainerRegistryStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] ARM Container Registry Attributes before Migration: %#v", is.Attributes) - - is.Attributes["sku"] = "Basic" - - log.Printf("[DEBUG] ARM Container Registry Attributes after State Migration: %#v", is.Attributes) - - return is, nil -} diff --git a/builtin/providers/azurerm/resource_arm_container_registry_migrate_test.go b/builtin/providers/azurerm/resource_arm_container_registry_migrate_test.go deleted file mode 100644 index 43d65a242..000000000 --- a/builtin/providers/azurerm/resource_arm_container_registry_migrate_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package azurerm - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestAzureRMContainerRegistryMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - Meta interface{} - }{ - "v0_1_without_value": { - StateVersion: 0, - ID: "some_id", - Attributes: map[string]string{}, - Expected: "Basic", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := 
resourceAzureRMContainerRegistryMigrateState(tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.Attributes["sku"] != tc.Expected { - t.Fatalf("bad Container Registry Migrate: %s\n\n expected: %s", is.Attributes["sku"], tc.Expected) - } - } -} diff --git a/builtin/providers/azurerm/resource_arm_container_registry_test.go b/builtin/providers/azurerm/resource_arm_container_registry_test.go deleted file mode 100644 index 7efc92472..000000000 --- a/builtin/providers/azurerm/resource_arm_container_registry_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMContainerRegistryName_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "four", - ErrCount: 1, - }, - { - Value: "5five", - ErrCount: 0, - }, - { - Value: "hello-world", - ErrCount: 1, - }, - { - Value: "hello_world", - ErrCount: 1, - }, - { - Value: "helloWorld", - ErrCount: 0, - }, - { - Value: "helloworld12", - ErrCount: 0, - }, - { - Value: "hello@world", - ErrCount: 1, - }, - { - Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd3324120", - ErrCount: 0, - }, - { - Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202", - ErrCount: 0, - }, - { - Value: "qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAzureRMContainerRegistryName(tc.Value, "azurerm_container_registry") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Container Registry Name to trigger a validation error: %v", errors) - } - } -} - -func TestAccAzureRMContainerRegistry_basic(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := testAccAzureRMContainerRegistry_basic(ri, rs) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerRegistryDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerRegistryExists("azurerm_container_registry.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMContainerRegistry_complete(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := testAccAzureRMContainerRegistry_complete(ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerRegistryDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerRegistryExists("azurerm_container_registry.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMContainerRegistry_update(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - config := testAccAzureRMContainerRegistry_complete(ri, rs) - updatedConfig := testAccAzureRMContainerRegistry_completeUpdated(ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerRegistryDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerRegistryExists("azurerm_container_registry.test"), - ), - }, - { - Config: updatedConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerRegistryExists("azurerm_container_registry.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMContainerRegistryDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).containerRegistryClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_container_registry" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := 
rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Container Registry still exists:\n%#v", resp) - } - } - - return nil -} - -func testCheckAzureRMContainerRegistryExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Container Registry: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).containerRegistryClient - - resp, err := conn.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Bad: Get on containerRegistryClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Container Registry %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testAccAzureRMContainerRegistry_basic(rInt int, rStr string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "testAccRg-%d" - location = "West US" -} - -resource "azurerm_storage_account" "test" { - name = "testaccsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_type = "Standard_LRS" -} - -resource "azurerm_container_registry" "test" { - name = "testacccr%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - sku = "Basic" - - storage_account { - name = "${azurerm_storage_account.test.name}" - access_key = "${azurerm_storage_account.test.primary_access_key}" - } -} -`, rInt, rStr, rInt) -} - 
-func testAccAzureRMContainerRegistry_complete(rInt int, rStr string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "testAccRg-%d" - location = "West US" -} - -resource "azurerm_storage_account" "test" { - name = "testaccsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_type = "Standard_LRS" -} - -resource "azurerm_container_registry" "test" { - name = "testacccr%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - admin_enabled = false - sku = "Basic" - - storage_account { - name = "${azurerm_storage_account.test.name}" - access_key = "${azurerm_storage_account.test.primary_access_key}" - } - - tags { - environment = "production" - } -} -`, rInt, rStr, rInt) -} - -func testAccAzureRMContainerRegistry_completeUpdated(rInt int, rStr string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "testAccRg-%d" - location = "West US" -} - -resource "azurerm_storage_account" "test" { - name = "testaccsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_type = "Standard_LRS" -} - -resource "azurerm_container_registry" "test" { - name = "testacccr%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - admin_enabled = true - sku = "Basic" - - storage_account { - name = "${azurerm_storage_account.test.name}" - access_key = "${azurerm_storage_account.test.primary_access_key}" - } - - tags { - environment = "production" - } -} -`, rInt, rStr, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_container_service.go b/builtin/providers/azurerm/resource_arm_container_service.go deleted file mode 100644 index da9db3580..000000000 --- a/builtin/providers/azurerm/resource_arm_container_service.go +++ /dev/null 
@@ -1,650 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - - "time" - - "bytes" - - "github.com/Azure/azure-sdk-for-go/arm/containerservice" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmContainerService() *schema.Resource { - return &schema.Resource{ - Create: resourceArmContainerServiceCreate, - Read: resourceArmContainerServiceRead, - Update: resourceArmContainerServiceCreate, - Delete: resourceArmContainerServiceDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "orchestration_platform": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmContainerServiceOrchestrationPlatform, - }, - - "master_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validateArmContainerServiceMasterProfileCount, - }, - - "dns_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceMasterProfileHash, - }, - - "linux_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "admin_username": { - Type: schema.TypeString, - Required: true, - }, - "ssh_key": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key_data": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - Set: resourceAzureRMContainerServiceLinuxProfilesHash, - }, - - 
"agent_pool_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validateArmContainerServiceAgentPoolProfileCount, - }, - - "dns_prefix": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - - "vm_size": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceAgentPoolProfilesHash, - }, - - "service_principal": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - }, - - "client_secret": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceServicePrincipalProfileHash, - }, - - "diagnostics_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - - "storage_uri": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceDiagnosticProfilesHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmContainerServiceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - containerServiceClient := client.containerServicesClient - - log.Printf("[INFO] preparing arguments for Azure ARM Container Service creation.") - - resGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - location := d.Get("location").(string) - - orchestrationPlatform := d.Get("orchestration_platform").(string) - - masterProfile := expandAzureRmContainerServiceMasterProfile(d) - linuxProfile := 
expandAzureRmContainerServiceLinuxProfile(d) - agentProfiles := expandAzureRmContainerServiceAgentProfiles(d) - diagnosticsProfile := expandAzureRmContainerServiceDiagnostics(d) - - tags := d.Get("tags").(map[string]interface{}) - - parameters := containerservice.ContainerService{ - Name: &name, - Location: &location, - Properties: &containerservice.Properties{ - MasterProfile: &masterProfile, - LinuxProfile: &linuxProfile, - OrchestratorProfile: &containerservice.OrchestratorProfile{ - OrchestratorType: containerservice.OchestratorTypes(orchestrationPlatform), - }, - AgentPoolProfiles: &agentProfiles, - DiagnosticsProfile: &diagnosticsProfile, - }, - Tags: expandTags(tags), - } - - servicePrincipalProfile := expandAzureRmContainerServiceServicePrincipal(d) - if servicePrincipalProfile != nil { - parameters.ServicePrincipalProfile = servicePrincipalProfile - } - - _, error := containerServiceClient.CreateOrUpdate(resGroup, name, parameters, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := containerServiceClient.Get(resGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read Container Service %s (resource group %s) ID", name, resGroup) - } - - log.Printf("[DEBUG] Waiting for Container Service (%s) to become available", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: containerServiceStateRefreshFunc(client, resGroup, name), - Timeout: 30 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Container Service (%s) to become available: %s", d.Get("name"), err) - } - - d.SetId(*read.ID) - - return resourceArmContainerServiceRead(d, meta) -} - -func resourceArmContainerServiceRead(d *schema.ResourceData, meta interface{}) error { - containerServiceClient := meta.(*ArmClient).containerServicesClient 
- - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["containerServices"] - - resp, err := containerServiceClient.Get(resGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure Container Service %s: %s", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("resource_group_name", resGroup) - - d.Set("orchestration_platform", string(resp.Properties.OrchestratorProfile.OrchestratorType)) - - masterProfiles := flattenAzureRmContainerServiceMasterProfile(*resp.Properties.MasterProfile) - d.Set("master_profile", &masterProfiles) - - linuxProfile := flattenAzureRmContainerServiceLinuxProfile(*resp.Properties.LinuxProfile) - d.Set("linux_profile", &linuxProfile) - - agentPoolProfiles := flattenAzureRmContainerServiceAgentPoolProfiles(resp.Properties.AgentPoolProfiles) - d.Set("agent_pool_profile", &agentPoolProfiles) - - servicePrincipal := flattenAzureRmContainerServiceServicePrincipalProfile(resp.Properties.ServicePrincipalProfile) - if servicePrincipal != nil { - d.Set("service_principal", servicePrincipal) - } - - diagnosticProfile := flattenAzureRmContainerServiceDiagnosticsProfile(resp.Properties.DiagnosticsProfile) - if diagnosticProfile != nil { - d.Set("diagnostics_profile", diagnosticProfile) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmContainerServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - containerServiceClient := client.containerServicesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["containerServices"] - - delResp, error := containerServiceClient.Delete(resGroup, name, make(chan struct{})) - resp := <-delResp - err = <-error - if err != nil { - return 
err - } - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of Container Service '%s': %s", name, err) - } - - return nil - -} - -func flattenAzureRmContainerServiceMasterProfile(profile containerservice.MasterProfile) *schema.Set { - masterProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceMasterProfileHash, - } - - masterProfile := make(map[string]interface{}, 2) - - masterProfile["count"] = int(*profile.Count) - masterProfile["dns_prefix"] = *profile.DNSPrefix - - masterProfiles.Add(masterProfile) - - return masterProfiles -} - -func flattenAzureRmContainerServiceLinuxProfile(profile containerservice.LinuxProfile) *schema.Set { - profiles := &schema.Set{ - F: resourceAzureRMContainerServiceLinuxProfilesHash, - } - - values := map[string]interface{}{} - - sshKeys := &schema.Set{ - F: resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash, - } - for _, ssh := range *profile.SSH.PublicKeys { - keys := map[string]interface{}{} - keys["key_data"] = *ssh.KeyData - sshKeys.Add(keys) - } - - values["admin_username"] = *profile.AdminUsername - values["ssh_key"] = sshKeys - profiles.Add(values) - - return profiles -} - -func flattenAzureRmContainerServiceAgentPoolProfiles(profiles *[]containerservice.AgentPoolProfile) *schema.Set { - agentPoolProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceAgentPoolProfilesHash, - } - - for _, profile := range *profiles { - agentPoolProfile := map[string]interface{}{} - agentPoolProfile["count"] = int(*profile.Count) - agentPoolProfile["dns_prefix"] = *profile.DNSPrefix - agentPoolProfile["fqdn"] = *profile.Fqdn - agentPoolProfile["name"] = *profile.Name - agentPoolProfile["vm_size"] = string(profile.VMSize) - agentPoolProfiles.Add(agentPoolProfile) - } - - return agentPoolProfiles -} - -func flattenAzureRmContainerServiceServicePrincipalProfile(profile *containerservice.ServicePrincipalProfile) *schema.Set { - - if profile == nil { - return nil - } - - 
servicePrincipalProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceServicePrincipalProfileHash, - } - - values := map[string]interface{}{} - - values["client_id"] = *profile.ClientID - if profile.Secret != nil { - values["client_secret"] = *profile.Secret - } - - servicePrincipalProfiles.Add(values) - - return servicePrincipalProfiles -} - -func flattenAzureRmContainerServiceDiagnosticsProfile(profile *containerservice.DiagnosticsProfile) *schema.Set { - diagnosticProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceDiagnosticProfilesHash, - } - - values := map[string]interface{}{} - - values["enabled"] = *profile.VMDiagnostics.Enabled - if profile.VMDiagnostics.StorageURI != nil { - values["storage_uri"] = *profile.VMDiagnostics.StorageURI - } - diagnosticProfiles.Add(values) - - return diagnosticProfiles -} - -func expandAzureRmContainerServiceDiagnostics(d *schema.ResourceData) containerservice.DiagnosticsProfile { - configs := d.Get("diagnostics_profile").(*schema.Set).List() - profile := containerservice.DiagnosticsProfile{} - - data := configs[0].(map[string]interface{}) - - enabled := data["enabled"].(bool) - - profile = containerservice.DiagnosticsProfile{ - VMDiagnostics: &containerservice.VMDiagnostics{ - Enabled: &enabled, - }, - } - - return profile -} - -func expandAzureRmContainerServiceLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile { - profiles := d.Get("linux_profile").(*schema.Set).List() - config := profiles[0].(map[string]interface{}) - - adminUsername := config["admin_username"].(string) - - linuxKeys := config["ssh_key"].(*schema.Set).List() - sshPublicKeys := []containerservice.SSHPublicKey{} - - key := linuxKeys[0].(map[string]interface{}) - keyData := key["key_data"].(string) - - sshPublicKey := containerservice.SSHPublicKey{ - KeyData: &keyData, - } - - sshPublicKeys = append(sshPublicKeys, sshPublicKey) - - profile := containerservice.LinuxProfile{ - AdminUsername: &adminUsername, - SSH: 
&containerservice.SSHConfiguration{ - PublicKeys: &sshPublicKeys, - }, - } - - return profile -} - -func expandAzureRmContainerServiceMasterProfile(d *schema.ResourceData) containerservice.MasterProfile { - configs := d.Get("master_profile").(*schema.Set).List() - config := configs[0].(map[string]interface{}) - - count := int32(config["count"].(int)) - dnsPrefix := config["dns_prefix"].(string) - - profile := containerservice.MasterProfile{ - Count: &count, - DNSPrefix: &dnsPrefix, - } - - return profile -} - -func expandAzureRmContainerServiceServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile { - - value, exists := d.GetOk("service_principal") - if !exists { - return nil - } - - configs := value.(*schema.Set).List() - - config := configs[0].(map[string]interface{}) - - clientId := config["client_id"].(string) - clientSecret := config["client_secret"].(string) - - principal := containerservice.ServicePrincipalProfile{ - ClientID: &clientId, - Secret: &clientSecret, - } - - return &principal -} - -func expandAzureRmContainerServiceAgentProfiles(d *schema.ResourceData) []containerservice.AgentPoolProfile { - configs := d.Get("agent_pool_profile").(*schema.Set).List() - config := configs[0].(map[string]interface{}) - profiles := make([]containerservice.AgentPoolProfile, 0, len(configs)) - - name := config["name"].(string) - count := int32(config["count"].(int)) - dnsPrefix := config["dns_prefix"].(string) - vmSize := config["vm_size"].(string) - - profile := containerservice.AgentPoolProfile{ - Name: &name, - Count: &count, - VMSize: containerservice.VMSizeTypes(vmSize), - DNSPrefix: &dnsPrefix, - } - - profiles = append(profiles, profile) - - return profiles -} - -func containerServiceStateRefreshFunc(client *ArmClient, resourceGroupName string, containerServiceName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.containerServicesClient.Get(resourceGroupName, containerServiceName) 
- if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in containerServiceStateRefreshFunc to Azure ARM for Container Service '%s' (RG: '%s'): %s", containerServiceName, resourceGroupName, err) - } - - return res, *res.Properties.ProvisioningState, nil - } -} - -func resourceAzureRMContainerServiceMasterProfileHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - count := m["count"].(int) - dnsPrefix := m["dns_prefix"].(string) - - buf.WriteString(fmt.Sprintf("%d-", count)) - buf.WriteString(fmt.Sprintf("%s-", dnsPrefix)) - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceLinuxProfilesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - adminUsername := m["admin_username"].(string) - - buf.WriteString(fmt.Sprintf("%s-", adminUsername)) - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - keyData := m["key_data"].(string) - - buf.WriteString(fmt.Sprintf("%s-", keyData)) - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceAgentPoolProfilesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - count := m["count"].(int) - dnsPrefix := m["dns_prefix"].(string) - name := m["name"].(string) - vm_size := m["vm_size"].(string) - - buf.WriteString(fmt.Sprintf("%d-", count)) - buf.WriteString(fmt.Sprintf("%s-", dnsPrefix)) - buf.WriteString(fmt.Sprintf("%s-", name)) - buf.WriteString(fmt.Sprintf("%s-", vm_size)) - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceServicePrincipalProfileHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - clientId := m["client_id"].(string) - buf.WriteString(fmt.Sprintf("%s-", clientId)) - - return hashcode.String(buf.String()) -} - -func 
resourceAzureRMContainerServiceDiagnosticProfilesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - enabled := m["enabled"].(bool) - - buf.WriteString(fmt.Sprintf("%t", enabled)) - - return hashcode.String(buf.String()) -} - -func validateArmContainerServiceOrchestrationPlatform(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - capacities := map[string]bool{ - "DCOS": true, - "Kubernetes": true, - "Swarm": true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("Container Service: Orchestration Platgorm can only be DCOS / Kubernetes / Swarm")) - } - return -} - -func validateArmContainerServiceMasterProfileCount(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - capacities := map[int]bool{ - 1: true, - 3: true, - 5: true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("The number of master nodes must be 1, 3 or 5.")) - } - return -} - -func validateArmContainerServiceAgentPoolProfileCount(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value > 100 || 0 >= value { - errors = append(errors, fmt.Errorf("The Count for an Agent Pool Profile can only be between 1 and 100.")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_container_service_test.go b/builtin/providers/azurerm/resource_arm_container_service_test.go deleted file mode 100644 index 36c1ff19f..000000000 --- a/builtin/providers/azurerm/resource_arm_container_service_test.go +++ /dev/null @@ -1,372 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMContainerService_orchestrationPlatformValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "DCOS", ErrCount: 0}, - {Value: "Kubernetes", ErrCount: 0}, - 
{Value: "Swarm", ErrCount: 0}, - {Value: "Mesos", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateArmContainerServiceOrchestrationPlatform(tc.Value, "azurerm_container_service") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Container Service Orchestration Platform to trigger a validation error") - } - } -} - -func TestAccAzureRMContainerService_masterProfileCountValidation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - {Value: 0, ErrCount: 1}, - {Value: 1, ErrCount: 0}, - {Value: 2, ErrCount: 1}, - {Value: 3, ErrCount: 0}, - {Value: 4, ErrCount: 1}, - {Value: 5, ErrCount: 0}, - {Value: 6, ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateArmContainerServiceMasterProfileCount(tc.Value, "azurerm_container_service") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Container Service Master Profile Count to trigger a validation error") - } - } -} - -func TestAccAzureRMContainerService_agentProfilePoolCountValidation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - {Value: 0, ErrCount: 1}, - {Value: 1, ErrCount: 0}, - {Value: 2, ErrCount: 0}, - {Value: 99, ErrCount: 0}, - {Value: 100, ErrCount: 0}, - {Value: 101, ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateArmContainerServiceAgentPoolProfileCount(tc.Value, "azurerm_container_service") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Container Service Agent Pool Profile Count to trigger a validation error") - } - } -} - -func TestAccAzureRMContainerService_dcosBasic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMContainerService_dcosBasic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerServiceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: 
resource.ComposeTestCheckFunc( - testCheckAzureRMContainerServiceExists("azurerm_container_service.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMContainerService_kubernetesBasic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMContainerService_kubernetesBasic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerServiceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerServiceExists("azurerm_container_service.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMContainerService_kubernetesComplete(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMContainerService_kubernetesComplete, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerServiceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerServiceExists("azurerm_container_service.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMContainerService_swarmBasic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMContainerService_swarmBasic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMContainerServiceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMContainerServiceExists("azurerm_container_service.test"), - ), - }, - }, - }) -} - -var testAccAzureRMContainerService_dcosBasic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "East US" -} - -resource "azurerm_container_service" "test" { - name = 
"acctestcontservice%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - orchestration_platform = "DCOS" - - master_profile { - count = 1 - dns_prefix = "acctestmaster%d" - } - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = 1 - dns_prefix = "acctestagent%d" - vm_size = "Standard_A0" - } - - diagnostics_profile { - enabled = false - } -} -` - -var testAccAzureRMContainerService_kubernetesBasic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "East US" -} - -resource "azurerm_container_service" "test" { - name = "acctestcontservice%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - orchestration_platform = "Kubernetes" - - master_profile { - count = 1 - dns_prefix = "acctestmaster%d" - } - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = 1 - dns_prefix = "acctestagent%d" - vm_size = "Standard_A0" - } - - service_principal 
{ - client_id = "00000000-0000-0000-0000-000000000000" - client_secret = "00000000000000000000000000000000" - } - - diagnostics_profile { - enabled = false - } -} -` - -var testAccAzureRMContainerService_kubernetesComplete = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "East US" -} - -resource "azurerm_container_service" "test" { - name = "acctestcontservice%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - orchestration_platform = "Kubernetes" - - master_profile { - count = 1 - dns_prefix = "acctestmaster%d" - } - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = 1 - dns_prefix = "acctestagent%d" - vm_size = "Standard_A0" - } - - service_principal { - client_id = "00000000-0000-0000-0000-000000000000" - client_secret = "00000000000000000000000000000000" - } - - diagnostics_profile { - enabled = false - } - - tags { - you = "me" - } -} -` - -var testAccAzureRMContainerService_swarmBasic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "East US" -} - -resource "azurerm_container_service" "test" { - name = "acctestcontservice%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - orchestration_platform = "Swarm" - - master_profile { - count = 1 - dns_prefix = "acctestmaster%d" - } - - linux_profile { - admin_username = "acctestuser%d" - - ssh_key { - key_data = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" - } - } - - agent_pool_profile { - name = "default" - count = 1 - dns_prefix = "acctestagent%d" - vm_size = "Standard_A0" - } - - diagnostics_profile { - enabled = false - } -} -` - -func testCheckAzureRMContainerServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Container Service Instance: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).containerServicesClient - - resp, err := conn.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Bad: Get on containerServicesClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Container Service Instance %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMContainerServiceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).containerServicesClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_container_service" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != 
http.StatusNotFound { - return fmt.Errorf("Container Service Instance still exists:\n%#v", resp) - } - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_dns_a_record.go b/builtin/providers/azurerm/resource_arm_dns_a_record.go deleted file mode 100644 index ba56deb63..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_a_record.go +++ /dev/null @@ -1,176 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsARecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsARecordCreate, - Read: resourceArmDnsARecordRead, - Update: resourceArmDnsARecordCreate, - Delete: resourceArmDnsARecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "records": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsARecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateARecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - recordStrings := d.Get("records").(*schema.Set).List() - records := 
make([]dns.ARecord, len(recordStrings)) - for i, v := range recordStrings { - records[i] = dns.ARecord{ - IPv4Address: v.(string), - } - } - createCommand.ARecords = records - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS A Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS A Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetARecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS A Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS A Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetARecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsARecordRead(d, meta) -} - -func resourceArmDnsARecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS A Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS A Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS A Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetARecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if 
resp.ARecords != nil { - records := make([]string, 0, len(resp.ARecords)) - for _, record := range resp.ARecords { - records = append(records, record.IPv4Address) - } - - if err := d.Set("records", records); err != nil { - return err - } - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsARecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "A", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS A Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS A Record: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_dns_a_record_test.go b/builtin/providers/azurerm/resource_arm_dns_a_record_test.go deleted file mode 100644 index 41724cf14..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_a_record_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsARecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsARecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsARecordExists("azurerm_dns_a_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsARecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := 
fmt.Sprintf(testAccAzureRMDnsARecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsARecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsARecordExists("azurerm_dns_a_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_a_record.test", "records.#", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsARecordExists("azurerm_dns_a_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_a_record.test", "records.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsARecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsARecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsARecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsARecordExists("azurerm_dns_a_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_a_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsARecordExists("azurerm_dns_a_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_a_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsARecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - 
return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetARecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetARecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsARecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_a_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetARecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS A Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsARecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_a_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["1.2.3.4", "1.2.4.5"] -} -` - -var testAccAzureRMDnsARecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_a_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = 
"${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["1.2.3.4", "1.2.4.5", "1.2.3.7"] -} -` - -var testAccAzureRMDnsARecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_a_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["1.2.3.4", "1.2.4.5"] - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsARecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_a_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["1.2.3.4", "1.2.4.5"] - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_aaaa_record.go b/builtin/providers/azurerm/resource_arm_dns_aaaa_record.go deleted file mode 100644 index 5246e9f4c..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_aaaa_record.go +++ /dev/null @@ -1,176 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsAAAARecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsAAAARecordCreate, - Read: resourceArmDnsAAAARecordRead, - Update: resourceArmDnsAAAARecordCreate, - Delete: resourceArmDnsAAAARecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - 
"name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "records": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsAAAARecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateAAAARecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - recordStrings := d.Get("records").(*schema.Set).List() - records := make([]dns.AAAARecord, len(recordStrings)) - for i, v := range recordStrings { - records[i] = dns.AAAARecord{ - IPv6Address: v.(string), - } - } - createCommand.AAAARecords = records - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS AAAA Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS AAAA Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetAAAARecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS AAAA Record: 
%s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS AAAA Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetAAAARecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsAAAARecordRead(d, meta) -} - -func resourceArmDnsAAAARecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetAAAARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS AAAA Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS AAAA Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS AAAA Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetAAAARecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if resp.AAAARecords != nil { - records := make([]string, 0, len(resp.AAAARecords)) - for _, record := range resp.AAAARecords { - records = append(records, record.IPv6Address) - } - - if err := d.Set("records", records); err != nil { - return err - } - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsAAAARecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "AAAA", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS AAAA Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS AAAA 
Record: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_dns_aaaa_record_test.go b/builtin/providers/azurerm/resource_arm_dns_aaaa_record_test.go deleted file mode 100644 index a0e3dafc7..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_aaaa_record_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsAAAARecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsAAAARecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsAAAARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsAAAARecordExists("azurerm_dns_aaaa_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsAAAARecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsAAAARecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsAAAARecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsAAAARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsAAAARecordExists("azurerm_dns_aaaa_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_aaaa_record.test", "records.#", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsAAAARecordExists("azurerm_dns_aaaa_record.test"), - resource.TestCheckResourceAttr( - 
"azurerm_dns_aaaa_record.test", "records.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsAAAARecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsAAAARecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsAAAARecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsAAAARecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsAAAARecordExists("azurerm_dns_aaaa_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_aaaa_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsAAAARecordExists("azurerm_dns_aaaa_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_aaaa_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsAAAARecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetAAAARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetAAAARecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetAAAARecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsAAAARecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_aaaa_record" { - continue - } - - readRequest 
:= conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetAAAARecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetAAAARecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS AAAA Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsAAAARecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_aaaa_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006"] -} -` - -var testAccAzureRMDnsAAAARecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_aaaa_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006", "::1"] -} -` - -var testAccAzureRMDnsAAAARecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_aaaa_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006"] - - tags { - 
environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsAAAARecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_aaaa_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - records = ["2607:f8b0:4009:1803::1005", "2607:f8b0:4009:1803::1006"] - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_cname_record.go b/builtin/providers/azurerm/resource_arm_dns_cname_record.go deleted file mode 100644 index 7541fc46c..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_cname_record.go +++ /dev/null @@ -1,166 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsCNameRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsCNameRecordCreate, - Read: resourceArmDnsCNameRecordRead, - Update: resourceArmDnsCNameRecordCreate, - Delete: resourceArmDnsCNameRecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "records": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Removed: "Use `record` instead. 
This attribute will be removed in a future version", - }, - - "record": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsCNameRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateCNAMERecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - CNAMERecord: dns.CNAMERecord{ - CNAME: d.Get("record").(string), - }, - } - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS CName Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS CName Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetCNAMERecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS CName Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS CName Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetCNAMERecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsCNameRecordRead(d, meta) -} - -func resourceArmDnsCNameRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if 
err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetCNAMERecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS A Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS A Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS A Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetCNAMERecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - d.Set("record", resp.CNAMERecord.CNAME) - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsCNameRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "CNAME", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS CName Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS CName Record: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_dns_cname_record_test.go b/builtin/providers/azurerm/resource_arm_dns_cname_record_test.go deleted file mode 100644 index 0207a2dc0..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_cname_record_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsCNameRecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := 
fmt.Sprintf(testAccAzureRMDnsCNameRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsCNameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsCNameRecord_subdomain(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsCNameRecord_subdomain, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsCNameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_cname_record.test", "record", "test.contoso.com"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsCNameRecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsCNameRecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsCNameRecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsCNameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsCNameRecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := 
fmt.Sprintf(testAccAzureRMDnsCNameRecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsCNameRecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsCNameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_cname_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsCNameRecordExists("azurerm_dns_cname_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_cname_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsCNameRecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetCNAMERecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetCNAMERecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetCNAMERecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsCNameRecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_cname_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetCNAMERecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - 
return fmt.Errorf("Bad: GetCNAMERecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS CNAME Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsCNameRecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_cname_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record = "contoso.com" -} -` - -var testAccAzureRMDnsCNameRecord_subdomain = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_cname_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record = "test.contoso.com" -} -` - -var testAccAzureRMDnsCNameRecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_cname_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record = "contoso.co.uk" -} -` - -var testAccAzureRMDnsCNameRecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource 
"azurerm_dns_cname_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record = "contoso.com" - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsCNameRecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_cname_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record = "contoso.com" - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_mx_record.go b/builtin/providers/azurerm/resource_arm_dns_mx_record.go deleted file mode 100644 index d4881c5f5..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_mx_record.go +++ /dev/null @@ -1,223 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsMxRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsMxRecordCreate, - Read: resourceArmDnsMxRecordRead, - Update: resourceArmDnsMxRecordCreate, - Delete: resourceArmDnsMxRecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "record": &schema.Schema{ - Type: schema.TypeSet, - 
Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "preference": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "exchange": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceArmDnsMxRecordHash, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsMxRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateMXRecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - mxRecords, recordErr := expandAzureRmDnsMxRecord(d) - if recordErr != nil { - return fmt.Errorf("Error Building Azure RM MX Record: %s", recordErr) - } - createCommand.MXRecords = mxRecords - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS MX Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS MX Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetMXRecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS MX Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS MX Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetMXRecordSetResponse) - d.SetId(resp.ID) - - 
return resourceArmDnsMxRecordRead(d, meta) -} - -func resourceArmDnsMxRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetMXRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS MX Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS MX Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS MX Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetMXRecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if err := d.Set("record", flattenAzureRmDnsMxRecord(resp.MXRecords)); err != nil { - log.Printf("[INFO] Error setting the Azure RM MX Record State: %s", err) - return err - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsMxRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "MX", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS MX Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS MX Record: %s", deleteResponse.Error) - } - - return nil -} - -func expandAzureRmDnsMxRecord(d *schema.ResourceData) ([]dns.MXRecord, error) { - config := d.Get("record").(*schema.Set).List() - records := make([]dns.MXRecord, 0, len(config)) - - for _, pRaw := range config { - data := pRaw.(map[string]interface{}) - - mxrecord 
:= dns.MXRecord{ - Preference: data["preference"].(string), - Exchange: data["exchange"].(string), - } - - records = append(records, mxrecord) - - } - - return records, nil - -} - -func flattenAzureRmDnsMxRecord(records []dns.MXRecord) []map[string]interface{} { - - result := make([]map[string]interface{}, 0, len(records)) - for _, record := range records { - result = append(result, map[string]interface{}{ - "preference": record.Preference, - "exchange": record.Exchange, - }) - } - return result - -} - -func resourceArmDnsMxRecordHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["preference"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["exchange"].(string))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/azurerm/resource_arm_dns_mx_record_test.go b/builtin/providers/azurerm/resource_arm_dns_mx_record_test.go deleted file mode 100644 index e3ef8b836..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_mx_record_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsMxRecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsMxRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsMxRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsMxRecordExists("azurerm_dns_mx_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsMxRecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsMxRecord_basic, ri, ri, ri) - 
postConfig := fmt.Sprintf(testAccAzureRMDnsMxRecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsMxRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsMxRecordExists("azurerm_dns_mx_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_mx_record.test", "record.#", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsMxRecordExists("azurerm_dns_mx_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_mx_record.test", "record.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsMxRecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsMxRecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsMxRecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsMxRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsMxRecordExists("azurerm_dns_mx_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_mx_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsMxRecordExists("azurerm_dns_mx_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_mx_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsMxRecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - 
} - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetMXRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetMXRecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetMXRecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsMxRecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_mx_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetMXRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetMXRecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS MX Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsMxRecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_mx_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - preference = "10" - exchange = "mail1.contoso.com" - } - - record { - preference = "20" - exchange = "mail2.contoso.com" - } -} -` - -var testAccAzureRMDnsMxRecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_mx_record" "test" { - name = "myarecord%d" - resource_group_name = 
"${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - preference = "10" - exchange = "mail1.contoso.com" - } - - record { - preference = "20" - exchange = "mail2.contoso.com" - } - - record { - preference = "50" - exchange = "mail3.contoso.com" - } -} -` - -var testAccAzureRMDnsMxRecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_mx_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - preference = "10" - exchange = "mail1.contoso.com" - } - - record { - preference = "20" - exchange = "mail2.contoso.com" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsMxRecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_mx_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - preference = "10" - exchange = "mail1.contoso.com" - } - - record { - preference = "20" - exchange = "mail2.contoso.com" - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_ns_record.go b/builtin/providers/azurerm/resource_arm_dns_ns_record.go deleted file mode 100644 index 17173239b..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_ns_record.go +++ /dev/null @@ -1,205 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsNsRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsNsRecordCreate, - Read: resourceArmDnsNsRecordRead, - Update: resourceArmDnsNsRecordCreate, - Delete: resourceArmDnsNsRecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "record": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "nsdname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsNsRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateNSRecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - nsRecords, recordErr := expandAzureRmDnsNsRecords(d) - if recordErr != nil { - return fmt.Errorf("Error Building list of Azure RM NS Records: %s", recordErr) - } - createCommand.NSRecords = nsRecords - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS NS Record: %s", err) - } - if 
!createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS NS Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetNSRecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS NS Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS NS Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetNSRecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsNsRecordRead(d, meta) -} - -func resourceArmDnsNsRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetNSRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS Ns Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS NS Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS NS Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetNSRecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if resp.NSRecords != nil { - if err := d.Set("record", flattenAzureRmDnsNsRecords(resp.NSRecords)); err != nil { - log.Printf("[INFO] Error setting the Azure RM NS Record State: %s", err) - return err - } - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsNsRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - 
rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "NS", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS TXT Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS TXT Record: %s", deleteResponse.Error) - } - - return nil -} - -func expandAzureRmDnsNsRecords(d *schema.ResourceData) ([]dns.NSRecord, error) { - configs := d.Get("record").(*schema.Set).List() - nsRecords := make([]dns.NSRecord, 0, len(configs)) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - nsRecord := dns.NSRecord{ - NSDName: data["nsdname"].(string), - } - - nsRecords = append(nsRecords, nsRecord) - - } - - return nsRecords, nil - -} - -func flattenAzureRmDnsNsRecords(records []dns.NSRecord) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(records)) - for _, record := range records { - nsRecord := make(map[string]interface{}) - nsRecord["nsdname"] = record.NSDName - - result = append(result, nsRecord) - } - return result -} diff --git a/builtin/providers/azurerm/resource_arm_dns_ns_record_test.go b/builtin/providers/azurerm/resource_arm_dns_ns_record_test.go deleted file mode 100644 index 2636c5ed2..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_ns_record_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsNsRecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsNsRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMDnsNsRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsNsRecordExists("azurerm_dns_ns_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsNsRecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsNsRecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsNsRecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsNsRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsNsRecordExists("azurerm_dns_ns_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_ns_record.test", "record.#", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsNsRecordExists("azurerm_dns_ns_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_ns_record.test", "record.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsNsRecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsNsRecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsNsRecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsNsRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsNsRecordExists("azurerm_dns_ns_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_ns_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMDnsNsRecordExists("azurerm_dns_ns_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_ns_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsNsRecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetNSRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetNSRecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetNSRecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsNsRecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_ns_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetNSRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetNSRecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS NS Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsNsRecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_ns_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - nsdname = "ns1.contoso.com" - } - - record { - 
nsdname = "ns2.contoso.com" - } -} -` - -var testAccAzureRMDnsNsRecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_ns_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - nsdname = "ns1.contoso.com" - } - - record { - nsdname = "ns2.contoso.com" - } - - record { - nsdname = "ns3.contoso.com" - } -} -` - -var testAccAzureRMDnsNsRecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_ns_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - nsdname = "ns1.contoso.com" - } - - record { - nsdname = "ns2.contoso.com" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsNsRecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_ns_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record { - nsdname = "ns1.contoso.com" - } - - record { - nsdname = "ns2.contoso.com" - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_srv_record.go 
b/builtin/providers/azurerm/resource_arm_dns_srv_record.go deleted file mode 100644 index ca7e2bac5..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_srv_record.go +++ /dev/null @@ -1,239 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsSrvRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsSrvRecordCreate, - Read: resourceArmDnsSrvRecordRead, - Update: resourceArmDnsSrvRecordCreate, - Delete: resourceArmDnsSrvRecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "record": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "priority": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "weight": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "target": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceArmDnsSrvRecordHash, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsSrvRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateSRVRecordSet{ - Name: d.Get("name").(string), - Location: "global", 
- ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - srvRecords, recordErr := expandAzureRmDnsSrvRecord(d) - if recordErr != nil { - return fmt.Errorf("Error Building Azure RM SRV Record: %s", recordErr) - } - createCommand.SRVRecords = srvRecords - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS SRV Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS SRV Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetSRVRecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS SRV Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS SRV Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetSRVRecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsSrvRecordRead(d, meta) -} - -func resourceArmDnsSrvRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetSRVRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS SRV Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS SRV Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS SRV Record: %s", readResponse.Error) - } - - resp 
:= readResponse.Parsed.(*dns.GetSRVRecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if err := d.Set("record", flattenAzureRmDnsSrvRecord(resp.SRVRecords)); err != nil { - log.Printf("[INFO] Error setting the Azure RM SRV Record State: %s", err) - return err - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsSrvRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "SRV", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS SRV Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS SRV Record: %s", deleteResponse.Error) - } - - return nil -} - -func expandAzureRmDnsSrvRecord(d *schema.ResourceData) ([]dns.SRVRecord, error) { - config := d.Get("record").(*schema.Set).List() - records := make([]dns.SRVRecord, 0, len(config)) - - for _, pRaw := range config { - data := pRaw.(map[string]interface{}) - - srvRecord := dns.SRVRecord{ - Priority: data["priority"].(int), - Weight: data["weight"].(int), - Port: data["port"].(int), - Target: data["target"].(string), - } - - records = append(records, srvRecord) - - } - - return records, nil - -} - -func flattenAzureRmDnsSrvRecord(records []dns.SRVRecord) []map[string]interface{} { - - result := make([]map[string]interface{}, 0, len(records)) - for _, record := range records { - result = append(result, map[string]interface{}{ - "priority": record.Priority, - "weight": record.Weight, - "port": record.Port, - "target": record.Target, - }) - } - return result - -} - -func resourceArmDnsSrvRecordHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - 
buf.WriteString(fmt.Sprintf("%d-", m["priority"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["weight"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["target"].(string))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/azurerm/resource_arm_dns_srv_record_test.go b/builtin/providers/azurerm/resource_arm_dns_srv_record_test.go deleted file mode 100644 index 15b30b29b..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_srv_record_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsSrvRecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsSrvRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsSrvRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsSrvRecordExists("azurerm_dns_srv_record.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsSrvRecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsSrvRecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsSrvRecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsSrvRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsSrvRecordExists("azurerm_dns_srv_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_srv_record.test", "record.#", "2"), - 
), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsSrvRecordExists("azurerm_dns_srv_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_srv_record.test", "record.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsSrvRecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsSrvRecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsSrvRecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsSrvRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsSrvRecordExists("azurerm_dns_srv_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_srv_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsSrvRecordExists("azurerm_dns_srv_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_srv_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsSrvRecordExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetSRVRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetSRVRecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetSRVRecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsSrvRecordDestroy(s *terraform.State) error { - 
conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_srv_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetSRVRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetSRVRecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS SRV Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsSrvRecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_srv_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - priority = 1 - weight = 5 - port = 8080 - target = "target1.contoso.com" - } - - record { - priority = 2 - weight = 25 - port = 8080 - target = "target2.contoso.com" - } -} -` - -var testAccAzureRMDnsSrvRecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_srv_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - priority = 1 - weight = 5 - port = 8080 - target = "target1.contoso.com" - } - - record { - priority = 2 - weight = 25 - port = 8080 - target = "target2.contoso.com" - } - - record { - priority = 3 - weight = 100 - port = 8080 - target = "target3.contoso.com" - } -} -` - -var testAccAzureRMDnsSrvRecord_withTags = ` -resource 
"azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_srv_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - priority = 1 - weight = 5 - port = 8080 - target = "target1.contoso.com" - } - - record { - priority = 2 - weight = 25 - port = 8080 - target = "target2.contoso.com" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsSrvRecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_srv_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - priority = 1 - weight = 5 - port = 8080 - target = "target1.contoso.com" - } - - record { - priority = 2 - weight = 25 - port = 8080 - target = "target2.contoso.com" - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_txt_record.go b/builtin/providers/azurerm/resource_arm_dns_txt_record.go deleted file mode 100644 index 37f45deee..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_txt_record.go +++ /dev/null @@ -1,205 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsTxtRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsTxtRecordCreate, - Read: resourceArmDnsTxtRecordRead, - Update: resourceArmDnsTxtRecordCreate, - Delete: 
resourceArmDnsTxtRecordDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "record": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsTxtRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createCommand := &dns.CreateTXTRecordSet{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - TTL: d.Get("ttl").(int), - Tags: *expandedTags, - } - - txtRecords, recordErr := expandAzureRmDnsTxtRecords(d) - if recordErr != nil { - return fmt.Errorf("Error Building list of Azure RM Txt Records: %s", recordErr) - } - createCommand.TXTRecords = txtRecords - - createRequest := rivieraClient.NewRequest() - createRequest.Command = createCommand - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS TXT Record: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS TXT Record: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetTXTRecordSet{ - Name: d.Get("name").(string), - ResourceGroupName: 
d.Get("resource_group_name").(string), - ZoneName: d.Get("zone_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS TXT Record: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS TXT Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetTXTRecordSetResponse) - d.SetId(resp.ID) - - return resourceArmDnsTxtRecordRead(d, meta) -} - -func resourceArmDnsTxtRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetTXTRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS TXT Record: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS TXT Record %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS TXT Record: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetTXTRecordSetResponse) - - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("zone_name", id.Path["dnszones"]) - d.Set("ttl", resp.TTL) - - if resp.TXTRecords != nil { - if err := d.Set("record", flattenAzureRmDnsTxtRecords(resp.TXTRecords)); err != nil { - log.Printf("[INFO] Error setting the Azure RM TXT Record State: %s", err) - return err - } - } - - flattenAndSetTags(d, &resp.Tags) - - return nil -} - -func resourceArmDnsTxtRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteRecordSet{ - RecordSetType: "TXT", - } - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return 
fmt.Errorf("Error deleting DNS TXT Record: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS TXT Record: %s", deleteResponse.Error) - } - - return nil -} - -func expandAzureRmDnsTxtRecords(d *schema.ResourceData) ([]dns.TXTRecord, error) { - configs := d.Get("record").(*schema.Set).List() - txtRecords := make([]dns.TXTRecord, 0, len(configs)) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - txtRecord := dns.TXTRecord{ - Value: data["value"].(string), - } - - txtRecords = append(txtRecords, txtRecord) - - } - - return txtRecords, nil - -} - -func flattenAzureRmDnsTxtRecords(records []dns.TXTRecord) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(records)) - for _, record := range records { - txtRecord := make(map[string]interface{}) - txtRecord["value"] = record.Value - - result = append(result, txtRecord) - } - return result -} diff --git a/builtin/providers/azurerm/resource_arm_dns_txt_record_test.go b/builtin/providers/azurerm/resource_arm_dns_txt_record_test.go deleted file mode 100644 index a80385bd5..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_txt_record_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsTxtRecord_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsTxtRecord_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsTxtRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsTxtRecordExists("azurerm_dns_txt_record.test"), - ), - }, - }, - }) -} - 
-func TestAccAzureRMDnsTxtRecord_updateRecords(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsTxtRecord_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsTxtRecord_updateRecords, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsTxtRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsTxtRecordExists("azurerm_dns_txt_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_txt_record.test", "record.#", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsTxtRecordExists("azurerm_dns_txt_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_txt_record.test", "record.#", "3"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsTxtRecord_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsTxtRecord_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsTxtRecord_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsTxtRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsTxtRecordExists("azurerm_dns_txt_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_txt_record.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsTxtRecordExists("azurerm_dns_txt_record.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_txt_record.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsTxtRecordExists(name string) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetTXTRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetTXTRecordSet: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetTXTRecordSet: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsTxtRecordDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_txt_record" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetTXTRecordSet{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetTXTRecordSet: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS TXT Record still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsTxtRecord_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_txt_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - value = "Quick brown fox" - } - - record { - value = "Another test txt string" - } -} -` - -var testAccAzureRMDnsTxtRecord_updateRecords = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - 
resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_txt_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - value = "Quick brown fox" - } - - record { - value = "Another test txt string" - } - - record { - value = "A wild 3rd record appears" - } -} -` - -var testAccAzureRMDnsTxtRecord_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_txt_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - - record { - value = "Quick brown fox" - } - - record { - value = "Another test txt string" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsTxtRecord_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_dns_txt_record" "test" { - name = "myarecord%d" - resource_group_name = "${azurerm_resource_group.test.name}" - zone_name = "${azurerm_dns_zone.test.name}" - ttl = "300" - record { - value = "Quick brown fox" - } - - record { - value = "Another test txt string" - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_dns_zone.go b/builtin/providers/azurerm/resource_arm_dns_zone.go deleted file mode 100644 index a5d9ed931..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_zone.go +++ /dev/null @@ -1,161 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/dns" -) - -func resourceArmDnsZone() *schema.Resource { - return &schema.Resource{ - Create: resourceArmDnsZoneCreate, - Read: resourceArmDnsZoneRead, - Update: resourceArmDnsZoneCreate, - Delete: resourceArmDnsZoneDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: resourceAzurermResourceGroupNameDiffSuppress, - }, - - "number_of_record_sets": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "max_number_of_record_sets": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "name_servers": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmDnsZoneCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createRequest := rivieraClient.NewRequest() - createRequest.Command = &dns.CreateDNSZone{ - Name: d.Get("name").(string), - Location: "global", - ResourceGroupName: d.Get("resource_group_name").(string), - Tags: *expandedTags, - } - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating DNS Zone: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating DNS Zone: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &dns.GetDNSZone{ - Name: d.Get("name").(string), - ResourceGroupName: 
d.Get("resource_group_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS Zone: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading DNS Zone: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetDNSZoneResponse) - d.SetId(*resp.ID) - - return resourceArmDnsZoneRead(d, meta) -} - -func resourceArmDnsZoneRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &dns.GetDNSZone{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading DNS Zone: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading DNS Zone %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading DNS Zone: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*dns.GetDNSZoneResponse) - - d.Set("resource_group_name", resGroup) - d.Set("number_of_record_sets", resp.NumberOfRecordSets) - d.Set("max_number_of_record_sets", resp.MaxNumberOfRecordSets) - d.Set("name", resp.Name) - - nameServers := make([]string, 0, len(resp.NameServers)) - for _, ns := range resp.NameServers { - nameServers = append(nameServers, *ns) - } - if err := d.Set("name_servers", nameServers); err != nil { - return err - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmDnsZoneDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &dns.DeleteDNSZone{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting DNS Zone: %s", 
err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting DNS Zone: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_dns_zone_test.go b/builtin/providers/azurerm/resource_arm_dns_zone_test.go deleted file mode 100644 index 312573de8..000000000 --- a/builtin/providers/azurerm/resource_arm_dns_zone_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/dns" -) - -func TestAccAzureRMDnsZone_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMDnsZone_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsZoneExists("azurerm_dns_zone.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMDnsZone_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMDnsZone_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMDnsZone_withTagsUupdate, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMDnsZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsZoneExists("azurerm_dns_zone.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_zone.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMDnsZoneExists("azurerm_dns_zone.test"), - resource.TestCheckResourceAttr( - "azurerm_dns_zone.test", "tags.%", 
"1"), - ), - }, - }, - }) -} - -func testCheckAzureRMDnsZoneExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetDNSZone{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetDNSZone: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetDNSZone: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMDnsZoneDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_dns_zone" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &dns.GetDNSZone{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetDNSZone: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: DNS zone still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMDnsZone_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" -} -` - -var testAccAzureRMDnsZone_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMDnsZone_withTagsUupdate = ` -resource "azurerm_resource_group" "test" { - 
name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_dns_zone" "test" { - name = "acctestzone%d.com" - resource_group_name = "${azurerm_resource_group.test.name}" - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_eventhub.go b/builtin/providers/azurerm/resource_arm_eventhub.go deleted file mode 100644 index 672f8a927..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub.go +++ /dev/null @@ -1,173 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/eventhub" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmEventHub() *schema.Resource { - return &schema.Resource{ - Create: resourceArmEventHubCreate, - Read: resourceArmEventHubRead, - Update: resourceArmEventHubCreate, - Delete: resourceArmEventHubDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "namespace_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "partition_count": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateEventHubPartitionCount, - }, - - "message_retention": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateEventHubMessageRetentionCount, - }, - - "partition_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - Computed: true, - }, - }, - } -} - -func resourceArmEventHubCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - eventhubClient := client.eventHubClient - log.Printf("[INFO] preparing arguments for Azure ARM EventHub creation.") - - name := d.Get("name").(string) - location := 
d.Get("location").(string) - namespaceName := d.Get("namespace_name").(string) - resGroup := d.Get("resource_group_name").(string) - partitionCount := int64(d.Get("partition_count").(int)) - messageRetention := int64(d.Get("message_retention").(int)) - - parameters := eventhub.CreateOrUpdateParameters{ - Location: &location, - Properties: &eventhub.Properties{ - PartitionCount: &partitionCount, - MessageRetentionInDays: &messageRetention, - }, - } - - _, err := eventhubClient.CreateOrUpdate(resGroup, namespaceName, name, parameters) - if err != nil { - return err - } - - read, err := eventhubClient.Get(resGroup, namespaceName, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmEventHubRead(d, meta) -} - -func resourceArmEventHubRead(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*ArmClient).eventHubClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - name := id.Path["eventhubs"] - - resp, err := eventhubClient.Get(resGroup, namespaceName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub %s: %s", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resGroup) - - d.Set("partition_count", resp.Properties.PartitionCount) - d.Set("message_retention", resp.Properties.MessageRetentionInDays) - d.Set("partition_ids", resp.Properties.PartitionIds) - - return nil -} - -func resourceArmEventHubDelete(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*ArmClient).eventHubClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil 
{ - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - name := id.Path["eventhubs"] - - resp, err := eventhubClient.Delete(resGroup, namespaceName, name) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub'%s': %s", name, err) - } - - return nil -} - -func validateEventHubPartitionCount(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if !(32 >= value && value >= 2) { - errors = append(errors, fmt.Errorf("EventHub Partition Count has to be between 2 and 32")) - } - return -} - -func validateEventHubMessageRetentionCount(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if !(7 >= value && value >= 1) { - errors = append(errors, fmt.Errorf("EventHub Retention Count has to be between 1 and 7")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go deleted file mode 100644 index 2c3e53622..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule.go +++ /dev/null @@ -1,246 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/eventhub" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmEventHubAuthorizationRule() *schema.Resource { - return &schema.Resource{ - Create: resourceArmEventHubAuthorizationRuleCreateUpdate, - Read: resourceArmEventHubAuthorizationRuleRead, - Update: resourceArmEventHubAuthorizationRuleCreateUpdate, - Delete: resourceArmEventHubAuthorizationRuleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "namespace_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "eventhub_name": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "listen": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "send": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "manage": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "primary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_connection_string": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_connection_string": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceArmEventHubAuthorizationRuleCreateUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).eventHubClient - log.Printf("[INFO] preparing arguments for AzureRM EventHub Authorization Rule creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - namespaceName := d.Get("namespace_name").(string) - eventHubName := d.Get("eventhub_name").(string) - resGroup := d.Get("resource_group_name").(string) - - rights, err := expandEventHubAuthorizationRuleAccessRights(d) - if err != nil { - return err - } - - parameters := eventhub.SharedAccessAuthorizationRuleCreateOrUpdateParameters{ - Name: &name, - Location: &location, - SharedAccessAuthorizationRuleProperties: &eventhub.SharedAccessAuthorizationRuleProperties{ - Rights: rights, - }, - } - - _, err = client.CreateOrUpdateAuthorizationRule(resGroup, namespaceName, eventHubName, name, parameters) - if err != nil { - return err - } - - read, err := client.GetAuthorizationRule(resGroup, namespaceName, eventHubName, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub Authorization Rule %s (resource group %s) ID", name, resGroup) - } - - 
d.SetId(*read.ID) - - return resourceArmEventHubAuthorizationRuleRead(d, meta) -} - -func resourceArmEventHubAuthorizationRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).eventHubClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - name := id.Path["authorizationRules"] - - resp, err := client.GetAuthorizationRule(resGroup, namespaceName, eventHubName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - keysResp, err := client.ListKeys(resGroup, namespaceName, eventHubName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Authorization Rule List Keys %s: %+v", name, err) - } - - d.Set("name", name) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("eventhub_name", eventHubName) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resGroup) - - flattenEventHubAuthorizationRuleAccessRights(d, resp) - - d.Set("primary_key", keysResp.PrimaryKey) - d.Set("primary_connection_string", keysResp.PrimaryConnectionString) - d.Set("secondary_key", keysResp.SecondaryKey) - d.Set("secondary_connection_string", keysResp.SecondaryConnectionString) - - return nil -} - -func resourceArmEventHubAuthorizationRuleDelete(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*ArmClient).eventHubClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - name := id.Path["authorizationRules"] - - resp, err := eventhubClient.DeleteAuthorizationRule(resGroup, namespaceName, eventHubName, name) - - if resp.StatusCode 
!= http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Authorization Rule '%s': %+v", name, err) - } - - return nil -} - -func expandEventHubAuthorizationRuleAccessRights(d *schema.ResourceData) (*[]eventhub.AccessRights, error) { - canSend := d.Get("send").(bool) - canListen := d.Get("listen").(bool) - canManage := d.Get("manage").(bool) - rights := []eventhub.AccessRights{} - if canListen { - rights = append(rights, eventhub.Listen) - } - - if canSend { - rights = append(rights, eventhub.Send) - } - - if canManage { - rights = append(rights, eventhub.Manage) - } - - if len(rights) == 0 { - return nil, fmt.Errorf("At least one Authorization Rule State must be enabled (e.g. Listen/Manage/Send)") - } - - if canManage && !(canListen && canSend) { - return nil, fmt.Errorf("In order to enable the 'Manage' Authorization Rule - both the 'Listen' and 'Send' rules must be enabled") - } - - return &rights, nil -} - -func flattenEventHubAuthorizationRuleAccessRights(d *schema.ResourceData, resp eventhub.SharedAccessAuthorizationRuleResource) { - - var canListen = false - var canSend = false - var canManage = false - - for _, right := range *resp.Rights { - switch right { - case eventhub.Listen: - canListen = true - case eventhub.Send: - canSend = true - case eventhub.Manage: - canManage = true - default: - log.Printf("[DEBUG] Unknown Authorization Rule Right '%s'", right) - } - } - - d.Set("listen", canListen) - d.Set("send", canSend) - d.Set("manage", canManage) -} diff --git a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule_test.go b/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule_test.go deleted file mode 100644 index aee455b27..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_authorization_rule_test.go +++ /dev/null @@ -1,263 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMEventHubAuthorizationRule_listen(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_listen, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubAuthorizationRuleExists("azurerm_eventhub_authorization_rule.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_send(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_send, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubAuthorizationRuleExists("azurerm_eventhub_authorization_rule.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_readwrite(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_readwrite, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubAuthorizationRuleExists("azurerm_eventhub_authorization_rule.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubAuthorizationRule_manage(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubAuthorizationRule_manage, ri, 
ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubAuthorizationRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubAuthorizationRuleExists("azurerm_eventhub_authorization_rule.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMEventHubAuthorizationRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).eventHubClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_eventhub_authorization_rule" { - continue - } - - name := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - eventHubName := rs.Primary.Attributes["eventhub_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.GetAuthorizationRule(resourceGroup, namespaceName, eventHubName, name) - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("EventHub Authorization Rule still exists:\n%#v", resp) - } - } - - return nil -} - -func testCheckAzureRMEventHubAuthorizationRuleExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - eventHubName := rs.Primary.Attributes["eventhub_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Event Hub: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).eventHubClient - resp, err := conn.GetAuthorizationRule(resourceGroup, namespaceName, eventHubName, name) - if err != nil { - 
return fmt.Errorf("Bad: Get on eventHubClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Event Hub Authorization Rule %q (eventhub %s, namespace %s / resource group: %s) does not exist", name, eventHubName, namespaceName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMEventHubAuthorizationRule_listen = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - partition_count = 2 - message_retention = 7 -} -resource "azurerm_eventhub_authorization_rule" "test" { - name = "acctesteventhubrule-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - listen = true - send = false - manage = false -}` - -var testAccAzureRMEventHubAuthorizationRule_send = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - partition_count = 2 - 
message_retention = 7 -} -resource "azurerm_eventhub_authorization_rule" "test" { - name = "acctesteventhubrule-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - listen = false - send = true - manage = false -}` - -var testAccAzureRMEventHubAuthorizationRule_readwrite = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - partition_count = 2 - message_retention = 7 -} -resource "azurerm_eventhub_authorization_rule" "test" { - name = "acctesteventhubrule-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - listen = true - send = true - manage = false -}` - -var testAccAzureRMEventHubAuthorizationRule_manage = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - resource_group_name = 
"${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - partition_count = 2 - message_retention = 7 -} -resource "azurerm_eventhub_authorization_rule" "test" { - name = "acctesteventhubrule-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - listen = true - send = true - manage = true -}` diff --git a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go deleted file mode 100644 index 618ec0db1..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group.go +++ /dev/null @@ -1,148 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/eventhub" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmEventHubConsumerGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceArmEventHubConsumerGroupCreateUpdate, - Read: resourceArmEventHubConsumerGroupRead, - Update: resourceArmEventHubConsumerGroupCreateUpdate, - Delete: resourceArmEventHubConsumerGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "namespace_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "eventhub_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "user_metadata": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceArmEventHubConsumerGroupCreateUpdate(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*ArmClient) - eventhubClient := client.eventHubConsumerGroupClient - log.Printf("[INFO] preparing arguments for AzureRM EventHub Consumer Group creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - namespaceName := d.Get("namespace_name").(string) - eventHubName := d.Get("eventhub_name").(string) - resGroup := d.Get("resource_group_name").(string) - userMetaData := d.Get("user_metadata").(string) - - parameters := eventhub.ConsumerGroupCreateOrUpdateParameters{ - Name: &name, - Location: &location, - ConsumerGroupProperties: &eventhub.ConsumerGroupProperties{ - UserMetadata: &userMetaData, - }, - } - - _, err := eventhubClient.CreateOrUpdate(resGroup, namespaceName, eventHubName, name, parameters) - if err != nil { - return err - } - - read, err := eventhubClient.Get(resGroup, namespaceName, eventHubName, name) - - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub Consumer Group %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmEventHubConsumerGroupRead(d, meta) -} - -func resourceArmEventHubConsumerGroupRead(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*ArmClient).eventHubConsumerGroupClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - name := id.Path["consumergroups"] - - resp, err := eventhubClient.Get(resGroup, namespaceName, eventHubName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Consumer Group %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", name) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("eventhub_name", eventHubName) - d.Set("namespace_name", namespaceName) - d.Set("resource_group_name", resGroup) - 
d.Set("user_metadata", resp.ConsumerGroupProperties.UserMetadata) - - return nil -} - -func resourceArmEventHubConsumerGroupDelete(d *schema.ResourceData, meta interface{}) error { - eventhubClient := meta.(*ArmClient).eventHubConsumerGroupClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - eventHubName := id.Path["eventhubs"] - name := id.Path["consumergroups"] - - resp, err := eventhubClient.Delete(resGroup, namespaceName, eventHubName, name) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Consumer Group '%s': %+v", name, err) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group_test.go b/builtin/providers/azurerm/resource_arm_eventhub_consumer_group_test.go deleted file mode 100644 index 04f3a9503..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_consumer_group_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMEventHubConsumerGroup_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubConsumerGroup_basic, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubConsumerGroupDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubConsumerGroupExists("azurerm_eventhub_consumer_group.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubConsumerGroup_complete(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubConsumerGroup_complete, ri, ri, ri, ri) 
- - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubConsumerGroupDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubConsumerGroupExists("azurerm_eventhub_consumer_group.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMEventHubConsumerGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).eventHubConsumerGroupClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_eventhub_consumer_group" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - eventHubName := rs.Primary.Attributes["eventhub_name"] - - resp, err := conn.Get(resourceGroup, namespaceName, eventHubName, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("EventHub Consumer Group still exists:\n%#v", resp.ConsumerGroupProperties) - } - } - - return nil -} - -func testCheckAzureRMEventHubConsumerGroupExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Event Hub Consumer Group: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).eventHubConsumerGroupClient - - namespaceName := rs.Primary.Attributes["namespace_name"] - eventHubName := rs.Primary.Attributes["eventhub_name"] - - resp, err := conn.Get(resourceGroup, namespaceName, eventHubName, name) - if err != nil { - 
return fmt.Errorf("Bad: Get on eventHubConsumerGroupClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Event Hub Consumer Group %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMEventHubConsumerGroup_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} - -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - partition_count = 2 - message_retention = 7 -} - -resource "azurerm_eventhub_consumer_group" "test" { - name = "acctesteventhubcg-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" -} -` - -var testAccAzureRMEventHubConsumerGroup_complete = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} - -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - partition_count = 2 - message_retention = 7 -} - -resource "azurerm_eventhub_consumer_group" "test" { - name = 
"acctesteventhubcg-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - eventhub_name = "${azurerm_eventhub.test.name}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - user_metadata = "some-meta-data" -} -` diff --git a/builtin/providers/azurerm/resource_arm_eventhub_namespace.go b/builtin/providers/azurerm/resource_arm_eventhub_namespace.go deleted file mode 100644 index aa940dae2..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_namespace.go +++ /dev/null @@ -1,214 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "strings" - - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/eventhub" - "github.com/hashicorp/terraform/helper/schema" -) - -// Default Authorization Rule/Policy created by Azure, used to populate the -// default connection strings and keys -var eventHubNamespaceDefaultAuthorizationRule = "RootManageSharedAccessKey" - -func resourceArmEventHubNamespace() *schema.Resource { - return &schema.Resource{ - Create: resourceArmEventHubNamespaceCreate, - Read: resourceArmEventHubNamespaceRead, - Update: resourceArmEventHubNamespaceCreate, - Delete: resourceArmEventHubNamespaceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sku": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEventHubNamespaceSku, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "capacity": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validateEventHubNamespaceCapacity, - }, - - "default_primary_connection_string": { - Type: schema.TypeString, - Computed: true, - }, - - "default_secondary_connection_string": { - Type: 
schema.TypeString, - Computed: true, - }, - - "default_primary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "default_secondary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmEventHubNamespaceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - namespaceClient := client.eventHubNamespacesClient - log.Printf("[INFO] preparing arguments for Azure ARM EventHub Namespace creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - sku := d.Get("sku").(string) - capacity := int32(d.Get("capacity").(int)) - tags := d.Get("tags").(map[string]interface{}) - - parameters := eventhub.NamespaceCreateOrUpdateParameters{ - Location: &location, - Sku: &eventhub.Sku{ - Name: eventhub.SkuName(sku), - Tier: eventhub.SkuTier(sku), - Capacity: &capacity, - }, - Tags: expandTags(tags), - } - - _, error := namespaceClient.CreateOrUpdate(resGroup, name, parameters, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := namespaceClient.Get(resGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read EventHub Namespace %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmEventHubNamespaceRead(d, meta) -} - -func resourceArmEventHubNamespaceRead(d *schema.ResourceData, meta interface{}) error { - namespaceClient := meta.(*ArmClient).eventHubNamespacesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["namespaces"] - - resp, err := namespaceClient.Get(resGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure EventHub Namespace %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - 
d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("resource_group_name", resGroup) - d.Set("sku", string(resp.Sku.Name)) - d.Set("capacity", resp.Sku.Capacity) - - keys, err := namespaceClient.ListKeys(resGroup, name, eventHubNamespaceDefaultAuthorizationRule) - if err != nil { - log.Printf("[ERROR] Unable to List default keys for Namespace %s: %+v", name, err) - } else { - d.Set("default_primary_connection_string", keys.PrimaryConnectionString) - d.Set("default_secondary_connection_string", keys.SecondaryConnectionString) - d.Set("default_primary_key", keys.PrimaryKey) - d.Set("default_secondary_key", keys.SecondaryKey) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmEventHubNamespaceDelete(d *schema.ResourceData, meta interface{}) error { - namespaceClient := meta.(*ArmClient).eventHubNamespacesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["namespaces"] - - deleteResp, error := namespaceClient.Delete(resGroup, name, make(chan struct{})) - resp := <-deleteResp - err = <-error - - if resp.StatusCode == http.StatusNotFound { - return nil - } - - if err != nil { - return fmt.Errorf("Error issuing Azure ARM delete request of EventHub Namespace '%s': %+v", name, err) - } - - return nil -} - -func validateEventHubNamespaceSku(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - skus := map[string]bool{ - "basic": true, - "standard": true, - } - - if !skus[value] { - errors = append(errors, fmt.Errorf("EventHub Namespace SKU can only be Basic or Standard")) - } - return -} - -func validateEventHubNamespaceCapacity(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - capacities := map[int]bool{ - 1: true, - 2: true, - 4: true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("EventHub Namespace Capacity can only be 1, 2 or 4")) - } - return -} diff 
--git a/builtin/providers/azurerm/resource_arm_eventhub_namespace_test.go b/builtin/providers/azurerm/resource_arm_eventhub_namespace_test.go deleted file mode 100644 index 6abfd9784..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_namespace_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMEventHubNamespaceCapacity_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 17, - ErrCount: 1, - }, - { - Value: 1, - ErrCount: 0, - }, - { - Value: 2, - ErrCount: 0, - }, - { - Value: 3, - ErrCount: 1, - }, - { - Value: 4, - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateEventHubNamespaceCapacity(tc.Value, "azurerm_eventhub_namespace") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM EventHub Namespace Capacity to trigger a validation error") - } - } -} - -func TestAccAzureRMEventHubNamespaceSku_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Basic", - ErrCount: 0, - }, - { - Value: "Standard", - ErrCount: 0, - }, - { - Value: "Premium", - ErrCount: 1, - }, - { - Value: "Random", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateEventHubNamespaceSku(tc.Value, "azurerm_eventhub_namespace") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM EventHub Namespace Sku to trigger a validation error") - } - } -} - -func TestAccAzureRMEventHubNamespace_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubNamespace_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubNamespaceDestroy, - Steps: 
[]resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubNamespaceExists("azurerm_eventhub_namespace.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubNamespace_standard(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubNamespace_standard, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubNamespaceExists("azurerm_eventhub_namespace.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubNamespace_readDefaultKeys(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHubNamespace_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubNamespaceExists("azurerm_eventhub_namespace.test"), - resource.TestMatchResourceAttr( - "azurerm_eventhub_namespace.test", "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr( - "azurerm_eventhub_namespace.test", "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr( - "azurerm_eventhub_namespace.test", "default_primary_key", regexp.MustCompile(".+")), - resource.TestMatchResourceAttr( - "azurerm_eventhub_namespace.test", "default_secondary_key", regexp.MustCompile(".+")), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHubNamespace_NonStandardCasing(t *testing.T) { - - ri := acctest.RandInt() - config := testAccAzureRMEventHubNamespaceNonStandardCasing(ri) - - resource.Test(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubNamespaceExists("azurerm_eventhub_namespace.test"), - ), - }, - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMEventHubNamespaceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).eventHubNamespacesClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_eventhub_namespace" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("EventHub Namespace still exists:\n%#v", resp.NamespaceProperties) - } - } - - return nil -} - -func testCheckAzureRMEventHubNamespaceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - namespaceName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Event Hub Namespace: %s", namespaceName) - } - - conn := testAccProvider.Meta().(*ArmClient).eventHubNamespacesClient - - resp, err := conn.Get(resourceGroup, namespaceName) - if err != nil { - return fmt.Errorf("Bad: Get on eventHubNamespacesClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Event Hub Namespace %q (resource group: %q) does not exist", namespaceName, resourceGroup) - } - - 
return nil - } -} - -var testAccAzureRMEventHubNamespace_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Basic" -} -` - -var testAccAzureRMEventHubNamespace_standard = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" - capacity = "2" -} -` - -func testAccAzureRMEventHubNamespaceNonStandardCasing(ri int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "basic" -} -`, ri, ri) -} diff --git a/builtin/providers/azurerm/resource_arm_eventhub_test.go b/builtin/providers/azurerm/resource_arm_eventhub_test.go deleted file mode 100644 index 773d0ffca..000000000 --- a/builtin/providers/azurerm/resource_arm_eventhub_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMEventHubPartitionCount_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 1, - ErrCount: 1, - }, - { - Value: 2, - ErrCount: 0, - }, - { - Value: 3, - ErrCount: 0, - }, - { - Value: 21, - ErrCount: 0, - }, - { - Value: 32, - ErrCount: 0, - 
}, - { - Value: 33, - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateEventHubPartitionCount(tc.Value, "azurerm_eventhub") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM EventHub Partition Count to trigger a validation error") - } - } -} - -func TestAccAzureRMEventHubMessageRetentionCount_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 0, - ErrCount: 1, - }, { - Value: 1, - ErrCount: 0, - }, { - Value: 2, - ErrCount: 0, - }, { - Value: 3, - ErrCount: 0, - }, { - Value: 4, - ErrCount: 0, - }, { - Value: 5, - ErrCount: 0, - }, { - Value: 6, - ErrCount: 0, - }, { - Value: 7, - ErrCount: 0, - }, { - Value: 8, - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateEventHubMessageRetentionCount(tc.Value, "azurerm_eventhub") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM EventHub Message Retention Count to trigger a validation error") - } - } -} - -func TestAccAzureRMEventHub_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHub_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubExists("azurerm_eventhub.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMEventHub_standard(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMEventHub_standard, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMEventHubDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMEventHubExists("azurerm_eventhub.test"), - ), - }, - }, - }) -} - -func 
testCheckAzureRMEventHubDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).eventHubClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_eventhub" { - continue - } - - name := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, namespaceName, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("EventHub still exists:\n%#v", resp.Properties) - } - } - - return nil -} - -func testCheckAzureRMEventHubExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Event Hub: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).eventHubClient - - resp, err := conn.Get(resourceGroup, namespaceName, name) - if err != nil { - return fmt.Errorf("Bad: Get on eventHubClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Event Hub %q (namespace %q / resource group: %q) does not exist", name, namespaceName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMEventHub_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Basic" -} - -resource 
"azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - partition_count = 2 - message_retention = 1 -} -` - -var testAccAzureRMEventHub_standard = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_eventhub_namespace" "test" { - name = "acctesteventhubnamespace-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Standard" -} - -resource "azurerm_eventhub" "test" { - name = "acctesteventhub-%d" - namespace_name = "${azurerm_eventhub_namespace.test.name}" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - partition_count = 2 - message_retention = 7 -} -` diff --git a/builtin/providers/azurerm/resource_arm_express_route_circuit.go b/builtin/providers/azurerm/resource_arm_express_route_circuit.go deleted file mode 100644 index 2c03cbf03..000000000 --- a/builtin/providers/azurerm/resource_arm_express_route_circuit.go +++ /dev/null @@ -1,242 +0,0 @@ -package azurerm - -import ( - "bytes" - "log" - "strings" - - "fmt" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" -) - -func resourceArmExpressRouteCircuit() *schema.Resource { - return &schema.Resource{ - Create: resourceArmExpressRouteCircuitCreateOrUpdate, - Read: resourceArmExpressRouteCircuitRead, - Update: resourceArmExpressRouteCircuitCreateOrUpdate, - Delete: resourceArmExpressRouteCircuitDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "service_provider_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "peering_location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "bandwidth_in_mbps": { - Type: schema.TypeInt, - Required: true, - }, - - "sku": { - Type: schema.TypeSet, - Required: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "tier": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - string(network.ExpressRouteCircuitSkuTierStandard), - string(network.ExpressRouteCircuitSkuTierPremium), - }, true), - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "family": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - string(network.MeteredData), - string(network.UnlimitedData), - }, true), - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - }, - }, - Set: resourceArmExpressRouteCircuitSkuHash, - }, - - "allow_classic_operations": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "service_provider_provisioning_state": { - Type: schema.TypeString, - Computed: true, - }, - - "service_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmExpressRouteCircuitCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - ercClient := client.expressRouteCircuitClient - - log.Printf("[INFO] preparing arguments for Azure ARM ExpressRouteCircuit creation.") - - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - location := d.Get("location").(string) - 
serviceProviderName := d.Get("service_provider_name").(string) - peeringLocation := d.Get("peering_location").(string) - bandwidthInMbps := int32(d.Get("bandwidth_in_mbps").(int)) - sku := expandExpressRouteCircuitSku(d) - allowRdfeOps := d.Get("allow_classic_operations").(bool) - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - erc := network.ExpressRouteCircuit{ - Name: &name, - Location: &location, - Sku: sku, - ExpressRouteCircuitPropertiesFormat: &network.ExpressRouteCircuitPropertiesFormat{ - AllowClassicOperations: &allowRdfeOps, - ServiceProviderProperties: &network.ExpressRouteCircuitServiceProviderProperties{ - ServiceProviderName: &serviceProviderName, - PeeringLocation: &peeringLocation, - BandwidthInMbps: &bandwidthInMbps, - }, - }, - Tags: expandedTags, - } - - _, error := ercClient.CreateOrUpdate(resGroup, name, erc, make(chan struct{})) - err := <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating ExpressRouteCircuit {{err}}", err) - } - - read, err := ercClient.Get(resGroup, name) - if err != nil { - return errwrap.Wrapf("Error Getting ExpressRouteCircuit {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read ExpressRouteCircuit %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmExpressRouteCircuitRead(d, meta) -} - -func resourceArmExpressRouteCircuitRead(d *schema.ResourceData, meta interface{}) error { - erc, resGroup, err := retrieveErcByResourceId(d.Id(), meta) - if err != nil { - return err - } - - if erc == nil { - d.SetId("") - log.Printf("[INFO] Express Route Circuit %q not found. 
Removing from state", d.Get("name").(string)) - return nil - } - - d.Set("name", erc.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", erc.Location) - - if erc.ServiceProviderProperties != nil { - d.Set("service_provider_name", erc.ServiceProviderProperties.ServiceProviderName) - d.Set("peering_location", erc.ServiceProviderProperties.PeeringLocation) - d.Set("bandwidth_in_mbps", erc.ServiceProviderProperties.BandwidthInMbps) - } - - if erc.Sku != nil { - d.Set("sku", schema.NewSet(resourceArmExpressRouteCircuitSkuHash, flattenExpressRouteCircuitSku(erc.Sku))) - } - - d.Set("service_provider_provisioning_state", string(erc.ServiceProviderProvisioningState)) - d.Set("service_key", erc.ServiceKey) - d.Set("allow_classic_operations", erc.AllowClassicOperations) - - flattenAndSetTags(d, erc.Tags) - - return nil -} - -func resourceArmExpressRouteCircuitDelete(d *schema.ResourceData, meta interface{}) error { - ercClient := meta.(*ArmClient).expressRouteCircuitClient - - resGroup, name, err := extractResourceGroupAndErcName(d.Id()) - if err != nil { - return errwrap.Wrapf("Error Parsing Azure Resource ID {{err}}", err) - } - - _, error := ercClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - return err -} - -func expandExpressRouteCircuitSku(d *schema.ResourceData) *network.ExpressRouteCircuitSku { - skuSettings := d.Get("sku").(*schema.Set) - v := skuSettings.List()[0].(map[string]interface{}) // [0] is guarded by MinItems in schema. 
- tier := v["tier"].(string) - family := v["family"].(string) - name := fmt.Sprintf("%s_%s", tier, family) - - return &network.ExpressRouteCircuitSku{ - Name: &name, - Tier: network.ExpressRouteCircuitSkuTier(tier), - Family: network.ExpressRouteCircuitSkuFamily(family), - } -} - -func flattenExpressRouteCircuitSku(sku *network.ExpressRouteCircuitSku) []interface{} { - return []interface{}{ - map[string]interface{}{ - "tier": string(sku.Tier), - "family": string(sku.Family), - }, - } -} - -func resourceArmExpressRouteCircuitSkuHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["tier"].(string)))) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["family"].(string)))) - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/azurerm/resource_arm_express_route_circuit_test.go b/builtin/providers/azurerm/resource_arm_express_route_circuit_test.go deleted file mode 100644 index 24c6c7fae..000000000 --- a/builtin/providers/azurerm/resource_arm_express_route_circuit_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMExpressRouteCircuit_basic(t *testing.T) { - var erc network.ExpressRouteCircuit - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMExpressRouteCircuitDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMExpressRouteCircuit_basic(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMExpressRouteCircuitExists("azurerm_express_route_circuit.test", &erc), - ), - }, - }, - }) -} - -func testCheckAzureRMExpressRouteCircuitExists(name string, erc 
*network.ExpressRouteCircuit) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - expressRouteCircuitName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Express Route Circuit: %s", expressRouteCircuitName) - } - - conn := testAccProvider.Meta().(*ArmClient).expressRouteCircuitClient - - resp, err := conn.Get(resourceGroup, expressRouteCircuitName) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Express Route Circuit %q (resource group: %q) does not exist", expressRouteCircuitName, resourceGroup) - } - - return fmt.Errorf("Bad: Get on expressRouteCircuitClient: %s", err) - } - - *erc = resp - - return nil - } -} - -func testCheckAzureRMExpressRouteCircuitDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).expressRouteCircuitClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_express_route_circuit" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Express Route Circuit still exists:\n%#v", resp.ExpressRouteCircuitPropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMExpressRouteCircuit_basic(rInt int) string { - return fmt.Sprintf(` - resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" - } - - resource "azurerm_express_route_circuit" "test" { - name = "acctest-erc-%[1]d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - service_provider_name = "Equinix" - peering_location = "Silicon 
Valley" - bandwidth_in_mbps = 50 - sku { - tier = "Standard" - family = "MeteredData" - } - allow_classic_operations = false - - tags { - Environment = "production" - Purpose = "AcceptanceTests" - } - }`, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_key_vault.go b/builtin/providers/azurerm/resource_arm_key_vault.go deleted file mode 100644 index 2038f9b03..000000000 --- a/builtin/providers/azurerm/resource_arm_key_vault.go +++ /dev/null @@ -1,324 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/keyvault" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "github.com/satori/uuid" -) - -// As can be seen in the API definition, the Sku Family only supports the value -// `A` and is a required field -// https://github.com/Azure/azure-rest-api-specs/blob/master/arm-keyvault/2015-06-01/swagger/keyvault.json#L239 -var armKeyVaultSkuFamily = "A" - -func resourceArmKeyVault() *schema.Resource { - return &schema.Resource{ - Create: resourceArmKeyVaultCreate, - Read: resourceArmKeyVaultRead, - Update: resourceArmKeyVaultCreate, - Delete: resourceArmKeyVaultDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sku": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - string(keyvault.Standard), - string(keyvault.Premium), - }, false), - }, - }, - }, - }, - - "vault_uri": { - Type: schema.TypeString, - Computed: true, - }, - - "tenant_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validateUUID, - }, - - "access_policy": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "tenant_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateUUID, - }, - "object_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateUUID, - }, - "key_permissions": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - string(keyvault.KeyPermissionsAll), - string(keyvault.KeyPermissionsBackup), - string(keyvault.KeyPermissionsCreate), - string(keyvault.KeyPermissionsDecrypt), - string(keyvault.KeyPermissionsDelete), - string(keyvault.KeyPermissionsEncrypt), - string(keyvault.KeyPermissionsGet), - string(keyvault.KeyPermissionsImport), - string(keyvault.KeyPermissionsList), - string(keyvault.KeyPermissionsRestore), - string(keyvault.KeyPermissionsSign), - string(keyvault.KeyPermissionsUnwrapKey), - string(keyvault.KeyPermissionsUpdate), - string(keyvault.KeyPermissionsVerify), - string(keyvault.KeyPermissionsWrapKey), - }, false), - }, - }, - "secret_permissions": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - string(keyvault.SecretPermissionsAll), - string(keyvault.SecretPermissionsDelete), - string(keyvault.SecretPermissionsGet), - string(keyvault.SecretPermissionsList), - string(keyvault.SecretPermissionsSet), - }, false), - }, - }, - }, - }, - }, - - "enabled_for_deployment": { - Type: schema.TypeBool, - Optional: true, - }, - - "enabled_for_disk_encryption": { - Type: schema.TypeBool, - Optional: true, - }, - - "enabled_for_template_deployment": { - Type: schema.TypeBool, - Optional: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmKeyVaultCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).keyVaultClient 
- log.Printf("[INFO] preparing arguments for Azure ARM KeyVault creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tenantUUID := uuid.FromStringOrNil(d.Get("tenant_id").(string)) - enabledForDeployment := d.Get("enabled_for_deployment").(bool) - enabledForDiskEncryption := d.Get("enabled_for_disk_encryption").(bool) - enabledForTemplateDeployment := d.Get("enabled_for_template_deployment").(bool) - tags := d.Get("tags").(map[string]interface{}) - - parameters := keyvault.VaultCreateOrUpdateParameters{ - Location: &location, - Properties: &keyvault.VaultProperties{ - TenantID: &tenantUUID, - Sku: expandKeyVaultSku(d), - AccessPolicies: expandKeyVaultAccessPolicies(d), - EnabledForDeployment: &enabledForDeployment, - EnabledForDiskEncryption: &enabledForDiskEncryption, - EnabledForTemplateDeployment: &enabledForTemplateDeployment, - }, - Tags: expandTags(tags), - } - - _, err := client.CreateOrUpdate(resGroup, name, parameters) - if err != nil { - return err - } - - read, err := client.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read KeyVault %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmKeyVaultRead(d, meta) -} - -func resourceArmKeyVaultRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).keyVaultClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["vaults"] - - resp, err := client.Get(resGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure KeyVault %s: %s", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("tenant_id", 
resp.Properties.TenantID.String()) - d.Set("enabled_for_deployment", resp.Properties.EnabledForDeployment) - d.Set("enabled_for_disk_encryption", resp.Properties.EnabledForDiskEncryption) - d.Set("enabled_for_template_deployment", resp.Properties.EnabledForTemplateDeployment) - d.Set("sku", flattenKeyVaultSku(resp.Properties.Sku)) - d.Set("access_policy", flattenKeyVaultAccessPolicies(resp.Properties.AccessPolicies)) - d.Set("vault_uri", resp.Properties.VaultURI) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmKeyVaultDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).keyVaultClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["vaults"] - - _, err = client.Delete(resGroup, name) - - return err -} - -func expandKeyVaultSku(d *schema.ResourceData) *keyvault.Sku { - skuSets := d.Get("sku").(*schema.Set).List() - sku := skuSets[0].(map[string]interface{}) - - return &keyvault.Sku{ - Family: &armKeyVaultSkuFamily, - Name: keyvault.SkuName(sku["name"].(string)), - } -} - -func expandKeyVaultAccessPolicies(d *schema.ResourceData) *[]keyvault.AccessPolicyEntry { - policies := d.Get("access_policy").([]interface{}) - result := make([]keyvault.AccessPolicyEntry, 0, len(policies)) - - for _, policySet := range policies { - policyRaw := policySet.(map[string]interface{}) - - keyPermissionsRaw := policyRaw["key_permissions"].([]interface{}) - keyPermissions := []keyvault.KeyPermissions{} - for _, permission := range keyPermissionsRaw { - keyPermissions = append(keyPermissions, keyvault.KeyPermissions(permission.(string))) - } - - secretPermissionsRaw := policyRaw["secret_permissions"].([]interface{}) - secretPermissions := []keyvault.SecretPermissions{} - for _, permission := range secretPermissionsRaw { - secretPermissions = append(secretPermissions, keyvault.SecretPermissions(permission.(string))) - } - - policy := 
keyvault.AccessPolicyEntry{ - Permissions: &keyvault.Permissions{ - Keys: &keyPermissions, - Secrets: &secretPermissions, - }, - } - - tenantUUID := uuid.FromStringOrNil(policyRaw["tenant_id"].(string)) - policy.TenantID = &tenantUUID - objectUUID := policyRaw["object_id"].(string) - policy.ObjectID = &objectUUID - - result = append(result, policy) - } - - return &result -} - -func flattenKeyVaultSku(sku *keyvault.Sku) []interface{} { - result := map[string]interface{}{ - "name": string(sku.Name), - } - - return []interface{}{result} -} - -func flattenKeyVaultAccessPolicies(policies *[]keyvault.AccessPolicyEntry) []interface{} { - result := make([]interface{}, 0, len(*policies)) - - for _, policy := range *policies { - policyRaw := make(map[string]interface{}) - - keyPermissionsRaw := make([]interface{}, 0, len(*policy.Permissions.Keys)) - for _, keyPermission := range *policy.Permissions.Keys { - keyPermissionsRaw = append(keyPermissionsRaw, string(keyPermission)) - } - - secretPermissionsRaw := make([]interface{}, 0, len(*policy.Permissions.Secrets)) - for _, secretPermission := range *policy.Permissions.Secrets { - secretPermissionsRaw = append(secretPermissionsRaw, string(secretPermission)) - } - - policyRaw["tenant_id"] = policy.TenantID.String() - policyRaw["object_id"] = policy.ObjectID - policyRaw["key_permissions"] = keyPermissionsRaw - policyRaw["secret_permissions"] = secretPermissionsRaw - - result = append(result, policyRaw) - } - - return result -} diff --git a/builtin/providers/azurerm/resource_arm_key_vault_test.go b/builtin/providers/azurerm/resource_arm_key_vault_test.go deleted file mode 100644 index 9b9bb8733..000000000 --- a/builtin/providers/azurerm/resource_arm_key_vault_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccAzureRMKeyVault_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMKeyVault_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKeyVaultDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKeyVaultExists("azurerm_key_vault.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMKeyVault_update(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMKeyVault_basic, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMKeyVault_update, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMKeyVaultDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKeyVaultExists("azurerm_key_vault.test"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "access_policy.0.key_permissions.0", "all"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "access_policy.0.secret_permissions.0", "all"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "tags.environment", "Production"), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("azurerm_key_vault.test", "access_policy.0.key_permissions.0", "get"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "access_policy.0.secret_permissions.0", "get"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "enabled_for_deployment", "true"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "enabled_for_disk_encryption", "true"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "enabled_for_template_deployment", "true"), - resource.TestCheckResourceAttr("azurerm_key_vault.test", "tags.environment", "Staging"), - ), - }, - }, - }) 
-} - -func testCheckAzureRMKeyVaultDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ArmClient).keyVaultClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_key_vault" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := client.Get(resourceGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil - } - return err - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Key Vault still exists:\n%#v", resp.Properties) - } - } - - return nil -} - -func testCheckAzureRMKeyVaultExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - vaultName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for vault: %s", vaultName) - } - - client := testAccProvider.Meta().(*ArmClient).keyVaultClient - - resp, err := client.Get(resourceGroup, vaultName) - if err != nil { - return fmt.Errorf("Bad: Get on keyVaultClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Vault %q (resource group: %q) does not exist", vaultName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMKeyVault_basic = ` -data "azurerm_client_config" "current" {} - -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_key_vault" "test" { - name = "vault%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - tenant_id = "${data.azurerm_client_config.current.tenant_id}" - - sku { - name = "premium" - } - - access_policy { - tenant_id = 
"${data.azurerm_client_config.current.tenant_id}" - object_id = "${data.azurerm_client_config.current.client_id}" - - key_permissions = [ - "all" - ] - - secret_permissions = [ - "all" - ] - } - - tags { - environment = "Production" - } -} -` - -var testAccAzureRMKeyVault_update = ` -data "azurerm_client_config" "current" {} - -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_key_vault" "test" { - name = "vault%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - tenant_id = "${data.azurerm_client_config.current.tenant_id}" - - sku { - name = "premium" - } - - access_policy { - tenant_id = "${data.azurerm_client_config.current.tenant_id}" - object_id = "${data.azurerm_client_config.current.client_id}" - - key_permissions = [ - "get" - ] - - secret_permissions = [ - "get" - ] - } - - enabled_for_deployment = true - enabled_for_disk_encryption = true - enabled_for_template_deployment = true - - tags { - environment = "Staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer.go b/builtin/providers/azurerm/resource_arm_loadbalancer.go deleted file mode 100644 index 6b9e4aacc..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer.go +++ /dev/null @@ -1,301 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmLoadBalancer() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerCreate, - Read: resourecArmLoadBalancerRead, - Update: resourceArmLoadBalancerCreate, - Delete: resourceArmLoadBalancerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, 
- Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "frontend_ip_configuration": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "subnet_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "public_ip_address_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip_address_allocation": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateLoadBalancerPrivateIpAddressAllocation, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "load_balancer_rules": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "inbound_nat_rules": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - - "private_ip_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmLoadBalancerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - loadBalancerClient := client.loadBalancerClient - - log.Printf("[INFO] preparing arguments for Azure ARM LoadBalancer creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - properties := network.LoadBalancerPropertiesFormat{} - - if _, ok := d.GetOk("frontend_ip_configuration"); ok { - properties.FrontendIPConfigurations = 
expandAzureRmLoadBalancerFrontendIpConfigurations(d) - } - - loadbalancer := network.LoadBalancer{ - Name: azure.String(name), - Location: azure.String(location), - Tags: expandedTags, - LoadBalancerPropertiesFormat: &properties, - } - - _, error := loadBalancerClient.CreateOrUpdate(resGroup, name, loadbalancer, make(chan struct{})) - err := <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := loadBalancerClient.Get(resGroup, name, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", name) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - Target: []string{"Succeeded"}, - Refresh: loadbalancerStateRefreshFunc(client, resGroup, name), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", name, err) - } - - return resourecArmLoadBalancerRead(d, meta) -} - -func resourecArmLoadBalancerRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - loadBalancer, exists, err := retrieveLoadBalancerById(d.Id(), meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. 
Removing from state", d.Get("name").(string)) - return nil - } - - d.Set("name", loadBalancer.Name) - d.Set("location", loadBalancer.Location) - d.Set("resource_group_name", id.ResourceGroup) - - if loadBalancer.LoadBalancerPropertiesFormat != nil && loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil { - ipconfigs := loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations - d.Set("frontend_ip_configuration", flattenLoadBalancerFrontendIpConfiguration(ipconfigs)) - - for _, config := range *ipconfigs { - if config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress != nil { - d.Set("private_ip_address", config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress) - - // set the private IP address at most once - break - } - } - } - - flattenAndSetTags(d, loadBalancer.Tags) - - return nil -} - -func resourceArmLoadBalancerDelete(d *schema.ResourceData, meta interface{}) error { - loadBalancerClient := meta.(*ArmClient).loadBalancerClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return errwrap.Wrapf("Error Parsing Azure Resource ID {{err}}", err) - } - resGroup := id.ResourceGroup - name := id.Path["loadBalancers"] - - _, error := loadBalancerClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Deleting LoadBalancer {{err}}", err) - } - - d.SetId("") - return nil -} - -func expandAzureRmLoadBalancerFrontendIpConfigurations(d *schema.ResourceData) *[]network.FrontendIPConfiguration { - configs := d.Get("frontend_ip_configuration").([]interface{}) - frontEndConfigs := make([]network.FrontendIPConfiguration, 0, len(configs)) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - private_ip_allocation_method := data["private_ip_address_allocation"].(string) - properties := network.FrontendIPConfigurationPropertiesFormat{ - PrivateIPAllocationMethod: network.IPAllocationMethod(private_ip_allocation_method), - } - - 
if v := data["private_ip_address"].(string); v != "" { - properties.PrivateIPAddress = &v - } - - if v := data["public_ip_address_id"].(string); v != "" { - properties.PublicIPAddress = &network.PublicIPAddress{ - ID: &v, - } - } - - if v := data["subnet_id"].(string); v != "" { - properties.Subnet = &network.Subnet{ - ID: &v, - } - } - - name := data["name"].(string) - frontEndConfig := network.FrontendIPConfiguration{ - Name: &name, - FrontendIPConfigurationPropertiesFormat: &properties, - } - - frontEndConfigs = append(frontEndConfigs, frontEndConfig) - } - - return &frontEndConfigs -} - -func flattenLoadBalancerFrontendIpConfiguration(ipConfigs *[]network.FrontendIPConfiguration) []interface{} { - result := make([]interface{}, 0, len(*ipConfigs)) - for _, config := range *ipConfigs { - ipConfig := make(map[string]interface{}) - ipConfig["name"] = *config.Name - ipConfig["private_ip_address_allocation"] = config.FrontendIPConfigurationPropertiesFormat.PrivateIPAllocationMethod - - if config.FrontendIPConfigurationPropertiesFormat.Subnet != nil { - ipConfig["subnet_id"] = *config.FrontendIPConfigurationPropertiesFormat.Subnet.ID - } - - if config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress != nil { - ipConfig["private_ip_address"] = *config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress - } - - if config.FrontendIPConfigurationPropertiesFormat.PublicIPAddress != nil { - ipConfig["public_ip_address_id"] = *config.FrontendIPConfigurationPropertiesFormat.PublicIPAddress.ID - } - - if config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules != nil { - load_balancing_rules := make([]string, 0, len(*config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules)) - for _, rule := range *config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules { - load_balancing_rules = append(load_balancing_rules, *rule.ID) - } - - ipConfig["load_balancer_rules"] = load_balancing_rules - - } - - if 
config.FrontendIPConfigurationPropertiesFormat.InboundNatRules != nil { - inbound_nat_rules := make([]string, 0, len(*config.FrontendIPConfigurationPropertiesFormat.InboundNatRules)) - for _, rule := range *config.FrontendIPConfigurationPropertiesFormat.InboundNatRules { - inbound_nat_rules = append(inbound_nat_rules, *rule.ID) - } - - ipConfig["inbound_nat_rules"] = inbound_nat_rules - - } - - result = append(result, ipConfig) - } - return result -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool.go b/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool.go deleted file mode 100644 index 465860f84..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool.go +++ /dev/null @@ -1,235 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmLoadBalancerBackendAddressPool() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerBackendAddressPoolCreate, - Read: resourceArmLoadBalancerBackendAddressPoolRead, - Delete: resourceArmLoadBalancerBackendAddressPoolDelete, - Importer: &schema.ResourceImporter{ - State: loadBalancerSubResourceStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": deprecatedLocationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "loadbalancer_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "backend_ip_configurations": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "load_balancing_rules": { - Type: schema.TypeSet, - 
Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceArmLoadBalancerBackendAddressPoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string)) - return nil - } - - backendAddressPools := append(*loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools, expandAzureRmLoadBalancerBackendAddressPools(d)) - existingPool, existingPoolIndex, exists := findLoadBalancerBackEndAddressPoolByName(loadBalancer, d.Get("name").(string)) - if exists { - if d.Get("name").(string) == *existingPool.Name { - // this pool is being updated/reapplied remove old copy from the slice - backendAddressPools = append(backendAddressPools[:existingPoolIndex], backendAddressPools[existingPoolIndex+1:]...) 
- } - } - - loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools = &backendAddressPools - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - var pool_id string - for _, BackendAddressPool := range *(*read.LoadBalancerPropertiesFormat).BackendAddressPools { - if *BackendAddressPool.Name == d.Get("name").(string) { - pool_id = *BackendAddressPool.ID - } - } - - if pool_id != "" { - d.SetId(pool_id) - } else { - return fmt.Errorf("Cannot find created LoadBalancer Backend Address Pool ID %q", pool_id) - } - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - Target: []string{"Succeeded"}, - Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err) - } - - return resourceArmLoadBalancerBackendAddressPoolRead(d, meta) -} - -func resourceArmLoadBalancerBackendAddressPoolRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["backendAddressPools"] - - loadBalancer, exists, err := 
retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", name) - return nil - } - - config, _, exists := findLoadBalancerBackEndAddressPoolByName(loadBalancer, name) - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer Backend Address Pool %q not found. Removing from state", name) - return nil - } - - d.Set("name", config.Name) - d.Set("resource_group_name", id.ResourceGroup) - - var backend_ip_configurations []string - if config.BackendAddressPoolPropertiesFormat.BackendIPConfigurations != nil { - for _, backendConfig := range *config.BackendAddressPoolPropertiesFormat.BackendIPConfigurations { - backend_ip_configurations = append(backend_ip_configurations, *backendConfig.ID) - } - - } - d.Set("backend_ip_configurations", backend_ip_configurations) - - var load_balancing_rules []string - if config.BackendAddressPoolPropertiesFormat.LoadBalancingRules != nil { - for _, rule := range *config.BackendAddressPoolPropertiesFormat.LoadBalancingRules { - load_balancing_rules = append(load_balancing_rules, *rule.ID) - } - } - d.Set("load_balancing_rules", load_balancing_rules) - - return nil -} - -func resourceArmLoadBalancerBackendAddressPoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - return nil - } - - _, index, exists := findLoadBalancerBackEndAddressPoolByName(loadBalancer, d.Get("name").(string)) - if !exists { - return nil - } - - oldBackEndPools 
:= *loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools - newBackEndPools := append(oldBackEndPools[:index], oldBackEndPools[index+1:]...) - loadBalancer.LoadBalancerPropertiesFormat.BackendAddressPools = &newBackEndPools - - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - return nil -} - -func expandAzureRmLoadBalancerBackendAddressPools(d *schema.ResourceData) network.BackendAddressPool { - return network.BackendAddressPool{ - Name: azure.String(d.Get("name").(string)), - } -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool_test.go b/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool_test.go deleted file mode 100644 index 14a52135f..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_backend_address_pool_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package azurerm - -import ( - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMLoadBalancerBackEndAddressPool_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - addressPoolName := fmt.Sprintf("%d-address-pool", ri) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - 
backendAddressPool_id := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/backendAddressPools/%s", - subscriptionID, ri, ri, addressPoolName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb), - resource.TestCheckResourceAttr( - "azurerm_lb_backend_address_pool.test", "id", backendAddressPool_id), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerBackEndAddressPool_removal(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - addressPoolName := fmt.Sprintf("%d-address-pool", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerBackEndAddressPool_removal(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolNotExists(addressPoolName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerBackEndAddressPool_reapply(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - addressPoolName := fmt.Sprintf("%d-address-pool", ri) - - deleteAddressPoolState := func(s *terraform.State) error { - return s.Remove("azurerm_lb_backend_address_pool.test") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - 
Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb), - deleteAddressPoolState, - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerBackEndAddressPool_disappears(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - addressPoolName := fmt.Sprintf("%d-address-pool", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerBackEndAddressPool_basic(ri, addressPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName, &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolDisappears(addressPoolName, &lb), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerBackEndAddressPoolExists(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerBackEndAddressPoolByName(lb, addressPoolName) - if !exists { - return fmt.Errorf("A BackEnd Address Pool with name %q cannot be found.", addressPoolName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerBackEndAddressPoolNotExists(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := 
findLoadBalancerBackEndAddressPoolByName(lb, addressPoolName) - if exists { - return fmt.Errorf("A BackEnd Address Pool with name %q has been found.", addressPoolName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerBackEndAddressPoolDisappears(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - _, i, exists := findLoadBalancerBackEndAddressPoolByName(lb, addressPoolName) - if !exists { - return fmt.Errorf("A BackEnd Address Pool with name %q cannot be found.", addressPoolName) - } - - currentPools := *lb.LoadBalancerPropertiesFormat.BackendAddressPools - pools := append(currentPools[:i], currentPools[i+1:]...) - lb.LoadBalancerPropertiesFormat.BackendAddressPools = &pools - - id, err := parseAzureResourceID(*lb.ID) - if err != nil { - return err - } - - _, error := conn.CreateOrUpdate(id.ResourceGroup, *lb.Name, *lb, make(chan struct{})) - err = <-error - if err != nil { - return fmt.Errorf("Error Creating/Updating LoadBalancer %s", err) - } - - _, err = conn.Get(id.ResourceGroup, *lb.Name, "") - return err - } -} - -func testAccAzureRMLoadBalancerBackEndAddressPool_basic(rInt int, addressPoolName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_backend_address_pool" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" 
- loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" -} - -`, rInt, rInt, rInt, rInt, addressPoolName) -} - -func testAccAzureRMLoadBalancerBackEndAddressPool_removal(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -`, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool.go b/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool.go deleted file mode 100644 index 59356ac91..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool.go +++ /dev/null @@ -1,281 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmLoadBalancerNatPool() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerNatPoolCreate, - Read: resourceArmLoadBalancerNatPoolRead, - Update: resourceArmLoadBalancerNatPoolCreate, - Delete: resourceArmLoadBalancerNatPoolDelete, - Importer: &schema.ResourceImporter{ - State: loadBalancerSubResourceStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": deprecatedLocationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, 
- - "loadbalancer_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "frontend_port_start": { - Type: schema.TypeInt, - Required: true, - }, - - "frontend_port_end": { - Type: schema.TypeInt, - Required: true, - }, - - "backend_port": { - Type: schema.TypeInt, - Required: true, - }, - - "frontend_ip_configuration_name": { - Type: schema.TypeString, - Required: true, - }, - - "frontend_ip_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceArmLoadBalancerNatPoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string)) - return nil - } - - newNatPool, err := expandAzureRmLoadBalancerNatPool(d, loadBalancer) - if err != nil { - return errwrap.Wrapf("Error Expanding NAT Pool {{err}}", err) - } - - natPools := append(*loadBalancer.LoadBalancerPropertiesFormat.InboundNatPools, *newNatPool) - - existingNatPool, existingNatPoolIndex, exists := findLoadBalancerNatPoolByName(loadBalancer, d.Get("name").(string)) - if exists { - if d.Get("name").(string) == *existingNatPool.Name { - // this probe is being updated/reapplied remove old copy from the slice - natPools = append(natPools[:existingNatPoolIndex], natPools[existingNatPoolIndex+1:]...) 
- } - } - - loadBalancer.LoadBalancerPropertiesFormat.InboundNatPools = &natPools - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - var natPool_id string - for _, InboundNatPool := range *(*read.LoadBalancerPropertiesFormat).InboundNatPools { - if *InboundNatPool.Name == d.Get("name").(string) { - natPool_id = *InboundNatPool.ID - } - } - - if natPool_id != "" { - d.SetId(natPool_id) - } else { - return fmt.Errorf("Cannot find created LoadBalancer NAT Pool ID %q", natPool_id) - } - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - Target: []string{"Succeeded"}, - Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err) - } - - return resourceArmLoadBalancerNatPoolRead(d, meta) -} - -func resourceArmLoadBalancerNatPoolRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["inboundNatPools"] - - loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) - if 
err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", name) - return nil - } - - config, _, exists := findLoadBalancerNatPoolByName(loadBalancer, name) - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer Nat Pool %q not found. Removing from state", name) - return nil - } - - d.Set("name", config.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("protocol", config.InboundNatPoolPropertiesFormat.Protocol) - d.Set("frontend_port_start", config.InboundNatPoolPropertiesFormat.FrontendPortRangeStart) - d.Set("frontend_port_end", config.InboundNatPoolPropertiesFormat.FrontendPortRangeEnd) - d.Set("backend_port", config.InboundNatPoolPropertiesFormat.BackendPort) - - if config.InboundNatPoolPropertiesFormat.FrontendIPConfiguration != nil { - fipID, err := parseAzureResourceID(*config.InboundNatPoolPropertiesFormat.FrontendIPConfiguration.ID) - if err != nil { - return err - } - - d.Set("frontend_ip_configuration_name", fipID.Path["frontendIPConfigurations"]) - d.Set("frontend_ip_configuration_id", config.InboundNatPoolPropertiesFormat.FrontendIPConfiguration.ID) - } - - return nil -} - -func resourceArmLoadBalancerNatPoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - return nil - } - - _, index, exists := findLoadBalancerNatPoolByName(loadBalancer, d.Get("name").(string)) - if !exists { - return nil - } - - oldNatPools := *loadBalancer.LoadBalancerPropertiesFormat.InboundNatPools - newNatPools := 
append(oldNatPools[:index], oldNatPools[index+1:]...) - loadBalancer.LoadBalancerPropertiesFormat.InboundNatPools = &newNatPools - - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - return nil -} - -func expandAzureRmLoadBalancerNatPool(d *schema.ResourceData, lb *network.LoadBalancer) (*network.InboundNatPool, error) { - - properties := network.InboundNatPoolPropertiesFormat{ - Protocol: network.TransportProtocol(d.Get("protocol").(string)), - FrontendPortRangeStart: azure.Int32(int32(d.Get("frontend_port_start").(int))), - FrontendPortRangeEnd: azure.Int32(int32(d.Get("frontend_port_end").(int))), - BackendPort: azure.Int32(int32(d.Get("backend_port").(int))), - } - - if v := d.Get("frontend_ip_configuration_name").(string); v != "" { - rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v) - if !exists { - return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v) - } - - feip := network.SubResource{ - ID: rule.ID, - } - - properties.FrontendIPConfiguration = &feip - } - - natPool := network.InboundNatPool{ - Name: azure.String(d.Get("name").(string)), - InboundNatPoolPropertiesFormat: &properties, - } - - return &natPool, nil -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool_test.go 
b/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool_test.go deleted file mode 100644 index f6e77b1fd..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_pool_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package azurerm - -import ( - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMLoadBalancerNatPool_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - natPool_id := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/inboundNatPools/%s", - subscriptionID, ri, ri, natPoolName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - resource.TestCheckResourceAttr( - "azurerm_lb_nat_pool.test", "id", natPool_id), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatPool_removal(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - 
testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - ), - }, - { - Config: testAccAzureRMLoadBalancerNatPool_removal(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolNotExists(natPoolName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatPool_update(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - natPool2Name := fmt.Sprintf("NatPool-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatPool_multiplePools(ri, natPoolName, natPool2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPool2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_nat_pool.test2", "backend_port", "3390"), - ), - }, - { - Config: testAccAzureRMLoadBalancerNatPool_multiplePoolsUpdate(ri, natPoolName, natPool2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPool2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_nat_pool.test2", "backend_port", "3391"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatPool_reapply(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - - deleteNatPoolState := func(s *terraform.State) error { - return s.Remove("azurerm_lb_nat_pool.test") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - deleteNatPoolState, - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatPool_disappears(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natPoolName := fmt.Sprintf("NatPool-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatPool_basic(ri, natPoolName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatPoolExists(natPoolName, &lb), - testCheckAzureRMLoadBalancerNatPoolDisappears(natPoolName, &lb), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerNatPoolExists(natPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerNatPoolByName(lb, natPoolName) - if !exists { - return fmt.Errorf("A NAT Pool with name %q cannot be found.", natPoolName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerNatPoolNotExists(natPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerNatPoolByName(lb, natPoolName) - if exists { - return fmt.Errorf("A NAT 
Pool with name %q has been found.", natPoolName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerNatPoolDisappears(natPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - _, i, exists := findLoadBalancerNatPoolByName(lb, natPoolName) - if !exists { - return fmt.Errorf("A Nat Pool with name %q cannot be found.", natPoolName) - } - - currentPools := *lb.LoadBalancerPropertiesFormat.InboundNatPools - pools := append(currentPools[:i], currentPools[i+1:]...) - lb.LoadBalancerPropertiesFormat.InboundNatPools = &pools - - id, err := parseAzureResourceID(*lb.ID) - if err != nil { - return err - } - - _, error := conn.CreateOrUpdate(id.ResourceGroup, *lb.Name, *lb, make(chan struct{})) - err = <-error - if err != nil { - return fmt.Errorf("Error Creating/Updating LoadBalancer %s", err) - } - - _, err = conn.Get(id.ResourceGroup, *lb.Name, "") - return err - } -} - -func testAccAzureRMLoadBalancerNatPool_basic(rInt int, natPoolName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_pool" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port_start = 80 - frontend_port_end = 81 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - 
-`, rInt, rInt, rInt, rInt, natPoolName, rInt) -} - -func testAccAzureRMLoadBalancerNatPool_removal(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -`, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMLoadBalancerNatPool_multiplePools(rInt int, natPoolName, natPool2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_pool" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port_start = 80 - frontend_port_end = 81 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -resource "azurerm_lb_nat_pool" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port_start = 82 - frontend_port_end = 83 
- backend_port = 3390 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, natPoolName, rInt, natPool2Name, rInt) -} - -func testAccAzureRMLoadBalancerNatPool_multiplePoolsUpdate(rInt int, natPoolName, natPool2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_pool" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port_start = 80 - frontend_port_end = 81 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -resource "azurerm_lb_nat_pool" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port_start = 82 - frontend_port_end = 83 - backend_port = 3391 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, natPoolName, rInt, natPool2Name, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule.go b/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule.go deleted file mode 100644 index 6c1b15dac..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule.go +++ /dev/null @@ -1,283 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmLoadBalancerNatRule() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerNatRuleCreate, - Read: resourceArmLoadBalancerNatRuleRead, - Update: resourceArmLoadBalancerNatRuleCreate, - Delete: resourceArmLoadBalancerNatRuleDelete, - Importer: &schema.ResourceImporter{ - State: loadBalancerSubResourceStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": deprecatedLocationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "loadbalancer_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "frontend_port": { - Type: schema.TypeInt, - Required: true, - }, - - "backend_port": { - Type: schema.TypeInt, - Required: true, - }, - - "frontend_ip_configuration_name": { - Type: schema.TypeString, - Required: true, - }, - - "frontend_ip_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - - "backend_ip_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceArmLoadBalancerNatRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. 
Removing from state", d.Get("name").(string)) - return nil - } - - newNatRule, err := expandAzureRmLoadBalancerNatRule(d, loadBalancer) - if err != nil { - return errwrap.Wrapf("Error Expanding NAT Rule {{err}}", err) - } - - natRules := append(*loadBalancer.LoadBalancerPropertiesFormat.InboundNatRules, *newNatRule) - - existingNatRule, existingNatRuleIndex, exists := findLoadBalancerNatRuleByName(loadBalancer, d.Get("name").(string)) - if exists { - if d.Get("name").(string) == *existingNatRule.Name { - // this probe is being updated/reapplied remove old copy from the slice - natRules = append(natRules[:existingNatRuleIndex], natRules[existingNatRuleIndex+1:]...) - } - } - - loadBalancer.LoadBalancerPropertiesFormat.InboundNatRules = &natRules - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating / Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - var natRule_id string - for _, InboundNatRule := range *(*read.LoadBalancerPropertiesFormat).InboundNatRules { - if *InboundNatRule.Name == d.Get("name").(string) { - natRule_id = *InboundNatRule.ID - } - } - - if natRule_id != "" { - d.SetId(natRule_id) - } else { - return fmt.Errorf("Cannot find created LoadBalancer NAT Rule ID %q", natRule_id) - } - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - 
Target: []string{"Succeeded"}, - Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err) - } - - return resourceArmLoadBalancerNatRuleRead(d, meta) -} - -func resourceArmLoadBalancerNatRuleRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["inboundNatRules"] - - loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", name) - return nil - } - - config, _, exists := findLoadBalancerNatRuleByName(loadBalancer, name) - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer Nat Rule %q not found. 
Removing from state", name) - return nil - } - - d.Set("name", config.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("protocol", config.InboundNatRulePropertiesFormat.Protocol) - d.Set("frontend_port", config.InboundNatRulePropertiesFormat.FrontendPort) - d.Set("backend_port", config.InboundNatRulePropertiesFormat.BackendPort) - - if config.InboundNatRulePropertiesFormat.FrontendIPConfiguration != nil { - fipID, err := parseAzureResourceID(*config.InboundNatRulePropertiesFormat.FrontendIPConfiguration.ID) - if err != nil { - return err - } - - d.Set("frontend_ip_configuration_name", fipID.Path["frontendIPConfigurations"]) - d.Set("frontend_ip_configuration_id", config.InboundNatRulePropertiesFormat.FrontendIPConfiguration.ID) - } - - if config.InboundNatRulePropertiesFormat.BackendIPConfiguration != nil { - d.Set("backend_ip_configuration_id", config.InboundNatRulePropertiesFormat.BackendIPConfiguration.ID) - } - - return nil -} - -func resourceArmLoadBalancerNatRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - return nil - } - - _, index, exists := findLoadBalancerNatRuleByName(loadBalancer, d.Get("name").(string)) - if !exists { - return nil - } - - oldNatRules := *loadBalancer.LoadBalancerPropertiesFormat.InboundNatRules - newNatRules := append(oldNatRules[:index], oldNatRules[index+1:]...) 
- loadBalancer.LoadBalancerPropertiesFormat.InboundNatRules = &newNatRules - - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - return nil -} - -func expandAzureRmLoadBalancerNatRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.InboundNatRule, error) { - - properties := network.InboundNatRulePropertiesFormat{ - Protocol: network.TransportProtocol(d.Get("protocol").(string)), - FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))), - BackendPort: azure.Int32(int32(d.Get("backend_port").(int))), - } - - if v := d.Get("frontend_ip_configuration_name").(string); v != "" { - rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v) - if !exists { - return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v) - } - - feip := network.SubResource{ - ID: rule.ID, - } - - properties.FrontendIPConfiguration = &feip - } - - natRule := network.InboundNatRule{ - Name: azure.String(d.Get("name").(string)), - InboundNatRulePropertiesFormat: &properties, - } - - return &natRule, nil -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule_test.go b/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule_test.go deleted file mode 100644 index aef65b154..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_nat_rule_test.go +++ 
/dev/null @@ -1,377 +0,0 @@ -package azurerm - -import ( - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMLoadBalancerNatRule_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - natRule_id := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/inboundNatRules/%s", - subscriptionID, ri, ri, natRuleName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - resource.TestCheckResourceAttr( - "azurerm_lb_nat_rule.test", "id", natRule_id), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatRule_removal(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - ), - }, - { - Config: testAccAzureRMLoadBalancerNatRule_removal(ri), - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleNotExists(natRuleName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatRule_update(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - natRule2Name := fmt.Sprintf("NatRule-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatRule_multipleRules(ri, natRuleName, natRule2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRule2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_nat_rule.test2", "frontend_port", "3390"), - resource.TestCheckResourceAttr("azurerm_lb_nat_rule.test2", "backend_port", "3390"), - ), - }, - { - Config: testAccAzureRMLoadBalancerNatRule_multipleRulesUpdate(ri, natRuleName, natRule2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRule2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_nat_rule.test2", "frontend_port", "3391"), - resource.TestCheckResourceAttr("azurerm_lb_nat_rule.test2", "backend_port", "3391"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatRule_reapply(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - - deleteNatRuleState := func(s *terraform.State) error { - return s.Remove("azurerm_lb_nat_rule.test") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - deleteNatRuleState, - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerNatRule_disappears(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - natRuleName := fmt.Sprintf("NatRule-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerNatRule_basic(ri, natRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerNatRuleExists(natRuleName, &lb), - testCheckAzureRMLoadBalancerNatRuleDisappears(natRuleName, &lb), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerNatRuleExists(natRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerNatRuleByName(lb, natRuleName) - if !exists { - return fmt.Errorf("A NAT Rule with name %q cannot be found.", natRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerNatRuleNotExists(natRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerNatRuleByName(lb, natRuleName) - if exists { - return fmt.Errorf("A NAT 
Rule with name %q has been found.", natRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerNatRuleDisappears(natRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - _, i, exists := findLoadBalancerNatRuleByName(lb, natRuleName) - if !exists { - return fmt.Errorf("A Nat Rule with name %q cannot be found.", natRuleName) - } - - currentRules := *lb.LoadBalancerPropertiesFormat.InboundNatRules - rules := append(currentRules[:i], currentRules[i+1:]...) - lb.LoadBalancerPropertiesFormat.InboundNatRules = &rules - - id, err := parseAzureResourceID(*lb.ID) - if err != nil { - return err - } - - _, error := conn.CreateOrUpdate(id.ResourceGroup, *lb.Name, *lb, make(chan struct{})) - err = <-error - if err != nil { - return fmt.Errorf("Error Creating/Updating LoadBalancer %s", err) - } - - _, err = conn.Get(id.ResourceGroup, *lb.Name, "") - return err - } -} - -func testAccAzureRMLoadBalancerNatRule_basic(rInt int, natRuleName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, 
natRuleName, rInt) -} - -func testAccAzureRMLoadBalancerNatRule_removal(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -`, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMLoadBalancerNatRule_multipleRules(rInt int, natRuleName, natRule2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -resource "azurerm_lb_nat_rule" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3390 - backend_port = 3390 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, 
rInt, rInt, natRuleName, rInt, natRule2Name, rInt) -} - -func testAccAzureRMLoadBalancerNatRule_multipleRulesUpdate(rInt int, natRuleName, natRule2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_nat_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -resource "azurerm_lb_nat_rule" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3391 - backend_port = 3391 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, natRuleName, rInt, natRule2Name, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_probe.go b/builtin/providers/azurerm/resource_arm_loadbalancer_probe.go deleted file mode 100644 index 640865696..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_probe.go +++ /dev/null @@ -1,279 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func 
resourceArmLoadBalancerProbe() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerProbeCreate, - Read: resourceArmLoadBalancerProbeRead, - Update: resourceArmLoadBalancerProbeCreate, - Delete: resourceArmLoadBalancerProbeDelete, - Importer: &schema.ResourceImporter{ - State: loadBalancerSubResourceStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": deprecatedLocationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "loadbalancer_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol": { - Type: schema.TypeString, - Computed: true, - Optional: true, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "port": { - Type: schema.TypeInt, - Required: true, - }, - - "request_path": { - Type: schema.TypeString, - Optional: true, - }, - - "interval_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 15, - }, - - "number_of_probes": { - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - - "load_balancer_rules": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceArmLoadBalancerProbeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. 
Removing from state", d.Get("name").(string)) - return nil - } - - newProbe, err := expandAzureRmLoadBalancerProbe(d, loadBalancer) - if err != nil { - return errwrap.Wrapf("Error Expanding Probe {{err}}", err) - } - - probes := append(*loadBalancer.LoadBalancerPropertiesFormat.Probes, *newProbe) - - existingProbe, existingProbeIndex, exists := findLoadBalancerProbeByName(loadBalancer, d.Get("name").(string)) - if exists { - if d.Get("name").(string) == *existingProbe.Name { - // this probe is being updated/reapplied remove old copy from the slice - probes = append(probes[:existingProbeIndex], probes[existingProbeIndex+1:]...) - } - } - - loadBalancer.LoadBalancerPropertiesFormat.Probes = &probes - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - var createdProbe_id string - for _, Probe := range *(*read.LoadBalancerPropertiesFormat).Probes { - if *Probe.Name == d.Get("name").(string) { - createdProbe_id = *Probe.ID - } - } - - if createdProbe_id != "" { - d.SetId(createdProbe_id) - } else { - return fmt.Errorf("Cannot find created LoadBalancer Probe ID %q", createdProbe_id) - } - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - Target: []string{"Succeeded"}, - Refresh: 
loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err) - } - - return resourceArmLoadBalancerProbeRead(d, meta) -} - -func resourceArmLoadBalancerProbeRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["probes"] - - loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", name) - return nil - } - - config, _, exists := findLoadBalancerProbeByName(loadBalancer, name) - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer Probe %q not found. Removing from state", name) - return nil - } - - d.Set("name", config.Name) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("protocol", config.ProbePropertiesFormat.Protocol) - d.Set("interval_in_seconds", config.ProbePropertiesFormat.IntervalInSeconds) - d.Set("number_of_probes", config.ProbePropertiesFormat.NumberOfProbes) - d.Set("port", config.ProbePropertiesFormat.Port) - d.Set("request_path", config.ProbePropertiesFormat.RequestPath) - - var load_balancer_rules []string - if config.ProbePropertiesFormat.LoadBalancingRules != nil { - for _, ruleConfig := range *config.ProbePropertiesFormat.LoadBalancingRules { - load_balancer_rules = append(load_balancer_rules, *ruleConfig.ID) - } - } - d.Set("load_balancer_rules", load_balancer_rules) - - return nil -} - -func resourceArmLoadBalancerProbeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - 
armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - return nil - } - - _, index, exists := findLoadBalancerProbeByName(loadBalancer, d.Get("name").(string)) - if !exists { - return nil - } - - oldProbes := *loadBalancer.LoadBalancerPropertiesFormat.Probes - newProbes := append(oldProbes[:index], oldProbes[index+1:]...) - loadBalancer.LoadBalancerPropertiesFormat.Probes = &newProbes - - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - return nil -} - -func expandAzureRmLoadBalancerProbe(d *schema.ResourceData, lb *network.LoadBalancer) (*network.Probe, error) { - - properties := network.ProbePropertiesFormat{ - NumberOfProbes: azure.Int32(int32(d.Get("number_of_probes").(int))), - IntervalInSeconds: azure.Int32(int32(d.Get("interval_in_seconds").(int))), - Port: azure.Int32(int32(d.Get("port").(int))), - } - - if v, ok := d.GetOk("protocol"); ok { - properties.Protocol = network.ProbeProtocol(v.(string)) - } - - if v, ok := d.GetOk("request_path"); ok { - properties.RequestPath = azure.String(v.(string)) - } - - probe := network.Probe{ - Name: azure.String(d.Get("name").(string)), - 
ProbePropertiesFormat: &properties, - } - - return &probe, nil -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_probe_test.go b/builtin/providers/azurerm/resource_arm_loadbalancer_probe_test.go deleted file mode 100644 index 560aae7b2..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_probe_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package azurerm - -import ( - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMLoadBalancerProbe_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - probe_id := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/probes/%s", - subscriptionID, ri, ri, probeName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - resource.TestCheckResourceAttr( - "azurerm_lb_probe.test", "id", probe_id), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerProbe_removal(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - Check: 
resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - ), - }, - { - Config: testAccAzureRMLoadBalancerProbe_removal(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeNotExists(probeName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerProbe_update(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - probe2Name := fmt.Sprintf("probe-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_multipleProbes(ri, probeName, probe2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - testCheckAzureRMLoadBalancerProbeExists(probe2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_probe.test2", "port", "80"), - ), - }, - { - Config: testAccAzureRMLoadBalancerProbe_multipleProbesUpdate(ri, probeName, probe2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - testCheckAzureRMLoadBalancerProbeExists(probe2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_probe.test2", "port", "8080"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerProbe_updateProtocol(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_updateProtocolBefore(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - resource.TestCheckResourceAttr("azurerm_lb_probe.test", "protocol", "Http"), - ), - }, - { - Config: testAccAzureRMLoadBalancerProbe_updateProtocolAfter(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - resource.TestCheckResourceAttr("azurerm_lb_probe.test", "protocol", "Tcp"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerProbe_reapply(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - deleteProbeState := func(s *terraform.State) error { - return s.Remove("azurerm_lb_probe.test") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - deleteProbeState, - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerProbe_disappears(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - probeName := fmt.Sprintf("probe-%d", ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerProbe_basic(ri, probeName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - testCheckAzureRMLoadBalancerProbeDisappears(probeName, &lb), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerProbeExists(natRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerProbeByName(lb, natRuleName) - if !exists { - return fmt.Errorf("A Probe with name %q cannot be found.", natRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerProbeNotExists(natRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerProbeByName(lb, natRuleName) - if exists { - return fmt.Errorf("A Probe with name %q has been found.", natRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerProbeDisappears(addressPoolName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - _, i, exists := findLoadBalancerProbeByName(lb, addressPoolName) - if !exists { - return fmt.Errorf("A Probe with name %q cannot be found.", addressPoolName) - } - - currentProbes := *lb.LoadBalancerPropertiesFormat.Probes - probes := append(currentProbes[:i], currentProbes[i+1:]...) 
- lb.LoadBalancerPropertiesFormat.Probes = &probes - - id, err := parseAzureResourceID(*lb.ID) - if err != nil { - return err - } - - _, error := conn.CreateOrUpdate(id.ResourceGroup, *lb.Name, *lb, make(chan struct{})) - err = <-error - if err != nil { - return fmt.Errorf("Error Creating/Updating LoadBalancer %s", err) - } - - _, err = conn.Get(id.ResourceGroup, *lb.Name, "") - return err - } -} - -func testAccAzureRMLoadBalancerProbe_basic(rInt int, probeName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_probe" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - port = 22 -} -`, rInt, rInt, rInt, rInt, probeName) -} - -func testAccAzureRMLoadBalancerProbe_removal(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -`, rInt, rInt, rInt, rInt) -} - -func 
testAccAzureRMLoadBalancerProbe_multipleProbes(rInt int, probeName, probe2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_probe" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - port = 22 -} - -resource "azurerm_lb_probe" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - port = 80 -} -`, rInt, rInt, rInt, rInt, probeName, probe2Name) -} - -func testAccAzureRMLoadBalancerProbe_multipleProbesUpdate(rInt int, probeName, probe2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_probe" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - 
port = 22 -} - -resource "azurerm_lb_probe" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - port = 8080 -} -`, rInt, rInt, rInt, rInt, probeName, probe2Name) -} - -func testAccAzureRMLoadBalancerProbe_updateProtocolBefore(rInt int, probeName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_probe" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Http" - request_path = "/" - port = 80 -} -`, rInt, rInt, rInt, rInt, probeName) -} - -func testAccAzureRMLoadBalancerProbe_updateProtocolAfter(rInt int, probeName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_probe" "test" { - location = "West US" - resource_group_name = 
"${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - port = 80 -} -`, rInt, rInt, rInt, rInt, probeName) -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_rule.go b/builtin/providers/azurerm/resource_arm_loadbalancer_rule.go deleted file mode 100644 index 89d39c602..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_rule.go +++ /dev/null @@ -1,382 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmLoadBalancerRule() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLoadBalancerRuleCreate, - Read: resourceArmLoadBalancerRuleRead, - Update: resourceArmLoadBalancerRuleCreate, - Delete: resourceArmLoadBalancerRuleDelete, - Importer: &schema.ResourceImporter{ - State: loadBalancerSubResourceStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmLoadBalancerRuleName, - }, - - "location": deprecatedLocationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "loadbalancer_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "frontend_ip_configuration_name": { - Type: schema.TypeString, - Required: true, - }, - - "frontend_ip_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - - "backend_address_pool_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "frontend_port": { - Type: schema.TypeInt, - Required: true, - }, - - 
"backend_port": { - Type: schema.TypeInt, - Required: true, - }, - - "probe_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "enable_floating_ip": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "idle_timeout_in_minutes": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "load_distribution": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string)) - return nil - } - - newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer) - if err != nil { - return errwrap.Wrapf("Error Exanding LoadBalancer Rule {{err}}", err) - } - - lbRules := append(*loadBalancer.LoadBalancerPropertiesFormat.LoadBalancingRules, *newLbRule) - - existingRule, existingRuleIndex, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string)) - if exists { - if d.Get("name").(string) == *existingRule.Name { - // this rule is being updated/reapplied remove old copy from the slice - lbRules = append(lbRules[:existingRuleIndex], lbRules[existingRuleIndex+1:]...) 
- } - } - - loadBalancer.LoadBalancerPropertiesFormat.LoadBalancingRules = &lbRules - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - var rule_id string - for _, LoadBalancingRule := range *(*read.LoadBalancerPropertiesFormat).LoadBalancingRules { - if *LoadBalancingRule.Name == d.Get("name").(string) { - rule_id = *LoadBalancingRule.ID - } - } - - if rule_id != "" { - d.SetId(rule_id) - } else { - return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id) - } - - log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Accepted", "Updating"}, - Target: []string{"Succeeded"}, - Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err) - } - - return resourceArmLoadBalancerRuleRead(d, meta) -} - -func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["loadBalancingRules"] - - loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta) - if err != nil 
{ - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer %q not found. Removing from state", name) - return nil - } - - config, _, exists := findLoadBalancerRuleByName(loadBalancer, name) - if !exists { - d.SetId("") - log.Printf("[INFO] LoadBalancer Rule %q not found. Removing from state", name) - return nil - } - - d.Set("name", config.Name) - d.Set("resource_group_name", id.ResourceGroup) - - d.Set("protocol", config.LoadBalancingRulePropertiesFormat.Protocol) - d.Set("frontend_port", config.LoadBalancingRulePropertiesFormat.FrontendPort) - d.Set("backend_port", config.LoadBalancingRulePropertiesFormat.BackendPort) - - if config.LoadBalancingRulePropertiesFormat.EnableFloatingIP != nil { - d.Set("enable_floating_ip", config.LoadBalancingRulePropertiesFormat.EnableFloatingIP) - } - - if config.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes != nil { - d.Set("idle_timeout_in_minutes", config.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes) - } - - if config.LoadBalancingRulePropertiesFormat.FrontendIPConfiguration != nil { - fipID, err := parseAzureResourceID(*config.LoadBalancingRulePropertiesFormat.FrontendIPConfiguration.ID) - if err != nil { - return err - } - - d.Set("frontend_ip_configuration_name", fipID.Path["frontendIPConfigurations"]) - d.Set("frontend_ip_configuration_id", config.LoadBalancingRulePropertiesFormat.FrontendIPConfiguration.ID) - } - - if config.LoadBalancingRulePropertiesFormat.BackendAddressPool != nil { - d.Set("backend_address_pool_id", config.LoadBalancingRulePropertiesFormat.BackendAddressPool.ID) - } - - if config.LoadBalancingRulePropertiesFormat.Probe != nil { - d.Set("probe_id", config.LoadBalancingRulePropertiesFormat.Probe.ID) - } - - if config.LoadBalancingRulePropertiesFormat.LoadDistribution != "" { - d.Set("load_distribution", config.LoadBalancingRulePropertiesFormat.LoadDistribution) - } - - return nil -} - -func 
resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - lbClient := client.loadBalancerClient - - loadBalancerID := d.Get("loadbalancer_id").(string) - armMutexKV.Lock(loadBalancerID) - defer armMutexKV.Unlock(loadBalancerID) - - loadBalancer, exists, err := retrieveLoadBalancerById(loadBalancerID, meta) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err) - } - if !exists { - d.SetId("") - return nil - } - - _, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string)) - if !exists { - return nil - } - - oldLbRules := *loadBalancer.LoadBalancerPropertiesFormat.LoadBalancingRules - newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...) - loadBalancer.LoadBalancerPropertiesFormat.LoadBalancingRules = &newLbRules - - resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string)) - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err) - } - - _, error := lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{})) - err = <-error - if err != nil { - return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err) - } - - read, err := lbClient.Get(resGroup, loadBalancerName, "") - if err != nil { - return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err) - } - if read.ID == nil { - return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup) - } - - return nil -} - -func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) { - - properties := network.LoadBalancingRulePropertiesFormat{ - Protocol: network.TransportProtocol(d.Get("protocol").(string)), - FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))), - BackendPort: azure.Int32(int32(d.Get("backend_port").(int))), - EnableFloatingIP: 
azure.Bool(d.Get("enable_floating_ip").(bool)), - } - - if v, ok := d.GetOk("idle_timeout_in_minutes"); ok { - properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int))) - } - - if v := d.Get("load_distribution").(string); v != "" { - properties.LoadDistribution = network.LoadDistribution(v) - } - - if v := d.Get("frontend_ip_configuration_name").(string); v != "" { - rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v) - if !exists { - return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v) - } - - feip := network.SubResource{ - ID: rule.ID, - } - - properties.FrontendIPConfiguration = &feip - } - - if v := d.Get("backend_address_pool_id").(string); v != "" { - beAP := network.SubResource{ - ID: &v, - } - - properties.BackendAddressPool = &beAP - } - - if v := d.Get("probe_id").(string); v != "" { - pid := network.SubResource{ - ID: &v, - } - - properties.Probe = &pid - } - - lbRule := network.LoadBalancingRule{ - Name: azure.String(d.Get("name").(string)), - LoadBalancingRulePropertiesFormat: &properties, - } - - return &lbRule, nil -} - -func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-zA-Z_0-9.-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only word characters, numbers, underscores, periods, and hyphens allowed in %q: %q", - k, value)) - } - - if len(value) > 80 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 80 characters: %q", k, value)) - } - - if len(value) == 0 { - errors = append(errors, fmt.Errorf( - "%q cannot be an empty string: %q", k, value)) - } - if !regexp.MustCompile(`[a-zA-Z0-9_]$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must end with a word character, number, or underscore: %q", k, value)) - } - - if !regexp.MustCompile(`^[a-zA-Z0-9]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must start with a word 
character or number: %q", k, value)) - } - - return -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go b/builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go deleted file mode 100644 index cb20b63c7..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go +++ /dev/null @@ -1,527 +0,0 @@ -package azurerm - -import ( - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMLoadBalancerRuleNameLabel_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "-word", - ErrCount: 1, - }, - { - Value: "testing-", - ErrCount: 1, - }, - { - Value: "test#test", - ErrCount: 1, - }, - { - Value: acctest.RandStringFromCharSet(81, "abcdedfed"), - ErrCount: 1, - }, - { - Value: "test.rule", - ErrCount: 0, - }, - { - Value: "test_rule", - ErrCount: 0, - }, - { - Value: "test-rule", - ErrCount: 0, - }, - { - Value: "TestRule", - ErrCount: 0, - }, - { - Value: "Test123Rule", - ErrCount: 0, - }, - { - Value: "TestRule", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateArmLoadBalancerRuleName(tc.Value, "azurerm_lb_rule") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM LoadBalancer Rule Name Label to trigger a validation error") - } - } -} - -func TestAccAzureRMLoadBalancerRule_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - lbRule_id := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s", - subscriptionID, ri, ri, lbRuleName) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - resource.TestCheckResourceAttr( - "azurerm_lb_rule.test", "id", lbRule_id), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerRule_removal(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - ), - }, - { - Config: testAccAzureRMLoadBalancerRule_removal(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName, &lb), - ), - }, - }, - }) -} - -// https://github.com/hashicorp/terraform/issues/9424 -func TestAccAzureRMLoadBalancerRule_inconsistentReads(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - backendPoolName := fmt.Sprintf("LbPool-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - probeName := fmt.Sprintf("LbProbe-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_inconsistentRead(ri, backendPoolName, probeName, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerBackEndAddressPoolExists(backendPoolName, &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - testCheckAzureRMLoadBalancerProbeExists(probeName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerRule_update(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - lbRule2Name := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - lbRuleID := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s", - subscriptionID, ri, ri, lbRuleName) - - lbRule2ID := fmt.Sprintf( - "/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s", - subscriptionID, ri, ri, lbRule2Name) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_multipleRules(ri, lbRuleName, lbRule2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRule2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_rule.test", "id", lbRuleID), - resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "id", lbRule2ID), - 
resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "frontend_port", "3390"), - resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "backend_port", "3390"), - ), - }, - { - Config: testAccAzureRMLoadBalancerRule_multipleRulesUpdate(ri, lbRuleName, lbRule2Name), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRule2Name, &lb), - resource.TestCheckResourceAttr("azurerm_lb_rule.test", "id", lbRuleID), - resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "id", lbRule2ID), - resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "frontend_port", "3391"), - resource.TestCheckResourceAttr("azurerm_lb_rule.test2", "backend_port", "3391"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerRule_reapply(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - deleteRuleState := func(s *terraform.State) error { - return s.Remove("azurerm_lb_rule.test") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - deleteRuleState, - ), - ExpectNonEmptyPlan: true, - }, - { - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancerRule_disappears(t *testing.T) { - var lb network.LoadBalancer - ri := 
acctest.RandInt() - lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb), - testCheckAzureRMLoadBalancerRuleDisappears(lbRuleName, &lb), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerRuleExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerRuleByName(lb, lbRuleName) - if !exists { - return fmt.Errorf("A LoadBalancer Rule with name %q cannot be found.", lbRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, _, exists := findLoadBalancerRuleByName(lb, lbRuleName) - if exists { - return fmt.Errorf("A LoadBalancer Rule with name %q has been found.", lbRuleName) - } - - return nil - } -} - -func testCheckAzureRMLoadBalancerRuleDisappears(ruleName string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - _, i, exists := findLoadBalancerRuleByName(lb, ruleName) - if !exists { - return fmt.Errorf("A Rule with name %q cannot be found.", ruleName) - } - - currentRules := *lb.LoadBalancerPropertiesFormat.LoadBalancingRules - rules := append(currentRules[:i], currentRules[i+1:]...) 
- lb.LoadBalancerPropertiesFormat.LoadBalancingRules = &rules - - id, err := parseAzureResourceID(*lb.ID) - if err != nil { - return err - } - - _, error := conn.CreateOrUpdate(id.ResourceGroup, *lb.Name, *lb, make(chan struct{})) - err = <-error - if err != nil { - return fmt.Errorf("Error Creating/Updating LoadBalancer %s", err) - } - - _, err = conn.Get(id.ResourceGroup, *lb.Name, "") - return err - } -} - -func testAccAzureRMLoadBalancerRule_basic(rInt int, lbRuleName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, lbRuleName, rInt) -} - -func testAccAzureRMLoadBalancerRule_removal(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - 
public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -`, rInt, rInt, rInt, rInt) -} - -// https://github.com/hashicorp/terraform/issues/9424 -func testAccAzureRMLoadBalancerRule_inconsistentRead(rInt int, backendPoolName, probeName, lbRuleName string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_backend_address_pool" "teset" { - name = "%s" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" -} - -resource "azurerm_lb_probe" "test" { - name = "%s" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - protocol = "Tcp" - port = 443 -} - -resource "azurerm_lb_rule" "test" { - name = "%s" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} -`, rInt, rInt, rInt, rInt, backendPoolName, probeName, lbRuleName, rInt) -} - -func testAccAzureRMLoadBalancerRule_multipleRules(rInt int, lbRuleName, lbRule2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = 
"${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Udp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - -resource "azurerm_lb_rule" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Udp" - frontend_port = 3390 - backend_port = 3390 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, lbRuleName, rInt, lbRule2Name, rInt) -} - -func testAccAzureRMLoadBalancerRule_multipleRulesUpdate(rInt int, lbRuleName, lbRule2Name string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} - -resource "azurerm_lb_rule" "test" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Udp" - frontend_port = 3389 - backend_port = 3389 - frontend_ip_configuration_name = "one-%d" -} - 
-resource "azurerm_lb_rule" "test2" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" - name = "%s" - protocol = "Udp" - frontend_port = 3391 - backend_port = 3391 - frontend_ip_configuration_name = "one-%d" -} - -`, rInt, rInt, rInt, rInt, lbRuleName, rInt, lbRule2Name, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_loadbalancer_test.go b/builtin/providers/azurerm/resource_arm_loadbalancer_test.go deleted file mode 100644 index e8d20457b..000000000 --- a/builtin/providers/azurerm/resource_arm_loadbalancer_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMLoadBalancerPrivateIpAddressAllocation_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Static", - ErrCount: 0, - }, - { - Value: "Dynamic", - ErrCount: 0, - }, - { - Value: "STATIC", - ErrCount: 0, - }, - { - Value: "static", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateLoadBalancerPrivateIpAddressAllocation(tc.Value, "azurerm_lb") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM LoadBalancer private_ip_address_allocation to trigger a validation error") - } - } -} - -func TestAccAzureRMLoadBalancer_basic(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancer_basic(ri), - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancer_frontEndConfig(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancer_frontEndConfig(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "frontend_ip_configuration.#", "2"), - ), - }, - { - Config: testAccAzureRMLoadBalancer_frontEndConfigRemoval(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "frontend_ip_configuration.#", "1"), - ), - }, - }, - }) -} - -func TestAccAzureRMLoadBalancer_tags(t *testing.T) { - var lb network.LoadBalancer - ri := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLoadBalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLoadBalancer_basic(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "tags.Environment", "production"), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "tags.Purpose", "AcceptanceTests"), - ), - }, - { - Config: testAccAzureRMLoadBalancer_updatedTags(ri), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_lb.test", "tags.Purpose", "AcceptanceTests"), 
- ), - }, - }, - }) -} - -func testCheckAzureRMLoadBalancerExists(name string, lb *network.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - loadbalancerName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for loadbalancer: %s", loadbalancerName) - } - - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - resp, err := conn.Get(resourceGroup, loadbalancerName, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: LoadBalancer %q (resource group: %q) does not exist", loadbalancerName, resourceGroup) - } - - return fmt.Errorf("Bad: Get on loadBalancerClient: %s", err) - } - - *lb = resp - - return nil - } -} - -func testCheckAzureRMLoadBalancerDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).loadBalancerClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_lb" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("LoadBalancer still exists:\n%#v", resp.LoadBalancerPropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMLoadBalancer_basic(rInt int) string { - return fmt.Sprintf(` - -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags { - Environment = "production" - Purpose = "AcceptanceTests" - } - -}`, rInt, rInt) -} - -func 
testAccAzureRMLoadBalancer_updatedTags(rInt int) string { - return fmt.Sprintf(` - -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - tags { - Purpose = "AcceptanceTests" - } - -}`, rInt, rInt) -} - -func testAccAzureRMLoadBalancer_frontEndConfig(rInt int) string { - return fmt.Sprintf(` - -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_public_ip" "test1" { - name = "another-test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } - - frontend_ip_configuration { - name = "two-%d" - public_ip_address_id = "${azurerm_public_ip.test1.id}" - } -}`, rInt, rInt, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMLoadBalancer_frontEndConfigRemoval(rInt int) string { - return fmt.Sprintf(` - -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_public_ip" "test" { - name = "test-ip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "test" { - name = "arm-test-loadbalancer-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "one-%d" - 
public_ip_address_id = "${azurerm_public_ip.test.id}" - } -}`, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway.go b/builtin/providers/azurerm/resource_arm_local_network_gateway.go deleted file mode 100644 index 1a6005397..000000000 --- a/builtin/providers/azurerm/resource_arm_local_network_gateway.go +++ /dev/null @@ -1,157 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmLocalNetworkGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceArmLocalNetworkGatewayCreate, - Read: resourceArmLocalNetworkGatewayRead, - Update: resourceArmLocalNetworkGatewayCreate, - Delete: resourceArmLocalNetworkGatewayDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "gateway_address": { - Type: schema.TypeString, - Required: true, - }, - - "address_space": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface{}) error { - lnetClient := meta.(*ArmClient).localNetConnClient - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - ipAddress := d.Get("gateway_address").(string) - - // fetch the 'address_space_prefixes: - prefixes := []string{} - for _, pref := range d.Get("address_space").([]interface{}) { - prefixes = append(prefixes, pref.(string)) - } - - gateway := network.LocalNetworkGateway{ - Name: &name, - Location: &location, - LocalNetworkGatewayPropertiesFormat: 
&network.LocalNetworkGatewayPropertiesFormat{ - LocalNetworkAddressSpace: &network.AddressSpace{ - AddressPrefixes: &prefixes, - }, - GatewayIPAddress: &ipAddress, - }, - } - - _, error := lnetClient.CreateOrUpdate(resGroup, name, gateway, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Error creating Azure ARM Local Network Gateway '%s': %s", name, err) - } - - read, err := lnetClient.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Virtual Network %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmLocalNetworkGatewayRead(d, meta) -} - -// resourceArmLocalNetworkGatewayRead goes ahead and reads the state of the corresponding ARM local network gateway. -func resourceArmLocalNetworkGatewayRead(d *schema.ResourceData, meta interface{}) error { - lnetClient := meta.(*ArmClient).localNetConnClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["localNetworkGateways"] - if name == "" { - return fmt.Errorf("Cannot find 'localNetworkGateways' in '%s', make sure it is specified in the ID parameter", d.Id()) - } - resGroup := id.ResourceGroup - - resp, err := lnetClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading the state of Azure ARM local network gateway '%s': %s", name, err) - } - - d.Set("resource_group_name", resGroup) - d.Set("name", resp.Name) - d.Set("location", resp.Location) - d.Set("gateway_address", resp.LocalNetworkGatewayPropertiesFormat.GatewayIPAddress) - - prefs := []string{} - if ps := *resp.LocalNetworkGatewayPropertiesFormat.LocalNetworkAddressSpace.AddressPrefixes; ps != nil { - prefs = ps - } - d.Set("address_space", prefs) - - return nil -} - -// resourceArmLocalNetworkGatewayDelete deletes the specified ARM local network gateway. 
-func resourceArmLocalNetworkGatewayDelete(d *schema.ResourceData, meta interface{}) error { - lnetClient := meta.(*ArmClient).localNetConnClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["localNetworkGateways"] - resGroup := id.ResourceGroup - - deleteResp, error := lnetClient.Delete(resGroup, name, make(chan struct{})) - resp := <-deleteResp - err = <-error - - if resp.StatusCode == http.StatusNotFound { - return nil - } - - if err != nil { - return fmt.Errorf("Error issuing Azure ARM delete request of local network gateway '%s': %s", name, err) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go b/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go deleted file mode 100644 index 13d5b336d..000000000 --- a/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMLocalNetworkGateway_basic(t *testing.T) { - name := "azurerm_local_network_gateway.test" - - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLocalNetworkGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLocalNetworkGatewayConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLocalNetworkGatewayExists(name), - resource.TestCheckResourceAttr(name, "gateway_address", "127.0.0.1"), - resource.TestCheckResourceAttr(name, "address_space.0", "127.0.0.0/8"), - ), - }, - }, - }) -} - -func TestAccAzureRMLocalNetworkGateway_disappears(t *testing.T) { - name := "azurerm_local_network_gateway.test" - rInt := acctest.RandInt() - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMLocalNetworkGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMLocalNetworkGatewayConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMLocalNetworkGatewayExists(name), - resource.TestCheckResourceAttr(name, "gateway_address", "127.0.0.1"), - resource.TestCheckResourceAttr(name, "address_space.0", "127.0.0.0/8"), - testCheckAzureRMLocalNetworkGatewayDisappears(name), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -// testCheckAzureRMLocalNetworkGatewayExists returns the resurce.TestCheckFunc -// which checks whether or not the expected local network gateway exists both -// in the schema, and on Azure. -func testCheckAzureRMLocalNetworkGatewayExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // first check within the schema for the local network gateway: - res, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Local network gateway '%s' not found.", name) - } - - // then, extract the name and the resource group: - id, err := parseAzureResourceID(res.Primary.ID) - if err != nil { - return err - } - localNetName := id.Path["localNetworkGateways"] - resGrp := id.ResourceGroup - - // and finally, check that it exists on Azure: - lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient - - resp, err := lnetClient.Get(resGrp, localNetName) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Local network gateway '%s' (resource group '%s') does not exist on Azure.", localNetName, resGrp) - } - return fmt.Errorf("Error reading the state of local network gateway '%s'.", localNetName) - } - - return nil - } -} - -func testCheckAzureRMLocalNetworkGatewayDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // first check within the schema for the local network 
gateway: - res, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Local network gateway '%s' not found.", name) - } - - // then, extract the name and the resource group: - id, err := parseAzureResourceID(res.Primary.ID) - if err != nil { - return err - } - localNetName := id.Path["localNetworkGateways"] - resGrp := id.ResourceGroup - - // and finally, check that it exists on Azure: - lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient - - deleteResp, error := lnetClient.Delete(resGrp, localNetName, make(chan struct{})) - resp := <-deleteResp - err = <-error - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Local network gateway '%s' (resource group '%s') does not exist on Azure.", localNetName, resGrp) - } - return fmt.Errorf("Error deleting the state of local network gateway '%s'.", localNetName) - } - - return nil - } -} - -func testCheckAzureRMLocalNetworkGatewayDestroy(s *terraform.State) error { - for _, res := range s.RootModule().Resources { - if res.Type != "azurerm_local_network_gateway" { - continue - } - - id, err := parseAzureResourceID(res.Primary.ID) - if err != nil { - return err - } - localNetName := id.Path["localNetworkGateways"] - resGrp := id.ResourceGroup - - lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient - resp, err := lnetClient.Get(resGrp, localNetName) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Local network gateway still exists:\n%#v", resp.LocalNetworkGatewayPropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMLocalNetworkGatewayConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-%d" - location = "West US" -} - -resource "azurerm_local_network_gateway" "test" { - name = "acctestlng-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - 
gateway_address = "127.0.0.1" - address_space = ["127.0.0.0/8"] -} -`, rInt, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_managed_disk.go b/builtin/providers/azurerm/resource_arm_managed_disk.go deleted file mode 100644 index 2ca41abf4..000000000 --- a/builtin/providers/azurerm/resource_arm_managed_disk.go +++ /dev/null @@ -1,243 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/disk" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" -) - -func resourceArmManagedDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceArmManagedDiskCreate, - Read: resourceArmManagedDiskRead, - Update: resourceArmManagedDiskCreate, - Delete: resourceArmManagedDiskDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "storage_account_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - string(disk.PremiumLRS), - string(disk.StandardLRS), - }, true), - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "create_option": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - string(disk.Import), - string(disk.Empty), - string(disk.Copy), - }, true), - }, - - "source_uri": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "source_resource_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "os_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - string(disk.Windows), - string(disk.Linux), - }, true), - }, - - 
"disk_size_gb": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateDiskSizeGB, - }, - - "tags": tagsSchema(), - }, - } -} - -func validateDiskSizeGB(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 4095 { - errors = append(errors, fmt.Errorf( - "The `disk_size_gb` can only be between 1 and 4095")) - } - return -} - -func resourceArmManagedDiskCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - diskClient := client.diskClient - - log.Printf("[INFO] preparing arguments for Azure ARM Managed Disk creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createDisk := disk.Model{ - Name: &name, - Location: &location, - Tags: expandedTags, - } - - storageAccountType := d.Get("storage_account_type").(string) - osType := d.Get("os_type").(string) - - createDisk.Properties = &disk.Properties{ - AccountType: disk.StorageAccountTypes(storageAccountType), - OsType: disk.OperatingSystemTypes(osType), - } - - if v := d.Get("disk_size_gb"); v != 0 { - diskSize := int32(v.(int)) - createDisk.Properties.DiskSizeGB = &diskSize - } - createOption := d.Get("create_option").(string) - - creationData := &disk.CreationData{ - CreateOption: disk.CreateOption(createOption), - } - - if strings.EqualFold(createOption, string(disk.Import)) { - if sourceUri := d.Get("source_uri").(string); sourceUri != "" { - creationData.SourceURI = &sourceUri - } else { - return fmt.Errorf("[ERROR] source_uri must be specified when create_option is `%s`", disk.Import) - } - } else if strings.EqualFold(createOption, string(disk.Copy)) { - if sourceResourceId := d.Get("source_resource_id").(string); sourceResourceId != "" { - creationData.SourceResourceID = &sourceResourceId - } else { - return fmt.Errorf("[ERROR] source_resource_id must 
be specified when create_option is `%s`", disk.Copy) - } - } - - createDisk.CreationData = creationData - - _, diskErr := diskClient.CreateOrUpdate(resGroup, name, createDisk, make(chan struct{})) - err := <-diskErr - if err != nil { - return err - } - - read, err := diskClient.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("[ERROR] Cannot read Managed Disk %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmManagedDiskRead(d, meta) -} - -func resourceArmManagedDiskRead(d *schema.ResourceData, meta interface{}) error { - diskClient := meta.(*ArmClient).diskClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["disks"] - - resp, err := diskClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("[ERROR] Error making Read request on Azure Managed Disk %s (resource group %s): %s", name, resGroup, err) - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", resp.Location) - - if resp.Properties != nil { - flattenAzureRmManagedDiskProperties(d, resp.Properties) - } - - if resp.CreationData != nil { - flattenAzureRmManagedDiskCreationData(d, resp.CreationData) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmManagedDiskDelete(d *schema.ResourceData, meta interface{}) error { - diskClient := meta.(*ArmClient).diskClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["disks"] - - _, error := diskClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - if err != nil { - return err - } - - return nil -} - -func flattenAzureRmManagedDiskProperties(d *schema.ResourceData, properties *disk.Properties) { - d.Set("storage_account_type", string(properties.AccountType)) - 
if properties.DiskSizeGB != nil { - d.Set("disk_size_gb", *properties.DiskSizeGB) - } - if properties.OsType != "" { - d.Set("os_type", string(properties.OsType)) - } -} - -func flattenAzureRmManagedDiskCreationData(d *schema.ResourceData, creationData *disk.CreationData) { - d.Set("create_option", string(creationData.CreateOption)) - if creationData.SourceURI != nil { - d.Set("source_uri", *creationData.SourceURI) - } -} diff --git a/builtin/providers/azurerm/resource_arm_managed_disk_test.go b/builtin/providers/azurerm/resource_arm_managed_disk_test.go deleted file mode 100644 index 45bfe4dfe..000000000 --- a/builtin/providers/azurerm/resource_arm_managed_disk_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/disk" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMManagedDisk_empty(t *testing.T) { - var d disk.Model - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMManagedDisk_empty, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - ), - }, - }, - }) -} - -func TestAccAzureRMManagedDisk_import(t *testing.T) { - var d disk.Model - var vm compute.VirtualMachine - ri := acctest.RandInt() - vmConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) - config := fmt.Sprintf(testAccAzureRMManagedDisk_import, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - { - //need to create a vm and then delete it so we can use the vhd to test import - Config: vmConfig, - Destroy: false, - ExpectNonEmptyPlan: true, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), - testDeleteAzureRMVirtualMachine("azurerm_virtual_machine.test"), - ), - }, - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - ), - }, - }, - }) -} - -func TestAccAzureRMManagedDisk_copy(t *testing.T) { - var d disk.Model - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMManagedDisk_copy, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - ), - }, - }, - }) -} - -func TestAccAzureRMManagedDisk_update(t *testing.T) { - var d disk.Model - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMManagedDisk_empty, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMManagedDisk_empty_updated, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "tags.environment", "acctest"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "tags.cost-center", "ops"), - resource.TestCheckResourceAttr( - 
"azurerm_managed_disk.test", "disk_size_gb", "1"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "storage_account_type", string(disk.StandardLRS)), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "tags.environment", "acctest"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "disk_size_gb", "2"), - resource.TestCheckResourceAttr( - "azurerm_managed_disk.test", "storage_account_type", string(disk.PremiumLRS)), - ), - }, - }, - }) -} - -func TestAccAzureRMManagedDisk_NonStandardCasing(t *testing.T) { - var d disk.Model - ri := acctest.RandInt() - config := testAccAzureRMManagedDiskNonStandardCasing(ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMManagedDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true), - ), - }, - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMManagedDiskExists(name string, d *disk.Model, shouldExist bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - dName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for disk: %s", dName) - } - - conn := testAccProvider.Meta().(*ArmClient).diskClient - - resp, err := conn.Get(resourceGroup, dName) - if err != nil { - return fmt.Errorf("Bad: 
Get on diskClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound && shouldExist { - return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) does not exist", dName, resourceGroup) - } - if resp.StatusCode != http.StatusNotFound && !shouldExist { - return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) still exists", dName, resourceGroup) - } - - *d = resp - - return nil - } -} - -func testCheckAzureRMManagedDiskDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).diskClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_managed_disk" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Managed Disk still exists: \n%#v", resp.Properties) - } - } - - return nil -} - -func testDeleteAzureRMVirtualMachine(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - vmName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual machine: %s", vmName) - } - - conn := testAccProvider.Meta().(*ArmClient).vmClient - - _, error := conn.Delete(resourceGroup, vmName, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on vmClient: %s", err) - } - - return nil - } -} - -var testAccAzureRMManagedDisk_empty = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_managed_disk" "test" { - name = "acctestd-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - 
storage_account_type = "Standard_LRS" - create_option = "Empty" - disk_size_gb = "1" - - tags { - environment = "acctest" - cost-center = "ops" - } -}` - -var testAccAzureRMManagedDisk_import = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_managed_disk" "test" { - name = "acctestd-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_type = "Standard_LRS" - create_option = "Import" - source_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - disk_size_gb = "45" - - tags { - environment = "acctest" - } -}` - -var testAccAzureRMManagedDisk_copy = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_managed_disk" "source" { - name = "acctestd1-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_type = "Standard_LRS" - create_option = "Empty" - disk_size_gb = "1" - - tags { - environment = "acctest" - cost-center = "ops" - } -} - -resource "azurerm_managed_disk" "test" { - name = "acctestd2-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_type = "Standard_LRS" - create_option = "Copy" - source_resource_id = "${azurerm_managed_disk.source.id}" - disk_size_gb = "1" - - tags { - environment = "acctest" - cost-center = "ops" - } -}` - -var 
testAccAzureRMManagedDisk_empty_updated = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_managed_disk" "test" { - name = "acctestd-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_type = "Premium_LRS" - create_option = "Empty" - disk_size_gb = "2" - - tags { - environment = "acctest" - } -}` - -func testAccAzureRMManagedDiskNonStandardCasing(ri int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} -resource "azurerm_managed_disk" "test" { - name = "acctestd-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_type = "standard_lrs" - create_option = "Empty" - disk_size_gb = "1" - tags { - environment = "acctest" - cost-center = "ops" - } -}`, ri, ri) -} diff --git a/builtin/providers/azurerm/resource_arm_network_interface_card.go b/builtin/providers/azurerm/resource_arm_network_interface_card.go deleted file mode 100644 index 9ad2825da..000000000 --- a/builtin/providers/azurerm/resource_arm_network_interface_card.go +++ /dev/null @@ -1,490 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmNetworkInterface() *schema.Resource { - return &schema.Resource{ - Create: resourceArmNetworkInterfaceCreate, - Read: resourceArmNetworkInterfaceRead, - Update: resourceArmNetworkInterfaceCreate, - Delete: resourceArmNetworkInterfaceDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"network_security_group_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "mac_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip_address": { - Type: schema.TypeString, - Computed: true, - }, - - "virtual_machine_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ip_configuration": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "subnet_id": { - Type: schema.TypeString, - Required: true, - }, - - "private_ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "private_ip_address_allocation": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkInterfacePrivateIpAddressAllocation, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "public_ip_address_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "load_balancer_backend_address_pools_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "load_balancer_inbound_nat_rules_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - Set: resourceArmNetworkInterfaceIpConfigurationHash, - }, - - "dns_servers": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "internal_dns_name_label": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "applied_dns_servers": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "internal_fqdn": { - Type: schema.TypeString, - Optional: true, 
- Computed: true, - }, - - "enable_ip_forwarding": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - ifaceClient := client.ifaceClient - - log.Printf("[INFO] preparing arguments for Azure ARM Network Interface creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - enableIpForwarding := d.Get("enable_ip_forwarding").(bool) - tags := d.Get("tags").(map[string]interface{}) - - properties := network.InterfacePropertiesFormat{ - EnableIPForwarding: &enableIpForwarding, - } - - if v, ok := d.GetOk("network_security_group_id"); ok { - nsgId := v.(string) - properties.NetworkSecurityGroup = &network.SecurityGroup{ - ID: &nsgId, - } - - networkSecurityGroupName, err := parseNetworkSecurityGroupName(nsgId) - if err != nil { - return err - } - - armMutexKV.Lock(networkSecurityGroupName) - defer armMutexKV.Unlock(networkSecurityGroupName) - } - - dns, hasDns := d.GetOk("dns_servers") - nameLabel, hasNameLabel := d.GetOk("internal_dns_name_label") - if hasDns || hasNameLabel { - ifaceDnsSettings := network.InterfaceDNSSettings{} - - if hasDns { - var dnsServers []string - dns := dns.(*schema.Set).List() - for _, v := range dns { - str := v.(string) - dnsServers = append(dnsServers, str) - } - ifaceDnsSettings.DNSServers = &dnsServers - } - - if hasNameLabel { - name_label := nameLabel.(string) - ifaceDnsSettings.InternalDNSNameLabel = &name_label - } - - properties.DNSSettings = &ifaceDnsSettings - } - - ipConfigs, namesToLock, sgErr := expandAzureRmNetworkInterfaceIpConfigurations(d) - if sgErr != nil { - return fmt.Errorf("Error Building list of Network Interface IP Configurations: %s", sgErr) - } - - azureRMLockMultiple(namesToLock) - defer azureRMUnlockMultiple(namesToLock) - - if len(ipConfigs) > 0 { - 
properties.IPConfigurations = &ipConfigs - } - - iface := network.Interface{ - Name: &name, - Location: &location, - InterfacePropertiesFormat: &properties, - Tags: expandTags(tags), - } - - _, error := ifaceClient.CreateOrUpdate(resGroup, name, iface, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := ifaceClient.Get(resGroup, name, "") - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read NIC %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmNetworkInterfaceRead(d, meta) -} - -func resourceArmNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) error { - ifaceClient := meta.(*ArmClient).ifaceClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["networkInterfaces"] - - resp, err := ifaceClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Network Interface %s: %s", name, err) - } - - iface := *resp.InterfacePropertiesFormat - - if iface.MacAddress != nil { - if *iface.MacAddress != "" { - d.Set("mac_address", iface.MacAddress) - } - } - - if iface.IPConfigurations != nil && len(*iface.IPConfigurations) > 0 { - var privateIPAddress *string - ///TODO: Change this to a loop when https://github.com/Azure/azure-sdk-for-go/issues/259 is fixed - if (*iface.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat != nil { - privateIPAddress = (*iface.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress - } - - if *privateIPAddress != "" { - d.Set("private_ip_address", *privateIPAddress) - } - } - - if iface.VirtualMachine != nil { - if *iface.VirtualMachine.ID != "" { - d.Set("virtual_machine_id", *iface.VirtualMachine.ID) - } - } - - if iface.DNSSettings != nil { - if iface.DNSSettings.AppliedDNSServers != 
nil && len(*iface.DNSSettings.AppliedDNSServers) > 0 { - dnsServers := make([]string, 0, len(*iface.DNSSettings.AppliedDNSServers)) - for _, dns := range *iface.DNSSettings.AppliedDNSServers { - dnsServers = append(dnsServers, dns) - } - - if err := d.Set("applied_dns_servers", dnsServers); err != nil { - return err - } - } - - if iface.DNSSettings.InternalFqdn != nil && *iface.DNSSettings.InternalFqdn != "" { - d.Set("internal_fqdn", iface.DNSSettings.InternalFqdn) - } - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmNetworkInterfaceDelete(d *schema.ResourceData, meta interface{}) error { - ifaceClient := meta.(*ArmClient).ifaceClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["networkInterfaces"] - - if v, ok := d.GetOk("network_security_group_id"); ok { - networkSecurityGroupId := v.(string) - networkSecurityGroupName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) - if err != nil { - return err - } - - armMutexKV.Lock(networkSecurityGroupName) - defer armMutexKV.Unlock(networkSecurityGroupName) - } - - configs := d.Get("ip_configuration").(*schema.Set).List() - namesToLock := make([]string, 0) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - subnet_id := data["subnet_id"].(string) - subnetId, err := parseAzureResourceID(subnet_id) - if err != nil { - return err - } - subnetName := subnetId.Path["subnets"] - virtualNetworkName := subnetId.Path["virtualNetworks"] - namesToLock = append(namesToLock, subnetName) - namesToLock = append(namesToLock, virtualNetworkName) - } - - azureRMLockMultiple(&namesToLock) - defer azureRMUnlockMultiple(&namesToLock) - - _, error := ifaceClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func resourceArmNetworkInterfaceIpConfigurationHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - 
buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["subnet_id"].(string))) - if m["private_ip_address"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["private_ip_address"].(string))) - } - buf.WriteString(fmt.Sprintf("%s-", m["private_ip_address_allocation"].(string))) - if m["public_ip_address_id"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["public_ip_address_id"].(string))) - } - if m["load_balancer_backend_address_pools_ids"] != nil { - ids := m["load_balancer_backend_address_pools_ids"].(*schema.Set).List() - for _, id := range ids { - buf.WriteString(fmt.Sprintf("%d-", schema.HashString(id.(string)))) - } - } - if m["load_balancer_inbound_nat_rules_ids"] != nil { - ids := m["load_balancer_inbound_nat_rules_ids"].(*schema.Set).List() - for _, id := range ids { - buf.WriteString(fmt.Sprintf("%d-", schema.HashString(id.(string)))) - } - } - - return hashcode.String(buf.String()) -} - -func validateNetworkInterfacePrivateIpAddressAllocation(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - allocations := map[string]bool{ - "static": true, - "dynamic": true, - } - - if !allocations[value] { - errors = append(errors, fmt.Errorf("Network Interface Allocations can only be Static or Dynamic")) - } - return -} - -func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]network.InterfaceIPConfiguration, *[]string, error) { - configs := d.Get("ip_configuration").(*schema.Set).List() - ipConfigs := make([]network.InterfaceIPConfiguration, 0, len(configs)) - namesToLock := make([]string, 0) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - subnet_id := data["subnet_id"].(string) - private_ip_allocation_method := data["private_ip_address_allocation"].(string) - - var allocationMethod network.IPAllocationMethod - switch strings.ToLower(private_ip_allocation_method) { - case "dynamic": - allocationMethod = 
network.Dynamic - case "static": - allocationMethod = network.Static - default: - return []network.InterfaceIPConfiguration{}, nil, fmt.Errorf( - "valid values for private_ip_allocation_method are 'dynamic' and 'static' - got '%s'", - private_ip_allocation_method) - } - - properties := network.InterfaceIPConfigurationPropertiesFormat{ - Subnet: &network.Subnet{ - ID: &subnet_id, - }, - PrivateIPAllocationMethod: allocationMethod, - } - - subnetId, err := parseAzureResourceID(subnet_id) - if err != nil { - return []network.InterfaceIPConfiguration{}, nil, err - } - subnetName := subnetId.Path["subnets"] - virtualNetworkName := subnetId.Path["virtualNetworks"] - namesToLock = append(namesToLock, subnetName) - namesToLock = append(namesToLock, virtualNetworkName) - - if v := data["private_ip_address"].(string); v != "" { - properties.PrivateIPAddress = &v - } - - if v := data["public_ip_address_id"].(string); v != "" { - properties.PublicIPAddress = &network.PublicIPAddress{ - ID: &v, - } - } - - if v, ok := data["load_balancer_backend_address_pools_ids"]; ok { - var ids []network.BackendAddressPool - pools := v.(*schema.Set).List() - for _, p := range pools { - pool_id := p.(string) - id := network.BackendAddressPool{ - ID: &pool_id, - } - - ids = append(ids, id) - } - - properties.LoadBalancerBackendAddressPools = &ids - } - - if v, ok := data["load_balancer_inbound_nat_rules_ids"]; ok { - var natRules []network.InboundNatRule - rules := v.(*schema.Set).List() - for _, r := range rules { - rule_id := r.(string) - rule := network.InboundNatRule{ - ID: &rule_id, - } - - natRules = append(natRules, rule) - } - - properties.LoadBalancerInboundNatRules = &natRules - } - - name := data["name"].(string) - ipConfig := network.InterfaceIPConfiguration{ - Name: &name, - InterfaceIPConfigurationPropertiesFormat: &properties, - } - - ipConfigs = append(ipConfigs, ipConfig) - } - - return ipConfigs, &namesToLock, nil -} diff --git 
a/builtin/providers/azurerm/resource_arm_network_interface_card_test.go b/builtin/providers/azurerm/resource_arm_network_interface_card_test.go deleted file mode 100644 index c885cc0b0..000000000 --- a/builtin/providers/azurerm/resource_arm_network_interface_card_test.go +++ /dev/null @@ -1,477 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMNetworkInterface_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkInterface_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkInterface_disappears(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkInterface_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test"), - testCheckAzureRMNetworkInterfaceDisappears("azurerm_network_interface.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMNetworkInterface_enableIPForwarding(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkInterface_ipForwarding(rInt), - Check: 
resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "enable_ip_forwarding", "true"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkInterface_multipleLoadBalancers(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkInterface_multipleLoadBalancers(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test1"), - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test2"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkInterface_withTags(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkInterface_withTags(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "tags.cost_center", "MSFT"), - ), - }, - { - Config: testAccAzureRMNetworkInterface_withTagsUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_network_interface.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func 
testCheckAzureRMNetworkInterfaceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for availability set: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).ifaceClient - - resp, err := conn.Get(resourceGroup, name, "") - if err != nil { - return fmt.Errorf("Bad: Get on ifaceClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Network Interface %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMNetworkInterfaceDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for availability set: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).ifaceClient - - _, error := conn.Delete(resourceGroup, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on ifaceClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMNetworkInterfaceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).ifaceClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_network_interface" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := 
rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Network Interface still exists:\n%#v", resp.InterfacePropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMNetworkInterface_basic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-rg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acceptanceTestVirtualNetwork1" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acceptanceTestNetworkInterface1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} -`, rInt) -} - -func testAccAzureRMNetworkInterface_ipForwarding(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-rg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acceptanceTestVirtualNetwork1" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acceptanceTestNetworkInterface1" - location = "West US" - 
resource_group_name = "${azurerm_resource_group.test.name}" - enable_ip_forwarding = true - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} -`, rInt) -} - -func testAccAzureRMNetworkInterface_withTags(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-rg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acceptanceTestVirtualNetwork1" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acceptanceTestNetworkInterface1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -`, rInt) -} - -func testAccAzureRMNetworkInterface_withTagsUpdate(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-rg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acceptanceTestVirtualNetwork1" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acceptanceTestNetworkInterface1" - location = 
"West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } - - tags { - environment = "staging" - } -} -`, rInt) -} - -func testAccAzureRMNetworkInterface_multipleLoadBalancers(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctest-rg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acceptanceTestVirtualNetwork1" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_public_ip" "testext" { - name = "testpublicipext" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "testext" { - name = "testlbext" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "publicipext" - public_ip_address_id = "${azurerm_public_ip.testext.id}" - } -} - -resource "azurerm_lb_backend_address_pool" "testext" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.testext.id}" - name = "testbackendpoolext" -} - -resource "azurerm_lb_nat_rule" "testext" { - name = "testnatruleext" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.testext.id}" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3390 - frontend_ip_configuration_name = "publicipext" -} - -resource "azurerm_public_ip" "testint" { - name = "testpublicipint" - location = "West 
US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} - -resource "azurerm_lb" "testint" { - name = "testlbint" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "publicipint" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "Dynamic" - } -} - -resource "azurerm_lb_backend_address_pool" "testint" { - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.testint.id}" - name = "testbackendpoolint" -} - -resource "azurerm_lb_nat_rule" "testint" { - name = "testnatruleint" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.testint.id}" - protocol = "Tcp" - frontend_port = 3389 - backend_port = 3391 - frontend_ip_configuration_name = "publicipint" -} - -resource "azurerm_network_interface" "test1" { - name = "acceptanceTestNetworkInterface1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - enable_ip_forwarding = true - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - load_balancer_backend_address_pools_ids = [ - "${azurerm_lb_backend_address_pool.testext.id}", - "${azurerm_lb_backend_address_pool.testint.id}", - ] - } -} - -resource "azurerm_network_interface" "test2" { - name = "acceptanceTestNetworkInterface2" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - enable_ip_forwarding = true - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - load_balancer_inbound_nat_rules_ids = [ - "${azurerm_lb_nat_rule.testext.id}", - "${azurerm_lb_nat_rule.testint.id}", - ] - } -} -`, rInt) -} diff --git 
a/builtin/providers/azurerm/resource_arm_network_security_group.go b/builtin/providers/azurerm/resource_arm_network_security_group.go deleted file mode 100644 index e8bc156f0..000000000 --- a/builtin/providers/azurerm/resource_arm_network_security_group.go +++ /dev/null @@ -1,314 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - "net/http" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmNetworkSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceArmNetworkSecurityGroupCreate, - Read: resourceArmNetworkSecurityGroupRead, - Update: resourceArmNetworkSecurityGroupCreate, - Delete: resourceArmNetworkSecurityGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "security_rule": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 140 { - errors = append(errors, fmt.Errorf( - "The network security rule description can be no longer than 140 chars")) - } - return - }, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkSecurityRuleProtocol, - StateFunc: ignoreCaseStateFunc, - }, - - "source_port_range": { - Type: schema.TypeString, - Required: true, - }, - - "destination_port_range": { - Type: 
schema.TypeString, - Required: true, - }, - - "source_address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "destination_address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "access": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkSecurityRuleAccess, - }, - - "priority": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 100 || value > 4096 { - errors = append(errors, fmt.Errorf( - "The `priority` can only be between 100 and 4096")) - } - return - }, - }, - - "direction": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkSecurityRuleDirection, - }, - }, - }, - Set: resourceArmNetworkSecurityGroupRuleHash, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmNetworkSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - secClient := client.secGroupClient - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - - sgRules, sgErr := expandAzureRmSecurityRules(d) - if sgErr != nil { - return fmt.Errorf("Error Building list of Network Security Group Rules: %s", sgErr) - } - - sg := network.SecurityGroup{ - Name: &name, - Location: &location, - SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ - SecurityRules: &sgRules, - }, - Tags: expandTags(tags), - } - - _, error := secClient.CreateOrUpdate(resGroup, name, sg, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := secClient.Get(resGroup, name, "") - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Virtual Network %s (resource group %s) ID", name, resGroup) - } - - log.Printf("[DEBUG] Waiting for NSG (%s) to become available", d.Get("name")) - stateConf 
:= &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: networkSecurityGroupStateRefreshFunc(client, resGroup, name), - Timeout: 30 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for NSG (%s) to become available: %s", d.Get("name"), err) - } - - d.SetId(*read.ID) - - return resourceArmNetworkSecurityGroupRead(d, meta) -} - -func resourceArmNetworkSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - secGroupClient := meta.(*ArmClient).secGroupClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["networkSecurityGroups"] - - resp, err := secGroupClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Network Security Group %s: %s", name, err) - } - - if resp.SecurityGroupPropertiesFormat.SecurityRules != nil { - d.Set("security_rule", flattenNetworkSecurityRules(resp.SecurityGroupPropertiesFormat.SecurityRules)) - } - - d.Set("resource_group_name", resGroup) - d.Set("name", resp.Name) - d.Set("location", resp.Location) - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmNetworkSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - secGroupClient := meta.(*ArmClient).secGroupClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["networkSecurityGroups"] - - _, error := secGroupClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func resourceArmNetworkSecurityGroupRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) - buf.WriteString(fmt.Sprintf("%s-", 
m["source_port_range"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["destination_port_range"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["source_address_prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["destination_address_prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["access"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["priority"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["direction"].(string))) - - return hashcode.String(buf.String()) -} - -func flattenNetworkSecurityRules(rules *[]network.SecurityRule) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(*rules)) - for _, rule := range *rules { - sgRule := make(map[string]interface{}) - sgRule["name"] = *rule.Name - sgRule["destination_address_prefix"] = *rule.SecurityRulePropertiesFormat.DestinationAddressPrefix - sgRule["destination_port_range"] = *rule.SecurityRulePropertiesFormat.DestinationPortRange - sgRule["source_address_prefix"] = *rule.SecurityRulePropertiesFormat.SourceAddressPrefix - sgRule["source_port_range"] = *rule.SecurityRulePropertiesFormat.SourcePortRange - sgRule["priority"] = int(*rule.SecurityRulePropertiesFormat.Priority) - sgRule["access"] = rule.SecurityRulePropertiesFormat.Access - sgRule["direction"] = rule.SecurityRulePropertiesFormat.Direction - sgRule["protocol"] = rule.SecurityRulePropertiesFormat.Protocol - - if rule.SecurityRulePropertiesFormat.Description != nil { - sgRule["description"] = *rule.SecurityRulePropertiesFormat.Description - } - - result = append(result, sgRule) - } - return result -} - -func expandAzureRmSecurityRules(d *schema.ResourceData) ([]network.SecurityRule, error) { - sgRules := d.Get("security_rule").(*schema.Set).List() - rules := make([]network.SecurityRule, 0, len(sgRules)) - - for _, sgRaw := range sgRules { - data := sgRaw.(map[string]interface{}) - - source_port_range := data["source_port_range"].(string) - destination_port_range := data["destination_port_range"].(string) - 
source_address_prefix := data["source_address_prefix"].(string) - destination_address_prefix := data["destination_address_prefix"].(string) - priority := int32(data["priority"].(int)) - - properties := network.SecurityRulePropertiesFormat{ - SourcePortRange: &source_port_range, - DestinationPortRange: &destination_port_range, - SourceAddressPrefix: &source_address_prefix, - DestinationAddressPrefix: &destination_address_prefix, - Priority: &priority, - Access: network.SecurityRuleAccess(data["access"].(string)), - Direction: network.SecurityRuleDirection(data["direction"].(string)), - Protocol: network.SecurityRuleProtocol(data["protocol"].(string)), - } - - if v := data["description"].(string); v != "" { - properties.Description = &v - } - - name := data["name"].(string) - rule := network.SecurityRule{ - Name: &name, - SecurityRulePropertiesFormat: &properties, - } - - rules = append(rules, rule) - } - - return rules, nil -} - -func networkSecurityGroupStateRefreshFunc(client *ArmClient, resourceGroupName string, sgName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.secGroupClient.Get(resourceGroupName, sgName, "") - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in networkSecurityGroupStateRefreshFunc to Azure ARM for NSG '%s' (RG: '%s'): %s", sgName, resourceGroupName, err) - } - - return res, *res.SecurityGroupPropertiesFormat.ProvisioningState, nil - } -} diff --git a/builtin/providers/azurerm/resource_arm_network_security_group_test.go b/builtin/providers/azurerm/resource_arm_network_security_group_test.go deleted file mode 100644 index 629fa3f7d..000000000 --- a/builtin/providers/azurerm/resource_arm_network_security_group_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - 
-func TestAccAzureRMNetworkSecurityGroup_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityGroup_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkSecurityGroup_disappears(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityGroup_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - testCheckAzureRMNetworkSecurityGroupDisappears("azurerm_network_security_group.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMNetworkSecurityGroup_withTags(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityGroup_withTags(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "tags.cost_center", "MSFT"), - ), - }, - - { - Config: 
testAccAzureRMNetworkSecurityGroup_withTagsUpdate(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkSecurityGroup_addingExtraRules(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityGroup_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "security_rule.#", "1"), - ), - }, - - { - Config: testAccAzureRMNetworkSecurityGroup_anotherRule(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"), - resource.TestCheckResourceAttr( - "azurerm_network_security_group.test", "security_rule.#", "2"), - ), - }, - }, - }) -} - -func testCheckAzureRMNetworkSecurityGroupExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - sgName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for network security group: %s", sgName) - } - - conn := testAccProvider.Meta().(*ArmClient).secGroupClient - - resp, err := conn.Get(resourceGroup, sgName, "") - if err != nil { - return fmt.Errorf("Bad: Get on secGroupClient: 
%s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Network Security Group %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMNetworkSecurityGroupDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - sgName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for network security group: %s", sgName) - } - - conn := testAccProvider.Meta().(*ArmClient).secGroupClient - - _, error := conn.Delete(resourceGroup, sgName, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on secGroupClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMNetworkSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).secGroupClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_network_security_group" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Network Security Group still exists:\n%#v", resp.SecurityGroupPropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMNetworkSecurityGroup_basic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test" { - name = "acceptanceTestSecurityGroup1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - security_rule { - name = "test123" - priority = 100 - 
direction = "Inbound" - access = "Allow" - protocol = "TCP" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } -} -`, rInt) -} - -func testAccAzureRMNetworkSecurityGroup_anotherRule(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test" { - name = "acceptanceTestSecurityGroup1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - security_rule { - name = "test123" - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } - - security_rule { - name = "testDeny" - priority = 101 - direction = "Inbound" - access = "Deny" - protocol = "Udp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } -} -`, rInt) -} - -func testAccAzureRMNetworkSecurityGroup_withTags(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test" { - name = "acceptanceTestSecurityGroup1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - security_rule { - name = "test123" - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } - - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -`, rInt) -} - -func testAccAzureRMNetworkSecurityGroup_withTagsUpdate(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource 
"azurerm_network_security_group" "test" { - name = "acceptanceTestSecurityGroup1" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - security_rule { - name = "test123" - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } - - tags { - environment = "staging" - } -} -`, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_network_security_rule.go b/builtin/providers/azurerm/resource_arm_network_security_rule.go deleted file mode 100644 index 470fb8c3e..000000000 --- a/builtin/providers/azurerm/resource_arm_network_security_rule.go +++ /dev/null @@ -1,221 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmNetworkSecurityRule() *schema.Resource { - return &schema.Resource{ - Create: resourceArmNetworkSecurityRuleCreate, - Read: resourceArmNetworkSecurityRuleRead, - Update: resourceArmNetworkSecurityRuleCreate, - Delete: resourceArmNetworkSecurityRuleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_security_group_name": { - Type: schema.TypeString, - Required: true, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 140 { - errors = append(errors, fmt.Errorf( - "The network security rule description can be no longer than 140 chars")) - } - return - }, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validateNetworkSecurityRuleProtocol, - }, - - "source_port_range": { - Type: schema.TypeString, - Required: true, - }, - - "destination_port_range": { - Type: schema.TypeString, - Required: true, - }, - - "source_address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "destination_address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "access": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkSecurityRuleAccess, - }, - - "priority": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 100 || value > 4096 { - errors = append(errors, fmt.Errorf( - "The `priority` can only be between 100 and 4096")) - } - return - }, - }, - - "direction": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateNetworkSecurityRuleDirection, - }, - }, - } -} - -func resourceArmNetworkSecurityRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - secClient := client.secRuleClient - - name := d.Get("name").(string) - nsgName := d.Get("network_security_group_name").(string) - resGroup := d.Get("resource_group_name").(string) - - source_port_range := d.Get("source_port_range").(string) - destination_port_range := d.Get("destination_port_range").(string) - source_address_prefix := d.Get("source_address_prefix").(string) - destination_address_prefix := d.Get("destination_address_prefix").(string) - priority := int32(d.Get("priority").(int)) - access := d.Get("access").(string) - direction := d.Get("direction").(string) - protocol := d.Get("protocol").(string) - - armMutexKV.Lock(nsgName) - defer armMutexKV.Unlock(nsgName) - - properties := network.SecurityRulePropertiesFormat{ - SourcePortRange: &source_port_range, - DestinationPortRange: &destination_port_range, - SourceAddressPrefix: &source_address_prefix, - DestinationAddressPrefix: &destination_address_prefix, - Priority: 
&priority, - Access: network.SecurityRuleAccess(access), - Direction: network.SecurityRuleDirection(direction), - Protocol: network.SecurityRuleProtocol(protocol), - } - - if v, ok := d.GetOk("description"); ok { - description := v.(string) - properties.Description = &description - } - - sgr := network.SecurityRule{ - Name: &name, - SecurityRulePropertiesFormat: &properties, - } - - _, error := secClient.CreateOrUpdate(resGroup, nsgName, name, sgr, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := secClient.Get(resGroup, nsgName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Security Group Rule %s/%s (resource group %s) ID", - nsgName, name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmNetworkSecurityRuleRead(d, meta) -} - -func resourceArmNetworkSecurityRuleRead(d *schema.ResourceData, meta interface{}) error { - secRuleClient := meta.(*ArmClient).secRuleClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - networkSGName := id.Path["networkSecurityGroups"] - sgRuleName := id.Path["securityRules"] - - resp, err := secRuleClient.Get(resGroup, networkSGName, sgRuleName) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Network Security Rule %s: %s", sgRuleName, err) - } - - d.Set("resource_group_name", resGroup) - d.Set("access", resp.SecurityRulePropertiesFormat.Access) - d.Set("destination_address_prefix", resp.SecurityRulePropertiesFormat.DestinationAddressPrefix) - d.Set("destination_port_range", resp.SecurityRulePropertiesFormat.DestinationPortRange) - d.Set("direction", resp.SecurityRulePropertiesFormat.Direction) - d.Set("description", resp.SecurityRulePropertiesFormat.Description) - d.Set("name", resp.Name) - d.Set("priority", resp.SecurityRulePropertiesFormat.Priority) - d.Set("protocol", 
resp.SecurityRulePropertiesFormat.Protocol) - d.Set("source_address_prefix", resp.SecurityRulePropertiesFormat.SourceAddressPrefix) - d.Set("source_port_range", resp.SecurityRulePropertiesFormat.SourcePortRange) - - return nil -} - -func resourceArmNetworkSecurityRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - secRuleClient := client.secRuleClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - nsgName := id.Path["networkSecurityGroups"] - sgRuleName := id.Path["securityRules"] - - armMutexKV.Lock(nsgName) - defer armMutexKV.Unlock(nsgName) - - _, error := secRuleClient.Delete(resGroup, nsgName, sgRuleName, make(chan struct{})) - err = <-error - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_network_security_rule_test.go b/builtin/providers/azurerm/resource_arm_network_security_rule_test.go deleted file mode 100644 index d1392be66..000000000 --- a/builtin/providers/azurerm/resource_arm_network_security_rule_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMNetworkSecurityRule_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityRule_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityRuleExists("azurerm_network_security_rule.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMNetworkSecurityRule_disappears(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityRule_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityRuleExists("azurerm_network_security_rule.test"), - testCheckAzureRMNetworkSecurityRuleDisappears("azurerm_network_security_rule.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMNetworkSecurityRule_addingRules(t *testing.T) { - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAzureRMNetworkSecurityRule_updateBasic(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityRuleExists("azurerm_network_security_rule.test1"), - ), - }, - - { - Config: testAccAzureRMNetworkSecurityRule_updateExtraRule(rInt), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMNetworkSecurityRuleExists("azurerm_network_security_rule.test2"), - ), - }, - }, - }) -} - -func testCheckAzureRMNetworkSecurityRuleExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - sgName := rs.Primary.Attributes["network_security_group_name"] - sgrName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for network security rule: %s", sgName) - } - - conn := testAccProvider.Meta().(*ArmClient).secRuleClient - - resp, err := conn.Get(resourceGroup, sgName, sgrName) - if err != nil { - return fmt.Errorf("Bad: Get on secRuleClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Network Security 
Rule %q (resource group: %q) (network security group: %q) does not exist", sgrName, sgName, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMNetworkSecurityRuleDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - sgName := rs.Primary.Attributes["network_security_group_name"] - sgrName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for network security rule: %s", sgName) - } - - conn := testAccProvider.Meta().(*ArmClient).secRuleClient - - _, error := conn.Delete(resourceGroup, sgName, sgrName, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on secRuleClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMNetworkSecurityRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).secRuleClient - - for _, rs := range s.RootModule().Resources { - - if rs.Type != "azurerm_network_security_rule" { - continue - } - - sgName := rs.Primary.Attributes["network_security_group_name"] - sgrName := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, sgName, sgrName) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Network Security Rule still exists:\n%#v", resp.SecurityRulePropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMNetworkSecurityRule_basic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test" { - name = "acceptanceTestSecurityGroup1" - location = "West US" - resource_group_name = 
"${azurerm_resource_group.test.name}" -} - -resource "azurerm_network_security_rule" "test" { - name = "test123" - priority = 100 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - resource_group_name = "${azurerm_resource_group.test.name}" - network_security_group_name = "${azurerm_network_security_group.test.name}" -} -`, rInt) -} - -func testAccAzureRMNetworkSecurityRule_updateBasic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test1" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test1" { - name = "acceptanceTestSecurityGroup2" - location = "West US" - resource_group_name = "${azurerm_resource_group.test1.name}" -} - -resource "azurerm_network_security_rule" "test1" { - name = "test123" - priority = 100 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - resource_group_name = "${azurerm_resource_group.test1.name}" - network_security_group_name = "${azurerm_network_security_group.test1.name}" -} -`, rInt) -} - -func testAccAzureRMNetworkSecurityRule_updateExtraRule(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test1" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_network_security_group" "test1" { - name = "acceptanceTestSecurityGroup2" - location = "West US" - resource_group_name = "${azurerm_resource_group.test1.name}" -} - -resource "azurerm_network_security_rule" "test1" { - name = "test123" - priority = 100 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - resource_group_name = "${azurerm_resource_group.test1.name}" - 
network_security_group_name = "${azurerm_network_security_group.test1.name}" -} - -resource "azurerm_network_security_rule" "test2" { - name = "testing456" - priority = 101 - direction = "Inbound" - access = "Deny" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - resource_group_name = "${azurerm_resource_group.test1.name}" - network_security_group_name = "${azurerm_network_security_group.test1.name}" -} -`, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_public_ip.go b/builtin/providers/azurerm/resource_arm_public_ip.go deleted file mode 100644 index 934839d8c..000000000 --- a/builtin/providers/azurerm/resource_arm_public_ip.go +++ /dev/null @@ -1,241 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "regexp" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmPublicIp() *schema.Resource { - return &schema.Resource{ - Create: resourceArmPublicIpCreate, - Read: resourceArmPublicIpRead, - Update: resourceArmPublicIpCreate, - Delete: resourceArmPublicIpDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "public_ip_address_allocation": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validatePublicIpAllocation, - StateFunc: ignoreCaseStateFunc, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "idle_timeout_in_minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 4 || value > 30 { - errors = append(errors, fmt.Errorf( - "The idle timeout must be 
between 4 and 30 minutes")) - } - return - }, - }, - - "domain_name_label": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validatePublicIpDomainNameLabel, - }, - - "reverse_fqdn": { - Type: schema.TypeString, - Optional: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - - "ip_address": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmPublicIpCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - publicIPClient := client.publicIPClient - - log.Printf("[INFO] preparing arguments for Azure ARM Public IP creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - - properties := network.PublicIPAddressPropertiesFormat{ - PublicIPAllocationMethod: network.IPAllocationMethod(d.Get("public_ip_address_allocation").(string)), - } - - dnl, hasDnl := d.GetOk("domain_name_label") - rfqdn, hasRfqdn := d.GetOk("reverse_fqdn") - - if hasDnl || hasRfqdn { - dnsSettings := network.PublicIPAddressDNSSettings{} - - if hasRfqdn { - reverse_fqdn := rfqdn.(string) - dnsSettings.ReverseFqdn = &reverse_fqdn - } - - if hasDnl { - domain_name_label := dnl.(string) - dnsSettings.DomainNameLabel = &domain_name_label - - } - - properties.DNSSettings = &dnsSettings - } - - if v, ok := d.GetOk("idle_timeout_in_minutes"); ok { - idle_timeout := int32(v.(int)) - properties.IdleTimeoutInMinutes = &idle_timeout - } - - publicIp := network.PublicIPAddress{ - Name: &name, - Location: &location, - PublicIPAddressPropertiesFormat: &properties, - Tags: expandTags(tags), - } - - _, error := publicIPClient.CreateOrUpdate(resGroup, name, publicIp, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := publicIPClient.Get(resGroup, name, "") - if err != nil { - return err - } - if read.ID == nil { - return 
fmt.Errorf("Cannot read Public IP %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmPublicIpRead(d, meta) -} - -func resourceArmPublicIpRead(d *schema.ResourceData, meta interface{}) error { - publicIPClient := meta.(*ArmClient).publicIPClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["publicIPAddresses"] - - resp, err := publicIPClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure public ip %s: %s", name, err) - } - - d.Set("resource_group_name", resGroup) - d.Set("location", resp.Location) - d.Set("name", resp.Name) - d.Set("public_ip_address_allocation", strings.ToLower(string(resp.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod))) - - if resp.PublicIPAddressPropertiesFormat.DNSSettings != nil && resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn != nil && *resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn != "" { - d.Set("fqdn", resp.PublicIPAddressPropertiesFormat.DNSSettings.Fqdn) - } - - if resp.PublicIPAddressPropertiesFormat.IPAddress != nil && *resp.PublicIPAddressPropertiesFormat.IPAddress != "" { - d.Set("ip_address", resp.PublicIPAddressPropertiesFormat.IPAddress) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmPublicIpDelete(d *schema.ResourceData, meta interface{}) error { - publicIPClient := meta.(*ArmClient).publicIPClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["publicIPAddresses"] - - _, error := publicIPClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func validatePublicIpAllocation(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - allocations := map[string]bool{ - "static": 
true, - "dynamic": true, - } - - if !allocations[value] { - errors = append(errors, fmt.Errorf("Public IP Allocation can only be Static of Dynamic")) - } - return -} - -func validatePublicIpDomainNameLabel(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-z0-9-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q: %q", - k, value)) - } - - if len(value) > 61 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 61 characters: %q", k, value)) - } - - if len(value) == 0 { - errors = append(errors, fmt.Errorf( - "%q cannot be an empty string: %q", k, value)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen: %q", k, value)) - } - - return -} diff --git a/builtin/providers/azurerm/resource_arm_public_ip_test.go b/builtin/providers/azurerm/resource_arm_public_ip_test.go deleted file mode 100644 index 77041a332..000000000 --- a/builtin/providers/azurerm/resource_arm_public_ip_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMPublicIpAllocation_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Static", - ErrCount: 0, - }, - { - Value: "Dynamic", - ErrCount: 0, - }, - { - Value: "STATIC", - ErrCount: 0, - }, - { - Value: "static", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validatePublicIpAllocation(tc.Value, "azurerm_public_ip") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Public IP allocation to trigger a validation error") - } - } -} - -func 
TestResourceAzureRMPublicIpDomainNameLabel_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "tEsting123", - ErrCount: 1, - }, - { - Value: "testing123!", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: acctest.RandString(80), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validatePublicIpDomainNameLabel(tc.Value, "azurerm_public_ip") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Public IP Domain Name Label to trigger a validation error") - } - } -} - -func TestAccAzureRMPublicIpStatic_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVPublicIpStatic_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMPublicIpStatic_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVPublicIpStatic_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - testCheckAzureRMPublicIpDisappears("azurerm_public_ip.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMPublicIpStatic_idleTimeout(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVPublicIpStatic_idleTimeout, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", - "idle_timeout_in_minutes", - "30", - ), - ), - }, - }, - }) -} - -func TestAccAzureRMPublicIpStatic_withTags(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVPublicIpStatic_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVPublicIpStatic_withTagsUpdate, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "tags.cost_center", "MSFT"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMPublicIpStatic_update(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVPublicIpStatic_basic, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVPublicIpStatic_update, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - 
Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - resource.TestCheckResourceAttr( - "azurerm_public_ip.test", "domain_name_label", "mylabel01"), - ), - }, - }, - }) -} - -func TestAccAzureRMPublicIpDynamic_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVPublicIpDynamic_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMPublicIpDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMPublicIpExists("azurerm_public_ip.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMPublicIpExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - availSetName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for public ip: %s", availSetName) - } - - conn := testAccProvider.Meta().(*ArmClient).publicIPClient - - resp, err := conn.Get(resourceGroup, availSetName, "") - if err != nil { - return fmt.Errorf("Bad: Get on publicIPClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Public IP %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMPublicIpDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok 
:= s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - publicIpName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for public ip: %s", publicIpName) - } - - conn := testAccProvider.Meta().(*ArmClient).publicIPClient - - _, error := conn.Delete(resourceGroup, publicIpName, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on publicIPClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMPublicIpDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).publicIPClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_public_ip" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Public IP still exists:\n%#v", resp.PublicIPAddressPropertiesFormat) - } - } - - return nil -} - -var testAccAzureRMVPublicIpStatic_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" -} -` - -var testAccAzureRMVPublicIpStatic_update = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" - domain_name_label = "mylabel01" -} -` - -var testAccAzureRMVPublicIpStatic_idleTimeout = ` -resource 
"azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" - idle_timeout_in_minutes = 30 -} -` - -var testAccAzureRMVPublicIpDynamic_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "dynamic" -} -` - -var testAccAzureRMVPublicIpStatic_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMVPublicIpStatic_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_public_ip" "test" { - name = "acctestpublicip-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - public_ip_address_allocation = "static" - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_redis_cache.go b/builtin/providers/azurerm/resource_arm_redis_cache.go deleted file mode 100644 index 93505ec91..000000000 --- a/builtin/providers/azurerm/resource_arm_redis_cache.go +++ /dev/null @@ -1,460 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/redis" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/jen20/riviera/azure" -) - -func resourceArmRedisCache() *schema.Resource { - return &schema.Resource{ - Create: resourceArmRedisCacheCreate, - Read: resourceArmRedisCacheRead, - Update: resourceArmRedisCacheUpdate, - Delete: resourceArmRedisCacheDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: azureRMNormalizeLocation, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "capacity": { - Type: schema.TypeInt, - Required: true, - }, - - "family": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRedisFamily, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "sku_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRedisSku, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "shard_count": { - Type: schema.TypeInt, - Optional: true, - }, - - "enable_non_ssl_port": { - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - - "redis_configuration": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "maxclients": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "maxmemory_delta": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "maxmemory_reserved": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "maxmemory_policy": { - Type: schema.TypeString, - Optional: true, - Default: "volatile-lru", - ValidateFunc: validateRedisMaxMemoryPolicy, - }, - }, - }, - }, - - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - - "port": { - Type: schema.TypeInt, - Computed: true, - }, - - "ssl_port": { - Type: schema.TypeInt, - Computed: true, - }, - - "primary_access_key": { - Type: schema.TypeString, - Computed: true, - }, - 
- "secondary_access_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmRedisCacheCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).redisClient - log.Printf("[INFO] preparing arguments for Azure ARM Redis Cache creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - - enableNonSSLPort := d.Get("enable_non_ssl_port").(bool) - - capacity := int32(d.Get("capacity").(int)) - family := redis.SkuFamily(d.Get("family").(string)) - sku := redis.SkuName(d.Get("sku_name").(string)) - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - parameters := redis.CreateParameters{ - Name: &name, - Location: &location, - CreateProperties: &redis.CreateProperties{ - EnableNonSslPort: &enableNonSSLPort, - Sku: &redis.Sku{ - Capacity: &capacity, - Family: family, - Name: sku, - }, - RedisConfiguration: expandRedisConfiguration(d), - }, - Tags: expandedTags, - } - - if v, ok := d.GetOk("shard_count"); ok { - shardCount := int32(v.(int)) - parameters.ShardCount = &shardCount - } - - _, error := client.Create(resGroup, name, parameters, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := client.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Redis Instance %s (resource group %s) ID", name, resGroup) - } - - log.Printf("[DEBUG] Waiting for Redis Instance (%s) to become available", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: redisStateRefreshFunc(client, resGroup, name), - Timeout: 60 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Redis Instance (%s) to become available: %s", 
d.Get("name"), err) - } - - d.SetId(*read.ID) - - return resourceArmRedisCacheRead(d, meta) -} - -func resourceArmRedisCacheUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).redisClient - log.Printf("[INFO] preparing arguments for Azure ARM Redis Cache update.") - - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - - enableNonSSLPort := d.Get("enable_non_ssl_port").(bool) - - capacity := int32(d.Get("capacity").(int)) - family := redis.SkuFamily(d.Get("family").(string)) - sku := redis.SkuName(d.Get("sku_name").(string)) - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - parameters := redis.UpdateParameters{ - UpdateProperties: &redis.UpdateProperties{ - EnableNonSslPort: &enableNonSSLPort, - Sku: &redis.Sku{ - Capacity: &capacity, - Family: family, - Name: sku, - }, - Tags: expandedTags, - }, - } - - if v, ok := d.GetOk("shard_count"); ok { - if d.HasChange("shard_count") { - shardCount := int32(v.(int)) - parameters.ShardCount = &shardCount - } - } - - if d.HasChange("redis_configuration") { - redisConfiguration := expandRedisConfiguration(d) - parameters.RedisConfiguration = redisConfiguration - } - - _, err := client.Update(resGroup, name, parameters) - if err != nil { - return err - } - - read, err := client.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Redis Instance %s (resource group %s) ID", name, resGroup) - } - - log.Printf("[DEBUG] Waiting for Redis Instance (%s) to become available", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: redisStateRefreshFunc(client, resGroup, name), - Timeout: 60 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Redis Instance (%s) to become available: %s", 
d.Get("name"), err) - } - - d.SetId(*read.ID) - - return resourceArmRedisCacheRead(d, meta) -} - -func resourceArmRedisCacheRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).redisClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["Redis"] - - resp, err := client.Get(resGroup, name) - - // covers if the resource has been deleted outside of TF, but is still in the state - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - if err != nil { - return fmt.Errorf("Error making Read request on Azure Redis Cache %s: %s", name, err) - } - - keysResp, err := client.ListKeys(resGroup, name) - if err != nil { - return fmt.Errorf("Error making ListKeys request on Azure Redis Cache %s: %s", name, err) - } - - d.Set("name", name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("ssl_port", resp.SslPort) - d.Set("hostname", resp.HostName) - d.Set("port", resp.Port) - d.Set("enable_non_ssl_port", resp.EnableNonSslPort) - d.Set("capacity", resp.Sku.Capacity) - d.Set("family", resp.Sku.Family) - d.Set("sku_name", resp.Sku.Name) - - if resp.ShardCount != nil { - d.Set("shard_count", resp.ShardCount) - } - - redisConfiguration := flattenRedisConfiguration(resp.RedisConfiguration) - d.Set("redis_configuration", &redisConfiguration) - - d.Set("primary_access_key", keysResp.PrimaryKey) - d.Set("secondary_access_key", keysResp.SecondaryKey) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmRedisCacheDelete(d *schema.ResourceData, meta interface{}) error { - redisClient := meta.(*ArmClient).redisClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["Redis"] - - deleteResp, error := redisClient.Delete(resGroup, name, make(chan struct{})) - resp := <-deleteResp - err = <-error - - if 
resp.StatusCode != http.StatusOK { - return fmt.Errorf("Error issuing Azure ARM delete request of Redis Cache Instance '%s': %s", name, err) - } - - checkResp, _ := redisClient.Get(resGroup, name) - if checkResp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Error issuing Azure ARM delete request of Redis Cache Instance '%s': it still exists after deletion", name) - } - - return nil -} - -func redisStateRefreshFunc(client redis.GroupClient, resourceGroupName string, sgName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.Get(resourceGroupName, sgName) - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in redisStateRefreshFunc to Azure ARM for Redis Cache Instance '%s' (RG: '%s'): %s", sgName, resourceGroupName, err) - } - - return res, *res.ProvisioningState, nil - } -} - -func expandRedisConfiguration(d *schema.ResourceData) *map[string]*string { - configuration := d.Get("redis_configuration").([]interface{}) - - output := make(map[string]*string) - - if configuration == nil { - return &output - } - - // TODO: can we use this to remove the below? 
\/ - //config := configuration[0].(map[string]interface{}) - - for _, v := range configuration { - config := v.(map[string]interface{}) - - maxClients := config["maxclients"].(string) - if maxClients != "" { - output["maxclients"] = azure.String(maxClients) - } - - maxMemoryDelta := config["maxmemory_delta"].(string) - if maxMemoryDelta != "" { - output["maxmemory-delta"] = azure.String(maxMemoryDelta) - } - - maxMemoryReserved := config["maxmemory_reserved"].(string) - if maxMemoryReserved != "" { - output["maxmemory-reserved"] = azure.String(maxMemoryReserved) - } - - maxMemoryPolicy := config["maxmemory_policy"].(string) - if maxMemoryPolicy != "" { - output["maxmemory-policy"] = azure.String(maxMemoryPolicy) - } - } - - return &output -} - -func flattenRedisConfiguration(configuration *map[string]*string) map[string]*string { - redisConfiguration := make(map[string]*string, len(*configuration)) - config := *configuration - - redisConfiguration["maxclients"] = config["maxclients"] - redisConfiguration["maxmemory_delta"] = config["maxmemory-delta"] - redisConfiguration["maxmemory_reserved"] = config["maxmemory-reserved"] - redisConfiguration["maxmemory_policy"] = config["maxmemory-policy"] - - return redisConfiguration -} - -func validateRedisFamily(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - families := map[string]bool{ - "c": true, - "p": true, - } - - if !families[value] { - errors = append(errors, fmt.Errorf("Redis Family can only be C or P")) - } - return -} - -func validateRedisMaxMemoryPolicy(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - families := map[string]bool{ - "noeviction": true, - "allkeys-lru": true, - "volatile-lru": true, - "allkeys-random": true, - "volatile-random": true, - "volatile-ttl": true, - } - - if !families[value] { - errors = append(errors, fmt.Errorf("Redis Max Memory Policy can only be 'noeviction' / 'allkeys-lru' / 
'volatile-lru' / 'allkeys-random' / 'volatile-random' / 'volatile-ttl'")) - } - - return -} - -func validateRedisSku(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - skus := map[string]bool{ - "basic": true, - "standard": true, - "premium": true, - } - - if !skus[value] { - errors = append(errors, fmt.Errorf("Redis SKU can only be Basic, Standard or Premium")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_redis_cache_test.go b/builtin/providers/azurerm/resource_arm_redis_cache_test.go deleted file mode 100644 index dacafe135..000000000 --- a/builtin/providers/azurerm/resource_arm_redis_cache_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMRedisCacheFamily_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "C", - ErrCount: 0, - }, - { - Value: "P", - ErrCount: 0, - }, - { - Value: "c", - ErrCount: 0, - }, - { - Value: "p", - ErrCount: 0, - }, - { - Value: "a", - ErrCount: 1, - }, - { - Value: "b", - ErrCount: 1, - }, - { - Value: "D", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedisFamily(tc.Value, "azurerm_redis_cache") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Redis Cache Family to trigger a validation error") - } - } -} - -func TestAccAzureRMRedisCacheMaxMemoryPolicy_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "noeviction", ErrCount: 0}, - {Value: "allkeys-lru", ErrCount: 0}, - {Value: "volatile-lru", ErrCount: 0}, - {Value: "allkeys-random", ErrCount: 0}, - {Value: "volatile-random", ErrCount: 0}, - {Value: "volatile-ttl", ErrCount: 0}, - {Value: "something-else", ErrCount: 1}, - } - - for _, tc := range cases { - 
_, errors := validateRedisMaxMemoryPolicy(tc.Value, "azurerm_redis_cache") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Redis Cache Max Memory Policy to trigger a validation error") - } - } -} - -func TestAccAzureRMRedisCacheSku_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Basic", - ErrCount: 0, - }, - { - Value: "Standard", - ErrCount: 0, - }, - { - Value: "Premium", - ErrCount: 0, - }, - { - Value: "Random", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateRedisSku(tc.Value, "azurerm_redis_cache") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Redis Cache Sku to trigger a validation error") - } - } -} - -func TestAccAzureRMRedisCache_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRedisCache_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRedisCacheDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRedisCache_standard(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRedisCache_standard, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRedisCacheDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRedisCache_premium(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRedisCache_premium, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testCheckAzureRMRedisCacheDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRedisCache_premiumSharded(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRedisCache_premiumSharded, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRedisCacheDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRedisCache_NonStandardCasing(t *testing.T) { - ri := acctest.RandInt() - config := testAccAzureRMRedisCacheNonStandardCasing(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRedisCacheDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"), - ), - }, - - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMRedisCacheExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - redisName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Redis Instance: %s", redisName) - } - - conn := testAccProvider.Meta().(*ArmClient).redisClient - - resp, err := conn.Get(resourceGroup, redisName) - if 
err != nil { - return fmt.Errorf("Bad: Get on redisClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Redis Instance %q (resource group: %q) does not exist", redisName, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMRedisCacheDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).redisClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_redis_cache" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Redis Instance still exists:\n%#v", resp) - } - } - - return nil -} - -var testAccAzureRMRedisCache_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_redis_cache" "test" { - name = "acctestRedis-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - capacity = 1 - family = "C" - sku_name = "Basic" - enable_non_ssl_port = false - - redis_configuration { - maxclients = "256" - } -} -` - -var testAccAzureRMRedisCache_standard = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_redis_cache" "test" { - name = "acctestRedis-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - capacity = 1 - family = "C" - sku_name = "Standard" - enable_non_ssl_port = false - redis_configuration { - maxclients = "256" - } - - tags { - environment = "production" - } -} -` - -var testAccAzureRMRedisCache_premium = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_redis_cache" "test" { - name = "acctestRedis-%d" - 
location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - capacity = 1 - family = "P" - sku_name = "Premium" - enable_non_ssl_port = false - redis_configuration { - maxclients = "256", - maxmemory_reserved = "2", - maxmemory_delta = "2" - maxmemory_policy = "allkeys-lru" - } -} -` - -var testAccAzureRMRedisCache_premiumSharded = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_redis_cache" "test" { - name = "acctestRedis-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - capacity = 1 - family = "P" - sku_name = "Premium" - enable_non_ssl_port = true - shard_count = 3 - redis_configuration { - maxclients = "256", - maxmemory_reserved = "2", - maxmemory_delta = "2" - maxmemory_policy = "allkeys-lru" - } -} -` - -func testAccAzureRMRedisCacheNonStandardCasing(ri int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_redis_cache" "test" { - name = "acctestRedis-%d" - location = "${azurerm_resource_group.test.location}" - resource_group_name = "${azurerm_resource_group.test.name}" - capacity = 1 - family = "c" - sku_name = "basic" - enable_non_ssl_port = false - redis_configuration { - maxclients = "256" - } -} -`, ri, ri) -} diff --git a/builtin/providers/azurerm/resource_arm_resource_group.go b/builtin/providers/azurerm/resource_arm_resource_group.go deleted file mode 100644 index 7d71f7607..000000000 --- a/builtin/providers/azurerm/resource_arm_resource_group.go +++ /dev/null @@ -1,183 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "regexp" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" -) - -func resourceArmResourceGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceArmResourceGroupCreate, - Read: 
resourceArmResourceGroupRead, - Update: resourceArmResourceGroupUpdate, - Exists: resourceArmResourceGroupExists, - Delete: resourceArmResourceGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmResourceGroupName, - }, - - "location": locationSchema(), - - "tags": tagsSchema(), - }, - } -} - -func validateArmResourceGroupName(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - - if len(value) > 80 { - es = append(es, fmt.Errorf("%q may not exceed 80 characters in length", k)) - } - - if strings.HasSuffix(value, ".") { - es = append(es, fmt.Errorf("%q may not end with a period", k)) - } - - if matched := regexp.MustCompile(`[\(\)\.a-zA-Z0-9_-]`).Match([]byte(value)); !matched { - es = append(es, fmt.Errorf("%q may only contain alphanumeric characters, dash, underscores, parentheses and periods", k)) - } - - return -} - -func resourceArmResourceGroupUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - if !d.HasChange("tags") { - return nil - } - - name := d.Get("name").(string) - newTags := d.Get("tags").(map[string]interface{}) - - updateRequest := rivieraClient.NewRequestForURI(d.Id()) - updateRequest.Command = &azure.UpdateResourceGroup{ - Name: name, - Tags: *expandTags(newTags), - } - - updateResponse, err := updateRequest.Execute() - if err != nil { - return fmt.Errorf("Error updating resource group: %s", err) - } - if !updateResponse.IsSuccessful() { - return fmt.Errorf("Error updating resource group: %s", updateResponse.Error) - } - - return resourceArmResourceGroupRead(d, meta) -} - -func resourceArmResourceGroupCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - createRequest := 
rivieraClient.NewRequest() - createRequest.Command = &azure.CreateResourceGroup{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - Tags: *expandTags(d.Get("tags").(map[string]interface{})), - } - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating resource group: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating resource group: %s", createResponse.Error) - } - - resp := createResponse.Parsed.(*azure.CreateResourceGroupResponse) - d.SetId(*resp.ID) - - // TODO(jen20): Decide whether we need this or not and migrate to use @stack72's work if so - // log.Printf("[DEBUG] Waiting for Resource Group (%s) to become available", name) - // stateConf := &resource.StateChangeConf{ - // Pending: []string{"Accepted"}, - // Target: []string{"Succeeded"}, - // Refresh: resourceGroupStateRefreshFunc(client, name), - // Timeout: 10 * time.Minute, - // } - // if _, err := stateConf.WaitForState(); err != nil { - // return fmt.Errorf("Error waiting for Resource Group (%s) to become available: %s", name, err) - // } - - return resourceArmResourceGroupRead(d, meta) -} - -func resourceArmResourceGroupRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &azure.GetResourceGroup{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading resource group: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading resource group %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading resource group: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*azure.GetResourceGroupResponse) - - d.Set("name", resp.Name) - d.Set("location", resp.Location) - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func 
resourceArmResourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &azure.GetResourceGroup{} - - readResponse, err := readRequest.Execute() - if err != nil { - return false, fmt.Errorf("Error reading resource group: %s", err) - } - if readResponse.IsSuccessful() { - return true, nil - } - - return false, nil -} - -func resourceArmResourceGroupDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &azure.DeleteResourceGroup{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting resource group: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting resource group: %s", deleteResponse.Error) - } - - return nil - -} diff --git a/builtin/providers/azurerm/resource_arm_resource_group_test.go b/builtin/providers/azurerm/resource_arm_resource_group_test.go deleted file mode 100644 index bf091a938..000000000 --- a/builtin/providers/azurerm/resource_arm_resource_group_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMResourceGroup_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMResourceGroup_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMResourceGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMResourceGroupExists("azurerm_resource_group.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMResourceGroup_disappears(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMResourceGroup_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMResourceGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMResourceGroupExists("azurerm_resource_group.test"), - testCheckAzureRMResourceGroupDisappears("azurerm_resource_group.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMResourceGroup_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMResourceGroup_withTags, ri) - postConfig := fmt.Sprintf(testAccAzureRMResourceGroup_withTagsUpdated, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMResourceGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMResourceGroupExists("azurerm_resource_group.test"), - resource.TestCheckResourceAttr( - "azurerm_resource_group.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_resource_group.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_resource_group.test", "tags.cost_center", "MSFT"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMResourceGroupExists("azurerm_resource_group.test"), - resource.TestCheckResourceAttr( - "azurerm_resource_group.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_resource_group.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func testCheckAzureRMResourceGroupExists(name 
string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - resourceGroup := rs.Primary.Attributes["name"] - - // Ensure resource group exists in API - conn := testAccProvider.Meta().(*ArmClient).resourceGroupClient - - resp, err := conn.Get(resourceGroup) - if err != nil { - return fmt.Errorf("Bad: Get on resourceGroupClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Virtual Network %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMResourceGroupDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - resourceGroup := rs.Primary.Attributes["name"] - - // Ensure resource group exists in API - conn := testAccProvider.Meta().(*ArmClient).resourceGroupClient - - _, error := conn.Delete(resourceGroup, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on resourceGroupClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMResourceGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).resourceGroupClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_resource_group" { - continue - } - - resourceGroup := rs.Primary.ID - - resp, err := conn.Get(resourceGroup) - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Resource Group still exists:\n%#v", resp.Properties) - } - } - - return nil -} - -var testAccAzureRMResourceGroup_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} 
-` - -var testAccAzureRMResourceGroup_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMResourceGroup_withTagsUpdated = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_route.go b/builtin/providers/azurerm/resource_arm_route.go deleted file mode 100644 index 0f0899ac1..000000000 --- a/builtin/providers/azurerm/resource_arm_route.go +++ /dev/null @@ -1,163 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmRoute() *schema.Resource { - return &schema.Resource{ - Create: resourceArmRouteCreate, - Read: resourceArmRouteRead, - Update: resourceArmRouteCreate, - Delete: resourceArmRouteDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "route_table_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "next_hop_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRouteTableNextHopType, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return strings.ToLower(old) == strings.ToLower(new) - }, - }, - - "next_hop_in_ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceArmRouteCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - routesClient := 
client.routesClient - - name := d.Get("name").(string) - rtName := d.Get("route_table_name").(string) - resGroup := d.Get("resource_group_name").(string) - - addressPrefix := d.Get("address_prefix").(string) - nextHopType := d.Get("next_hop_type").(string) - - armMutexKV.Lock(rtName) - defer armMutexKV.Unlock(rtName) - - properties := network.RoutePropertiesFormat{ - AddressPrefix: &addressPrefix, - NextHopType: network.RouteNextHopType(nextHopType), - } - - if v, ok := d.GetOk("next_hop_in_ip_address"); ok { - nextHopInIpAddress := v.(string) - properties.NextHopIPAddress = &nextHopInIpAddress - } - - route := network.Route{ - Name: &name, - RoutePropertiesFormat: &properties, - } - - _, error := routesClient.CreateOrUpdate(resGroup, rtName, name, route, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := routesClient.Get(resGroup, rtName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Route %s/%s (resource group %s) ID", rtName, name, resGroup) - } - d.SetId(*read.ID) - - return resourceArmRouteRead(d, meta) -} - -func resourceArmRouteRead(d *schema.ResourceData, meta interface{}) error { - routesClient := meta.(*ArmClient).routesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - rtName := id.Path["routeTables"] - routeName := id.Path["routes"] - - resp, err := routesClient.Get(resGroup, rtName, routeName) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Route %s: %s", routeName, err) - } - - d.Set("name", routeName) - d.Set("resource_group_name", resGroup) - d.Set("route_table_name", rtName) - d.Set("address_prefix", resp.RoutePropertiesFormat.AddressPrefix) - d.Set("next_hop_type", string(resp.RoutePropertiesFormat.NextHopType)) - - if resp.RoutePropertiesFormat.NextHopIPAddress != nil { - 
d.Set("next_hop_in_ip_address", resp.RoutePropertiesFormat.NextHopIPAddress) - } - - return nil -} - -func resourceArmRouteDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - routesClient := client.routesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - rtName := id.Path["routeTables"] - routeName := id.Path["routes"] - - armMutexKV.Lock(rtName) - defer armMutexKV.Unlock(rtName) - - _, error := routesClient.Delete(resGroup, rtName, routeName, make(chan struct{})) - err = <-error - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_route_table.go b/builtin/providers/azurerm/resource_arm_route_table.go deleted file mode 100644 index bca0c29dd..000000000 --- a/builtin/providers/azurerm/resource_arm_route_table.go +++ /dev/null @@ -1,262 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmRouteTable() *schema.Resource { - return &schema.Resource{ - Create: resourceArmRouteTableCreate, - Read: resourceArmRouteTableRead, - Update: resourceArmRouteTableCreate, - Delete: resourceArmRouteTableDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "route": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "next_hop_type": { - Type: schema.TypeString, - 
Required: true, - ValidateFunc: validateRouteTableNextHopType, - }, - - "next_hop_in_ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: resourceArmRouteTableRouteHash, - }, - - "subnets": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmRouteTableCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - routeTablesClient := client.routeTablesClient - - log.Printf("[INFO] preparing arguments for Azure ARM Route Table creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - - routeSet := network.RouteTable{ - Name: &name, - Location: &location, - Tags: expandTags(tags), - } - - if _, ok := d.GetOk("route"); ok { - routes, routeErr := expandAzureRmRouteTableRoutes(d) - if routeErr != nil { - return fmt.Errorf("Error Building list of Route Table Routes: %s", routeErr) - } - - if len(routes) > 0 { - routeSet.RouteTablePropertiesFormat = &network.RouteTablePropertiesFormat{ - Routes: &routes, - } - } - } - - _, error := routeTablesClient.CreateOrUpdate(resGroup, name, routeSet, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := routeTablesClient.Get(resGroup, name, "") - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Route Table %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmRouteTableRead(d, meta) -} - -func resourceArmRouteTableRead(d *schema.ResourceData, meta interface{}) error { - routeTablesClient := meta.(*ArmClient).routeTablesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["routeTables"] - - resp, err := 
routeTablesClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Route Table %s: %s", name, err) - } - - d.Set("name", name) - d.Set("resource_group_name", resGroup) - d.Set("location", resp.Location) - - if resp.RouteTablePropertiesFormat.Routes != nil { - d.Set("route", schema.NewSet(resourceArmRouteTableRouteHash, flattenAzureRmRouteTableRoutes(resp.RouteTablePropertiesFormat.Routes))) - } - - subnets := []string{} - if resp.RouteTablePropertiesFormat.Subnets != nil { - for _, subnet := range *resp.RouteTablePropertiesFormat.Subnets { - id := subnet.ID - subnets = append(subnets, *id) - } - } - d.Set("subnets", subnets) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmRouteTableDelete(d *schema.ResourceData, meta interface{}) error { - routeTablesClient := meta.(*ArmClient).routeTablesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["routeTables"] - - _, error := routeTablesClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func expandAzureRmRouteTableRoutes(d *schema.ResourceData) ([]network.Route, error) { - configs := d.Get("route").(*schema.Set).List() - routes := make([]network.Route, 0, len(configs)) - - for _, configRaw := range configs { - data := configRaw.(map[string]interface{}) - - address_prefix := data["address_prefix"].(string) - next_hop_type := data["next_hop_type"].(string) - - properties := network.RoutePropertiesFormat{ - AddressPrefix: &address_prefix, - NextHopType: network.RouteNextHopType(next_hop_type), - } - - if v := data["next_hop_in_ip_address"].(string); v != "" { - properties.NextHopIPAddress = &v - } - - name := data["name"].(string) - route := network.Route{ - Name: &name, - RoutePropertiesFormat: &properties, - } - - routes = append(routes, route) - } - - 
return routes, nil -} - -func flattenAzureRmRouteTableRoutes(routes *[]network.Route) []interface{} { - results := make([]interface{}, 0, len(*routes)) - - for _, route := range *routes { - r := make(map[string]interface{}) - r["name"] = *route.Name - r["address_prefix"] = *route.RoutePropertiesFormat.AddressPrefix - r["next_hop_type"] = string(route.RoutePropertiesFormat.NextHopType) - if route.RoutePropertiesFormat.NextHopIPAddress != nil { - r["next_hop_in_ip_address"] = *route.RoutePropertiesFormat.NextHopIPAddress - } - results = append(results, r) - } - - return results -} - -func resourceArmRouteTableRouteHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["address_prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["next_hop_type"].(string)))) - - return hashcode.String(buf.String()) -} - -func validateRouteTableNextHopType(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - hopTypes := map[string]bool{ - "virtualnetworkgateway": true, - "vnetlocal": true, - "internet": true, - "virtualappliance": true, - "none": true, - } - - if !hopTypes[value] { - errors = append(errors, fmt.Errorf("Route Table NextHopType Protocol can only be VirtualNetworkGateway, VnetLocal, Internet or VirtualAppliance")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_route_table_test.go b/builtin/providers/azurerm/resource_arm_route_table_test.go deleted file mode 100644 index 940d259ed..000000000 --- a/builtin/providers/azurerm/resource_arm_route_table_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMRouteTableNextHopType_validation(t 
*testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "VirtualNetworkGateway", - ErrCount: 0, - }, - { - Value: "VNETLocal", - ErrCount: 0, - }, - { - Value: "Internet", - ErrCount: 0, - }, - { - Value: "VirtualAppliance", - ErrCount: 0, - }, - { - Value: "None", - ErrCount: 0, - }, - { - Value: "VIRTUALNETWORKGATEWAY", - ErrCount: 0, - }, - { - Value: "virtualnetworkgateway", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateRouteTableNextHopType(tc.Value, "azurerm_route_table") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Route Table nextHopType to trigger a validation error") - } - } -} - -func TestAccAzureRMRouteTable_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRouteTable_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteTableDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRouteTable_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRouteTable_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteTableDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - testCheckAzureRMRouteTableDisappears("azurerm_route_table.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMRouteTable_withTags(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMRouteTable_withTags, ri, ri) - 
postConfig := fmt.Sprintf(testAccAzureRMRouteTable_withTagsUpdate, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteTableDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "tags.cost_center", "MSFT"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMRouteTable_multipleRoutes(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMRouteTable_basic, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMRouteTable_multipleRoutes, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteTableDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "route.#", "1"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteTableExists("azurerm_route_table.test"), - resource.TestCheckResourceAttr( - "azurerm_route_table.test", "route.#", "2"), - ), - }, - }, - }) -} - -func 
testCheckAzureRMRouteTableExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for route table: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).routeTablesClient - - resp, err := conn.Get(resourceGroup, name, "") - if err != nil { - return fmt.Errorf("Bad: Get on routeTablesClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Route Table %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMRouteTableDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for route table: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).routeTablesClient - - _, error := conn.Delete(resourceGroup, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on routeTablesClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMRouteTableDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).routeTablesClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_route_table" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - 
if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Route Table still exists:\n%#v", resp.RouteTablePropertiesFormat) - } - } - - return nil -} - -var testAccAzureRMRouteTable_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "route1" - address_prefix = "10.1.0.0/16" - next_hop_type = "vnetlocal" - } -} -` - -var testAccAzureRMRouteTable_multipleRoutes = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "route1" - address_prefix = "10.1.0.0/16" - next_hop_type = "vnetlocal" - } - - route { - name = "route2" - address_prefix = "10.2.0.0/16" - next_hop_type = "vnetlocal" - } -} -` - -var testAccAzureRMRouteTable_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "route1" - address_prefix = "10.1.0.0/16" - next_hop_type = "vnetlocal" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMRouteTable_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - route { - name = "route1" - address_prefix = "10.1.0.0/16" - next_hop_type = "vnetlocal" - } - - tags { - environment = "staging" - } -} -` diff --git 
a/builtin/providers/azurerm/resource_arm_route_test.go b/builtin/providers/azurerm/resource_arm_route_test.go deleted file mode 100644 index 68decbe1d..000000000 --- a/builtin/providers/azurerm/resource_arm_route_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMRoute_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRoute_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteExists("azurerm_route.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMRoute_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMRoute_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteExists("azurerm_route.test"), - testCheckAzureRMRouteDisappears("azurerm_route.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMRoute_multipleRoutes(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMRoute_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMRoute_multipleRoutes, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMRouteDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMRouteExists("azurerm_route.test"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMRouteExists("azurerm_route.test1"), - ), - }, - }, - }) -} - -func testCheckAzureRMRouteExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - rtName := rs.Primary.Attributes["route_table_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for route: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).routesClient - - resp, err := conn.Get(resourceGroup, rtName, name) - if err != nil { - return fmt.Errorf("Bad: Get on routesClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Route %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMRouteDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - rtName := rs.Primary.Attributes["route_table_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for route: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).routesClient - - _, error := conn.Delete(resourceGroup, rtName, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on routesClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMRouteDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).routesClient - - for _, rs := range 
s.RootModule().Resources { - if rs.Type != "azurerm_route" { - continue - } - - name := rs.Primary.Attributes["name"] - rtName := rs.Primary.Attributes["route_table_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, rtName, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Route still exists:\n%#v", resp.RoutePropertiesFormat) - } - } - - return nil -} - -var testAccAzureRMRoute_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_route" "test" { - name = "acctestroute%d" - resource_group_name = "${azurerm_resource_group.test.name}" - route_table_name = "${azurerm_route_table.test.name}" - - address_prefix = "10.1.0.0/16" - next_hop_type = "vnetlocal" -} -` - -var testAccAzureRMRoute_multipleRoutes = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_route_table" "test" { - name = "acctestrt%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_route" "test1" { - name = "acctestroute%d" - resource_group_name = "${azurerm_resource_group.test.name}" - route_table_name = "${azurerm_route_table.test.name}" - - address_prefix = "10.2.0.0/16" - next_hop_type = "none" -} -` diff --git a/builtin/providers/azurerm/resource_arm_search_service.go b/builtin/providers/azurerm/resource_arm_search_service.go deleted file mode 100644 index 8f451be14..000000000 --- a/builtin/providers/azurerm/resource_arm_search_service.go +++ /dev/null @@ -1,173 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/jen20/riviera/search" -) - -func resourceArmSearchService() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSearchServiceCreate, - Read: resourceArmSearchServiceRead, - Update: resourceArmSearchServiceCreate, - Delete: resourceArmSearchServiceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sku": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "replica_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "partition_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmSearchServiceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - command := &search.CreateOrUpdateSearchService{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - Tags: *expandedTags, - Sku: search.Sku{ - Name: d.Get("sku").(string), - }, - } - - if v, ok := d.GetOk("replica_count"); ok { - replica_count := v.(int) - command.ReplicaCount = &replica_count - } - - if v, ok := d.GetOk("partition_count"); ok { - partition_count := v.(int) - command.PartitionCount = &partition_count - } - - createRequest := rivieraClient.NewRequest() - createRequest.Command = command - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating Search Service: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating Search Service: %s", createResponse.Error) - } - - 
getSearchServiceCommand := &search.GetSearchService{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = getSearchServiceCommand - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading Search Service: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading Search Service: %s", readResponse.Error) - } - resp := readResponse.Parsed.(*search.GetSearchServiceResponse) - - log.Printf("[DEBUG] Waiting for Search Service (%s) to become available", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"provisioning"}, - Target: []string{"succeeded"}, - Refresh: azureStateRefreshFunc(*resp.ID, client, getSearchServiceCommand), - Timeout: 30 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Search Service (%s) to become available: %s", d.Get("name"), err) - } - - d.SetId(*resp.ID) - - return resourceArmSearchServiceRead(d, meta) -} - -func resourceArmSearchServiceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &search.GetSearchService{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading Search Service: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading Search Service %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading Search Service: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*search.GetSearchServiceResponse) - d.Set("sku", resp.Sku) - if resp.PartitionCount != nil { - d.Set("partition_count", resp.PartitionCount) - } - if resp.ReplicaCount != nil { - d.Set("replica_count", 
resp.ReplicaCount) - } - return nil -} - -func resourceArmSearchServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &search.DeleteSearchService{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting Search Service: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting Search Service: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_search_service_test.go b/builtin/providers/azurerm/resource_arm_search_service_test.go deleted file mode 100644 index 133b90396..000000000 --- a/builtin/providers/azurerm/resource_arm_search_service_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/search" -) - -func TestAccAzureRMSearchService_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSearchService_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSearchServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSearchServiceExists("azurerm_search_service.test"), - resource.TestCheckResourceAttr( - "azurerm_search_service.test", "tags.%", "2"), - ), - }, - }, - }) -} - -func TestAccAzureRMSearchService_updateReplicaCountAndTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMSearchService_basic, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMSearchService_updated, ri, ri) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSearchServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSearchServiceExists("azurerm_search_service.test"), - resource.TestCheckResourceAttr( - "azurerm_search_service.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_search_service.test", "replica_count", "1"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSearchServiceExists("azurerm_search_service.test"), - resource.TestCheckResourceAttr( - "azurerm_search_service.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_search_service.test", "replica_count", "2"), - ), - }, - }, - }) -} - -func testCheckAzureRMSearchServiceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &search.GetSearchService{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetSearchService: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetSearchService: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMSearchServiceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_search_service" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &search.GetSearchService{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetSearchService: %s", err) - } - - if 
readResponse.IsSuccessful() { - return fmt.Errorf("Bad: Search Service still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMSearchService_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_search_service" "test" { - name = "acctestsearchservice%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - sku = "standard" - - tags { - environment = "staging" - database = "test" - } -} -` - -var testAccAzureRMSearchService_updated = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_search_service" "test" { - name = "acctestsearchservice%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - sku = "standard" - replica_count = 2 - - tags { - environment = "production" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_servicebus_namespace.go b/builtin/providers/azurerm/resource_arm_servicebus_namespace.go deleted file mode 100644 index d9c93cf48..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_namespace.go +++ /dev/null @@ -1,212 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/arm/servicebus" - "github.com/hashicorp/terraform/helper/schema" -) - -// Default Authorization Rule/Policy created by Azure, used to populate the -// default connection strings and keys -var serviceBusNamespaceDefaultAuthorizationRule = "RootManageSharedAccessKey" - -func resourceArmServiceBusNamespace() *schema.Resource { - return &schema.Resource{ - Create: resourceArmServiceBusNamespaceCreate, - Read: resourceArmServiceBusNamespaceRead, - Update: resourceArmServiceBusNamespaceCreate, - Delete: resourceArmServiceBusNamespaceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - 
Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "sku": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateServiceBusNamespaceSku, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - "capacity": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 1, - ValidateFunc: validateServiceBusNamespaceCapacity, - }, - - "default_primary_connection_string": { - Type: schema.TypeString, - Computed: true, - }, - - "default_secondary_connection_string": { - Type: schema.TypeString, - Computed: true, - }, - - "default_primary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "default_secondary_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmServiceBusNamespaceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - namespaceClient := client.serviceBusNamespacesClient - log.Printf("[INFO] preparing arguments for Azure ARM ServiceBus Namespace creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - sku := d.Get("sku").(string) - capacity := int32(d.Get("capacity").(int)) - tags := d.Get("tags").(map[string]interface{}) - - parameters := servicebus.NamespaceCreateOrUpdateParameters{ - Location: &location, - Sku: &servicebus.Sku{ - Name: servicebus.SkuName(sku), - Tier: servicebus.SkuTier(sku), - Capacity: &capacity, - }, - Tags: expandTags(tags), - } - - _, error := namespaceClient.CreateOrUpdate(resGroup, name, parameters, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := namespaceClient.Get(resGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read ServiceBus Namespace %s (resource group 
%s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmServiceBusNamespaceRead(d, meta) -} - -func resourceArmServiceBusNamespaceRead(d *schema.ResourceData, meta interface{}) error { - namespaceClient := meta.(*ArmClient).serviceBusNamespacesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["namespaces"] - - resp, err := namespaceClient.Get(resGroup, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure ServiceBus Namespace %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("sku", strings.ToLower(string(resp.Sku.Name))) - d.Set("capacity", resp.Sku.Capacity) - - keys, err := namespaceClient.ListKeys(resGroup, name, serviceBusNamespaceDefaultAuthorizationRule) - if err != nil { - log.Printf("[ERROR] Unable to List default keys for Namespace %s: %+v", name, err) - } else { - d.Set("default_primary_connection_string", keys.PrimaryConnectionString) - d.Set("default_secondary_connection_string", keys.SecondaryConnectionString) - d.Set("default_primary_key", keys.PrimaryKey) - d.Set("default_secondary_key", keys.SecondaryKey) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmServiceBusNamespaceDelete(d *schema.ResourceData, meta interface{}) error { - namespaceClient := meta.(*ArmClient).serviceBusNamespacesClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["namespaces"] - - deleteResp, error := namespaceClient.Delete(resGroup, name, make(chan struct{})) - resp := <-deleteResp - err = <-error - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Error issuing Azure ARM delete request of ServiceBus Namespace'%s': %+v", name, err) - } 
- - return nil -} - -func validateServiceBusNamespaceSku(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - skus := map[string]bool{ - "basic": true, - "standard": true, - "premium": true, - } - - if !skus[value] { - errors = append(errors, fmt.Errorf("ServiceBus Namespace SKU can only be Basic, Standard or Premium")) - } - return -} - -func validateServiceBusNamespaceCapacity(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - capacities := map[int]bool{ - 1: true, - 2: true, - 4: true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("ServiceBus Namespace Capacity can only be 1, 2 or 4")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go b/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go deleted file mode 100644 index 0e0942e5a..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMServiceBusNamespaceCapacity_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 17, - ErrCount: 1, - }, - { - Value: 1, - ErrCount: 0, - }, - { - Value: 2, - ErrCount: 0, - }, - { - Value: 4, - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateServiceBusNamespaceCapacity(tc.Value, "azurerm_servicebus_namespace") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM ServiceBus Namespace Capacity to trigger a validation error") - } - } -} - -func TestAccAzureRMServiceBusNamespaceSku_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Basic", - ErrCount: 0, - }, - { - Value: "Standard", - ErrCount: 0, - 
}, - { - Value: "Premium", - ErrCount: 0, - }, - { - Value: "Random", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateServiceBusNamespaceSku(tc.Value, "azurerm_servicebus_namespace") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM ServiceBus Namespace Sku to trigger a validation error") - } - } -} - -func TestAccAzureRMServiceBusNamespace_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMServiceBusNamespace_readDefaultKeys(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"), - resource.TestMatchResourceAttr( - "azurerm_servicebus_namespace.test", "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr( - "azurerm_servicebus_namespace.test", "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")), - resource.TestMatchResourceAttr( - "azurerm_servicebus_namespace.test", "default_primary_key", regexp.MustCompile(".+")), - resource.TestMatchResourceAttr( - "azurerm_servicebus_namespace.test", "default_secondary_key", regexp.MustCompile(".+")), - ), - }, - }, 
- }) -} - -func TestAccAzureRMServiceBusNamespace_NonStandardCasing(t *testing.T) { - - ri := acctest.RandInt() - config := testAccAzureRMServiceBusNamespaceNonStandardCasing(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"), - ), - }, - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMServiceBusNamespaceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).serviceBusNamespacesClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_servicebus_namespace" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("ServiceBus Namespace still exists:\n%+v", resp) - } - } - - return nil -} - -func testCheckAzureRMServiceBusNamespaceExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - namespaceName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for Service Bus Namespace: %s", namespaceName) - } - - conn := testAccProvider.Meta().(*ArmClient).serviceBusNamespacesClient - - resp, err := conn.Get(resourceGroup, namespaceName) - if err != nil { - return fmt.Errorf("Bad: 
Get on serviceBusNamespacesClient: %+v", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Service Bus Namespace %q (resource group: %q) does not exist", namespaceName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMServiceBusNamespace_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "basic" -} -` - -func testAccAzureRMServiceBusNamespaceNonStandardCasing(ri int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "Basic" -} -`, ri, ri) -} diff --git a/builtin/providers/azurerm/resource_arm_servicebus_subscription.go b/builtin/providers/azurerm/resource_arm_servicebus_subscription.go deleted file mode 100644 index d0c4fc64e..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_subscription.go +++ /dev/null @@ -1,207 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/servicebus" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmServiceBusSubscription() *schema.Resource { - return &schema.Resource{ - Create: resourceArmServiceBusSubscriptionCreate, - Read: resourceArmServiceBusSubscriptionRead, - Update: resourceArmServiceBusSubscriptionCreate, - Delete: resourceArmServiceBusSubscriptionDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "namespace_name": { - Type: schema.TypeString, 
- Required: true, - ForceNew: true, - }, - - "topic_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "auto_delete_on_idle": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default_message_ttl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "lock_duration": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "dead_lettering_on_filter_evaluation_exceptions": { - Type: schema.TypeBool, - Optional: true, - }, - - "dead_lettering_on_message_expiration": { - Type: schema.TypeBool, - Optional: true, - }, - - "enable_batched_operations": { - Type: schema.TypeBool, - Optional: true, - }, - - "max_delivery_count": { - Type: schema.TypeInt, - Required: true, - }, - - "requires_session": { - Type: schema.TypeBool, - Optional: true, - // cannot be modified - ForceNew: true, - }, - }, - } -} - -func resourceArmServiceBusSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusSubscriptionsClient - log.Printf("[INFO] preparing arguments for Azure ARM ServiceBus Subscription creation.") - - name := d.Get("name").(string) - topicName := d.Get("topic_name").(string) - namespaceName := d.Get("namespace_name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - - parameters := servicebus.SubscriptionCreateOrUpdateParameters{ - Location: &location, - SubscriptionProperties: &servicebus.SubscriptionProperties{}, - } - - if autoDeleteOnIdle := d.Get("auto_delete_on_idle").(string); autoDeleteOnIdle != "" { - parameters.SubscriptionProperties.AutoDeleteOnIdle = &autoDeleteOnIdle - } - - if lockDuration := d.Get("lock_duration").(string); lockDuration != "" { - parameters.SubscriptionProperties.LockDuration = &lockDuration - } - - 
deadLetteringFilterExceptions := d.Get("dead_lettering_on_filter_evaluation_exceptions").(bool) - deadLetteringExpiration := d.Get("dead_lettering_on_message_expiration").(bool) - enableBatchedOps := d.Get("enable_batched_operations").(bool) - maxDeliveryCount := int32(d.Get("max_delivery_count").(int)) - requiresSession := d.Get("requires_session").(bool) - - parameters.SubscriptionProperties.DeadLetteringOnFilterEvaluationExceptions = &deadLetteringFilterExceptions - parameters.SubscriptionProperties.DeadLetteringOnMessageExpiration = &deadLetteringExpiration - parameters.SubscriptionProperties.EnableBatchedOperations = &enableBatchedOps - parameters.SubscriptionProperties.MaxDeliveryCount = &maxDeliveryCount - parameters.SubscriptionProperties.RequiresSession = &requiresSession - - _, err := client.CreateOrUpdate(resGroup, namespaceName, topicName, name, parameters) - if err != nil { - return err - } - - read, err := client.Get(resGroup, namespaceName, topicName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read ServiceBus Subscription %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmServiceBusSubscriptionRead(d, meta) -} - -func resourceArmServiceBusSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusSubscriptionsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - topicName := id.Path["topics"] - name := id.Path["subscriptions"] - - log.Printf("[INFO] subscriptionID: %s, args: %s, %s, %s, %s", d.Id(), resGroup, namespaceName, topicName, name) - - resp, err := client.Get(resGroup, namespaceName, topicName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure ServiceBus Subscription %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - - 
d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("namespace_name", namespaceName) - d.Set("topic_name", topicName) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - - props := resp.SubscriptionProperties - d.Set("auto_delete_on_idle", props.AutoDeleteOnIdle) - d.Set("default_message_ttl", props.DefaultMessageTimeToLive) - d.Set("lock_duration", props.LockDuration) - d.Set("dead_lettering_on_filter_evaluation_exceptions", props.DeadLetteringOnFilterEvaluationExceptions) - d.Set("dead_lettering_on_message_expiration", props.DeadLetteringOnMessageExpiration) - d.Set("enable_batched_operations", props.EnableBatchedOperations) - d.Set("max_delivery_count", int(*props.MaxDeliveryCount)) - d.Set("requires_session", props.RequiresSession) - - return nil -} - -func resourceArmServiceBusSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusSubscriptionsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - topicName := id.Path["topics"] - name := id.Path["subscriptions"] - - _, err = client.Delete(resGroup, namespaceName, topicName, name) - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_servicebus_subscription_test.go b/builtin/providers/azurerm/resource_arm_servicebus_subscription_test.go deleted file mode 100644 index a5efbc820..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_subscription_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMServiceBusSubscription_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusSubscription_basic, ri, ri, ri, ri) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusSubscriptionExists("azurerm_servicebus_subscription.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMServiceBusSubscription_update(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusSubscription_basic, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusSubscription_update, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusSubscriptionExists("azurerm_servicebus_subscription.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_subscription.test", "enable_batched_operations", "true"), - ), - }, - }, - }) -} - -func TestAccAzureRMServiceBusSubscription_updateRequiresSession(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusSubscription_basic, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusSubscription_updateRequiresSession, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusSubscriptionExists("azurerm_servicebus_subscription.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: 
resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_subscription.test", "requires_session", "true"), - ), - }, - }, - }) -} - -func testCheckAzureRMServiceBusSubscriptionDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ArmClient).serviceBusSubscriptionsClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_servicebus_subscription" { - continue - } - - name := rs.Primary.Attributes["name"] - topicName := rs.Primary.Attributes["topic_name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := client.Get(resourceGroup, namespaceName, topicName, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil - } - return err - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("ServiceBus Subscription still exists:\n%+v", resp.SubscriptionProperties) - } - } - - return nil -} - -func testCheckAzureRMServiceBusSubscriptionExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - subscriptionName := rs.Primary.Attributes["name"] - topicName := rs.Primary.Attributes["topic_name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for subscription: %s", topicName) - } - - client := testAccProvider.Meta().(*ArmClient).serviceBusSubscriptionsClient - - resp, err := client.Get(resourceGroup, namespaceName, topicName, subscriptionName) - if err != nil { - return fmt.Errorf("Bad: Get on serviceBusSubscriptionsClient: %+v", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: 
Subscription %q (resource group: %q) does not exist", subscriptionName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMServiceBusSubscription_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_servicebus_subscription" "test" { - name = "acctestservicebussubscription-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - topic_name = "${azurerm_servicebus_topic.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - max_delivery_count = 10 -} -` - -var testAccAzureRMServiceBusSubscription_update = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_servicebus_subscription" "test" { - name = "acctestservicebussubscription-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - topic_name = "${azurerm_servicebus_topic.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - max_delivery_count = 10 - enable_batched_operations = true -} -` - -var 
testAccAzureRMServiceBusSubscription_updateRequiresSession = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_servicebus_subscription" "test" { - name = "acctestservicebussubscription-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - topic_name = "${azurerm_servicebus_topic.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - max_delivery_count = 10 - requires_session = true -} -` diff --git a/builtin/providers/azurerm/resource_arm_servicebus_topic.go b/builtin/providers/azurerm/resource_arm_servicebus_topic.go deleted file mode 100644 index 982b8ea73..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_topic.go +++ /dev/null @@ -1,238 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/servicebus" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmServiceBusTopic() *schema.Resource { - return &schema.Resource{ - Create: resourceArmServiceBusTopicCreate, - Read: resourceArmServiceBusTopicRead, - Update: resourceArmServiceBusTopicCreate, - Delete: resourceArmServiceBusTopicDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "namespace_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - 
"resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "auto_delete_on_idle": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default_message_ttl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "duplicate_detection_history_time_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "enable_batched_operations": { - Type: schema.TypeBool, - Optional: true, - }, - - "enable_express": { - Type: schema.TypeBool, - Optional: true, - }, - - "enable_filtering_messages_before_publishing": { - Type: schema.TypeBool, - Optional: true, - }, - - "enable_partitioning": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "max_size_in_megabytes": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "requires_duplicate_detection": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "support_ordering": { - Type: schema.TypeBool, - Optional: true, - }, - }, - } -} - -func resourceArmServiceBusTopicCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusTopicsClient - log.Printf("[INFO] preparing arguments for Azure ARM ServiceBus Topic creation.") - - name := d.Get("name").(string) - namespaceName := d.Get("namespace_name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - - parameters := servicebus.TopicCreateOrUpdateParameters{ - Name: &name, - Location: &location, - TopicProperties: &servicebus.TopicProperties{}, - } - - if autoDeleteOnIdle := d.Get("auto_delete_on_idle").(string); autoDeleteOnIdle != "" { - parameters.TopicProperties.AutoDeleteOnIdle = &autoDeleteOnIdle - } - - if defaultTTL := d.Get("default_message_ttl").(string); defaultTTL != "" { - parameters.TopicProperties.DefaultMessageTimeToLive = &defaultTTL - } - - if duplicateWindow := d.Get("duplicate_detection_history_time_window").(string); 
duplicateWindow != "" { - parameters.TopicProperties.DuplicateDetectionHistoryTimeWindow = &duplicateWindow - } - - enableBatchedOps := d.Get("enable_batched_operations").(bool) - enableExpress := d.Get("enable_express").(bool) - enableFiltering := d.Get("enable_filtering_messages_before_publishing").(bool) - enablePartitioning := d.Get("enable_partitioning").(bool) - maxSize := int64(d.Get("max_size_in_megabytes").(int)) - requiresDuplicateDetection := d.Get("requires_duplicate_detection").(bool) - supportOrdering := d.Get("support_ordering").(bool) - - parameters.TopicProperties.EnableBatchedOperations = &enableBatchedOps - parameters.TopicProperties.EnableExpress = &enableExpress - parameters.TopicProperties.FilteringMessagesBeforePublishing = &enableFiltering - parameters.TopicProperties.EnablePartitioning = &enablePartitioning - parameters.TopicProperties.MaxSizeInMegabytes = &maxSize - parameters.TopicProperties.RequiresDuplicateDetection = &requiresDuplicateDetection - parameters.TopicProperties.SupportOrdering = &supportOrdering - - _, err := client.CreateOrUpdate(resGroup, namespaceName, name, parameters) - if err != nil { - return err - } - - read, err := client.Get(resGroup, namespaceName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read ServiceBus Topic %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmServiceBusTopicRead(d, meta) -} - -func resourceArmServiceBusTopicRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusTopicsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - name := id.Path["topics"] - - resp, err := client.Get(resGroup, namespaceName, name) - if err != nil { - return fmt.Errorf("Error making Read request on Azure ServiceBus Topic %s: %+v", name, err) - } - if resp.StatusCode == http.StatusNotFound { 
- d.SetId("") - return nil - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("namespace_name", namespaceName) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - - props := resp.TopicProperties - d.Set("auto_delete_on_idle", props.AutoDeleteOnIdle) - d.Set("default_message_ttl", props.DefaultMessageTimeToLive) - - if props.DuplicateDetectionHistoryTimeWindow != nil && *props.DuplicateDetectionHistoryTimeWindow != "" { - d.Set("duplicate_detection_history_time_window", props.DuplicateDetectionHistoryTimeWindow) - } - - d.Set("enable_batched_operations", props.EnableBatchedOperations) - d.Set("enable_express", props.EnableExpress) - d.Set("enable_filtering_messages_before_publishing", props.FilteringMessagesBeforePublishing) - d.Set("enable_partitioning", props.EnablePartitioning) - d.Set("requires_duplicate_detection", props.RequiresDuplicateDetection) - d.Set("support_ordering", props.SupportOrdering) - - maxSize := int(*props.MaxSizeInMegabytes) - - // if the topic is in a premium namespace and partitioning is enabled then the - // max size returned by the API will be 16 times greater than the value set - if *props.EnablePartitioning { - namespace, err := meta.(*ArmClient).serviceBusNamespacesClient.Get(resGroup, namespaceName) - if err != nil { - return err - } - - if namespace.Sku.Name != servicebus.Premium { - const partitionCount = 16 - maxSize = int(*props.MaxSizeInMegabytes / partitionCount) - } - } - - d.Set("max_size_in_megabytes", maxSize) - - return nil -} - -func resourceArmServiceBusTopicDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).serviceBusTopicsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - namespaceName := id.Path["namespaces"] - name := id.Path["topics"] - - _, err = client.Delete(resGroup, namespaceName, name) - - return err -} diff --git 
a/builtin/providers/azurerm/resource_arm_servicebus_topic_test.go b/builtin/providers/azurerm/resource_arm_servicebus_topic_test.go deleted file mode 100644 index 8ea9fd9dd..000000000 --- a/builtin/providers/azurerm/resource_arm_servicebus_topic_test.go +++ /dev/null @@ -1,336 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMServiceBusTopic_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMServiceBusTopic_update(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_update, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "enable_batched_operations", "true"), - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "enable_express", "true"), - ), - }, - }, - }) -} - -func 
TestAccAzureRMServiceBusTopic_enablePartitioningStandard(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enablePartitioningStandard, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "enable_partitioning", "true"), - // Ensure size is read back in it's original value and not the x16 value returned by Azure - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "max_size_in_megabytes", "5120"), - ), - }, - }, - }) -} - -func TestAccAzureRMServiceBusTopic_enablePartitioningPremium(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enablePartitioningPremium, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "enable_partitioning", "true"), - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "max_size_in_megabytes", "81920"), - ), - }, - }, - }) -} - 
-func TestAccAzureRMServiceBusTopic_enableDuplicateDetection(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enableDuplicateDetection, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMServiceBusTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "azurerm_servicebus_topic.test", "requires_duplicate_detection", "true"), - ), - }, - }, - }) -} - -func testCheckAzureRMServiceBusTopicDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ArmClient).serviceBusTopicsClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_servicebus_topic" { - continue - } - - name := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := client.Get(resourceGroup, namespaceName, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return nil - } - return err - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("ServiceBus Topic still exists:\n%+v", resp.TopicProperties) - } - } - - return nil -} - -func testCheckAzureRMServiceBusTopicExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - topicName := rs.Primary.Attributes["name"] - namespaceName := rs.Primary.Attributes["namespace_name"] - 
resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for topic: %s", topicName) - } - - client := testAccProvider.Meta().(*ArmClient).serviceBusTopicsClient - - resp, err := client.Get(resourceGroup, namespaceName, topicName) - if err != nil { - return fmt.Errorf("Bad: Get on serviceBusTopicsClient: %+v", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Topic %q (resource group: %q) does not exist", namespaceName, resourceGroup) - } - - return nil - } -} - -var testAccAzureRMServiceBusTopic_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} -` - -var testAccAzureRMServiceBusTopic_basicPremium = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "premium" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" -} -` - -var testAccAzureRMServiceBusTopic_update = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = 
"acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - enable_batched_operations = true - enable_express = true -} -` - -var testAccAzureRMServiceBusTopic_enablePartitioningStandard = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - enable_partitioning = true - max_size_in_megabytes = 5120 -} -` - -var testAccAzureRMServiceBusTopic_enablePartitioningPremium = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = "acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "premium" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - enable_partitioning = true - max_size_in_megabytes = 81920 -} -` - -var testAccAzureRMServiceBusTopic_enableDuplicateDetection = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_servicebus_namespace" "test" { - name = 
"acctestservicebusnamespace-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - sku = "standard" -} - -resource "azurerm_servicebus_topic" "test" { - name = "acctestservicebustopic-%d" - location = "West US" - namespace_name = "${azurerm_servicebus_namespace.test.name}" - resource_group_name = "${azurerm_resource_group.test.name}" - requires_duplicate_detection = true -} -` diff --git a/builtin/providers/azurerm/resource_arm_sql_database.go b/builtin/providers/azurerm/resource_arm_sql_database.go deleted file mode 100644 index 6959586b5..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_database.go +++ /dev/null @@ -1,259 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" - "github.com/jen20/riviera/sql" -) - -func resourceArmSqlDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSqlDatabaseCreate, - Read: resourceArmSqlDatabaseRead, - Update: resourceArmSqlDatabaseCreate, - Delete: resourceArmSqlDatabaseDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "server_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "create_mode": { - Type: schema.TypeString, - Optional: true, - Default: "Default", - }, - - "source_database_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "restore_point_in_time": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "edition": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateArmSqlDatabaseEdition, - }, - - "collation": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "max_size_bytes": { - Type: schema.TypeString, - Optional: 
true, - Computed: true, - }, - - "requested_service_objective_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "requested_service_objective_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "source_database_deletion_date": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "elastic_pool_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "encryption": { - Type: schema.TypeString, - Computed: true, - }, - - "creation_date": { - Type: schema.TypeString, - Computed: true, - }, - - "default_secondary_location": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - command := &sql.CreateOrUpdateDatabase{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ServerName: d.Get("server_name").(string), - Tags: *expandedTags, - CreateMode: azure.String(d.Get("create_mode").(string)), - } - - if v, ok := d.GetOk("source_database_id"); ok { - command.SourceDatabaseID = azure.String(v.(string)) - } - - if v, ok := d.GetOk("edition"); ok { - command.Edition = azure.String(v.(string)) - } - - if v, ok := d.GetOk("collation"); ok { - command.Collation = azure.String(v.(string)) - } - - if v, ok := d.GetOk("max_size_bytes"); ok { - command.MaxSizeBytes = azure.String(v.(string)) - } - - if v, ok := d.GetOk("source_database_deletion_date"); ok { - command.SourceDatabaseDeletionDate = azure.String(v.(string)) - } - - if v, ok := d.GetOk("requested_service_objective_id"); ok { - command.RequestedServiceObjectiveID = azure.String(v.(string)) - } - - if v, ok := d.GetOk("elastic_pool_name"); ok { - 
command.ElasticPoolName = azure.String(v.(string)) - } - - if v, ok := d.GetOk("requested_service_objective_name"); ok { - command.RequestedServiceObjectiveName = azure.String(v.(string)) - } - - createRequest := rivieraClient.NewRequest() - createRequest.Command = command - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating SQL Database: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating SQL Database: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &sql.GetDatabase{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ServerName: d.Get("server_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading SQL Database: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading SQL Database: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetDatabaseResponse) - d.SetId(*resp.ID) - - return resourceArmSqlDatabaseRead(d, meta) -} - -func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &sql.GetDatabase{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading SQL Database: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading SQL Database %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading SQL Database: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetDatabaseResponse) - - d.Set("name", resp.Name) - d.Set("creation_date", resp.CreationDate) - d.Set("default_secondary_location", resp.DefaultSecondaryLocation) - d.Set("elastic_pool_name", resp.ElasticPoolName) - - flattenAndSetTags(d, 
resp.Tags) - - return nil -} - -func resourceArmSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &sql.DeleteDatabase{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting SQL Database: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting SQL Database: %s", deleteResponse.Error) - } - - return nil -} - -func validateArmSqlDatabaseEdition(v interface{}, k string) (ws []string, errors []error) { - editions := map[string]bool{ - "Basic": true, - "Standard": true, - "Premium": true, - "DataWarehouse": true, - } - if !editions[v.(string)] { - errors = append(errors, fmt.Errorf("SQL Database Edition can only be Basic, Standard, Premium or DataWarehouse")) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_sql_database_test.go b/builtin/providers/azurerm/resource_arm_sql_database_test.go deleted file mode 100644 index fb306e04a..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_database_test.go +++ /dev/null @@ -1,334 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/sql" -) - -func TestResourceAzureRMSqlDatabaseEdition_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Random", - ErrCount: 1, - }, - { - Value: "Basic", - ErrCount: 0, - }, - { - Value: "Standard", - ErrCount: 0, - }, - { - Value: "Premium", - ErrCount: 0, - }, - { - Value: "DataWarehouse", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateArmSqlDatabaseEdition(tc.Value, "azurerm_sql_database") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM SQL 
Database edition to trigger a validation error") - } - } -} - -func TestAccAzureRMSqlDatabase_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlDatabase_basic, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMSqlDatabase_elasticPool(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlDatabase_elasticPool, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"), - resource.TestCheckResourceAttr("azurerm_sql_database.test", "elastic_pool_name", fmt.Sprintf("acctestep%d", ri)), - ), - }, - }, - }) -} - -func TestAccAzureRMSqlDatabase_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMSqlDatabase_withTags, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMSqlDatabase_withTagsUpdate, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"), - resource.TestCheckResourceAttr( - "azurerm_sql_database.test", "tags.%", "2"), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"), - 
resource.TestCheckResourceAttr( - "azurerm_sql_database.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func TestAccAzureRMSqlDatabase_datawarehouse(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlDatabase_datawarehouse, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMSqlDatabaseExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetDatabase{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetDatabase: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetDatabase: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMSqlDatabaseDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_sql_database" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetDatabase{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetDatabase: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: SQL Database still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMSqlDatabase_elasticPool = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} - -resource 
"azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_elasticpool" "test" { - name = "acctestep%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - server_name = "${azurerm_sql_server.test.name}" - edition = "Basic" - dtu = 50 - pool_size = 5000 -} - -resource "azurerm_sql_database" "test" { - name = "acctestdb%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - location = "West US" - edition = "${azurerm_sql_elasticpool.test.edition}" - collation = "SQL_Latin1_General_CP1_CI_AS" - max_size_bytes = "1073741824" - elastic_pool_name = "${azurerm_sql_elasticpool.test.name}" - requested_service_objective_name = "ElasticPool" -} -` - -var testAccAzureRMSqlDatabase_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_database" "test" { - name = "acctestdb%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - location = "West US" - edition = "Standard" - collation = "SQL_Latin1_General_CP1_CI_AS" - max_size_bytes = "1073741824" - requested_service_objective_name = "S0" -} -` - -var testAccAzureRMSqlDatabase_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - 
location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_database" "test" { - name = "acctestdb%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - location = "West US" - edition = "Standard" - collation = "SQL_Latin1_General_CP1_CI_AS" - max_size_bytes = "1073741824" - requested_service_objective_name = "S0" - - tags { - environment = "staging" - database = "test" - } -} -` - -var testAccAzureRMSqlDatabase_withTagsUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_database" "test" { - name = "acctestdb%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - location = "West US" - edition = "Standard" - collation = "SQL_Latin1_General_CP1_CI_AS" - max_size_bytes = "1073741824" - requested_service_objective_name = "S0" - - tags { - environment = "production" - } -} -` - -var testAccAzureRMSqlDatabase_datawarehouse = ` -resource "azurerm_resource_group" "test" { - name = "acctest_rg_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_database" "test" { - name = "acctestdb%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - location = "West US" - edition = "DataWarehouse" - 
collation = "SQL_Latin1_General_CP1_CI_AS" - requested_service_objective_name = "DW400" -} -` diff --git a/builtin/providers/azurerm/resource_arm_sql_elasticpool.go b/builtin/providers/azurerm/resource_arm_sql_elasticpool.go deleted file mode 100644 index 67f9f32c3..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_elasticpool.go +++ /dev/null @@ -1,221 +0,0 @@ -package azurerm - -import ( - "fmt" - "github.com/Azure/azure-sdk-for-go/arm/sql" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "log" - "net/http" - "time" -) - -func resourceArmSqlElasticPool() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSqlElasticPoolCreate, - Read: resourceArmSqlElasticPoolRead, - Update: resourceArmSqlElasticPoolCreate, - Delete: resourceArmSqlElasticPoolDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "server_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "edition": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateSqlElasticPoolEdition(), - }, - - "dtu": { - Type: schema.TypeInt, - Required: true, - }, - - "db_dtu_min": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "db_dtu_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "pool_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "creation_date": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmSqlElasticPoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - elasticPoolsClient := client.sqlElasticPoolsClient - 
- log.Printf("[INFO] preparing arguments for Azure ARM SQL ElasticPool creation.") - - name := d.Get("name").(string) - serverName := d.Get("server_name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - - elasticPool := sql.ElasticPool{ - Name: &name, - Location: &location, - ElasticPoolProperties: getArmSqlElasticPoolProperties(d), - Tags: expandTags(tags), - } - - _, error := elasticPoolsClient.CreateOrUpdate(resGroup, serverName, name, elasticPool, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := elasticPoolsClient.Get(resGroup, serverName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read SQL ElasticPool %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmSqlElasticPoolRead(d, meta) -} - -func resourceArmSqlElasticPoolRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - elasticPoolsClient := client.sqlElasticPoolsClient - - resGroup, serverName, name, err := parseArmSqlElasticPoolId(d.Id()) - if err != nil { - return err - } - - resp, err := elasticPoolsClient.Get(resGroup, serverName, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Sql Elastic Pool %s: %s", name, err) - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("server_name", serverName) - - elasticPool := resp.ElasticPoolProperties - - if elasticPool != nil { - d.Set("edition", string(elasticPool.Edition)) - d.Set("dtu", int(*elasticPool.Dtu)) - d.Set("db_dtu_min", int(*elasticPool.DatabaseDtuMin)) - d.Set("db_dtu_max", int(*elasticPool.DatabaseDtuMax)) - d.Set("pool_size", int(*elasticPool.StorageMB)) - - if elasticPool.CreationDate != nil { - 
d.Set("creation_date", elasticPool.CreationDate.Format(time.RFC3339)) - } - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmSqlElasticPoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - elasticPoolsClient := client.sqlElasticPoolsClient - - resGroup, serverName, name, err := parseArmSqlElasticPoolId(d.Id()) - if err != nil { - return err - } - - _, err = elasticPoolsClient.Delete(resGroup, serverName, name) - - return err -} - -func getArmSqlElasticPoolProperties(d *schema.ResourceData) *sql.ElasticPoolProperties { - edition := sql.ElasticPoolEdition(d.Get("edition").(string)) - dtu := int32(d.Get("dtu").(int)) - - props := &sql.ElasticPoolProperties{ - Edition: edition, - Dtu: &dtu, - } - - if databaseDtuMin, ok := d.GetOk("db_dtu_min"); ok { - databaseDtuMin := int32(databaseDtuMin.(int)) - props.DatabaseDtuMin = &databaseDtuMin - } - - if databaseDtuMax, ok := d.GetOk("db_dtu_max"); ok { - databaseDtuMax := int32(databaseDtuMax.(int)) - props.DatabaseDtuMax = &databaseDtuMax - } - - if poolSize, ok := d.GetOk("pool_size"); ok { - poolSize := int32(poolSize.(int)) - props.StorageMB = &poolSize - } - - return props -} - -func parseArmSqlElasticPoolId(sqlElasticPoolId string) (string, string, string, error) { - id, err := parseAzureResourceID(sqlElasticPoolId) - if err != nil { - return "", "", "", fmt.Errorf("[ERROR] Unable to parse SQL ElasticPool ID '%s': %+v", sqlElasticPoolId, err) - } - - return id.ResourceGroup, id.Path["servers"], id.Path["elasticPools"], nil -} - -func validateSqlElasticPoolEdition() schema.SchemaValidateFunc { - return validation.StringInSlice([]string{ - string(sql.ElasticPoolEditionBasic), - string(sql.ElasticPoolEditionStandard), - string(sql.ElasticPoolEditionPremium), - }, false) -} diff --git a/builtin/providers/azurerm/resource_arm_sql_elasticpool_test.go b/builtin/providers/azurerm/resource_arm_sql_elasticpool_test.go deleted file mode 100644 index 
991eb691b..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_elasticpool_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package azurerm - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "net/http" - "testing" -) - -func TestAccAzureRMSqlElasticPool_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMSqlElasticPool_resizeDtu(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri) - postConfig := fmt.Sprintf(testAccAzureRMSqlElasticPool_resizedDtu, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"), - resource.TestCheckResourceAttr( - "azurerm_sql_elasticpool.test", "dtu", "50"), - resource.TestCheckResourceAttr( - "azurerm_sql_elasticpool.test", "pool_size", "5000"), - ), - }, - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"), - resource.TestCheckResourceAttr( - "azurerm_sql_elasticpool.test", "dtu", "100"), - resource.TestCheckResourceAttr( - "azurerm_sql_elasticpool.test", "pool_size", "10000"), - ), - }, - }, - }) -} - -func testCheckAzureRMSqlElasticPoolExists(name string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - ressource, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - resourceGroup, serverName, name, err := parseArmSqlElasticPoolId(ressource.Primary.ID) - if err != nil { - return err - } - - conn := testAccProvider.Meta().(*ArmClient).sqlElasticPoolsClient - - resp, err := conn.Get(resourceGroup, serverName, name) - if err != nil { - return fmt.Errorf("Bad: Get on sqlElasticPoolsClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: SQL Elastic Pool %q on server: %q (resource group: %q) does not exist", name, serverName, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMSqlElasticPoolDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).sqlElasticPoolsClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_sql_elasticpool" { - continue - } - - name := rs.Primary.Attributes["name"] - serverName := rs.Primary.Attributes["server_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, serverName, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("SQL Elastic Pool still exists:\n%#v", resp.ElasticPoolProperties) - } - } - - return nil -} - -var testAccAzureRMSqlElasticPool_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctest-%[1]d" - location = "West US" -} - -resource "azurerm_sql_server" "test" { - name = "acctest%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "4dm1n157r470r" - administrator_login_password = "4-v3ry-53cr37-p455w0rd" -} - -resource "azurerm_sql_elasticpool" "test" { - name = "acctest-pool-%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - server_name = 
"${azurerm_sql_server.test.name}" - edition = "Basic" - dtu = 50 - pool_size = 5000 -} -` - -var testAccAzureRMSqlElasticPool_resizedDtu = ` -resource "azurerm_resource_group" "test" { - name = "acctest-%[1]d" - location = "West US" -} - -resource "azurerm_sql_server" "test" { - name = "acctest%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "4dm1n157r470r" - administrator_login_password = "4-v3ry-53cr37-p455w0rd" -} - -resource "azurerm_sql_elasticpool" "test" { - name = "acctest-pool-%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - server_name = "${azurerm_sql_server.test.name}" - edition = "Basic" - dtu = 100 - pool_size = 10000 -} -` diff --git a/builtin/providers/azurerm/resource_arm_sql_firewall_rule.go b/builtin/providers/azurerm/resource_arm_sql_firewall_rule.go deleted file mode 100644 index 1150fc6f0..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_firewall_rule.go +++ /dev/null @@ -1,147 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" - "github.com/jen20/riviera/sql" -) - -func resourceArmSqlFirewallRule() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSqlFirewallRuleCreate, - Read: resourceArmSqlFirewallRuleRead, - Update: resourceArmSqlFirewallRuleCreate, - Delete: resourceArmSqlFirewallRuleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "server_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "start_ip_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, 
- }, - - "end_ip_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceArmSqlFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - createRequest := rivieraClient.NewRequest() - createRequest.Command = &sql.CreateOrUpdateFirewallRule{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ServerName: d.Get("server_name").(string), - StartIPAddress: azure.String(d.Get("start_ip_address").(string)), - EndIPAddress: azure.String(d.Get("end_ip_address").(string)), - } - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating SQL Server Firewall Rule: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating SQL Server Firewall Rule: %s", createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &sql.GetFirewallRule{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - ServerName: d.Get("server_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading SQL Server Firewall Rule: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading SQL Server Firewall Rule: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetFirewallRuleResponse) - d.SetId(*resp.ID) - - return resourceArmSqlFirewallRuleRead(d, meta) -} - -func resourceArmSqlFirewallRuleRead(d *schema.ResourceData, meta interface{}) error { - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &sql.GetFirewallRule{} - - readResponse, err := readRequest.Execute() - if err != nil { - 
return fmt.Errorf("Error reading SQL Server Firewall Rule: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading SQL Server Firewall Rule %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading SQL Server Firewall Rule: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetFirewallRuleResponse) - - d.Set("resource_group_name", resGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("name", resp.Name) - d.Set("server_name", id.Path["servers"]) - d.Set("start_ip_address", resp.StartIPAddress) - d.Set("end_ip_address", resp.EndIPAddress) - - return nil -} - -func resourceArmSqlFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &sql.DeleteFirewallRule{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return fmt.Errorf("Error deleting SQL Server Firewall Rule: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting SQL Server Firewall Rule: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_sql_firewall_rule_test.go b/builtin/providers/azurerm/resource_arm_sql_firewall_rule_test.go deleted file mode 100644 index 9770a10e6..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_firewall_rule_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/sql" -) - -func TestAccAzureRMSqlFirewallRule_basic(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMSqlFirewallRule_basic, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMSqlFirewallRule_withUpdates, ri, ri, 
ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlFirewallRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlFirewallRuleExists("azurerm_sql_firewall_rule.test"), - resource.TestCheckResourceAttr("azurerm_sql_firewall_rule.test", "start_ip_address", "0.0.0.0"), - resource.TestCheckResourceAttr("azurerm_sql_firewall_rule.test", "end_ip_address", "255.255.255.255"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlFirewallRuleExists("azurerm_sql_firewall_rule.test"), - resource.TestCheckResourceAttr("azurerm_sql_firewall_rule.test", "start_ip_address", "10.0.17.62"), - resource.TestCheckResourceAttr("azurerm_sql_firewall_rule.test", "end_ip_address", "10.0.17.62"), - ), - }, - }, - }) -} - -func testCheckAzureRMSqlFirewallRuleExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetFirewallRule{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetFirewallRule: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetFirewallRule: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMSqlFirewallRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_sql_firewall_rule" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetFirewallRule{} - - readResponse, err := 
readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetFirewallRule: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: SQL Server Firewall Rule still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMSqlFirewallRule_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_firewall_rule" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - start_ip_address = "0.0.0.0" - end_ip_address = "255.255.255.255" -} -` - -var testAccAzureRMSqlFirewallRule_withUpdates = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} - -resource "azurerm_sql_firewall_rule" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - server_name = "${azurerm_sql_server.test.name}" - start_ip_address = "10.0.17.62" - end_ip_address = "10.0.17.62" -} -` diff --git a/builtin/providers/azurerm/resource_arm_sql_server.go b/builtin/providers/azurerm/resource_arm_sql_server.go deleted file mode 100644 index 8bf4aba0f..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_server.go +++ /dev/null @@ -1,161 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/jen20/riviera/azure" - 
"github.com/jen20/riviera/sql" -) - -func resourceArmSqlServer() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSqlServerCreate, - Read: resourceArmSqlServerRead, - Update: resourceArmSqlServerCreate, - Delete: resourceArmSqlServerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "version": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "administrator_login": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "administrator_login_password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - - "fully_qualified_domain_name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmSqlServerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - tags := d.Get("tags").(map[string]interface{}) - expandedTags := expandTags(tags) - - createRequest := rivieraClient.NewRequest() - createRequest.Command = &sql.CreateOrUpdateServer{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - AdministratorLogin: azure.String(d.Get("administrator_login").(string)), - AdministratorLoginPassword: azure.String(d.Get("administrator_login_password").(string)), - Version: azure.String(d.Get("version").(string)), - Tags: *expandedTags, - } - - createResponse, err := createRequest.Execute() - if err != nil { - return fmt.Errorf("Error creating SQL Server: %s", err) - } - if !createResponse.IsSuccessful() { - return fmt.Errorf("Error creating SQL Server: %s", 
createResponse.Error) - } - - readRequest := rivieraClient.NewRequest() - readRequest.Command = &sql.GetServer{ - Name: d.Get("name").(string), - ResourceGroupName: d.Get("resource_group_name").(string), - } - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading SQL Server: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Error reading SQL Server: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetServerResponse) - d.SetId(*resp.ID) - - return resourceArmSqlServerRead(d, meta) -} - -func resourceArmSqlServerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - - readRequest := rivieraClient.NewRequestForURI(d.Id()) - readRequest.Command = &sql.GetServer{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Error reading SQL Server: %s", err) - } - if !readResponse.IsSuccessful() { - log.Printf("[INFO] Error reading SQL Server %q - removing from state", d.Id()) - d.SetId("") - return fmt.Errorf("Error reading SQL Server: %s", readResponse.Error) - } - - resp := readResponse.Parsed.(*sql.GetServerResponse) - - d.Set("name", id.Path["servers"]) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("fully_qualified_domain_name", resp.FullyQualifiedDomainName) - d.Set("administrator_login", resp.AdministratorLogin) - d.Set("version", resp.Version) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmSqlServerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - rivieraClient := client.rivieraClient - - deleteRequest := rivieraClient.NewRequestForURI(d.Id()) - deleteRequest.Command = &sql.DeleteServer{} - - deleteResponse, err := deleteRequest.Execute() - if err != nil { - return 
fmt.Errorf("Error deleting SQL Server: %s", err) - } - if !deleteResponse.IsSuccessful() { - return fmt.Errorf("Error deleting SQL Server: %s", deleteResponse.Error) - } - - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_sql_server_test.go b/builtin/providers/azurerm/resource_arm_sql_server_test.go deleted file mode 100644 index fb0a44f10..000000000 --- a/builtin/providers/azurerm/resource_arm_sql_server_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/jen20/riviera/sql" -) - -func TestAccAzureRMSqlServer_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMSqlServer_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlServerExists("azurerm_sql_server.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMSqlServer_withTags(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMSqlServer_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMSqlServer_withTagsUpdated, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSqlServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSqlServerExists("azurerm_sql_server.test"), - resource.TestCheckResourceAttr( - "azurerm_sql_server.test", "tags.%", "2"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMSqlServerExists("azurerm_sql_server.test"), - resource.TestCheckResourceAttr( - "azurerm_sql_server.test", "tags.%", "1"), - ), - }, - }, - }) -} - -func testCheckAzureRMSqlServerExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetServer{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetServer: %s", err) - } - if !readResponse.IsSuccessful() { - return fmt.Errorf("Bad: GetServer: %s", readResponse.Error) - } - - return nil - } -} - -func testCheckAzureRMSqlServerDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).rivieraClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_sql_server" { - continue - } - - readRequest := conn.NewRequestForURI(rs.Primary.ID) - readRequest.Command = &sql.GetServer{} - - readResponse, err := readRequest.Execute() - if err != nil { - return fmt.Errorf("Bad: GetServer: %s", err) - } - - if readResponse.IsSuccessful() { - return fmt.Errorf("Bad: SQL Server still exists: %s", readResponse.Error) - } - } - - return nil -} - -var testAccAzureRMSqlServer_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" -} -` - -var testAccAzureRMSqlServer_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - 
resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" - - tags { - environment = "staging" - database = "test" - } -} -` - -var testAccAzureRMSqlServer_withTagsUpdated = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG_%d" - location = "West US" -} -resource "azurerm_sql_server" "test" { - name = "acctestsqlserver%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" - version = "12.0" - administrator_login = "mradministrator" - administrator_login_password = "thisIsDog11" - - tags { - environment = "production" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_storage_account.go b/builtin/providers/azurerm/resource_arm_storage_account.go deleted file mode 100644 index ffb4e14ee..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_account.go +++ /dev/null @@ -1,455 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "regexp" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" -) - -// The KeySource of storage.Encryption appears to require this value -// for Encryption services to work -var storageAccountEncryptionSource = "Microsoft.Storage" - -const blobStorageAccountDefaultAccessTier = "Hot" - -func resourceArmStorageAccount() *schema.Resource { - return &schema.Resource{ - Create: resourceArmStorageAccountCreate, - Read: resourceArmStorageAccountRead, - Update: resourceArmStorageAccountUpdate, - Delete: resourceArmStorageAccountDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: 
validateArmStorageAccountName, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: resourceAzurermResourceGroupNameDiffSuppress, - }, - - "location": locationSchema(), - - "account_kind": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - string(storage.Storage), - string(storage.BlobStorage), - }, true), - Default: string(storage.Storage), - }, - - "account_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateArmStorageAccountType, - DiffSuppressFunc: ignoreCaseDiffSuppressFunc, - }, - - // Only valid for BlobStorage accounts, defaults to "Hot" in create function - "access_tier": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - string(storage.Cool), - string(storage.Hot), - }, true), - }, - - "enable_blob_encryption": { - Type: schema.TypeBool, - Optional: true, - }, - - "primary_location": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_location": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_blob_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_blob_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_queue_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_queue_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_table_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_table_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - // NOTE: The API does not appear to expose a secondary file endpoint - "primary_file_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_access_key": { - Type: schema.TypeString, - Computed: true, - }, - - "secondary_access_key": { - Type: schema.TypeString, - Computed: true, - }, - - "tags": tagsSchema(), - }, - } 
-} - -func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - storageClient := client.storageServiceClient - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("name").(string) - accountKind := d.Get("account_kind").(string) - accountType := d.Get("account_type").(string) - - location := d.Get("location").(string) - tags := d.Get("tags").(map[string]interface{}) - enableBlobEncryption := d.Get("enable_blob_encryption").(bool) - - sku := storage.Sku{ - Name: storage.SkuName(accountType), - } - - opts := storage.AccountCreateParameters{ - Location: &location, - Sku: &sku, - Tags: expandTags(tags), - Kind: storage.Kind(accountKind), - AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{ - Encryption: &storage.Encryption{ - Services: &storage.EncryptionServices{ - Blob: &storage.EncryptionService{ - Enabled: &enableBlobEncryption, - }, - }, - KeySource: &storageAccountEncryptionSource, - }, - }, - } - - // AccessTier is only valid for BlobStorage accounts - if accountKind == string(storage.BlobStorage) { - accessTier, ok := d.GetOk("access_tier") - if !ok { - // default to "Hot" - accessTier = blobStorageAccountDefaultAccessTier - } - - opts.AccountPropertiesCreateParameters.AccessTier = storage.AccessTier(accessTier.(string)) - } - - // Create - _, createError := storageClient.Create(resourceGroupName, storageAccountName, opts, make(chan struct{})) - createErr := <-createError - - // The only way to get the ID back apparently is to read the resource again - read, err := storageClient.GetProperties(resourceGroupName, storageAccountName) - - // Set the ID right away if we have one - if err == nil && read.ID != nil { - log.Printf("[INFO] storage account %q ID: %q", storageAccountName, *read.ID) - d.SetId(*read.ID) - } - - // If we had a create error earlier then we return with that error now. 
- // We do this later here so that we can grab the ID above is possible. - if createErr != nil { - return fmt.Errorf( - "Error creating Azure Storage Account '%s': %s", - storageAccountName, createErr) - } - - // Check the read error now that we know it would exist without a create err - if err != nil { - return err - } - - // If we got no ID then the resource group doesn't yet exist - if read.ID == nil { - return fmt.Errorf("Cannot read Storage Account %s (resource group %s) ID", - storageAccountName, resourceGroupName) - } - - log.Printf("[DEBUG] Waiting for Storage Account (%s) to become available", storageAccountName) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: storageAccountStateRefreshFunc(client, resourceGroupName, storageAccountName), - Timeout: 30 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Storage Account (%s) to become available: %s", storageAccountName, err) - } - - return resourceArmStorageAccountRead(d, meta) -} - -// resourceArmStorageAccountUpdate is unusual in the ARM API where most resources have a combined -// and idempotent operation for CreateOrUpdate. In particular updating all of the parameters -// available requires a call to Update per parameter... 
-func resourceArmStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).storageServiceClient - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - storageAccountName := id.Path["storageAccounts"] - resourceGroupName := id.ResourceGroup - - d.Partial(true) - - if d.HasChange("account_type") { - accountType := d.Get("account_type").(string) - - sku := storage.Sku{ - Name: storage.SkuName(accountType), - } - - opts := storage.AccountUpdateParameters{ - Sku: &sku, - } - _, err := client.Update(resourceGroupName, storageAccountName, opts) - if err != nil { - return fmt.Errorf("Error updating Azure Storage Account type %q: %s", storageAccountName, err) - } - - d.SetPartial("account_type") - } - - if d.HasChange("access_tier") { - accessTier := d.Get("access_tier").(string) - - opts := storage.AccountUpdateParameters{ - AccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{ - AccessTier: storage.AccessTier(accessTier), - }, - } - _, err := client.Update(resourceGroupName, storageAccountName, opts) - if err != nil { - return fmt.Errorf("Error updating Azure Storage Account access_tier %q: %s", storageAccountName, err) - } - - d.SetPartial("access_tier") - } - - if d.HasChange("tags") { - tags := d.Get("tags").(map[string]interface{}) - - opts := storage.AccountUpdateParameters{ - Tags: expandTags(tags), - } - _, err := client.Update(resourceGroupName, storageAccountName, opts) - if err != nil { - return fmt.Errorf("Error updating Azure Storage Account tags %q: %s", storageAccountName, err) - } - - d.SetPartial("tags") - } - - if d.HasChange("enable_blob_encryption") { - enableBlobEncryption := d.Get("enable_blob_encryption").(bool) - - opts := storage.AccountUpdateParameters{ - AccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{ - Encryption: &storage.Encryption{ - Services: &storage.EncryptionServices{ - Blob: &storage.EncryptionService{ - Enabled: 
&enableBlobEncryption, - }, - }, - KeySource: &storageAccountEncryptionSource, - }, - }, - } - _, err := client.Update(resourceGroupName, storageAccountName, opts) - if err != nil { - return fmt.Errorf("Error updating Azure Storage Account enable_blob_encryption %q: %s", storageAccountName, err) - } - - d.SetPartial("enable_blob_encryption") - } - - d.Partial(false) - return nil -} - -func resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).storageServiceClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["storageAccounts"] - resGroup := id.ResourceGroup - - resp, err := client.GetProperties(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading the state of AzureRM Storage Account %q: %s", name, err) - } - - keys, err := client.ListKeys(resGroup, name) - if err != nil { - return err - } - - accessKeys := *keys.Keys - d.Set("resource_group_name", resGroup) - d.Set("primary_access_key", accessKeys[0].Value) - d.Set("secondary_access_key", accessKeys[1].Value) - d.Set("location", resp.Location) - d.Set("account_kind", resp.Kind) - d.Set("account_type", resp.Sku.Name) - d.Set("primary_location", resp.AccountProperties.PrimaryLocation) - d.Set("secondary_location", resp.AccountProperties.SecondaryLocation) - - if resp.AccountProperties.AccessTier != "" { - d.Set("access_tier", resp.AccountProperties.AccessTier) - } - - if resp.AccountProperties.PrimaryEndpoints != nil { - d.Set("primary_blob_endpoint", resp.AccountProperties.PrimaryEndpoints.Blob) - d.Set("primary_queue_endpoint", resp.AccountProperties.PrimaryEndpoints.Queue) - d.Set("primary_table_endpoint", resp.AccountProperties.PrimaryEndpoints.Table) - d.Set("primary_file_endpoint", resp.AccountProperties.PrimaryEndpoints.File) - } - - if resp.AccountProperties.SecondaryEndpoints != nil { - if 
resp.AccountProperties.SecondaryEndpoints.Blob != nil { - d.Set("secondary_blob_endpoint", resp.AccountProperties.SecondaryEndpoints.Blob) - } else { - d.Set("secondary_blob_endpoint", "") - } - if resp.AccountProperties.SecondaryEndpoints.Queue != nil { - d.Set("secondary_queue_endpoint", resp.AccountProperties.SecondaryEndpoints.Queue) - } else { - d.Set("secondary_queue_endpoint", "") - } - if resp.AccountProperties.SecondaryEndpoints.Table != nil { - d.Set("secondary_table_endpoint", resp.AccountProperties.SecondaryEndpoints.Table) - } else { - d.Set("secondary_table_endpoint", "") - } - } - - if resp.AccountProperties.Encryption != nil { - if resp.AccountProperties.Encryption.Services.Blob != nil { - d.Set("enable_blob_encryption", resp.AccountProperties.Encryption.Services.Blob.Enabled) - } - } - - d.Set("name", resp.Name) - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmStorageAccountDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).storageServiceClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - name := id.Path["storageAccounts"] - resGroup := id.ResourceGroup - - _, err = client.Delete(resGroup, name) - if err != nil { - return fmt.Errorf("Error issuing AzureRM delete request for storage account %q: %s", name, err) - } - - return nil -} - -func validateArmStorageAccountName(v interface{}, k string) (ws []string, es []error) { - input := v.(string) - - if !regexp.MustCompile(`\A([a-z0-9]{3,24})\z`).MatchString(input) { - es = append(es, fmt.Errorf("name can only consist of lowercase letters and numbers, and must be between 3 and 24 characters long")) - } - - return -} - -func validateArmStorageAccountType(v interface{}, k string) (ws []string, es []error) { - validAccountTypes := []string{"standard_lrs", "standard_zrs", - "standard_grs", "standard_ragrs", "premium_lrs"} - - input := strings.ToLower(v.(string)) - - for _, valid := range validAccountTypes { - if 
valid == input { - return - } - } - - es = append(es, fmt.Errorf("Invalid storage account type %q", input)) - return -} - -func storageAccountStateRefreshFunc(client *ArmClient, resourceGroupName string, storageAccountName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.storageServiceClient.GetProperties(resourceGroupName, storageAccountName) - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in storageAccountStateRefreshFunc to Azure ARM for Storage Account '%s' (RG: '%s'): %s", storageAccountName, resourceGroupName, err) - } - - return res, string(res.AccountProperties.ProvisioningState), nil - } -} diff --git a/builtin/providers/azurerm/resource_arm_storage_account_test.go b/builtin/providers/azurerm/resource_arm_storage_account_test.go deleted file mode 100644 index 6599a7058..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_account_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestValidateArmStorageAccountType(t *testing.T) { - testCases := []struct { - input string - shouldError bool - }{ - {"standard_lrs", false}, - {"invalid", true}, - } - - for _, test := range testCases { - _, es := validateArmStorageAccountType(test.input, "account_type") - - if test.shouldError && len(es) == 0 { - t.Fatalf("Expected validating account_type %q to fail", test.input) - } - } -} - -func TestValidateArmStorageAccountName(t *testing.T) { - testCases := []struct { - input string - shouldError bool - }{ - {"ab", true}, - {"ABC", true}, - {"abc", false}, - {"123456789012345678901234", false}, - {"1234567890123456789012345", true}, - {"abc12345", false}, - } - - for _, test := range testCases { - _, es := validateArmStorageAccountName(test.input, "name") - - if 
test.shouldError && len(es) == 0 { - t.Fatalf("Expected validating name %q to fail", test.input) - } - } -} - -func TestAccAzureRMStorageAccount_basic(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs) - postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_update, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "account_type", "Standard_LRS"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.%", "1"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.environment", "production"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "account_type", "Standard_GRS"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.%", "1"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageAccount_disappears(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "account_type", "Standard_LRS"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.%", "1"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "tags.environment", "production"), - testCheckAzureRMStorageAccountDisappears("azurerm_storage_account.testsa"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMStorageAccount_blobEncryption(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobEncryption, ri, rs) - postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobEncryptionDisabled, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "enable_blob_encryption", "true"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "enable_blob_encryption", "false"), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageAccount_blobStorageWithUpdate(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobStorage, ri, rs) - postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobStorageUpdate, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - 
Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "account_kind", "BlobStorage"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "access_tier", "Hot"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - resource.TestCheckResourceAttr("azurerm_storage_account.testsa", "access_tier", "Cool"), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageAccount_NonStandardCasing(t *testing.T) { - ri := acctest.RandInt() - rs := acctest.RandString(4) - preConfig := testAccAzureRMStorageAccountNonStandardCasing(ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageAccountDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"), - ), - }, - - resource.TestStep{ - Config: preConfig, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testCheckAzureRMStorageAccountExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - storageAccount := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - // Ensure resource group exists in API - conn := testAccProvider.Meta().(*ArmClient).storageServiceClient - - resp, err := conn.GetProperties(resourceGroup, storageAccount) - if err != nil { - return fmt.Errorf("Bad: Get on storageServiceClient: %s", err) - } - 
- if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: StorageAccount %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMStorageAccountDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - storageAccount := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - // Ensure resource group exists in API - conn := testAccProvider.Meta().(*ArmClient).storageServiceClient - - _, err := conn.Delete(resourceGroup, storageAccount) - if err != nil { - return fmt.Errorf("Bad: Delete on storageServiceClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMStorageAccountDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).storageServiceClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_account" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.GetProperties(resourceGroup, name) - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Storage Account still exists:\n%#v", resp.AccountProperties) - } - } - - return nil -} - -var testAccAzureRMStorageAccount_basic = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "westus" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "production" - } -}` - -var testAccAzureRMStorageAccount_update = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - 
location = "westus" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "westus" - account_type = "Standard_GRS" - - tags { - environment = "staging" - } -}` - -var testAccAzureRMStorageAccount_blobEncryption = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "westus" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "westus" - account_type = "Standard_LRS" - enable_blob_encryption = true - - tags { - environment = "production" - } -}` - -var testAccAzureRMStorageAccount_blobEncryptionDisabled = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "westus" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "westus" - account_type = "Standard_LRS" - enable_blob_encryption = false - - tags { - environment = "production" - } -}` - -// BlobStorage accounts are not available in WestUS -var testAccAzureRMStorageAccount_blobStorage = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "northeurope" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "northeurope" - account_kind = "BlobStorage" - account_type = "Standard_LRS" - - tags { - environment = "production" - } -}` - -var testAccAzureRMStorageAccount_blobStorageUpdate = ` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "northeurope" -} - -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - - location = "northeurope" - 
account_kind = "BlobStorage" - account_type = "Standard_LRS" - access_tier = "Cool" - - tags { - environment = "production" - } -}` - -func testAccAzureRMStorageAccountNonStandardCasing(ri int, rs string) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "testrg" { - name = "testAccAzureRMSA-%d" - location = "westus" -} -resource "azurerm_storage_account" "testsa" { - name = "unlikely23exst2acct%s" - resource_group_name = "${azurerm_resource_group.testrg.name}" - location = "westus" - account_type = "standard_LRS" - tags { - environment = "production" - } -}`, ri, rs) -} diff --git a/builtin/providers/azurerm/resource_arm_storage_blob.go b/builtin/providers/azurerm/resource_arm_storage_blob.go deleted file mode 100644 index 8ee2ea6c2..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_blob.go +++ /dev/null @@ -1,634 +0,0 @@ -package azurerm - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "log" - "os" - "runtime" - "strings" - "sync" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmStorageBlob() *schema.Resource { - return &schema.Resource{ - Create: resourceArmStorageBlobCreate, - Read: resourceArmStorageBlobRead, - Exists: resourceArmStorageBlobExists, - Delete: resourceArmStorageBlobDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_account_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_container_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateArmStorageBlobType, - }, - "size": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 0, - ValidateFunc: validateArmStorageBlobSize, 
- }, - "source": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source_uri"}, - }, - "source_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source"}, - }, - "url": { - Type: schema.TypeString, - Computed: true, - }, - "parallelism": { - Type: schema.TypeInt, - Optional: true, - Default: 8, - ForceNew: true, - ValidateFunc: validateArmStorageBlobParallelism, - }, - "attempts": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ForceNew: true, - ValidateFunc: validateArmStorageBlobAttempts, - }, - }, - } -} - -func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if value <= 0 { - errors = append(errors, fmt.Errorf("Blob Parallelism %q is invalid, must be greater than 0", value)) - } - - return -} - -func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if value <= 0 { - errors = append(errors, fmt.Errorf("Blob Attempts %q is invalid, must be greater than 0", value)) - } - - return -} - -func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if value%512 != 0 { - errors = append(errors, fmt.Errorf("Blob Size %q is invalid, must be a multiple of 512", value)) - } - - return -} - -func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - validTypes := map[string]struct{}{ - "block": struct{}{}, - "page": struct{}{}, - } - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page")) - } - return -} - -func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := 
d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Storage Account %q Not Found", storageAccountName) - } - - name := d.Get("name").(string) - blobType := d.Get("type").(string) - cont := d.Get("storage_container_name").(string) - sourceUri := d.Get("source_uri").(string) - - log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName) - if sourceUri != "" { - options := &storage.CopyOptions{} - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - err := blob.Copy(sourceUri, options) - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - } else { - switch strings.ToLower(blobType) { - case "block": - options := &storage.PutBlobOptions{} - container := blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - err := blob.CreateBlockBlob(options) - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - - source := d.Get("source").(string) - if source != "" { - parallelism := d.Get("parallelism").(int) - attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - } - case "page": - source := d.Get("source").(string) - if source != "" { - parallelism := d.Get("parallelism").(int) - attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - } else { - size := int64(d.Get("size").(int)) - options := &storage.PutBlobOptions{} - - container := 
blobClient.GetContainerReference(cont) - blob := container.GetBlobReference(name) - blob.Properties.ContentLength = size - err := blob.PutPageBlob(options) - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - } - } - } - - d.SetId(name) - return resourceArmStorageBlobRead(d, meta) -} - -type resourceArmStorageBlobPage struct { - offset int64 - section *io.SectionReader -} - -func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error { - workerCount := parallelism * runtime.NumCPU() - - file, err := os.Open(source) - if err != nil { - return fmt.Errorf("Error opening source file for upload %q: %s", source, err) - } - defer file.Close() - - blobSize, pageList, err := resourceArmStorageBlobPageSplit(file) - if err != nil { - return fmt.Errorf("Error splitting source file %q into pages: %s", source, err) - } - - options := &storage.PutBlobOptions{} - containerRef := client.GetContainerReference(container) - blob := containerRef.GetBlobReference(name) - blob.Properties.ContentLength = blobSize - err = blob.PutPageBlob(options) - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - - pages := make(chan resourceArmStorageBlobPage, len(pageList)) - errors := make(chan error, len(pageList)) - wg := &sync.WaitGroup{} - wg.Add(len(pageList)) - - total := int64(0) - for _, page := range pageList { - total += page.section.Size() - pages <- page - } - close(pages) - - for i := 0; i < workerCount; i++ { - go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{ - container: container, - name: name, - source: source, - blobSize: blobSize, - client: client, - pages: pages, - errors: errors, - wg: wg, - attempts: attempts, - }) - } - - wg.Wait() - - if len(errors) > 0 { - return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors) - } - - return nil -} - -func 
resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) { - const ( - minPageSize int64 = 4 * 1024 - maxPageSize int64 = 4 * 1024 * 1024 - ) - - info, err := file.Stat() - if err != nil { - return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err) - } - - blobSize := info.Size() - if info.Size()%minPageSize != 0 { - blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize)) - } - - emptyPage := make([]byte, minPageSize) - - type byteRange struct { - offset int64 - length int64 - } - - var nonEmptyRanges []byteRange - var currentRange byteRange - for i := int64(0); i < blobSize; i += minPageSize { - pageBuf := make([]byte, minPageSize) - _, err = file.ReadAt(pageBuf, i) - if err != nil && err != io.EOF { - return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err) - } - - if bytes.Equal(pageBuf, emptyPage) { - if currentRange.length != 0 { - nonEmptyRanges = append(nonEmptyRanges, currentRange) - } - currentRange = byteRange{ - offset: i + minPageSize, - } - } else { - currentRange.length += minPageSize - if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) { - nonEmptyRanges = append(nonEmptyRanges, currentRange) - currentRange = byteRange{ - offset: i + minPageSize, - } - } - } - } - - var pages []resourceArmStorageBlobPage - for _, nonEmptyRange := range nonEmptyRanges { - pages = append(pages, resourceArmStorageBlobPage{ - offset: nonEmptyRange.offset, - section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length), - }) - } - - return info.Size(), pages, nil -} - -type resourceArmStorageBlobPageUploadContext struct { - container string - name string - source string - blobSize int64 - client *storage.BlobStorageClient - pages chan resourceArmStorageBlobPage - errors chan error - wg *sync.WaitGroup - attempts int -} - -func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) { - for page := 
range ctx.pages { - start := page.offset - end := page.offset + page.section.Size() - 1 - if end > ctx.blobSize-1 { - end = ctx.blobSize - 1 - } - size := end - start + 1 - - chunk := make([]byte, size) - _, err := page.section.Read(chunk) - if err != nil && err != io.EOF { - ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err) - ctx.wg.Done() - continue - } - - for x := 0; x < ctx.attempts; x++ { - container := ctx.client.GetContainerReference(ctx.container) - blob := container.GetBlobReference(ctx.name) - blobRange := storage.BlobRange{ - Start: uint64(start), - End: uint64(end), - } - options := &storage.PutPageOptions{} - reader := bytes.NewReader(chunk) - err = blob.WriteRange(blobRange, reader, options) - if err == nil { - break - } - } - if err != nil { - ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err) - ctx.wg.Done() - continue - } - - ctx.wg.Done() - } -} - -type resourceArmStorageBlobBlock struct { - section *io.SectionReader - id string -} - -func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error { - workerCount := parallelism * runtime.NumCPU() - - file, err := os.Open(source) - if err != nil { - return fmt.Errorf("Error opening source file for upload %q: %s", source, err) - } - defer file.Close() - - blockList, parts, err := resourceArmStorageBlobBlockSplit(file) - if err != nil { - return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err) - } - - wg := &sync.WaitGroup{} - blocks := make(chan resourceArmStorageBlobBlock, len(parts)) - errors := make(chan error, len(parts)) - - wg.Add(len(parts)) - for _, p := range parts { - blocks <- p - } - close(blocks) - - for i := 0; i < workerCount; i++ { - go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{ - client: client, - source: source, - 
container: container, - name: name, - blocks: blocks, - errors: errors, - wg: wg, - attempts: attempts, - }) - } - - wg.Wait() - - if len(errors) > 0 { - return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors) - } - - containerReference := client.GetContainerReference(container) - blobReference := containerReference.GetBlobReference(name) - options := &storage.PutBlockListOptions{} - err = blobReference.PutBlockList(blockList, options) - if err != nil { - return fmt.Errorf("Error updating block list for source file %q: %s", source, err) - } - - return nil -} - -func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) { - const ( - idSize = 64 - blockSize int64 = 4 * 1024 * 1024 - ) - var parts []resourceArmStorageBlobBlock - var blockList []storage.Block - - info, err := file.Stat() - if err != nil { - return nil, nil, fmt.Errorf("Error stating source file %q: %s", file.Name(), err) - } - - for i := int64(0); i < info.Size(); i = i + blockSize { - entropy := make([]byte, idSize) - _, err = rand.Read(entropy) - if err != nil { - return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err) - } - - sectionSize := blockSize - remainder := info.Size() - i - if remainder < blockSize { - sectionSize = remainder - } - - block := storage.Block{ - ID: base64.StdEncoding.EncodeToString(entropy), - Status: storage.BlockStatusUncommitted, - } - - blockList = append(blockList, block) - - parts = append(parts, resourceArmStorageBlobBlock{ - id: block.ID, - section: io.NewSectionReader(file, i, sectionSize), - }) - } - - return blockList, parts, nil -} - -type resourceArmStorageBlobBlockUploadContext struct { - client *storage.BlobStorageClient - container string - name string - source string - attempts int - blocks chan resourceArmStorageBlobBlock - errors chan error - wg *sync.WaitGroup -} - -func resourceArmStorageBlobBlockUploadWorker(ctx 
resourceArmStorageBlobBlockUploadContext) { - for block := range ctx.blocks { - buffer := make([]byte, block.section.Size()) - - _, err := block.section.Read(buffer) - if err != nil { - ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err) - ctx.wg.Done() - continue - } - - for i := 0; i < ctx.attempts; i++ { - container := ctx.client.GetContainerReference(ctx.container) - blob := container.GetBlobReference(ctx.name) - options := &storage.PutBlockOptions{} - err = blob.PutBlock(block.id, buffer, options) - if err == nil { - break - } - } - if err != nil { - ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err) - ctx.wg.Done() - continue - } - - ctx.wg.Done() - } -} - -func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id()) - d.SetId("") - return nil - } - - exists, err := resourceArmStorageBlobExists(d, meta) - if err != nil { - return err - } - - if !exists { - // Exists already removed this from state - return nil - } - - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - url := blob.GetURL() - if url == "" { - log.Printf("[INFO] URL for %q is empty", name) - } - d.Set("url", url) - - return nil -} - -func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - - resourceGroupName := 
d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return false, err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id()) - d.SetId("") - return false, nil - } - - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - log.Printf("[INFO] Checking for existence of storage blob %q.", name) - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - exists, err := blob.Exists() - if err != nil { - return false, fmt.Errorf("error testing existence of storage blob %q: %s", name, err) - } - - if !exists { - log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name) - d.SetId("") - } - - return exists, nil -} - -func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", storageAccountName) - return nil - } - - name := d.Get("name").(string) - storageContainerName := d.Get("storage_container_name").(string) - - log.Printf("[INFO] Deleting storage blob %q", name) - options := &storage.DeleteBlobOptions{} - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - _, err = blob.DeleteIfExists(options) - if err != nil { - return fmt.Errorf("Error deleting storage blob %q: %s", name, err) 
- } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_storage_blob_test.go b/builtin/providers/azurerm/resource_arm_storage_blob_test.go deleted file mode 100644 index 74fb1f5bf..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_blob_test.go +++ /dev/null @@ -1,654 +0,0 @@ -package azurerm - -import ( - "crypto/rand" - "fmt" - "io" - "io/ioutil" - "testing" - - "strings" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMStorageBlobType_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "unknown", - ErrCount: 1, - }, - { - Value: "page", - ErrCount: 0, - }, - { - Value: "block", - ErrCount: 0, - }, - { - Value: "BLOCK", - ErrCount: 0, - }, - { - Value: "Block", - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateArmStorageBlobType(tc.Value, "azurerm_storage_blob") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Storage Blob type to trigger a validation error") - } - } -} - -func TestResourceAzureRMStorageBlobSize_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 511, - ErrCount: 1, - }, - { - Value: 512, - ErrCount: 0, - }, - { - Value: 1024, - ErrCount: 0, - }, - { - Value: 2048, - ErrCount: 0, - }, - { - Value: 5120, - ErrCount: 0, - }, - } - - for _, tc := range cases { - _, errors := validateArmStorageBlobSize(tc.Value, "azurerm_storage_blob") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Storage Blob size to trigger a validation error") - } - } -} - -func TestResourceAzureRMStorageBlobParallelism_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 1, - ErrCount: 0, - }, - { - Value: 0, - ErrCount: 1, - }, - { - Value: -1, - ErrCount: 1, - }, - 
} - - for _, tc := range cases { - _, errors := validateArmStorageBlobParallelism(tc.Value, "azurerm_storage_blob") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Storage Blob parallelism to trigger a validation error") - } - } -} - -func TestResourceAzureRMStorageBlobAttempts_validation(t *testing.T) { - cases := []struct { - Value int - ErrCount int - }{ - { - Value: 1, - ErrCount: 0, - }, - { - Value: 0, - ErrCount: 1, - }, - { - Value: -1, - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateArmStorageBlobAttempts(tc.Value, "azurerm_storage_blob") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Azure RM Storage Blob attempts to trigger a validation error") - } - } -} - -func TestAccAzureRMStorageBlob_basic(t *testing.T) { - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageBlob_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageBlobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobExists("azurerm_storage_blob.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageBlob_disappears(t *testing.T) { - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageBlob_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageBlobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobExists("azurerm_storage_blob.test"), - testCheckAzureRMStorageBlobDisappears("azurerm_storage_blob.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func 
TestAccAzureRMStorageBlobBlock_source(t *testing.T) { - ri := acctest.RandInt() - rs1 := strings.ToLower(acctest.RandString(11)) - sourceBlob, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Failed to create local source blob file") - } - - _, err = io.CopyN(sourceBlob, rand.Reader, 25*1024*1024) - if err != nil { - t.Fatalf("Failed to write random test to source blob") - } - - err = sourceBlob.Close() - if err != nil { - t.Fatalf("Failed to close source blob") - } - - config := fmt.Sprintf(testAccAzureRMStorageBlobBlock_source, ri, rs1, sourceBlob.Name()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageBlobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.source", storage.BlobTypeBlock, sourceBlob.Name()), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageBlobPage_source(t *testing.T) { - ri := acctest.RandInt() - rs1 := strings.ToLower(acctest.RandString(11)) - sourceBlob, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Failed to create local source blob file") - } - - err = sourceBlob.Truncate(25*1024*1024 + 512) - if err != nil { - t.Fatalf("Failed to truncate file to 25M") - } - - for i := int64(0); i < 20; i = i + 2 { - randomBytes := make([]byte, 1*1024*1024) - _, err = rand.Read(randomBytes) - if err != nil { - t.Fatalf("Failed to read random bytes") - } - - _, err = sourceBlob.WriteAt(randomBytes, i*1024*1024) - if err != nil { - t.Fatalf("Failed to write random bytes to file") - } - } - - randomBytes := make([]byte, 5*1024*1024) - _, err = rand.Read(randomBytes) - if err != nil { - t.Fatalf("Failed to read random bytes") - } - - _, err = sourceBlob.WriteAt(randomBytes, 20*1024*1024) - if err != nil { - t.Fatalf("Failed to write random bytes to file") - } - - err = sourceBlob.Close() - if err != 
nil { - t.Fatalf("Failed to close source blob") - } - - config := fmt.Sprintf(testAccAzureRMStorageBlobPage_source, ri, rs1, sourceBlob.Name()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageBlobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.source", storage.BlobTypePage, sourceBlob.Name()), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageBlob_source_uri(t *testing.T) { - ri := acctest.RandInt() - rs1 := strings.ToLower(acctest.RandString(11)) - sourceBlob, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Failed to create local source blob file") - } - - _, err = io.CopyN(sourceBlob, rand.Reader, 25*1024*1024) - if err != nil { - t.Fatalf("Failed to write random test to source blob") - } - - err = sourceBlob.Close() - if err != nil { - t.Fatalf("Failed to close source blob") - } - - config := fmt.Sprintf(testAccAzureRMStorageBlob_source_uri, ri, rs1, sourceBlob.Name()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageBlobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.destination", storage.BlobTypeBlock, sourceBlob.Name()), - ), - }, - }, - }) -} - -func testCheckAzureRMStorageBlobExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - storageContainerName := rs.Primary.Attributes["storage_container_name"] - resourceGroup, 
hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage blob: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - exists, err := blob.Exists() - if err != nil { - return err - } - - if !exists { - return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) does not exist", name, storageContainerName) - } - - return nil - } -} - -func testCheckAzureRMStorageBlobDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - storageContainerName := rs.Primary.Attributes["storage_container_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage blob: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - options := &storage.DeleteBlobOptions{} - _, err = blob.DeleteIfExists(options) - if err != nil { - return err - } - - 
return nil - } -} - -func testCheckAzureRMStorageBlobMatchesFile(name string, kind storage.BlobType, filePath string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - storageContainerName := rs.Primary.Attributes["storage_container_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage blob: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - containerReference := blobClient.GetContainerReference(storageContainerName) - blobReference := containerReference.GetBlobReference(name) - propertyOptions := &storage.GetBlobPropertiesOptions{} - err = blobReference.GetProperties(propertyOptions) - if err != nil { - return err - } - - properties := blobReference.Properties - - if properties.BlobType != kind { - return fmt.Errorf("Bad: blob type %q does not match expected type %q", properties.BlobType, kind) - } - - getOptions := &storage.GetBlobOptions{} - blob, err := blobReference.Get(getOptions) - if err != nil { - return err - } - - contents, err := ioutil.ReadAll(blob) - if err != nil { - return err - } - defer blob.Close() - - expectedContents, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - if string(contents) != string(expectedContents) { - return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) does not match contents", name, storageContainerName) - } - - return nil - } -} - -func 
testCheckAzureRMStorageBlobDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_blob" { - continue - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - storageContainerName := rs.Primary.Attributes["storage_container_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage blob: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return nil - } - if !accountExists { - return nil - } - - container := blobClient.GetContainerReference(storageContainerName) - blob := container.GetBlobReference(name) - exists, err := blob.Exists() - if err != nil { - return nil - } - - if exists { - return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) still exists", name, storageContainerName) - } - } - - return nil -} - -var testAccAzureRMStorageBlob_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_storage_blob" "test" { - name = "herpderp1.vhd" - - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - storage_container_name = "${azurerm_storage_container.test.name}" - - 
type = "page" - size = 5120 -} -` - -var testAccAzureRMStorageBlobBlock_source = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "source" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "source" { - name = "source" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - container_access_type = "blob" -} - -resource "azurerm_storage_blob" "source" { - name = "source.vhd" - - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" - - type = "block" - source = "%s" - parallelism = 4 - attempts = 2 -} -` - -var testAccAzureRMStorageBlobPage_source = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "source" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "source" { - name = "source" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - container_access_type = "blob" -} - -resource "azurerm_storage_blob" "source" { - name = "source.vhd" - - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" - - type = "page" - source = "%s" - parallelism = 3 - attempts = 3 -} -` - -var testAccAzureRMStorageBlob_source_uri = ` -resource 
"azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "source" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "source" { - name = "source" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - container_access_type = "blob" -} - -resource "azurerm_storage_blob" "source" { - name = "source.vhd" - - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" - - type = "block" - source = "%s" - parallelism = 4 - attempts = 2 -} - -resource "azurerm_storage_blob" "destination" { - name = "destination.vhd" - - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.source.name}" - storage_container_name = "${azurerm_storage_container.source.name}" - - source_uri = "${azurerm_storage_blob.source.url}" -} -` diff --git a/builtin/providers/azurerm/resource_arm_storage_container.go b/builtin/providers/azurerm/resource_arm_storage_container.go deleted file mode 100644 index b6f50ef8c..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_container.go +++ /dev/null @@ -1,242 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "strings" - - "regexp" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmStorageContainer() *schema.Resource { - return &schema.Resource{ - Create: resourceArmStorageContainerCreate, - Read: resourceArmStorageContainerRead, - Exists: resourceArmStorageContainerExists, - Delete: resourceArmStorageContainerDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmStorageContainerName, - }, - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_account_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "container_access_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "private", - ValidateFunc: validateArmStorageContainerAccessType, - }, - "properties": { - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -//Following the naming convention as laid out in the docs -func validateArmStorageContainerName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^\$root$|^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q: %q", - k, value)) - } - if len(value) < 3 || len(value) > 63 { - errors = append(errors, fmt.Errorf( - "%q must be between 3 and 63 characters: %q", k, value)) - } - if regexp.MustCompile(`^-`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot begin with a hyphen: %q", k, value)) - } - return -} - -func validateArmStorageContainerAccessType(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - validTypes := map[string]struct{}{ - "private": struct{}{}, - "blob": struct{}{}, - "container": struct{}{}, - } - - if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf("Storage container access type %q is invalid, must be %q, %q or %q", value, "private", "blob", "page")) - } - return -} - -func resourceArmStorageContainerCreate(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := 
armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Storage Account %q Not Found", storageAccountName) - } - - name := d.Get("name").(string) - - var accessType storage.ContainerAccessType - if d.Get("container_access_type").(string) == "private" { - accessType = storage.ContainerAccessType("") - } else { - accessType = storage.ContainerAccessType(d.Get("container_access_type").(string)) - } - - log.Printf("[INFO] Creating container %q in storage account %q.", name, storageAccountName) - reference := blobClient.GetContainerReference(name) - - createOptions := &storage.CreateContainerOptions{} - _, err = reference.CreateIfNotExists(createOptions) - if err != nil { - return fmt.Errorf("Error creating container %q in storage account %q: %s", name, storageAccountName, err) - } - - permissions := storage.ContainerPermissions{ - AccessType: accessType, - } - permissionOptions := &storage.SetContainerPermissionOptions{} - err = reference.SetPermissions(permissions, permissionOptions) - if err != nil { - return fmt.Errorf("Error setting permissions for container %s in storage account %s: %+v", name, storageAccountName, err) - } - - d.SetId(name) - return resourceArmStorageContainerRead(d, meta) -} - -// resourceAzureStorageContainerRead does all the necessary API calls to -// read the status of the storage container off Azure. 
-func resourceArmStorageContainerRead(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing container %q from state", storageAccountName, d.Id()) - d.SetId("") - return nil - } - - name := d.Get("name").(string) - containers, err := blobClient.ListContainers(storage.ListContainersParameters{ - Prefix: name, - Timeout: 90, - }) - if err != nil { - return fmt.Errorf("Failed to retrieve storage containers in account %q: %s", name, err) - } - - var found bool - for _, cont := range containers.Containers { - if cont.Name == name { - found = true - - props := make(map[string]interface{}) - props["last_modified"] = cont.Properties.LastModified - props["lease_status"] = cont.Properties.LeaseStatus - props["lease_state"] = cont.Properties.LeaseState - props["lease_duration"] = cont.Properties.LeaseDuration - - d.Set("properties", props) - } - } - - if !found { - log.Printf("[INFO] Storage container %q does not exist in account %q, removing from state...", name, storageAccountName) - d.SetId("") - } - - return nil -} - -func resourceArmStorageContainerExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return false, err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing container %q from state", storageAccountName, d.Id()) - d.SetId("") - 
return false, nil - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Checking existence of storage container %q in storage account %q", name, storageAccountName) - reference := blobClient.GetContainerReference(name) - exists, err := reference.Exists() - if err != nil { - return false, fmt.Errorf("Error querying existence of storage container %q in storage account %q: %s", name, storageAccountName, err) - } - - if !exists { - log.Printf("[INFO] Storage container %q does not exist in account %q, removing from state...", name, storageAccountName) - d.SetId("") - } - - return exists, nil -} - -// resourceAzureStorageContainerDelete does all the necessary API calls to -// delete a storage container off Azure. -func resourceArmStorageContainerDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the container won't exist", storageAccountName) - return nil - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Deleting storage container %q in account %q", name, storageAccountName) - reference := blobClient.GetContainerReference(name) - deleteOptions := &storage.DeleteContainerOptions{} - if _, err := reference.DeleteIfExists(deleteOptions); err != nil { - return fmt.Errorf("Error deleting storage container %q from storage account %q: %s", name, storageAccountName, err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_storage_container_test.go b/builtin/providers/azurerm/resource_arm_storage_container_test.go deleted file mode 100644 index 77fb17a55..000000000 --- 
a/builtin/providers/azurerm/resource_arm_storage_container_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMStorageContainer_basic(t *testing.T) { - var c storage.Container - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageContainer_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageContainerDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageContainerExists("azurerm_storage_container.test", &c), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageContainer_disappears(t *testing.T) { - var c storage.Container - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageContainer_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageContainerDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageContainerExists("azurerm_storage_container.test", &c), - testAccARMStorageContainerDisappears("azurerm_storage_container.test", &c), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMStorageContainer_root(t *testing.T) { - var c storage.Container - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageContainer_root, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageContainerDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageContainerExists("azurerm_storage_container.test", &c), - resource.TestCheckResourceAttr("azurerm_storage_container.test", "name", "$root"), - ), - }, - }, - }) -} - -func testCheckAzureRMStorageContainerExists(name string, c *storage.Container) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage container: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - containers, err := blobClient.ListContainers(storage.ListContainersParameters{ - Prefix: name, - Timeout: 90, - }) - - if len(containers.Containers) == 0 { - return fmt.Errorf("Bad: Storage Container %q (storage account: %q) does not exist", name, storageAccountName) - } - - var found bool - for _, container := range containers.Containers { - if container.Name == name { - found = true - *c = container - } - } - - if !found { - return fmt.Errorf("Bad: Storage Container %q (storage account: %q) does not exist", name, storageAccountName) - } - - return nil - } -} - -func testAccARMStorageContainerDisappears(name string, c *storage.Container) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage container: %s", c.Name) - } - - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the container won't exist", storageAccountName) - return nil - } - - reference := blobClient.GetContainerReference(c.Name) - options := &storage.DeleteContainerOptions{} - _, err = reference.DeleteIfExists(options) - if err != nil { - return err - } - - return nil - } -} - -func testCheckAzureRMStorageContainerDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_container" { - continue - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage container: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - //If we can't get keys then the blob can't exist - return nil - } - if !accountExists { - return nil - } - - containers, err := blobClient.ListContainers(storage.ListContainersParameters{ - Prefix: name, - Timeout: 90, - }) - - if err != nil { - return nil - } - - var found bool - for _, container := range containers.Containers { - if container.Name == name { - found = true - } 
- } - - if found { - return fmt.Errorf("Bad: Storage Container %q (storage account: %q) still exist", name, storageAccountName) - } - } - - return nil -} - -func TestValidateArmStorageContainerName(t *testing.T) { - validNames := []string{ - "valid-name", - "valid02-name", - "$root", - } - for _, v := range validNames { - _, errors := validateArmStorageContainerName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Storage Container Name: %q", v, errors) - } - } - - invalidNames := []string{ - "InvalidName1", - "-invalidname1", - "invalid_name", - "invalid!", - "ww", - "$notroot", - strings.Repeat("w", 65), - } - for _, v := range invalidNames { - _, errors := validateArmStorageContainerName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Storage Container Name", v) - } - } -} - -var testAccAzureRMStorageContainer_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} -` - -var testAccAzureRMStorageContainer_root = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "$root" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - 
container_access_type = "private" -} -` diff --git a/builtin/providers/azurerm/resource_arm_storage_queue.go b/builtin/providers/azurerm/resource_arm_storage_queue.go deleted file mode 100644 index 7ef603a91..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_queue.go +++ /dev/null @@ -1,171 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "regexp" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmStorageQueue() *schema.Resource { - return &schema.Resource{ - Create: resourceArmStorageQueueCreate, - Read: resourceArmStorageQueueRead, - Exists: resourceArmStorageQueueExists, - Delete: resourceArmStorageQueueDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmStorageQueueName, - }, - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_account_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func validateArmStorageQueueName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if !regexp.MustCompile(`^[a-z0-9-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - - if regexp.MustCompile(`^-`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q cannot start with a hyphen", k)) - } - - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q cannot end with a hyphen", k)) - } - - if len(value) > 63 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 63 characters", k)) - } - - if len(value) < 3 { - errors = append(errors, fmt.Errorf( - "%q must be at least 3 characters", k)) - } - - return -} - -func resourceArmStorageQueueCreate(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - 
resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Storage Account %q Not Found", storageAccountName) - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Creating queue %q in storage account %q", name, storageAccountName) - queueReference := queueClient.GetQueueReference(name) - options := &storage.QueueServiceOptions{} - err = queueReference.Create(options) - if err != nil { - return fmt.Errorf("Error creating storage queue on Azure: %s", err) - } - - d.SetId(name) - return resourceArmStorageQueueRead(d, meta) -} - -func resourceArmStorageQueueRead(d *schema.ResourceData, meta interface{}) error { - - exists, err := resourceArmStorageQueueExists(d, meta) - if err != nil { - return err - } - - if !exists { - // Exists already removed this from state - return nil - } - - return nil -} - -func resourceArmStorageQueueExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return false, err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing queue %q from state", storageAccountName, d.Id()) - d.SetId("") - return false, nil - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Checking for existence of storage queue %q.", name) - queueReference := queueClient.GetQueueReference(name) - exists, err := queueReference.Exists() - if err != nil { - return false, fmt.Errorf("error testing existence of storage queue %q: %s", name, err) - } - - 
if !exists { - log.Printf("[INFO] Storage queue %q no longer exists, removing from state...", name) - d.SetId("") - } - - return exists, nil -} - -func resourceArmStorageQueueDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the blob won't exist", storageAccountName) - return nil - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Deleting storage queue %q", name) - queueReference := queueClient.GetQueueReference(name) - options := &storage.QueueServiceOptions{} - if err = queueReference.Delete(options); err != nil { - return fmt.Errorf("Error deleting storage queue %q: %s", name, err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_storage_queue_test.go b/builtin/providers/azurerm/resource_arm_storage_queue_test.go deleted file mode 100644 index ebc802727..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_queue_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package azurerm - -import ( - "fmt" - "testing" - - "strings" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceAzureRMStorageQueueName_Validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "testing_123", - ErrCount: 1, - }, - { - Value: "testing123-", - ErrCount: 1, - }, - { - Value: "-testing123", - ErrCount: 1, - }, - { - Value: "TestingSG", - ErrCount: 1, - }, - { - Value: acctest.RandString(256), - ErrCount: 1, - }, - { - Value: acctest.RandString(1), - ErrCount: 1, - }, - } - - for _, tc := 
range cases { - _, errors := validateArmStorageQueueName(tc.Value, "azurerm_storage_queue") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the ARM Storage Queue Name to trigger a validation error") - } - } -} - -func TestAccAzureRMStorageQueue_basic(t *testing.T) { - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageQueue_basic, ri, rs, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageQueueDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageQueueExists("azurerm_storage_queue.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMStorageQueueExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage queue: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - queueReference := queueClient.GetQueueReference(name) - exists, err := queueReference.Exists() - if err != nil { - return err - } - - if !exists { - return fmt.Errorf("Bad: Storage Queue %q (storage account: %q) does not exist", name, storageAccountName) - } - - return nil - } -} - -func testCheckAzureRMStorageQueueDestroy(s *terraform.State) error { 
- for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_queue" { - continue - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage queue: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - queueClient, accountExists, err := armClient.getQueueServiceClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return nil - } - if !accountExists { - return nil - } - - queueReference := queueClient.GetQueueReference(name) - exists, err := queueReference.Exists() - if err != nil { - return nil - } - - if exists { - return fmt.Errorf("Bad: Storage Queue %q (storage account: %q) still exists", name, storageAccountName) - } - } - - return nil -} - -var testAccAzureRMStorageQueue_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_queue" "test" { - name = "mysamplequeue-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" -} -` diff --git a/builtin/providers/azurerm/resource_arm_storage_share.go b/builtin/providers/azurerm/resource_arm_storage_share.go deleted file mode 100644 index f7543ef03..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_share.go +++ /dev/null @@ -1,205 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "regexp" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmStorageShare() *schema.Resource 
{ - return &schema.Resource{ - Create: resourceArmStorageShareCreate, - Read: resourceArmStorageShareRead, - Exists: resourceArmStorageShareExists, - Delete: resourceArmStorageShareDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmStorageShareName, - }, - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_account_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "quota": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 0, - }, - "url": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} -func resourceArmStorageShareCreate(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Storage Account %q Not Found", storageAccountName) - } - - name := d.Get("name").(string) - metaData := make(map[string]string) // TODO: support MetaData - options := &storage.FileRequestOptions{} - - log.Printf("[INFO] Creating share %q in storage account %q", name, storageAccountName) - reference := fileClient.GetShareReference(name) - err = reference.Create(options) - - log.Printf("[INFO] Setting share %q metadata in storage account %q", name, storageAccountName) - reference.Metadata = metaData - reference.SetMetadata(options) - - log.Printf("[INFO] Setting share %q properties in storage account %q", name, storageAccountName) - reference.Properties = storage.ShareProperties{ - Quota: d.Get("quota").(int), - } - reference.SetProperties(options) - - d.SetId(name) - return resourceArmStorageShareRead(d, meta) -} - -func 
resourceArmStorageShareRead(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing file %q from state", storageAccountName, d.Id()) - d.SetId("") - return nil - } - - exists, err := resourceArmStorageShareExists(d, meta) - if err != nil { - return err - } - - if !exists { - // Exists already removed this from state - return nil - } - - name := d.Get("name").(string) - - reference := fileClient.GetShareReference(name) - url := reference.URL() - if url == "" { - log.Printf("[INFO] URL for %q is empty", name) - } - d.Set("url", url) - - return nil -} - -func resourceArmStorageShareExists(d *schema.ResourceData, meta interface{}) (bool, error) { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return false, err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing share %q from state", storageAccountName, d.Id()) - d.SetId("") - return false, nil - } - - name := d.Get("name").(string) - - log.Printf("[INFO] Checking for existence of share %q.", name) - reference := fileClient.GetShareReference(name) - exists, err := reference.Exists() - if err != nil { - return false, fmt.Errorf("Error testing existence of share %q: %s", name, err) - } - - if !exists { - log.Printf("[INFO] Share %q no longer exists, removing from state...", name) - d.SetId("") - } - - return exists, nil -} - -func 
resourceArmStorageShareDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the file won't exist", storageAccountName) - return nil - } - - name := d.Get("name").(string) - - reference := fileClient.GetShareReference(name) - options := &storage.FileRequestOptions{} - - if _, err = reference.DeleteIfExists(options); err != nil { - return fmt.Errorf("Error deleting storage file %q: %s", name, err) - } - - d.SetId("") - return nil -} - -//Following the naming convention as laid out in the docs https://msdn.microsoft.com/library/azure/dn167011.aspx -func validateArmStorageShareName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q: %q", - k, value)) - } - if len(value) < 3 || len(value) > 63 { - errors = append(errors, fmt.Errorf( - "%q must be between 3 and 63 characters: %q", k, value)) - } - if regexp.MustCompile(`^-`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot begin with a hyphen: %q", k, value)) - } - if regexp.MustCompile(`[-]{2,}`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q does not allow consecutive hyphens: %q", k, value)) - } - return -} diff --git a/builtin/providers/azurerm/resource_arm_storage_share_test.go b/builtin/providers/azurerm/resource_arm_storage_share_test.go deleted file mode 100644 index fd1592c01..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_share_test.go +++ /dev/null @@ -1,244 +0,0 @@ 
-package azurerm - -import ( - "fmt" - "log" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMStorageShare_basic(t *testing.T) { - var sS storage.Share - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageShare_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageShareDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareExists("azurerm_storage_share.test", &sS), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageShare_disappears(t *testing.T) { - var sS storage.Share - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageShare_basic, ri, rs) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageShareDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageShareExists("azurerm_storage_share.test", &sS), - testAccARMStorageShareDisappears("azurerm_storage_share.test", &sS), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMStorageShareExists(name string, sS *storage.Share) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroupName, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - 
return fmt.Errorf("Bad: no resource group found in state for share: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - shares, err := fileClient.ListShares(storage.ListSharesParameters{ - Prefix: name, - Timeout: 90, - }) - - if len(shares.Shares) == 0 { - return fmt.Errorf("Bad: Share %q (storage account: %q) does not exist", name, storageAccountName) - } - - var found bool - for _, share := range shares.Shares { - if share.Name == name { - found = true - *sS = share - } - } - - if !found { - return fmt.Errorf("Bad: Share %q (storage account: %q) does not exist", name, storageAccountName) - } - - return nil - } -} - -func testAccARMStorageShareDisappears(name string, sS *storage.Share) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroupName, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage share: %s", sS.Name) - } - - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the share won't exist", storageAccountName) - return nil - } - - reference := fileClient.GetShareReference(sS.Name) - options := &storage.FileRequestOptions{} - err = reference.Create(options) - - if _, err = reference.DeleteIfExists(options); err != nil { - return 
fmt.Errorf("Error deleting storage file %q: %s", name, err) - } - - return nil - } -} - -func testCheckAzureRMStorageShareDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_share" { - continue - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroupName, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for share: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - fileClient, accountExists, err := armClient.getFileServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - //If we can't get keys then the blob can't exist - return nil - } - if !accountExists { - return nil - } - - shares, err := fileClient.ListShares(storage.ListSharesParameters{ - Prefix: name, - Timeout: 90, - }) - - if err != nil { - return nil - } - - var found bool - for _, share := range shares.Shares { - if share.Name == name { - found = true - } - } - - if found { - return fmt.Errorf("Bad: Share %q (storage account: %q) still exists", name, storageAccountName) - } - } - - return nil -} - -func TestValidateArmStorageShareName(t *testing.T) { - validNames := []string{ - "valid-name", - "valid02-name", - } - for _, v := range validNames { - _, errors := validateArmStorageShareName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Share Name: %q", v, errors) - } - } - - invalidNames := []string{ - "InvalidName1", - "-invalidname1", - "invalid_name", - "invalid!", - "double-hyphen--invalid", - "ww", - strings.Repeat("w", 65), - } - for _, v := range invalidNames { - _, errors := validateArmStorageShareName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Share Name", v) - } - } -} - -var testAccAzureRMStorageShare_basic = ` -resource "azurerm_resource_group" "test" { - name 
= "acctestrg-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_share" "test" { - name = "testshare" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" -} -` diff --git a/builtin/providers/azurerm/resource_arm_storage_table.go b/builtin/providers/azurerm/resource_arm_storage_table.go deleted file mode 100644 index 3db39165a..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_table.go +++ /dev/null @@ -1,154 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "regexp" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmStorageTable() *schema.Resource { - return &schema.Resource{ - Create: resourceArmStorageTableCreate, - Read: resourceArmStorageTableRead, - Delete: resourceArmStorageTableDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateArmStorageTableName, - }, - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "storage_account_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func validateArmStorageTableName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "table" { - errors = append(errors, fmt.Errorf( - "Table Storage %q cannot use the word `table`: %q", - k, value)) - } - if !regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]{6,63}$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "Table Storage %q cannot begin with a numeric character, only alphanumeric characters are allowed and must be between 6 and 63 characters long: %q", - k, value)) - } - 
- return -} - -func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Storage Account %q Not Found", storageAccountName) - } - - name := d.Get("name").(string) - table := tableClient.GetTableReference(name) - - log.Printf("[INFO] Creating table %q in storage account %q.", name, storageAccountName) - - timeout := uint(60) - options := &storage.TableOptions{} - err = table.Create(timeout, storage.NoMetadata, options) - if err != nil { - return fmt.Errorf("Error creating table %q in storage account %q: %s", name, storageAccountName, err) - } - - d.SetId(name) - - return resourceArmStorageTableRead(d, meta) -} - -func resourceArmStorageTableRead(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[DEBUG] Storage account %q not found, removing table %q from state", storageAccountName, d.Id()) - d.SetId("") - return nil - } - - name := d.Get("name").(string) - metaDataLevel := storage.MinimalMetadata - options := &storage.QueryTablesOptions{} - tables, err := tableClient.QueryTables(metaDataLevel, options) - if err != nil { - return fmt.Errorf("Failed to retrieve storage tables in account %q: %s", name, err) - } - - var found bool - for _, table := range tables.Tables { - tableName := string(table.Name) - if tableName == name { - found 
= true - d.Set("name", tableName) - } - } - - if !found { - log.Printf("[INFO] Storage table %q does not exist in account %q, removing from state...", name, storageAccountName) - d.SetId("") - } - - return nil -} - -func resourceArmStorageTableDelete(d *schema.ResourceData, meta interface{}) error { - armClient := meta.(*ArmClient) - - resourceGroupName := d.Get("resource_group_name").(string) - storageAccountName := d.Get("storage_account_name").(string) - - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO] Storage Account %q doesn't exist so the table won't exist", storageAccountName) - return nil - } - - name := d.Get("name").(string) - table := tableClient.GetTableReference(name) - timeout := uint(60) - options := &storage.TableOptions{} - - log.Printf("[INFO] Deleting storage table %q in account %q", name, storageAccountName) - if err := table.Delete(timeout, options); err != nil { - return fmt.Errorf("Error deleting storage table %q from storage account %q: %s", name, storageAccountName, err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/azurerm/resource_arm_storage_table_test.go b/builtin/providers/azurerm/resource_arm_storage_table_test.go deleted file mode 100644 index 33cc095aa..000000000 --- a/builtin/providers/azurerm/resource_arm_storage_table_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMStorageTable_basic(t *testing.T) { - var table storage.Table - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageTable_basic, ri, rs, ri) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageTableDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageTableExists("azurerm_storage_table.test", &table), - ), - }, - }, - }) -} - -func TestAccAzureRMStorageTable_disappears(t *testing.T) { - var table storage.Table - - ri := acctest.RandInt() - rs := strings.ToLower(acctest.RandString(11)) - config := fmt.Sprintf(testAccAzureRMStorageTable_basic, ri, rs, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMStorageTableDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMStorageTableExists("azurerm_storage_table.test", &table), - testAccARMStorageTableDisappears("azurerm_storage_table.test", &table), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMStorageTableExists(name string, t *storage.Table) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage table: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) - } - - options := &storage.QueryTablesOptions{} - tables, err := 
tableClient.QueryTables(storage.MinimalMetadata, options) - - if len(tables.Tables) == 0 { - return fmt.Errorf("Bad: Storage Table %q (storage account: %q) does not exist", name, storageAccountName) - } - - var found bool - for _, table := range tables.Tables { - if table.Name == name { - found = true - *t = table - } - } - - if !found { - return fmt.Errorf("Bad: Storage Table %q (storage account: %q) does not exist", name, storageAccountName) - } - - return nil - } -} - -func testAccARMStorageTableDisappears(name string, t *storage.Table) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for storage table: %s", t.Name) - } - - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return err - } - if !accountExists { - log.Printf("[INFO]Storage Account %q doesn't exist so the table won't exist", storageAccountName) - return nil - } - - table := tableClient.GetTableReference(t.Name) - timeout := uint(60) - options := &storage.TableOptions{} - err = table.Delete(timeout, options) - if err != nil { - return err - } - - return nil - } -} - -func testCheckAzureRMStorageTableDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_table" { - continue - } - - name := rs.Primary.Attributes["name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state 
for storage table: %s", name) - } - - armClient := testAccProvider.Meta().(*ArmClient) - tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - //If we can't get keys then the table can't exist - return nil - } - if !accountExists { - return nil - } - - options := &storage.QueryTablesOptions{} - tables, err := tableClient.QueryTables(storage.NoMetadata, options) - - if err != nil { - return nil - } - - var found bool - for _, table := range tables.Tables { - if table.Name == name { - found = true - } - } - - if found { - return fmt.Errorf("Bad: Storage Table %q (storage account: %q) still exist", name, storageAccountName) - } - } - - return nil -} - -func TestValidateArmStorageTableName(t *testing.T) { - validNames := []string{ - "mytable01", - "mytable", - "myTable", - "MYTABLE", - } - for _, v := range validNames { - _, errors := validateArmStorageTableName(v, "name") - if len(errors) != 0 { - t.Fatalf("%q should be a valid Storage Table Name: %q", v, errors) - } - } - - invalidNames := []string{ - "table", - "-invalidname1", - "invalid_name", - "invalid!", - "ww", - strings.Repeat("w", 65), - } - for _, v := range invalidNames { - _, errors := validateArmStorageTableName(v, "name") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid Storage Table Name", v) - } - } -} - -var testAccAzureRMStorageTable_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "westus" -} - -resource "azurerm_storage_account" "test" { - name = "acctestacc%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_table" "test" { - name = "tfacceptancetest%d" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" -} -` diff --git 
a/builtin/providers/azurerm/resource_arm_subnet.go b/builtin/providers/azurerm/resource_arm_subnet.go deleted file mode 100644 index 044c6d4f1..000000000 --- a/builtin/providers/azurerm/resource_arm_subnet.go +++ /dev/null @@ -1,237 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmSubnet() *schema.Resource { - return &schema.Resource{ - Create: resourceArmSubnetCreate, - Read: resourceArmSubnetRead, - Update: resourceArmSubnetCreate, - Delete: resourceArmSubnetDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "virtual_network_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "network_security_group_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "route_table_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ip_configurations": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - subnetClient := client.subnetClient - - log.Printf("[INFO] preparing arguments for Azure ARM Subnet creation.") - - name := d.Get("name").(string) - vnetName := d.Get("virtual_network_name").(string) - resGroup := d.Get("resource_group_name").(string) - addressPrefix := d.Get("address_prefix").(string) - - armMutexKV.Lock(name) - defer armMutexKV.Unlock(name) - - armMutexKV.Lock(vnetName) - defer armMutexKV.Unlock(vnetName) 
- - properties := network.SubnetPropertiesFormat{ - AddressPrefix: &addressPrefix, - } - - if v, ok := d.GetOk("network_security_group_id"); ok { - nsgId := v.(string) - properties.NetworkSecurityGroup = &network.SecurityGroup{ - ID: &nsgId, - } - - networkSecurityGroupName, err := parseNetworkSecurityGroupName(nsgId) - if err != nil { - return err - } - - armMutexKV.Lock(networkSecurityGroupName) - defer armMutexKV.Unlock(networkSecurityGroupName) - } - - if v, ok := d.GetOk("route_table_id"); ok { - rtId := v.(string) - properties.RouteTable = &network.RouteTable{ - ID: &rtId, - } - - routeTableName, err := parseRouteTableName(rtId) - if err != nil { - return err - } - - armMutexKV.Lock(routeTableName) - defer armMutexKV.Unlock(routeTableName) - } - - subnet := network.Subnet{ - Name: &name, - SubnetPropertiesFormat: &properties, - } - - _, error := subnetClient.CreateOrUpdate(resGroup, vnetName, name, subnet, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := subnetClient.Get(resGroup, vnetName, name, "") - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Subnet %s/%s (resource group %s) ID", vnetName, name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmSubnetRead(d, meta) -} - -func resourceArmSubnetRead(d *schema.ResourceData, meta interface{}) error { - subnetClient := meta.(*ArmClient).subnetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - vnetName := id.Path["virtualNetworks"] - name := id.Path["subnets"] - - resp, err := subnetClient.Get(resGroup, vnetName, name, "") - - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure Subnet %s: %s", name, err) - } - - d.Set("name", name) - d.Set("resource_group_name", resGroup) - d.Set("virtual_network_name", vnetName) - d.Set("address_prefix", 
resp.SubnetPropertiesFormat.AddressPrefix) - - if resp.SubnetPropertiesFormat.NetworkSecurityGroup != nil { - d.Set("network_security_group_id", resp.SubnetPropertiesFormat.NetworkSecurityGroup.ID) - } - - if resp.SubnetPropertiesFormat.RouteTable != nil { - d.Set("route_table_id", resp.SubnetPropertiesFormat.RouteTable.ID) - } - - if resp.SubnetPropertiesFormat.IPConfigurations != nil { - ips := make([]string, 0, len(*resp.SubnetPropertiesFormat.IPConfigurations)) - for _, ip := range *resp.SubnetPropertiesFormat.IPConfigurations { - ips = append(ips, *ip.ID) - } - - if err := d.Set("ip_configurations", ips); err != nil { - return err - } - } else { - d.Set("ip_configurations", []string{}) - } - - return nil -} - -func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error { - subnetClient := meta.(*ArmClient).subnetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["subnets"] - vnetName := id.Path["virtualNetworks"] - - if v, ok := d.GetOk("network_security_group_id"); ok { - networkSecurityGroupId := v.(string) - networkSecurityGroupName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) - if err != nil { - return err - } - - armMutexKV.Lock(networkSecurityGroupName) - defer armMutexKV.Unlock(networkSecurityGroupName) - } - - if v, ok := d.GetOk("route_table_id"); ok { - rtId := v.(string) - routeTableName, err := parseRouteTableName(rtId) - if err != nil { - return err - } - - armMutexKV.Lock(routeTableName) - defer armMutexKV.Unlock(routeTableName) - } - - armMutexKV.Lock(vnetName) - defer armMutexKV.Unlock(vnetName) - - armMutexKV.Lock(name) - defer armMutexKV.Unlock(name) - - _, error := subnetClient.Delete(resGroup, vnetName, name, make(chan struct{})) - err = <-error - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_subnet_test.go b/builtin/providers/azurerm/resource_arm_subnet_test.go deleted file mode 100644 index 
06d8ba473..000000000 --- a/builtin/providers/azurerm/resource_arm_subnet_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMSubnet_basic(t *testing.T) { - - ri := acctest.RandInt() - config := testAccAzureRMSubnet_basic(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSubnetExists("azurerm_subnet.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMSubnet_routeTableUpdate(t *testing.T) { - - ri := acctest.RandInt() - initConfig := testAccAzureRMSubnet_routeTable(ri) - updatedConfig := testAccAzureRMSubnet_updatedRouteTable(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: initConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSubnetExists("azurerm_subnet.test"), - ), - }, - - resource.TestStep{ - Config: updatedConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMSubnetRouteTableExists("azurerm_subnet.test", fmt.Sprintf("acctest-%d", ri)), - ), - }, - }, - }) -} - -func TestAccAzureRMSubnet_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := testAccAzureRMSubnet_basic(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMSubnetExists("azurerm_subnet.test"), - testCheckAzureRMSubnetDisappears("azurerm_subnet.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testCheckAzureRMSubnetExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - log.Printf("[INFO] Checking Subnet addition.") - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for subnet: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).subnetClient - - resp, err := conn.Get(resourceGroup, vnetName, name, "") - if err != nil { - return fmt.Errorf("Bad: Get on subnetClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Subnet %q (resource group: %q) does not exist", name, resourceGroup) - } - - if resp.RouteTable == nil { - return fmt.Errorf("Bad: Subnet %q (resource group: %q) does not contain route tables after add", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMSubnetRouteTableExists(subnetName string, routeTableId string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[subnetName] - if !ok { - return fmt.Errorf("Not found: %s", subnetName) - } - - log.Printf("[INFO] Checking Subnet update.") - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for subnet: %s", name) - } - - 
vnetConn := testAccProvider.Meta().(*ArmClient).vnetClient - vnetResp, vnetErr := vnetConn.Get(resourceGroup, vnetName, "") - if vnetErr != nil { - return fmt.Errorf("Bad: Get on vnetClient: %s", vnetErr) - } - - if vnetResp.Subnets == nil { - return fmt.Errorf("Bad: Vnet %q (resource group: %q) does not have subnets after update", vnetName, resourceGroup) - } - - conn := testAccProvider.Meta().(*ArmClient).subnetClient - - resp, err := conn.Get(resourceGroup, vnetName, name, "") - if err != nil { - return fmt.Errorf("Bad: Get on subnetClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Subnet %q (resource group: %q) does not exist", subnetName, resourceGroup) - } - - if resp.RouteTable == nil { - return fmt.Errorf("Bad: Subnet %q (resource group: %q) does not contain route tables after update", subnetName, resourceGroup) - } - - if !strings.Contains(*resp.RouteTable.ID, routeTableId) { - return fmt.Errorf("Bad: Subnet %q (resource group: %q) does not have route table %q", subnetName, resourceGroup, routeTableId) - } - - return nil - } -} - -func testCheckAzureRMSubnetDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for subnet: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).subnetClient - - _, error := conn.Delete(resourceGroup, vnetName, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on subnetClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMSubnetDestroy(s 
*terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).subnetClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_subnet" { - continue - } - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, vnetName, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Subnet still exists:\n%#v", resp.SubnetPropertiesFormat) - } - } - - return nil -} - -func testAccAzureRMSubnet_basic(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_route_table" "test" { - name = "acctestroutetable%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US" -} - -resource "azurerm_route" "test" { - name = "acctestroute%d" - resource_group_name = "${azurerm_resource_group.test.name}" - route_table_name = "${azurerm_route_table.test.name}" - - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" -} -`, rInt, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMSubnet_routeTable(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - 
address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_route_table" "test" { - name = "acctest-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_route" "route_a" { - name = "acctest-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - route_table_name = "${azurerm_route_table.test.name}" - - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" -}`, rInt, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMSubnet_updatedRouteTable(rInt int) string { - return fmt.Sprintf(` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" - tags { - environment = "Testing" - } -} - -resource "azurerm_network_security_group" "test_secgroup" { - name = "acctest-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - security_rule { - name = "acctest-%d" - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } - - tags { - environment = "Testing" - } -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - tags { - environment = "Testing" - } -} - -resource "azurerm_subnet" "test" { - name = "acctestsubnet%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - 
address_prefix = "10.0.2.0/24" - route_table_id = "${azurerm_route_table.test.id}" -} - -resource "azurerm_route_table" "test" { - name = "acctest-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - tags { - environment = "Testing" - } -} - -resource "azurerm_route" "route_a" { - name = "acctest-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - route_table_name = "${azurerm_route_table.test.name}" - - address_prefix = "10.100.0.0/14" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = "10.10.1.1" -}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt) -} diff --git a/builtin/providers/azurerm/resource_arm_template_deployment.go b/builtin/providers/azurerm/resource_arm_template_deployment.go deleted file mode 100644 index 4431f537e..000000000 --- a/builtin/providers/azurerm/resource_arm_template_deployment.go +++ /dev/null @@ -1,247 +0,0 @@ -package azurerm - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/arm/resources/resources" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmTemplateDeployment() *schema.Resource { - return &schema.Resource{ - Create: resourceArmTemplateDeploymentCreate, - Read: resourceArmTemplateDeploymentRead, - Update: resourceArmTemplateDeploymentCreate, - Delete: resourceArmTemplateDeploymentDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "template_body": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: normalizeJson, - }, - - "parameters": { - Type: schema.TypeMap, - Optional: true, - }, - - "outputs": { - Type: schema.TypeMap, - Computed: true, - }, - - "deployment_mode": { - Type: schema.TypeString, - Required: true, - }, 
- }, - } -} - -func resourceArmTemplateDeploymentCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - deployClient := client.deploymentsClient - - name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) - deploymentMode := d.Get("deployment_mode").(string) - - log.Printf("[INFO] preparing arguments for Azure ARM Template Deployment creation.") - properties := resources.DeploymentProperties{ - Mode: resources.DeploymentMode(deploymentMode), - } - - if v, ok := d.GetOk("parameters"); ok { - params := v.(map[string]interface{}) - - newParams := make(map[string]interface{}, len(params)) - for key, val := range params { - newParams[key] = struct { - Value interface{} - }{ - Value: val, - } - } - - properties.Parameters = &newParams - } - - if v, ok := d.GetOk("template_body"); ok { - template, err := expandTemplateBody(v.(string)) - if err != nil { - return err - } - - properties.Template = &template - } - - deployment := resources.Deployment{ - Properties: &properties, - } - - _, error := deployClient.CreateOrUpdate(resGroup, name, deployment, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Error creating deployment: %s", err) - } - - read, err := deployClient.Get(resGroup, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Template Deployment %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - log.Printf("[DEBUG] Waiting for Template Deployment (%s) to become available", name) - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "updating", "accepted", "running"}, - Target: []string{"succeeded"}, - Refresh: templateDeploymentStateRefreshFunc(client, resGroup, name), - Timeout: 40 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Template Deployment (%s) to become available: %s", name, err) - } - - return 
resourceArmTemplateDeploymentRead(d, meta) -} - -func resourceArmTemplateDeploymentRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - deployClient := client.deploymentsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["deployments"] - if name == "" { - name = id.Path["Deployments"] - } - - resp, err := deployClient.Get(resGroup, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure RM Template Deployment %s: %s", name, err) - } - - var outputs map[string]string - if resp.Properties.Outputs != nil && len(*resp.Properties.Outputs) > 0 { - outputs = make(map[string]string) - for key, output := range *resp.Properties.Outputs { - log.Printf("[DEBUG] Processing deployment output %s", key) - outputMap := output.(map[string]interface{}) - outputValue, ok := outputMap["value"] - if !ok { - log.Printf("[DEBUG] No value - skipping") - continue - } - outputType, ok := outputMap["type"] - if !ok { - log.Printf("[DEBUG] No type - skipping") - continue - } - - var outputValueString string - switch strings.ToLower(outputType.(string)) { - case "bool": - outputValueString = strconv.FormatBool(outputValue.(bool)) - - case "string": - outputValueString = outputValue.(string) - - case "int": - outputValueString = fmt.Sprint(outputValue) - - default: - log.Printf("[WARN] Ignoring output %s: Outputs of type %s are not currently supported in azurerm_template_deployment.", - key, outputType) - continue - } - outputs[key] = outputValueString - } - } - - return d.Set("outputs", outputs) -} - -func resourceArmTemplateDeploymentDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - deployClient := client.deploymentsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := 
id.Path["deployments"] - if name == "" { - name = id.Path["Deployments"] - } - - _, error := deployClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func expandTemplateBody(template string) (map[string]interface{}, error) { - var templateBody map[string]interface{} - err := json.Unmarshal([]byte(template), &templateBody) - if err != nil { - return nil, fmt.Errorf("Error Expanding the template_body for Azure RM Template Deployment") - } - return templateBody, nil -} - -func normalizeJson(jsonString interface{}) string { - if jsonString == nil || jsonString == "" { - return "" - } - var j interface{} - err := json.Unmarshal([]byte(jsonString.(string)), &j) - if err != nil { - return fmt.Sprintf("Error parsing JSON: %s", err) - } - b, _ := json.Marshal(j) - return string(b[:]) -} - -func templateDeploymentStateRefreshFunc(client *ArmClient, resourceGroupName string, name string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.deploymentsClient.Get(resourceGroupName, name) - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in templateDeploymentStateRefreshFunc to Azure ARM for Template Deployment '%s' (RG: '%s'): %s", name, resourceGroupName, err) - } - - return res, strings.ToLower(*res.Properties.ProvisioningState), nil - } -} diff --git a/builtin/providers/azurerm/resource_arm_template_deployment_test.go b/builtin/providers/azurerm/resource_arm_template_deployment_test.go deleted file mode 100644 index 7344c8f7e..000000000 --- a/builtin/providers/azurerm/resource_arm_template_deployment_test.go +++ /dev/null @@ -1,571 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMTemplateDeployment_basic(t *testing.T) { - ri := acctest.RandInt() - config := 
fmt.Sprintf(testAccAzureRMTemplateDeployment_basicMultiple, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMTemplateDeployment_disappears(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTemplateDeployment_basicSingle, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), - testCheckAzureRMTemplateDeploymentDisappears("azurerm_template_deployment.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withParams, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), - resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.testOutput", "Output Value"), - ), - }, - }, - }) -} - -func TestAccAzureRMTemplateDeployment_withOutputs(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withOutputs, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"), - resource.TestCheckOutput("tfIntOutput", "-123"), - resource.TestCheckOutput("tfStringOutput", "Standard_GRS"), - resource.TestCheckOutput("tfFalseOutput", "false"), - resource.TestCheckOutput("tfTrueOutput", "true"), - resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.stringOutput", "Standard_GRS"), - ), - }, - }, - }) -} - -func TestAccAzureRMTemplateDeployment_withError(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withError, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, - Steps: []resource.TestStep{ - { - Config: config, - ExpectError: regexp.MustCompile("The deployment operation failed"), - }, - }, - }) -} - -func testCheckAzureRMTemplateDeploymentExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for template deployment: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).deploymentsClient - - resp, err := conn.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Bad: Get on deploymentsClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: TemplateDeployment %q (resource group: %q) does not exist", name, resourceGroup) - } - - 
return nil - } -} - -func testCheckAzureRMTemplateDeploymentDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for template deployment: %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).deploymentsClient - - _, error := conn.Delete(resourceGroup, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on deploymentsClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMTemplateDeploymentDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).vmClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_template_deployment" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Template Deployment still exists:\n%#v", resp.VirtualMachineProperties) - } - } - - return nil -} - -var testAccAzureRMTemplateDeployment_basicSingle = ` - resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" - } - - resource "azurerm_template_deployment" "test" { - name = "acctesttemplate-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - template_body = < 0 { - ssh_keys := make([]map[string]interface{}, 0, len(*config.SSH.PublicKeys)) - for _, i := range *config.SSH.PublicKeys { - key := make(map[string]interface{}) - key["path"] = *i.Path - - if i.KeyData != nil { - key["key_data"] = *i.KeyData - } 
- - ssh_keys = append(ssh_keys, key) - } - - result["ssh_keys"] = ssh_keys - } - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} { - result := make(map[string]interface{}) - result["name"] = *disk.Name - if disk.Vhd != nil { - result["vhd_uri"] = *disk.Vhd.URI - } - if disk.ManagedDisk != nil { - result["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) - result["managed_disk_id"] = *disk.ManagedDisk.ID - } - result["create_option"] = disk.CreateOption - result["caching"] = disk.Caching - if disk.DiskSizeGB != nil { - result["disk_size_gb"] = *disk.DiskSizeGB - } - - return []interface{}{result} -} - -func expandAzureRmVirtualMachinePlan(d *schema.ResourceData) (*compute.Plan, error) { - planConfigs := d.Get("plan").(*schema.Set).List() - - planConfig := planConfigs[0].(map[string]interface{}) - - publisher := planConfig["publisher"].(string) - name := planConfig["name"].(string) - product := planConfig["product"].(string) - - return &compute.Plan{ - Publisher: &publisher, - Name: &name, - Product: &product, - }, nil -} - -func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSProfile, error) { - osProfiles := d.Get("os_profile").(*schema.Set).List() - - osProfile := osProfiles[0].(map[string]interface{}) - - adminUsername := osProfile["admin_username"].(string) - adminPassword := osProfile["admin_password"].(string) - computerName := osProfile["computer_name"].(string) - - profile := &compute.OSProfile{ - AdminUsername: &adminUsername, - ComputerName: &computerName, - } - - if adminPassword != "" { - profile.AdminPassword = &adminPassword - } - - if _, ok := d.GetOk("os_profile_windows_config"); ok { - winConfig, err := expandAzureRmVirtualMachineOsProfileWindowsConfig(d) - if err != nil { - return nil, err - } - if winConfig != nil { - profile.WindowsConfiguration = winConfig - } - } - - if _, ok := d.GetOk("os_profile_linux_config"); ok { - linuxConfig, err := 
expandAzureRmVirtualMachineOsProfileLinuxConfig(d) - if err != nil { - return nil, err - } - if linuxConfig != nil { - profile.LinuxConfiguration = linuxConfig - } - } - - if _, ok := d.GetOk("os_profile_secrets"); ok { - secrets := expandAzureRmVirtualMachineOsProfileSecrets(d) - if secrets != nil { - profile.Secrets = secrets - } - } - - if v := osProfile["custom_data"].(string); v != "" { - v = base64Encode(v) - profile.CustomData = &v - } - - return profile, nil -} - -func expandAzureRmVirtualMachineOsProfileSecrets(d *schema.ResourceData) *[]compute.VaultSecretGroup { - secretsConfig := d.Get("os_profile_secrets").(*schema.Set).List() - secrets := make([]compute.VaultSecretGroup, 0, len(secretsConfig)) - - for _, secretConfig := range secretsConfig { - config := secretConfig.(map[string]interface{}) - sourceVaultId := config["source_vault_id"].(string) - - vaultSecretGroup := compute.VaultSecretGroup{ - SourceVault: &compute.SubResource{ - ID: &sourceVaultId, - }, - } - - if v := config["vault_certificates"]; v != nil { - certsConfig := v.([]interface{}) - certs := make([]compute.VaultCertificate, 0, len(certsConfig)) - for _, certConfig := range certsConfig { - config := certConfig.(map[string]interface{}) - - certUrl := config["certificate_url"].(string) - cert := compute.VaultCertificate{ - CertificateURL: &certUrl, - } - if v := config["certificate_store"].(string); v != "" { - cert.CertificateStore = &v - } - - certs = append(certs, cert) - } - vaultSecretGroup.VaultCertificates = &certs - } - - secrets = append(secrets, vaultSecretGroup) - } - - return &secrets -} - -func expandAzureRmVirtualMachineOsProfileLinuxConfig(d *schema.ResourceData) (*compute.LinuxConfiguration, error) { - osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*schema.Set).List() - - linuxConfig := osProfilesLinuxConfig[0].(map[string]interface{}) - disablePasswordAuth := linuxConfig["disable_password_authentication"].(bool) - - config := &compute.LinuxConfiguration{ - 
DisablePasswordAuthentication: &disablePasswordAuth, - } - - linuxKeys := linuxConfig["ssh_keys"].([]interface{}) - sshPublicKeys := []compute.SSHPublicKey{} - for _, key := range linuxKeys { - - sshKey, ok := key.(map[string]interface{}) - if !ok { - continue - } - path := sshKey["path"].(string) - keyData := sshKey["key_data"].(string) - - sshPublicKey := compute.SSHPublicKey{ - Path: &path, - KeyData: &keyData, - } - - sshPublicKeys = append(sshPublicKeys, sshPublicKey) - } - - if len(sshPublicKeys) > 0 { - config.SSH = &compute.SSHConfiguration{ - PublicKeys: &sshPublicKeys, - } - } - - return config, nil -} - -func expandAzureRmVirtualMachineOsProfileWindowsConfig(d *schema.ResourceData) (*compute.WindowsConfiguration, error) { - osProfilesWindowsConfig := d.Get("os_profile_windows_config").(*schema.Set).List() - - osProfileConfig := osProfilesWindowsConfig[0].(map[string]interface{}) - config := &compute.WindowsConfiguration{} - - if v := osProfileConfig["provision_vm_agent"]; v != nil { - provision := v.(bool) - config.ProvisionVMAgent = &provision - } - - if v := osProfileConfig["enable_automatic_upgrades"]; v != nil { - update := v.(bool) - config.EnableAutomaticUpdates = &update - } - - if v := osProfileConfig["winrm"]; v != nil { - winRm := v.([]interface{}) - if len(winRm) > 0 { - winRmListeners := make([]compute.WinRMListener, 0, len(winRm)) - for _, winRmConfig := range winRm { - config := winRmConfig.(map[string]interface{}) - - protocol := config["protocol"].(string) - winRmListener := compute.WinRMListener{ - Protocol: compute.ProtocolTypes(protocol), - } - if v := config["certificate_url"].(string); v != "" { - winRmListener.CertificateURL = &v - } - - winRmListeners = append(winRmListeners, winRmListener) - } - config.WinRM = &compute.WinRMConfiguration{ - Listeners: &winRmListeners, - } - } - } - if v := osProfileConfig["additional_unattend_config"]; v != nil { - additionalConfig := v.([]interface{}) - if len(additionalConfig) > 0 { - 
additionalConfigContent := make([]compute.AdditionalUnattendContent, 0, len(additionalConfig)) - for _, addConfig := range additionalConfig { - config := addConfig.(map[string]interface{}) - pass := config["pass"].(string) - component := config["component"].(string) - settingName := config["setting_name"].(string) - content := config["content"].(string) - - addContent := compute.AdditionalUnattendContent{ - PassName: compute.PassNames(pass), - ComponentName: compute.ComponentNames(component), - SettingName: compute.SettingNames(settingName), - Content: &content, - } - - additionalConfigContent = append(additionalConfigContent, addContent) - } - config.AdditionalUnattendContent = &additionalConfigContent - } - } - return config, nil -} - -func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.DataDisk, error) { - disks := d.Get("storage_data_disk").([]interface{}) - data_disks := make([]compute.DataDisk, 0, len(disks)) - for _, disk_config := range disks { - config := disk_config.(map[string]interface{}) - - name := config["name"].(string) - createOption := config["create_option"].(string) - vhdURI := config["vhd_uri"].(string) - managedDiskType := config["managed_disk_type"].(string) - managedDiskID := config["managed_disk_id"].(string) - lun := int32(config["lun"].(int)) - - data_disk := compute.DataDisk{ - Name: &name, - Lun: &lun, - CreateOption: compute.DiskCreateOptionTypes(createOption), - } - - if vhdURI != "" { - data_disk.Vhd = &compute.VirtualHardDisk{ - URI: &vhdURI, - } - } - - managedDisk := &compute.ManagedDiskParameters{} - - if managedDiskType != "" { - managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - data_disk.ManagedDisk = managedDisk - } - - if managedDiskID != "" { - managedDisk.ID = &managedDiskID - data_disk.ManagedDisk = managedDisk - } - - //BEGIN: code to be removed after GH-13016 is merged - if vhdURI != "" && managedDiskID != "" { - return nil, fmt.Errorf("[ERROR] Conflict between 
`vhd_uri` and `managed_disk_id` (only one or the other can be used)") - } - if vhdURI != "" && managedDiskType != "" { - return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_type` (only one or the other can be used)") - } - //END: code to be removed after GH-13016 is merged - if managedDiskID == "" && strings.EqualFold(string(data_disk.CreateOption), string(compute.Attach)) { - return nil, fmt.Errorf("[ERROR] Must specify which disk to attach") - } - - if v := config["caching"].(string); v != "" { - data_disk.Caching = compute.CachingTypes(v) - } - - if v := config["disk_size_gb"]; v != nil { - diskSize := int32(config["disk_size_gb"].(int)) - data_disk.DiskSizeGB = &diskSize - } - - data_disks = append(data_disks, data_disk) - } - - return data_disks, nil -} - -func expandAzureRmVirtualMachineDiagnosticsProfile(d *schema.ResourceData) *compute.DiagnosticsProfile { - bootDiagnostics := d.Get("boot_diagnostics").([]interface{}) - - diagnosticsProfile := &compute.DiagnosticsProfile{} - if len(bootDiagnostics) > 0 { - bootDiagnostic := bootDiagnostics[0].(map[string]interface{}) - - diagnostic := &compute.BootDiagnostics{ - Enabled: riviera.Bool(bootDiagnostic["enabled"].(bool)), - StorageURI: riviera.String(bootDiagnostic["storage_uri"].(string)), - } - - diagnosticsProfile.BootDiagnostics = diagnostic - - return diagnosticsProfile - } - - return nil -} - -func expandAzureRmVirtualMachineImageReference(d *schema.ResourceData) (*compute.ImageReference, error) { - storageImageRefs := d.Get("storage_image_reference").(*schema.Set).List() - - storageImageRef := storageImageRefs[0].(map[string]interface{}) - - publisher := storageImageRef["publisher"].(string) - offer := storageImageRef["offer"].(string) - sku := storageImageRef["sku"].(string) - version := storageImageRef["version"].(string) - - return &compute.ImageReference{ - Publisher: &publisher, - Offer: &offer, - Sku: &sku, - Version: &version, - }, nil -} - -func 
expandAzureRmVirtualMachineNetworkProfile(d *schema.ResourceData) compute.NetworkProfile { - nicIds := d.Get("network_interface_ids").(*schema.Set).List() - primaryNicId := d.Get("primary_network_interface_id").(string) - network_interfaces := make([]compute.NetworkInterfaceReference, 0, len(nicIds)) - - network_profile := compute.NetworkProfile{} - - for _, nic := range nicIds { - id := nic.(string) - primary := id == primaryNicId - - network_interface := compute.NetworkInterfaceReference{ - ID: &id, - NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{ - Primary: &primary, - }, - } - network_interfaces = append(network_interfaces, network_interface) - } - - network_profile.NetworkInterfaces = &network_interfaces - - return network_profile -} - -func expandAzureRmVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDisk, error) { - disks := d.Get("storage_os_disk").(*schema.Set).List() - - config := disks[0].(map[string]interface{}) - - name := config["name"].(string) - imageURI := config["image_uri"].(string) - createOption := config["create_option"].(string) - vhdURI := config["vhd_uri"].(string) - managedDiskType := config["managed_disk_type"].(string) - managedDiskID := config["managed_disk_id"].(string) - - osDisk := &compute.OSDisk{ - Name: &name, - CreateOption: compute.DiskCreateOptionTypes(createOption), - } - - if vhdURI != "" { - osDisk.Vhd = &compute.VirtualHardDisk{ - URI: &vhdURI, - } - } - - managedDisk := &compute.ManagedDiskParameters{} - - if managedDiskType != "" { - managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - osDisk.ManagedDisk = managedDisk - } - - if managedDiskID != "" { - managedDisk.ID = &managedDiskID - osDisk.ManagedDisk = managedDisk - } - - //BEGIN: code to be removed after GH-13016 is merged - if vhdURI != "" && managedDiskID != "" { - return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_id` (only one or the other can be used)") - } - if 
vhdURI != "" && managedDiskType != "" { - return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_type` (only one or the other can be used)") - } - //END: code to be removed after GH-13016 is merged - if managedDiskID == "" && vhdURI == "" && strings.EqualFold(string(osDisk.CreateOption), string(compute.Attach)) { - return nil, fmt.Errorf("[ERROR] Must specify `vhd_uri` or `managed_disk_id` to attach") - } - - if v := config["image_uri"].(string); v != "" { - osDisk.Image = &compute.VirtualHardDisk{ - URI: &imageURI, - } - } - - if v := config["os_type"].(string); v != "" { - if v == "linux" { - osDisk.OsType = compute.Linux - } else if v == "windows" { - osDisk.OsType = compute.Windows - } else { - return nil, fmt.Errorf("[ERROR] os_type must be 'linux' or 'windows'") - } - } - - if v := config["caching"].(string); v != "" { - osDisk.Caching = compute.CachingTypes(v) - } - - if v := config["disk_size_gb"].(int); v != 0 { - diskSize := int32(v) - osDisk.DiskSizeGB = &diskSize - } - - return osDisk, nil -} - -func findStorageAccountResourceGroup(meta interface{}, storageAccountName string) (string, error) { - client := meta.(*ArmClient).resourceFindClient - filter := fmt.Sprintf("name eq '%s' and resourceType eq 'Microsoft.Storage/storageAccounts'", storageAccountName) - expand := "" - var pager *int32 - - rf, err := client.List(filter, expand, pager) - if err != nil { - return "", fmt.Errorf("Error making resource request for query %s: %s", filter, err) - } - - results := *rf.Value - if len(results) != 1 { - return "", fmt.Errorf("Wrong number of results making resource request for query %s: %d", filter, len(results)) - } - - id, err := parseAzureResourceID(*results[0].ID) - if err != nil { - return "", err - } - - return id.ResourceGroup, nil -} diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_extension.go b/builtin/providers/azurerm/resource_arm_virtual_machine_extension.go deleted file mode 100644 index 
49155cd39..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_extension.go +++ /dev/null @@ -1,203 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" -) - -func resourceArmVirtualMachineExtensions() *schema.Resource { - return &schema.Resource{ - Create: resourceArmVirtualMachineExtensionsCreate, - Read: resourceArmVirtualMachineExtensionsRead, - Update: resourceArmVirtualMachineExtensionsCreate, - Delete: resourceArmVirtualMachineExtensionsDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "virtual_machine_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "publisher": { - Type: schema.TypeString, - Required: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - }, - - "type_handler_version": { - Type: schema.TypeString, - Required: true, - }, - - "auto_upgrade_minor_version": { - Type: schema.TypeBool, - Optional: true, - }, - - "settings": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.ValidateJsonString, - DiffSuppressFunc: structure.SuppressJsonDiff, - }, - - // due to the sensitive nature, these are not returned by the API - "protected_settings": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ValidateFunc: validation.ValidateJsonString, - DiffSuppressFunc: structure.SuppressJsonDiff, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmVirtualMachineExtensionsCreate(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*ArmClient).vmExtensionClient - - name := d.Get("name").(string) - location := d.Get("location").(string) - vmName := d.Get("virtual_machine_name").(string) - resGroup := d.Get("resource_group_name").(string) - publisher := d.Get("publisher").(string) - extensionType := d.Get("type").(string) - typeHandlerVersion := d.Get("type_handler_version").(string) - autoUpgradeMinor := d.Get("auto_upgrade_minor_version").(bool) - tags := d.Get("tags").(map[string]interface{}) - - extension := compute.VirtualMachineExtension{ - Location: &location, - VirtualMachineExtensionProperties: &compute.VirtualMachineExtensionProperties{ - Publisher: &publisher, - Type: &extensionType, - TypeHandlerVersion: &typeHandlerVersion, - AutoUpgradeMinorVersion: &autoUpgradeMinor, - }, - Tags: expandTags(tags), - } - - if settingsString := d.Get("settings").(string); settingsString != "" { - settings, err := structure.ExpandJsonFromString(settingsString) - if err != nil { - return fmt.Errorf("unable to parse settings: %s", err) - } - extension.VirtualMachineExtensionProperties.Settings = &settings - } - - if protectedSettingsString := d.Get("protected_settings").(string); protectedSettingsString != "" { - protectedSettings, err := structure.ExpandJsonFromString(protectedSettingsString) - if err != nil { - return fmt.Errorf("unable to parse protected_settings: %s", err) - } - extension.VirtualMachineExtensionProperties.ProtectedSettings = &protectedSettings - } - - _, error := client.CreateOrUpdate(resGroup, vmName, name, extension, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := client.Get(resGroup, vmName, name, "") - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read Virtual Machine Extension %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmVirtualMachineExtensionsRead(d, meta) -} - -func resourceArmVirtualMachineExtensionsRead(d *schema.ResourceData, meta 
interface{}) error { - client := meta.(*ArmClient).vmExtensionClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - vmName := id.Path["virtualMachines"] - name := id.Path["extensions"] - - resp, err := client.Get(resGroup, vmName, name, "") - - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Virtual Machine Extension %s: %s", name, err) - } - - d.Set("name", resp.Name) - d.Set("location", azureRMNormalizeLocation(*resp.Location)) - d.Set("virtual_machine_name", vmName) - d.Set("resource_group_name", resGroup) - d.Set("publisher", resp.VirtualMachineExtensionProperties.Publisher) - d.Set("type", resp.VirtualMachineExtensionProperties.Type) - d.Set("type_handler_version", resp.VirtualMachineExtensionProperties.TypeHandlerVersion) - d.Set("auto_upgrade_minor_version", resp.VirtualMachineExtensionProperties.AutoUpgradeMinorVersion) - - if resp.VirtualMachineExtensionProperties.Settings != nil { - settings, err := structure.FlattenJsonToString(*resp.VirtualMachineExtensionProperties.Settings) - if err != nil { - return fmt.Errorf("unable to parse settings from response: %s", err) - } - d.Set("settings", settings) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmVirtualMachineExtensionsDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).vmExtensionClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["extensions"] - vmName := id.Path["virtualMachines"] - - _, error := client.Delete(resGroup, vmName, name, make(chan struct{})) - err = <-error - - return err -} diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_extension_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_extension_test.go deleted file mode 100644 index 37cd0e6cf..000000000 --- 
a/builtin/providers/azurerm/resource_arm_virtual_machine_extension_test.go +++ /dev/null @@ -1,561 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "regexp" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMVirtualMachineExtension_basic(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualMachineExtension_basic, ri, ri, ri, ri, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVirtualMachineExtension_basicUpdate, ri, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineExtensionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExtensionExists("azurerm_virtual_machine_extension.test"), - resource.TestMatchResourceAttr("azurerm_virtual_machine_extension.test", "settings", regexp.MustCompile("hostname")), - ), - }, - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExtensionExists("azurerm_virtual_machine_extension.test"), - resource.TestMatchResourceAttr("azurerm_virtual_machine_extension.test", "settings", regexp.MustCompile("whoami")), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineExtension_concurrent(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineExtension_concurrent, ri, ri, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineExtensionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMVirtualMachineExtensionExists("azurerm_virtual_machine_extension.test"), - testCheckAzureRMVirtualMachineExtensionExists("azurerm_virtual_machine_extension.test2"), - resource.TestMatchResourceAttr("azurerm_virtual_machine_extension.test", "settings", regexp.MustCompile("hostname")), - resource.TestMatchResourceAttr("azurerm_virtual_machine_extension.test2", "settings", regexp.MustCompile("whoami")), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineExtension_linuxDiagnostics(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineExtension_linuxDiagnostics, ri, ri, ri, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineExtensionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExtensionExists("azurerm_virtual_machine_extension.test"), - ), - }, - }, - }) -} - -func testCheckAzureRMVirtualMachineExtensionExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - vmName := rs.Primary.Attributes["virtual_machine_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - conn := testAccProvider.Meta().(*ArmClient).vmExtensionClient - - resp, err := conn.Get(resourceGroup, vmName, name, "") - if err != nil { - return fmt.Errorf("Bad: Get on vmExtensionClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: VirtualMachine Extension %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineExtensionDestroy(s *terraform.State) 
error { - conn := testAccProvider.Meta().(*ArmClient).vmExtensionClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_virtual_machine_extension" { - continue - } - - name := rs.Primary.Attributes["name"] - vmName := rs.Primary.Attributes["virtual_machine_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, vmName, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Virtual Machine Extension still exists:\n%#v", resp.VirtualMachineExtensionProperties) - } - } - - return nil -} - -var testAccAzureRMVirtualMachineExtension_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - 
name = "acctvm-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_A0" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "hostname%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } -} - -resource "azurerm_virtual_machine_extension" "test" { - name = "acctvme-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_machine_name = "${azurerm_virtual_machine.test.name}" - publisher = "Microsoft.Azure.Extensions" - type = "CustomScript" - type_handler_version = "2.0" - - settings = < 0 { - ssh_keys := make([]map[string]interface{}, 0, len(*config.SSH.PublicKeys)) - for _, i := range *config.SSH.PublicKeys { - key := make(map[string]interface{}) - key["path"] = *i.Path - - if i.KeyData != nil { - key["key_data"] = *i.KeyData - } - - ssh_keys = append(ssh_keys, key) - } - - result["ssh_keys"] = ssh_keys - } - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(config *compute.WindowsConfiguration) []interface{} { - result := make(map[string]interface{}) - - if config.ProvisionVMAgent != nil { - result["provision_vm_agent"] = *config.ProvisionVMAgent - } - - if config.EnableAutomaticUpdates != nil { - result["enable_automatic_upgrades"] = *config.EnableAutomaticUpdates - } - - if config.WinRM != nil { - listeners := make([]map[string]interface{}, 0, len(*config.WinRM.Listeners)) - for _, i := range *config.WinRM.Listeners { - listener 
:= make(map[string]interface{}) - listener["protocol"] = i.Protocol - - if i.CertificateURL != nil { - listener["certificate_url"] = *i.CertificateURL - } - - listeners = append(listeners, listener) - } - - result["winrm"] = listeners - } - - if config.AdditionalUnattendContent != nil { - content := make([]map[string]interface{}, 0, len(*config.AdditionalUnattendContent)) - for _, i := range *config.AdditionalUnattendContent { - c := make(map[string]interface{}) - c["pass"] = i.PassName - c["component"] = i.ComponentName - c["setting_name"] = i.SettingName - c["content"] = *i.Content - - content = append(content, c) - } - - result["additional_unattend_config"] = content - } - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineScaleSetOsProfileSecrets(secrets *[]compute.VaultSecretGroup) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(*secrets)) - for _, secret := range *secrets { - s := map[string]interface{}{ - "source_vault_id": *secret.SourceVault.ID, - } - - if secret.VaultCertificates != nil { - certs := make([]map[string]interface{}, 0, len(*secret.VaultCertificates)) - for _, cert := range *secret.VaultCertificates { - vaultCert := make(map[string]interface{}) - vaultCert["certificate_url"] = *cert.CertificateURL - - if cert.CertificateStore != nil { - vaultCert["certificate_store"] = *cert.CertificateStore - } - - certs = append(certs, vaultCert) - } - - s["vault_certificates"] = certs - } - - result = append(result, s) - } - return result -} - -func flattenAzureRmVirtualMachineScaleSetNetworkProfile(profile *compute.VirtualMachineScaleSetNetworkProfile) []map[string]interface{} { - networkConfigurations := profile.NetworkInterfaceConfigurations - result := make([]map[string]interface{}, 0, len(*networkConfigurations)) - for _, netConfig := range *networkConfigurations { - s := map[string]interface{}{ - "name": *netConfig.Name, - "primary": 
*netConfig.VirtualMachineScaleSetNetworkConfigurationProperties.Primary, - } - - if netConfig.VirtualMachineScaleSetNetworkConfigurationProperties.IPConfigurations != nil { - ipConfigs := make([]map[string]interface{}, 0, len(*netConfig.VirtualMachineScaleSetNetworkConfigurationProperties.IPConfigurations)) - for _, ipConfig := range *netConfig.VirtualMachineScaleSetNetworkConfigurationProperties.IPConfigurations { - config := make(map[string]interface{}) - config["name"] = *ipConfig.Name - - properties := ipConfig.VirtualMachineScaleSetIPConfigurationProperties - - if ipConfig.VirtualMachineScaleSetIPConfigurationProperties.Subnet != nil { - config["subnet_id"] = *properties.Subnet.ID - } - - if properties.LoadBalancerBackendAddressPools != nil { - addressPools := make([]interface{}, 0, len(*properties.LoadBalancerBackendAddressPools)) - for _, pool := range *properties.LoadBalancerBackendAddressPools { - addressPools = append(addressPools, *pool.ID) - } - config["load_balancer_backend_address_pool_ids"] = schema.NewSet(schema.HashString, addressPools) - } - - if properties.LoadBalancerInboundNatPools != nil { - inboundNatPools := make([]interface{}, 0, len(*properties.LoadBalancerInboundNatPools)) - for _, rule := range *properties.LoadBalancerInboundNatPools { - inboundNatPools = append(inboundNatPools, *rule.ID) - } - config["load_balancer_inbound_nat_rules_ids"] = schema.NewSet(schema.HashString, inboundNatPools) - } - - ipConfigs = append(ipConfigs, config) - } - - s["ip_configuration"] = ipConfigs - } - - result = append(result, s) - } - - return result -} - -func flattenAzureRMVirtualMachineScaleSetOsProfile(profile *compute.VirtualMachineScaleSetOSProfile) ([]interface{}, error) { - result := make(map[string]interface{}) - - result["computer_name_prefix"] = *profile.ComputerNamePrefix - result["admin_username"] = *profile.AdminUsername - - if profile.CustomData != nil { - result["custom_data"] = *profile.CustomData - } - - return []interface{}{result}, nil 
-} - -func flattenAzureRmVirtualMachineScaleSetStorageProfileOSDisk(profile *compute.VirtualMachineScaleSetOSDisk) []interface{} { - result := make(map[string]interface{}) - - if profile.Name != nil { - result["name"] = *profile.Name - } - - if profile.Image != nil { - result["image"] = *profile.Image.URI - } - - if profile.VhdContainers != nil { - containers := make([]interface{}, 0, len(*profile.VhdContainers)) - for _, container := range *profile.VhdContainers { - containers = append(containers, container) - } - result["vhd_containers"] = schema.NewSet(schema.HashString, containers) - } - - if profile.ManagedDisk != nil { - result["managed_disk_type"] = string(profile.ManagedDisk.StorageAccountType) - } - - result["caching"] = profile.Caching - result["create_option"] = profile.CreateOption - result["os_type"] = profile.OsType - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineScaleSetStorageProfileDataDisk(disks *[]compute.VirtualMachineScaleSetDataDisk) interface{} { - result := make([]interface{}, len(*disks)) - for i, disk := range *disks { - l := make(map[string]interface{}) - if disk.ManagedDisk != nil { - l["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType) - } - - l["create_option"] = disk.CreateOption - l["caching"] = string(disk.Caching) - if disk.DiskSizeGB != nil { - l["disk_size_gb"] = *disk.DiskSizeGB - } - l["lun"] = *disk.Lun - - result[i] = l - } - return result -} - -func flattenAzureRmVirtualMachineScaleSetStorageProfileImageReference(profile *compute.ImageReference) []interface{} { - result := make(map[string]interface{}) - result["publisher"] = *profile.Publisher - result["offer"] = *profile.Offer - result["sku"] = *profile.Sku - result["version"] = *profile.Version - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineScaleSetSku(sku *compute.Sku) []interface{} { - result := make(map[string]interface{}) - result["name"] = *sku.Name - result["capacity"] = *sku.Capacity - - if *sku.Tier 
!= "" { - result["tier"] = *sku.Tier - } - - return []interface{}{result} -} - -func flattenAzureRmVirtualMachineScaleSetExtensionProfile(profile *compute.VirtualMachineScaleSetExtensionProfile) ([]map[string]interface{}, error) { - if profile.Extensions == nil { - return nil, nil - } - - result := make([]map[string]interface{}, 0, len(*profile.Extensions)) - for _, extension := range *profile.Extensions { - e := make(map[string]interface{}) - e["name"] = *extension.Name - properties := extension.VirtualMachineScaleSetExtensionProperties - if properties != nil { - e["publisher"] = *properties.Publisher - e["type"] = *properties.Type - e["type_handler_version"] = *properties.TypeHandlerVersion - if properties.AutoUpgradeMinorVersion != nil { - e["auto_upgrade_minor_version"] = *properties.AutoUpgradeMinorVersion - } - - if properties.Settings != nil { - settings, err := structure.FlattenJsonToString(*properties.Settings) - if err != nil { - return nil, err - } - e["settings"] = settings - } - } - - result = append(result, e) - } - - return result, nil -} - -func resourceArmVirtualMachineScaleSetStorageProfileImageReferenceHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["publisher"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["offer"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["sku"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["version"].(string))) - - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetSkuHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - if m["tier"] != nil { - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["tier"].(string)))) - } - buf.WriteString(fmt.Sprintf("%d-", m["capacity"].(int))) - - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetStorageProfileOsDiskHash(v interface{}) int { - var buf 
bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - - if m["vhd_containers"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["vhd_containers"].(*schema.Set).List())) - } - - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetNetworkConfigurationHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["primary"].(bool))) - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetsOsProfileHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["computer_name_prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["admin_username"].(string))) - if m["custom_data"] != nil { - customData := m["custom_data"].(string) - if !isBase64Encoded(customData) { - customData = base64Encode(customData) - } - buf.WriteString(fmt.Sprintf("%s-", customData)) - } - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetOsProfileLinuxConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["disable_password_authentication"].(bool))) - - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetOsProfileLWindowsConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["provision_vm_agent"] != nil { - buf.WriteString(fmt.Sprintf("%t-", m["provision_vm_agent"].(bool))) - } - if m["enable_automatic_upgrades"] != nil { - buf.WriteString(fmt.Sprintf("%t-", m["enable_automatic_upgrades"].(bool))) - } - return hashcode.String(buf.String()) -} - -func resourceArmVirtualMachineScaleSetExtensionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - 
buf.WriteString(fmt.Sprintf("%s-", m["publisher"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["type_handler_version"].(string))) - if m["auto_upgrade_minor_version"] != nil { - buf.WriteString(fmt.Sprintf("%t-", m["auto_upgrade_minor_version"].(bool))) - } - - return hashcode.String(buf.String()) -} - -func expandVirtualMachineScaleSetSku(d *schema.ResourceData) (*compute.Sku, error) { - skuConfig := d.Get("sku").(*schema.Set).List() - - config := skuConfig[0].(map[string]interface{}) - - name := config["name"].(string) - tier := config["tier"].(string) - capacity := int64(config["capacity"].(int)) - - sku := &compute.Sku{ - Name: &name, - Capacity: &capacity, - } - - if tier != "" { - sku.Tier = &tier - } - - return sku, nil -} - -func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) *compute.VirtualMachineScaleSetNetworkProfile { - scaleSetNetworkProfileConfigs := d.Get("network_profile").(*schema.Set).List() - networkProfileConfig := make([]compute.VirtualMachineScaleSetNetworkConfiguration, 0, len(scaleSetNetworkProfileConfigs)) - - for _, npProfileConfig := range scaleSetNetworkProfileConfigs { - config := npProfileConfig.(map[string]interface{}) - - name := config["name"].(string) - primary := config["primary"].(bool) - - ipConfigurationConfigs := config["ip_configuration"].([]interface{}) - ipConfigurations := make([]compute.VirtualMachineScaleSetIPConfiguration, 0, len(ipConfigurationConfigs)) - for _, ipConfigConfig := range ipConfigurationConfigs { - ipconfig := ipConfigConfig.(map[string]interface{}) - name := ipconfig["name"].(string) - subnetId := ipconfig["subnet_id"].(string) - - ipConfiguration := compute.VirtualMachineScaleSetIPConfiguration{ - Name: &name, - VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{ - Subnet: &compute.APIEntityReference{ - ID: &subnetId, - }, - }, - } - - if v := 
ipconfig["load_balancer_backend_address_pool_ids"]; v != nil { - pools := v.(*schema.Set).List() - resources := make([]compute.SubResource, 0, len(pools)) - for _, p := range pools { - id := p.(string) - resources = append(resources, compute.SubResource{ - ID: &id, - }) - } - ipConfiguration.LoadBalancerBackendAddressPools = &resources - } - - if v := ipconfig["load_balancer_inbound_nat_rules_ids"]; v != nil { - rules := v.(*schema.Set).List() - rulesResources := make([]compute.SubResource, 0, len(rules)) - for _, m := range rules { - id := m.(string) - rulesResources = append(rulesResources, compute.SubResource{ - ID: &id, - }) - } - ipConfiguration.LoadBalancerInboundNatPools = &rulesResources - } - - ipConfigurations = append(ipConfigurations, ipConfiguration) - } - - nProfile := compute.VirtualMachineScaleSetNetworkConfiguration{ - Name: &name, - VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{ - Primary: &primary, - IPConfigurations: &ipConfigurations, - }, - } - - networkProfileConfig = append(networkProfileConfig, nProfile) - } - - return &compute.VirtualMachineScaleSetNetworkProfile{ - NetworkInterfaceConfigurations: &networkProfileConfig, - } -} - -func expandAzureRMVirtualMachineScaleSetsOsProfile(d *schema.ResourceData) (*compute.VirtualMachineScaleSetOSProfile, error) { - osProfileConfigs := d.Get("os_profile").(*schema.Set).List() - - osProfileConfig := osProfileConfigs[0].(map[string]interface{}) - namePrefix := osProfileConfig["computer_name_prefix"].(string) - username := osProfileConfig["admin_username"].(string) - password := osProfileConfig["admin_password"].(string) - customData := osProfileConfig["custom_data"].(string) - - osProfile := &compute.VirtualMachineScaleSetOSProfile{ - ComputerNamePrefix: &namePrefix, - AdminUsername: &username, - } - - if password != "" { - osProfile.AdminPassword = &password - } - - if customData != "" { - customData = base64Encode(customData) - 
osProfile.CustomData = &customData - } - - if _, ok := d.GetOk("os_profile_secrets"); ok { - secrets := expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d) - if secrets != nil { - osProfile.Secrets = secrets - } - } - - if _, ok := d.GetOk("os_profile_linux_config"); ok { - linuxConfig, err := expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d) - if err != nil { - return nil, err - } - osProfile.LinuxConfiguration = linuxConfig - } - - if _, ok := d.GetOk("os_profile_windows_config"); ok { - winConfig, err := expandAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(d) - if err != nil { - return nil, err - } - if winConfig != nil { - osProfile.WindowsConfiguration = winConfig - } - } - - return osProfile, nil -} - -func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *schema.ResourceData) (*compute.VirtualMachineScaleSetOSDisk, error) { - osDiskConfigs := d.Get("storage_profile_os_disk").(*schema.Set).List() - - osDiskConfig := osDiskConfigs[0].(map[string]interface{}) - name := osDiskConfig["name"].(string) - image := osDiskConfig["image"].(string) - vhd_containers := osDiskConfig["vhd_containers"].(*schema.Set).List() - caching := osDiskConfig["caching"].(string) - osType := osDiskConfig["os_type"].(string) - createOption := osDiskConfig["create_option"].(string) - managedDiskType := osDiskConfig["managed_disk_type"].(string) - - osDisk := &compute.VirtualMachineScaleSetOSDisk{ - Name: &name, - Caching: compute.CachingTypes(caching), - OsType: compute.OperatingSystemTypes(osType), - CreateOption: compute.DiskCreateOptionTypes(createOption), - } - - if image != "" { - osDisk.Image = &compute.VirtualHardDisk{ - URI: &image, - } - } - - if len(vhd_containers) > 0 { - var vhdContainers []string - for _, v := range vhd_containers { - str := v.(string) - vhdContainers = append(vhdContainers, str) - } - osDisk.VhdContainers = &vhdContainers - } - - managedDisk := &compute.VirtualMachineScaleSetManagedDiskParameters{} - - if managedDiskType != "" { - 
if name == "" { - osDisk.Name = nil - managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - osDisk.ManagedDisk = managedDisk - } else { - return nil, fmt.Errorf("[ERROR] Conflict between `name` and `managed_disk_type` on `storage_profile_os_disk` (please set name to blank)") - } - } - - //BEGIN: code to be removed after GH-13016 is merged - if image != "" && managedDiskType != "" { - return nil, fmt.Errorf("[ERROR] Conflict between `image` and `managed_disk_type` on `storage_profile_os_disk` (only one or the other can be used)") - } - - if len(vhd_containers) > 0 && managedDiskType != "" { - return nil, fmt.Errorf("[ERROR] Conflict between `vhd_containers` and `managed_disk_type` on `storage_profile_os_disk` (only one or the other can be used)") - } - //END: code to be removed after GH-13016 is merged - - return osDisk, nil -} - -func expandAzureRMVirtualMachineScaleSetsStorageProfileDataDisk(d *schema.ResourceData) ([]compute.VirtualMachineScaleSetDataDisk, error) { - disks := d.Get("storage_profile_data_disk").([]interface{}) - dataDisks := make([]compute.VirtualMachineScaleSetDataDisk, 0, len(disks)) - for _, diskConfig := range disks { - config := diskConfig.(map[string]interface{}) - - createOption := config["create_option"].(string) - managedDiskType := config["managed_disk_type"].(string) - lun := int32(config["lun"].(int)) - - dataDisk := compute.VirtualMachineScaleSetDataDisk{ - Lun: &lun, - CreateOption: compute.DiskCreateOptionTypes(createOption), - } - - managedDiskVMSS := &compute.VirtualMachineScaleSetManagedDiskParameters{} - - if managedDiskType != "" { - managedDiskVMSS.StorageAccountType = compute.StorageAccountTypes(managedDiskType) - } else { - managedDiskVMSS.StorageAccountType = compute.StorageAccountTypes(compute.StandardLRS) - } - - //assume that data disks in VMSS can only be Managed Disks - dataDisk.ManagedDisk = managedDiskVMSS - if v := config["caching"].(string); v != "" { - dataDisk.Caching = 
compute.CachingTypes(v) - } - - if v := config["disk_size_gb"]; v != nil { - diskSize := int32(config["disk_size_gb"].(int)) - dataDisk.DiskSizeGB = &diskSize - } - - dataDisks = append(dataDisks, dataDisk) - } - - return dataDisks, nil -} - -func expandAzureRmVirtualMachineScaleSetStorageProfileImageReference(d *schema.ResourceData) (*compute.ImageReference, error) { - storageImageRefs := d.Get("storage_profile_image_reference").(*schema.Set).List() - - storageImageRef := storageImageRefs[0].(map[string]interface{}) - - publisher := storageImageRef["publisher"].(string) - offer := storageImageRef["offer"].(string) - sku := storageImageRef["sku"].(string) - version := storageImageRef["version"].(string) - - return &compute.ImageReference{ - Publisher: &publisher, - Offer: &offer, - Sku: &sku, - Version: &version, - }, nil -} - -func expandAzureRmVirtualMachineScaleSetOsProfileLinuxConfig(d *schema.ResourceData) (*compute.LinuxConfiguration, error) { - osProfilesLinuxConfig := d.Get("os_profile_linux_config").(*schema.Set).List() - - linuxConfig := osProfilesLinuxConfig[0].(map[string]interface{}) - disablePasswordAuth := linuxConfig["disable_password_authentication"].(bool) - - linuxKeys := linuxConfig["ssh_keys"].([]interface{}) - sshPublicKeys := make([]compute.SSHPublicKey, 0, len(linuxKeys)) - for _, key := range linuxKeys { - if key == nil { - continue - } - sshKey := key.(map[string]interface{}) - path := sshKey["path"].(string) - keyData := sshKey["key_data"].(string) - - sshPublicKey := compute.SSHPublicKey{ - Path: &path, - KeyData: &keyData, - } - - sshPublicKeys = append(sshPublicKeys, sshPublicKey) - } - - config := &compute.LinuxConfiguration{ - DisablePasswordAuthentication: &disablePasswordAuth, - SSH: &compute.SSHConfiguration{ - PublicKeys: &sshPublicKeys, - }, - } - - return config, nil -} - -func expandAzureRmVirtualMachineScaleSetOsProfileWindowsConfig(d *schema.ResourceData) (*compute.WindowsConfiguration, error) { - osProfilesWindowsConfig := 
d.Get("os_profile_windows_config").(*schema.Set).List() - - osProfileConfig := osProfilesWindowsConfig[0].(map[string]interface{}) - config := &compute.WindowsConfiguration{} - - if v := osProfileConfig["provision_vm_agent"]; v != nil { - provision := v.(bool) - config.ProvisionVMAgent = &provision - } - - if v := osProfileConfig["enable_automatic_upgrades"]; v != nil { - update := v.(bool) - config.EnableAutomaticUpdates = &update - } - - if v := osProfileConfig["winrm"]; v != nil { - winRm := v.(*schema.Set).List() - if len(winRm) > 0 { - winRmListeners := make([]compute.WinRMListener, 0, len(winRm)) - for _, winRmConfig := range winRm { - config := winRmConfig.(map[string]interface{}) - - protocol := config["protocol"].(string) - winRmListener := compute.WinRMListener{ - Protocol: compute.ProtocolTypes(protocol), - } - if v := config["certificate_url"].(string); v != "" { - winRmListener.CertificateURL = &v - } - - winRmListeners = append(winRmListeners, winRmListener) - } - config.WinRM = &compute.WinRMConfiguration{ - Listeners: &winRmListeners, - } - } - } - if v := osProfileConfig["additional_unattend_config"]; v != nil { - additionalConfig := v.(*schema.Set).List() - if len(additionalConfig) > 0 { - additionalConfigContent := make([]compute.AdditionalUnattendContent, 0, len(additionalConfig)) - for _, addConfig := range additionalConfig { - config := addConfig.(map[string]interface{}) - pass := config["pass"].(string) - component := config["component"].(string) - settingName := config["setting_name"].(string) - content := config["content"].(string) - - addContent := compute.AdditionalUnattendContent{ - PassName: compute.PassNames(pass), - ComponentName: compute.ComponentNames(component), - SettingName: compute.SettingNames(settingName), - Content: &content, - } - - additionalConfigContent = append(additionalConfigContent, addContent) - } - config.AdditionalUnattendContent = &additionalConfigContent - } - } - return config, nil -} - -func 
expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d *schema.ResourceData) *[]compute.VaultSecretGroup { - secretsConfig := d.Get("os_profile_secrets").(*schema.Set).List() - secrets := make([]compute.VaultSecretGroup, 0, len(secretsConfig)) - - for _, secretConfig := range secretsConfig { - config := secretConfig.(map[string]interface{}) - sourceVaultId := config["source_vault_id"].(string) - - vaultSecretGroup := compute.VaultSecretGroup{ - SourceVault: &compute.SubResource{ - ID: &sourceVaultId, - }, - } - - if v := config["vault_certificates"]; v != nil { - certsConfig := v.([]interface{}) - certs := make([]compute.VaultCertificate, 0, len(certsConfig)) - for _, certConfig := range certsConfig { - config := certConfig.(map[string]interface{}) - - certUrl := config["certificate_url"].(string) - cert := compute.VaultCertificate{ - CertificateURL: &certUrl, - } - if v := config["certificate_store"].(string); v != "" { - cert.CertificateStore = &v - } - - certs = append(certs, cert) - } - vaultSecretGroup.VaultCertificates = &certs - } - - secrets = append(secrets, vaultSecretGroup) - } - - return &secrets -} - -func expandAzureRMVirtualMachineScaleSetExtensions(d *schema.ResourceData) (*compute.VirtualMachineScaleSetExtensionProfile, error) { - extensions := d.Get("extension").(*schema.Set).List() - resources := make([]compute.VirtualMachineScaleSetExtension, 0, len(extensions)) - for _, e := range extensions { - config := e.(map[string]interface{}) - name := config["name"].(string) - publisher := config["publisher"].(string) - t := config["type"].(string) - version := config["type_handler_version"].(string) - - extension := compute.VirtualMachineScaleSetExtension{ - Name: &name, - VirtualMachineScaleSetExtensionProperties: &compute.VirtualMachineScaleSetExtensionProperties{ - Publisher: &publisher, - Type: &t, - TypeHandlerVersion: &version, - }, - } - - if u := config["auto_upgrade_minor_version"]; u != nil { - upgrade := u.(bool) - 
extension.VirtualMachineScaleSetExtensionProperties.AutoUpgradeMinorVersion = &upgrade - } - - if s := config["settings"].(string); s != "" { - settings, err := structure.ExpandJsonFromString(s) - if err != nil { - return nil, fmt.Errorf("unable to parse settings: %s", err) - } - extension.VirtualMachineScaleSetExtensionProperties.Settings = &settings - } - - if s := config["protected_settings"].(string); s != "" { - protectedSettings, err := structure.ExpandJsonFromString(s) - if err != nil { - return nil, fmt.Errorf("unable to parse protected_settings: %s", err) - } - extension.VirtualMachineScaleSetExtensionProperties.ProtectedSettings = &protectedSettings - } - - resources = append(resources, extension) - } - - return &compute.VirtualMachineScaleSetExtensionProfile{ - Extensions: &resources, - }, nil -} diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go deleted file mode 100644 index 4687c3924..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set_test.go +++ /dev/null @@ -1,1529 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "regexp" - "testing" - - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMVirtualMachineScaleSet_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - - // single placement group 
should default to true - testCheckAzureRMVirtualMachineScaleSetSinglePlacementGroup("azurerm_virtual_machine_scale_set.test", true), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_singlePlacementGroupFalse(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_singlePlacementGroupFalse, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetSinglePlacementGroup("azurerm_virtual_machine_scale_set.test", false), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_linuxUpdated(t *testing.T) { - resourceName := "azurerm_virtual_machine_scale_set.test" - ri := acctest.RandInt() - config := testAccAzureRMVirtualMachineScaleSet_linux(ri) - updatedConfig := testAccAzureRMVirtualMachineScaleSet_linuxUpdated(ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists(resourceName), - ), - }, - { - Config: updatedConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists(resourceName), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_basicLinux_managedDisk(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basicLinux_managedDisk, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_basicLinux_disappears(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetDisappears("azurerm_virtual_machine_scale_set.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_loadBalancer(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate, ri, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetHasLoadbalancer("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_loadBalancerManagedDataDisks(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplateManagedDataDisks, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetHasDataDisks("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_overprovision(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetOverprovisionTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetOverprovision("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_extension(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetExtensionTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_multipleExtensions(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSetMultipleExtensionsTemplate, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - testCheckAzureRMVirtualMachineScaleSetExtension("azurerm_virtual_machine_scale_set.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_osDiskTypeConflict(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_osDiskTypeConflict, ri, ri, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - { - Config: config, - ExpectError: regexp.MustCompile("Conflict between `vhd_containers`"), - //Use below code instead once GH-13019 has been merged - //ExpectError: regexp.MustCompile("conflicts with storage_profile_os_disk.0.vhd_containers"), - }, - }, - }) -} - -func TestAccAzureRMVirtualMachineScaleSet_NonStandardCasing(t *testing.T) { - ri := acctest.RandInt() - config := testAccAzureRMVirtualMachineScaleSetNonStandardCasing(ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy, - Steps: []resource.TestStep{ - - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"), - ), - }, - - resource.TestStep{ - Config: config, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - -func testGetAzureRMVirtualMachineScaleSet(s *terraform.State, resourceName string) (result *compute.VirtualMachineScaleSet, err error) { - // Ensure we have enough information in state to look up in API - rs, ok 
:= s.RootModule().Resources[resourceName] - if !ok { - return nil, fmt.Errorf("Not found: %s", resourceName) - } - - // Name of the actual scale set - name := rs.Primary.Attributes["name"] - - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return nil, fmt.Errorf("Bad: no resource group found in state for virtual machine: scale set %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).vmScaleSetClient - - vmss, err := conn.Get(resourceGroup, name) - if err != nil { - return nil, fmt.Errorf("Bad: Get on vmScaleSetClient: %s", err) - } - - if vmss.StatusCode == http.StatusNotFound { - return nil, fmt.Errorf("Bad: VirtualMachineScaleSet %q (resource group: %q) does not exist", name, resourceGroup) - } - - return &vmss, err -} - -func testCheckAzureRMVirtualMachineScaleSetExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, err := testGetAzureRMVirtualMachineScaleSet(s, name) - return err - } -} - -func testCheckAzureRMVirtualMachineScaleSetDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual machine: scale set %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).vmScaleSetClient - - _, error := conn.Delete(resourceGroup, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on vmScaleSetClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineScaleSetDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).vmScaleSetClient - - for _, rs := range 
s.RootModule().Resources { - if rs.Type != "azurerm_virtual_machine_scale_set" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name) - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Virtual Machine Scale Set still exists:\n%#v", resp.VirtualMachineScaleSetProperties) - } - } - - return nil -} - -func testCheckAzureRMVirtualMachineScaleSetHasLoadbalancer(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resp, err := testGetAzureRMVirtualMachineScaleSet(s, name) - if err != nil { - return err - } - - n := resp.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations - if n == nil || len(*n) == 0 { - return fmt.Errorf("Bad: Could not get network interface configurations for scale set %v", name) - } - - ip := (*n)[0].IPConfigurations - if ip == nil || len(*ip) == 0 { - return fmt.Errorf("Bad: Could not get ip configurations for scale set %v", name) - } - - pools := (*ip)[0].LoadBalancerBackendAddressPools - if pools == nil || len(*pools) == 0 { - return fmt.Errorf("Bad: Load balancer backend pools is empty for scale set %v", name) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineScaleSetOverprovision(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resp, err := testGetAzureRMVirtualMachineScaleSet(s, name) - if err != nil { - return err - } - - if *resp.Overprovision { - return fmt.Errorf("Bad: Overprovision should have been false for scale set %v", name) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineScaleSetSinglePlacementGroup(name string, expectedSinglePlacementGroup bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - resp, err := testGetAzureRMVirtualMachineScaleSet(s, name) - if err != nil { - return err - } - - if *resp.SinglePlacementGroup != 
expectedSinglePlacementGroup { - return fmt.Errorf("Bad: Overprovision should have been %t for scale set %v", expectedSinglePlacementGroup, name) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineScaleSetExtension(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resp, err := testGetAzureRMVirtualMachineScaleSet(s, name) - if err != nil { - return err - } - - n := resp.VirtualMachineProfile.ExtensionProfile.Extensions - if n == nil || len(*n) == 0 { - return fmt.Errorf("Bad: Could not get extensions for scale set %v", name) - } - - return nil - } -} - -func testCheckAzureRMVirtualMachineScaleSetHasDataDisks(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual machine: scale set %s", name) - } - - conn := testAccProvider.Meta().(*ArmClient).vmScaleSetClient - resp, err := conn.Get(resourceGroup, name) - if err != nil { - return fmt.Errorf("Bad: Get on vmScaleSetClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: VirtualMachineScaleSet %q (resource group: %q) does not exist", name, resourceGroup) - } - - storageProfile := resp.VirtualMachineProfile.StorageProfile.DataDisks - if storageProfile == nil || len(*storageProfile) == 0 { - return fmt.Errorf("Bad: Could not get data disks configurations for scale set %v", name) - } - - return nil - } -} - -var testAccAzureRMVirtualMachineScaleSet_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - 
address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 2 - } - - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile-%d" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } - } - - storage_profile_os_disk { - name = "osDiskProfile" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku 
= "16.04-LTS" - version = "latest" - } -} -` - -var testAccAzureRMVirtualMachineScaleSet_singlePlacementGroupFalse = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%[1]d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%[1]d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%[1]d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%[1]d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%[1]d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - single_placement_group = false - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 2 - } - - os_profile { - computer_name_prefix = "testvm-%[1]d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile-%[1]d" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = 
"${azurerm_subnet.test.id}" - } - } - - storage_profile_os_disk { - name = "" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "Standard_LRS" - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "16.04-LTS" - version = "latest" - } -} -` - -func testAccAzureRMVirtualMachineScaleSet_linux(rInt int) string { - return fmt.Sprintf(` - resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West Europe" -} -resource "azurerm_virtual_network" "test" { - name = "acctestvn-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - address_space = ["10.0.0.0/8"] -} -resource "azurerm_subnet" "test" { - name = "acctestsn-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.1.0/24" -} -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_type = "Standard_LRS" -} -resource "azurerm_storage_container" "test" { - name = "acctestsc-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} -resource "azurerm_public_ip" "test" { - name = "acctestpip-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - public_ip_address_allocation = "static" -} -resource "azurerm_lb" "test" { - name = "acctestlb-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - frontend_ip_configuration { - name = "ip-address" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -resource "azurerm_lb_backend_address_pool" "test" { - name = "acctestbap-%d" - 
resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" -} -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctestvmss-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - upgrade_policy_mode = "Automatic" - sku { - name = "Standard_A0" - tier = "Standard" - capacity = "1" - } - os_profile { - computer_name_prefix = "prefix" - admin_username = "ubuntu" - admin_password = "password" - custom_data = "custom data!" - } - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = "/home/ubuntu/.ssh/authorized_keys" - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" - } - } - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] - } - } - storage_profile_os_disk { - name = "osDiskProfile" - caching = "ReadWrite" - create_option = "FromImage" - os_type = "linux" - vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] - } - storage_profile_image_reference { - publisher 
= "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } -} -`, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt) -} - -func testAccAzureRMVirtualMachineScaleSet_linuxUpdated(rInt int) string { - return fmt.Sprintf(` - resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "West Europe" -} -resource "azurerm_virtual_network" "test" { - name = "acctestvn-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - address_space = ["10.0.0.0/8"] -} -resource "azurerm_subnet" "test" { - name = "acctestsn-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.1.0/24" -} -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - account_type = "Standard_LRS" -} -resource "azurerm_storage_container" "test" { - name = "acctestsc-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} -resource "azurerm_public_ip" "test" { - name = "acctestpip-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - public_ip_address_allocation = "static" -} -resource "azurerm_lb" "test" { - name = "acctestlb-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - frontend_ip_configuration { - name = "ip-address" - public_ip_address_id = "${azurerm_public_ip.test.id}" - } -} -resource "azurerm_lb_backend_address_pool" "test" { - name = "acctestbap-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - loadbalancer_id = "${azurerm_lb.test.id}" -} -resource "azurerm_virtual_machine_scale_set" "test" 
{ - name = "acctestvmss-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "${azurerm_resource_group.test.location}" - upgrade_policy_mode = "Automatic" - sku { - name = "Standard_A0" - tier = "Standard" - capacity = "1" - } - os_profile { - computer_name_prefix = "prefix" - admin_username = "ubuntu" - admin_password = "password" - custom_data = "custom data!" - } - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = "/home/ubuntu/.ssh/authorized_keys" - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== hello@world.com" - } - } - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.test.id}"] - } - } - storage_profile_os_disk { - name = "osDiskProfile" - caching = "ReadWrite" - create_option = "FromImage" - os_type = "linux" - vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] - } - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - tags { - ThisIs = "a test" - } -} -`, rInt, rInt, rInt, rInt, rInt, 
rInt, rInt, rInt, rInt) -} - -var testAccAzureRMVirtualMachineScaleSet_basicLinux_managedDisk = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 2 - } - - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile-%d" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } - } - - storage_profile_os_disk { - name = "" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "Standard_LRS" - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "16.04-LTS" - version = "latest" - } -} -` - -var testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = 
"${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_lb" "test" { - name = "acctestlb-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - - frontend_ip_configuration { - name = "default" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "Dynamic" - } -} - -resource "azurerm_lb_backend_address_pool" "test" { - name = "test" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - loadbalancer_id = "${azurerm_lb.test.id}" -} - -resource "azurerm_lb_nat_pool" "test" { - resource_group_name = "${azurerm_resource_group.test.name}" - name = "ssh" - loadbalancer_id = "${azurerm_lb.test.id}" - protocol = "Tcp" - frontend_port_start = 50000 - frontend_port_end = 50119 - backend_port = 22 - frontend_ip_configuration_name = "default" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 1 - } - - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - load_balancer_backend_address_pool_ids = [ "${azurerm_lb_backend_address_pool.test.id}" ] - 
load_balancer_inbound_nat_rules_ids = ["${azurerm_lb_nat_pool.test.id}"] - } - } - - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "16.04-LTS" - version = "latest" - } -} -` - -var testAccAzureRMVirtualMachineScaleSetOverprovisionTemplate = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - overprovision = false - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 1 - } - - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = 
"TestIPConfiguration" - subnet_id = "${azurerm_subnet.test.id}" - } - } - - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "16.04-LTS" - version = "latest" - } -} -` - -var testAccAzureRMVirtualMachineScaleSetExtensionTemplate = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg-%d" - location = "southcentralus" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine_scale_set" "test" { - name = "acctvmss-%d" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - upgrade_policy_mode = "Manual" - overprovision = false - - sku { - name = "Standard_D1_v2" - tier = "Standard" - capacity = 1 - } - - os_profile { - computer_name_prefix = "testvm-%d" - admin_username = "myadmin" - admin_password = "Passwword1234" - } - - network_profile { - name = "TestNetworkProfile" - primary = true - ip_configuration { - name = "TestIPConfiguration" - 
subnet_id = "${azurerm_subnet.test.id}" - } - } - - storage_profile_os_disk { - name = "os-disk" - caching = "ReadWrite" - create_option = "FromImage" - vhd_containers = [ "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}" ] - } - - storage_profile_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "16.04-LTS" - version = "latest" - } - - extension { - name = "CustomScript" - publisher = "Microsoft.Azure.Extensions" - type = "CustomScript" - type_handler_version = "2.0" - auto_upgrade_minor_version = true - settings = <reboot1" - } - } - -} -` - -var testAccAzureRMVirtualMachine_diagnosticsProfile = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - 
name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "MicrosoftWindowsServer" - offer = "WindowsServer" - sku = "2012-Datacenter" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "winhost01" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - boot_diagnostics { - enabled = true - storage_uri = "${azurerm_storage_account.test.primary_blob_endpoint}" - } - - os_profile_windows_config { - winrm { - protocol = "http" - } - } -} - -` - -var testAccAzureRMVirtualMachine_winRMConfig = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource 
"azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "MicrosoftWindowsServer" - offer = "WindowsServer" - sku = "2012-Datacenter" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "winhost01" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_windows_config { - winrm { - protocol = "http" - } - } -} -` - -var testAccAzureRMVirtualMachine_withAvailabilitySet = ` - resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" - } - - resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - } - - resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" - } - - resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } - } - - resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name 
= "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } - } - - resource "azurerm_availability_set" "test" { - name = "availabilityset%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - - resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" - } - - resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - availability_set_id = "${azurerm_availability_set.test.id}" - delete_os_disk_on_termination = true - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" 
- } - - os_profile_linux_config { - disable_password_authentication = false - } - } -` - -var testAccAzureRMVirtualMachine_updateAvailabilitySet = ` - resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" - } - - resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - } - - resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" - } - - resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } - } - - resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } - } - - resource "azurerm_availability_set" "test" { - name = "updatedAvailabilitySet%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - - resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" - } - - resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - availability_set_id = "${azurerm_availability_set.test.id}" - delete_os_disk_on_termination = true - - storage_image_reference { - 
publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - } -` - -var testAccAzureRMVirtualMachine_updateMachineName = ` - resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" - } - - resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - } - - resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" - } - - resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } - } - - resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } - } - - resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" - } - - resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = 
"${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - delete_os_disk_on_termination = true - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "newhostname%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - } - ` - -var testAccAzureRMVirtualMachine_basicLinuxMachineStorageImageBefore = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = 
"${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - delete_os_disk_on_termination = true - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_basicLinuxMachineStorageImageAfter = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = 
"${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - delete_os_disk_on_termination = true - - storage_image_reference { - publisher = "CoreOS" - offer = "CoreOS" - sku = "Stable" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" 
- } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_basicLinuxMachineWithOSDiskVhdUriChanged = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = 
"${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdiskchanged2.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_windowsLicenseType = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = 
["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - license_type = "Windows_Server" - - storage_image_reference { - publisher = "MicrosoftWindowsServer" - offer = "WindowsServer-HUB" - sku = "2008-R2-SP1-HUB" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - } - - os_profile { - computer_name = "winhost01" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_windows_config { - enable_automatic_upgrades = false - provision_vm_agent = true - } -} -` - -var testAccAzureRMVirtualMachine_plan = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type 
= "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_DS1_v2" - - storage_image_reference { - publisher = "kemptech" - offer = "vlm-azure" - sku = "freeloadmaster" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - plan { - name = "freeloadmaster" - publisher = "kemptech" - product = "vlm-azure" - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_linuxMachineWithSSH = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg%s" - location = "southcentralus" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn%s" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub%s" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni%s" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" 
- location = "southcentralus" - account_type = "Standard_LRS" -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm%s" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hostname%s" - admin_username = "testadmin" - admin_password = "Password1234!" 
- } - - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = "/home/testadmin/.ssh/authorized_keys" - key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCfGyt5W1eJVpDIxlyvAWO594j/azEGohmlxYe7mgSfmUCWjuzILI6nHuHbxhpBDIZJhQ+JAeduXpii61dmThbI89ghGMhzea0OlT3p12e093zqa4goB9g40jdNKmJArER3pMVqs6hmv8y3GlUNkMDSmuoyI8AYzX4n26cUKZbwXQ== mk@mk3" - } - } -} -` - -var testAccAzureRMVirtualMachine_linuxMachineWithSSHRemoved = ` -resource "azurerm_resource_group" "test" { - name = "acctestrg%s" - location = "southcentralus" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn%s" - address_space = ["10.0.0.0/16"] - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub%s" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni%s" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%s" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "southcentralus" - account_type = "Standard_LRS" -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm%s" - location = "southcentralus" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = 
"Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hostname%s" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = true - } -} -` -var testAccAzureRMVirtualMachine_osDiskTypeConflict = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "osd-%d" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "10" - managed_disk_type = "Standard_LRS" - vhd_uri = "should_cause_conflict" - } - - storage_data_disk { - name 
= "mydatadisk1" - caching = "ReadWrite" - create_option = "Empty" - disk_size_gb = "45" - managed_disk_type = "Standard_LRS" - lun = "0" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_dataDiskTypeConflict = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}"] - vm_size = "Standard_D1_v2" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "osd-%d" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "10" - managed_disk_type = "Standard_LRS" - } - - storage_data_disk { - name = "mydatadisk1" - caching = "ReadWrite" - create_option = "Empty" - disk_size_gb = "45" - managed_disk_type = "Standard_LRS" - lun = "0" - } - - 
storage_data_disk { - name = "mydatadisk1" - vhd_uri = "should_cause_conflict" - caching = "ReadWrite" - create_option = "Empty" - disk_size_gb = "45" - managed_disk_type = "Standard_LRS" - lun = "1" - } - - os_profile { - computer_name = "hn%d" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` - -var testAccAzureRMVirtualMachine_primaryNetworkInterfaceId = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_network_interface" "test2" { - name = "acctni2-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration2" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "westus" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - 
storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = ["${azurerm_network_interface.test.id}","${azurerm_network_interface.test2.id}"] - primary_network_interface_id = "${azurerm_network_interface.test.id}" - vm_size = "Standard_A3" - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "14.04.2-LTS" - version = "latest" - } - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - caching = "ReadWrite" - create_option = "FromImage" - disk_size_gb = "45" - } - - os_profile { - computer_name = "hostname" - admin_username = "testadmin" - admin_password = "Password1234!" - } - - os_profile_linux_config { - disable_password_authentication = false - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` -var testAccAzureRMVirtualMachine_basicLinuxMachine_destroy = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource 
"azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} -` - -var testAccAzureRMVirtualMachine_basicLinuxMachine_attach_without_osProfile = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US 2" -} - -resource "azurerm_virtual_network" "test" { - name = "acctvn-%d" - address_space = ["10.0.0.0/16"] - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" -} - -resource "azurerm_subnet" "test" { - name = "acctsub-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test.name}" - address_prefix = "10.0.2.0/24" -} - -resource "azurerm_network_interface" "test" { - name = "acctni-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - -resource "azurerm_storage_account" "test" { - name = "accsa%d" - resource_group_name = "${azurerm_resource_group.test.name}" - location = "West US 2" - account_type = "Standard_LRS" - - tags { - environment = "staging" - } -} - -resource "azurerm_storage_container" "test" { - name = "vhds" - resource_group_name = "${azurerm_resource_group.test.name}" - storage_account_name = "${azurerm_storage_account.test.name}" - container_access_type = "private" -} - -resource "azurerm_virtual_machine" "test" { - name = "acctvm-%d" - location = "West US 2" - resource_group_name = "${azurerm_resource_group.test.name}" - network_interface_ids = 
["${azurerm_network_interface.test.id}"] - vm_size = "Standard_F2" - - storage_os_disk { - name = "myosdisk1" - vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" - os_type = "linux" - caching = "ReadWrite" - create_option = "Attach" - } - - tags { - environment = "Production" - cost-center = "Ops" - } -} -` diff --git a/builtin/providers/azurerm/resource_arm_virtual_network.go b/builtin/providers/azurerm/resource_arm_virtual_network.go deleted file mode 100644 index 22118b80c..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_network.go +++ /dev/null @@ -1,359 +0,0 @@ -package azurerm - -import ( - "bytes" - "fmt" - "log" - "net/http" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceArmVirtualNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceArmVirtualNetworkCreate, - Read: resourceArmVirtualNetworkRead, - Update: resourceArmVirtualNetworkCreate, - Delete: resourceArmVirtualNetworkDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address_space": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "dns_servers": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "subnet": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "address_prefix": { - Type: schema.TypeString, - Required: true, - }, - "security_group": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceAzureSubnetHash, - }, - - "location": 
locationSchema(), - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient) - vnetClient := client.vnetClient - - log.Printf("[INFO] preparing arguments for Azure ARM virtual network creation.") - - name := d.Get("name").(string) - location := d.Get("location").(string) - resGroup := d.Get("resource_group_name").(string) - tags := d.Get("tags").(map[string]interface{}) - vnetProperties, vnetPropsErr := getVirtualNetworkProperties(d, meta) - if vnetPropsErr != nil { - return vnetPropsErr - } - - vnet := network.VirtualNetwork{ - Name: &name, - Location: &location, - VirtualNetworkPropertiesFormat: vnetProperties, - Tags: expandTags(tags), - } - - networkSecurityGroupNames := make([]string, 0) - for _, subnet := range *vnet.VirtualNetworkPropertiesFormat.Subnets { - if subnet.NetworkSecurityGroup != nil { - nsgName, err := parseNetworkSecurityGroupName(*subnet.NetworkSecurityGroup.ID) - if err != nil { - return err - } - - networkSecurityGroupNames = append(networkSecurityGroupNames, nsgName) - } - } - - azureRMLockMultiple(&networkSecurityGroupNames) - defer azureRMUnlockMultiple(&networkSecurityGroupNames) - - _, error := vnetClient.CreateOrUpdate(resGroup, name, vnet, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := vnetClient.Get(resGroup, name, "") - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Virtual Network %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmVirtualNetworkRead(d, meta) -} - -func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) error { - vnetClient := meta.(*ArmClient).vnetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := 
id.Path["virtualNetworks"] - - resp, err := vnetClient.Get(resGroup, name, "") - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure virtual network %s: %s", name, err) - } - - vnet := *resp.VirtualNetworkPropertiesFormat - - // update appropriate values - d.Set("resource_group_name", resGroup) - d.Set("name", resp.Name) - d.Set("location", resp.Location) - d.Set("address_space", vnet.AddressSpace.AddressPrefixes) - - subnets := &schema.Set{ - F: resourceAzureSubnetHash, - } - - for _, subnet := range *vnet.Subnets { - s := map[string]interface{}{} - - s["name"] = *subnet.Name - s["address_prefix"] = *subnet.SubnetPropertiesFormat.AddressPrefix - if subnet.SubnetPropertiesFormat.NetworkSecurityGroup != nil { - s["security_group"] = *subnet.SubnetPropertiesFormat.NetworkSecurityGroup.ID - } - - subnets.Add(s) - } - d.Set("subnet", subnets) - - if vnet.DhcpOptions != nil && vnet.DhcpOptions.DNSServers != nil { - dnses := []string{} - for _, dns := range *vnet.DhcpOptions.DNSServers { - dnses = append(dnses, dns) - } - d.Set("dns_servers", dnses) - } - - flattenAndSetTags(d, resp.Tags) - - return nil -} - -func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) error { - vnetClient := meta.(*ArmClient).vnetClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["virtualNetworks"] - - nsgNames, err := expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d) - if err != nil { - return fmt.Errorf("[ERROR] Error parsing Network Security Group ID's: %+v", err) - } - - azureRMLockMultiple(&nsgNames) - defer azureRMUnlockMultiple(&nsgNames) - - _, error := vnetClient.Delete(resGroup, name, make(chan struct{})) - err = <-error - - return err -} - -func getVirtualNetworkProperties(d *schema.ResourceData, meta interface{}) (*network.VirtualNetworkPropertiesFormat, error) { - 
// first; get address space prefixes: - prefixes := []string{} - for _, prefix := range d.Get("address_space").([]interface{}) { - prefixes = append(prefixes, prefix.(string)) - } - - // then; the dns servers: - dnses := []string{} - for _, dns := range d.Get("dns_servers").([]interface{}) { - dnses = append(dnses, dns.(string)) - } - - // then; the subnets: - subnets := []network.Subnet{} - if subs := d.Get("subnet").(*schema.Set); subs.Len() > 0 { - for _, subnet := range subs.List() { - subnet := subnet.(map[string]interface{}) - - name := subnet["name"].(string) - log.Printf("[INFO] setting subnets inside vNet, processing %q", name) - //since subnets can also be created outside of vNet definition (as root objects) - // do a GET on subnet properties from the server before setting them - resGroup := d.Get("resource_group_name").(string) - vnetName := d.Get("name").(string) - subnetObj, err := getExistingSubnet(resGroup, vnetName, name, meta) - if err != nil { - return nil, err - } - log.Printf("[INFO] Completed GET of Subnet props ") - - prefix := subnet["address_prefix"].(string) - secGroup := subnet["security_group"].(string) - - //set the props from config and leave the rest intact - subnetObj.Name = &name - if subnetObj.SubnetPropertiesFormat == nil { - subnetObj.SubnetPropertiesFormat = &network.SubnetPropertiesFormat{} - } - - subnetObj.SubnetPropertiesFormat.AddressPrefix = &prefix - - if secGroup != "" { - subnetObj.SubnetPropertiesFormat.NetworkSecurityGroup = &network.SecurityGroup{ - ID: &secGroup, - } - } else { - subnetObj.SubnetPropertiesFormat.NetworkSecurityGroup = nil - } - - subnets = append(subnets, *subnetObj) - } - } - - properties := &network.VirtualNetworkPropertiesFormat{ - AddressSpace: &network.AddressSpace{ - AddressPrefixes: &prefixes, - }, - DhcpOptions: &network.DhcpOptions{ - DNSServers: &dnses, - }, - Subnets: &subnets, - } - // finally; return the struct: - return properties, nil -} - -func resourceAzureSubnetHash(v interface{}) 
int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s", m["address_prefix"].(string))) - if v, ok := m["security_group"]; ok { - buf.WriteString(v.(string)) - } - return hashcode.String(buf.String()) -} - -func getExistingSubnet(resGroup string, vnetName string, subnetName string, meta interface{}) (*network.Subnet, error) { - //attempt to retrieve existing subnet from the server - existingSubnet := network.Subnet{} - subnetClient := meta.(*ArmClient).subnetClient - resp, err := subnetClient.Get(resGroup, vnetName, subnetName, "") - - if err != nil { - if resp.StatusCode == http.StatusNotFound { - return &existingSubnet, nil - } - //raise an error if there was an issue other than 404 in getting subnet properties - return nil, err - } - - existingSubnet.SubnetPropertiesFormat = &network.SubnetPropertiesFormat{} - existingSubnet.SubnetPropertiesFormat.AddressPrefix = resp.SubnetPropertiesFormat.AddressPrefix - - if resp.SubnetPropertiesFormat.NetworkSecurityGroup != nil { - existingSubnet.SubnetPropertiesFormat.NetworkSecurityGroup = resp.SubnetPropertiesFormat.NetworkSecurityGroup - } - - if resp.SubnetPropertiesFormat.RouteTable != nil { - existingSubnet.SubnetPropertiesFormat.RouteTable = resp.SubnetPropertiesFormat.RouteTable - } - - if resp.SubnetPropertiesFormat.IPConfigurations != nil { - ips := make([]string, 0, len(*resp.SubnetPropertiesFormat.IPConfigurations)) - for _, ip := range *resp.SubnetPropertiesFormat.IPConfigurations { - ips = append(ips, *ip.ID) - } - - existingSubnet.SubnetPropertiesFormat.IPConfigurations = resp.SubnetPropertiesFormat.IPConfigurations - } - - return &existingSubnet, nil -} - -func expandAzureRmVirtualNetworkVirtualNetworkSecurityGroupNames(d *schema.ResourceData) ([]string, error) { - nsgNames := make([]string, 0) - - if v, ok := d.GetOk("subnet"); ok { - subnets := v.(*schema.Set).List() - for _, subnet := range subnets { - 
subnet, ok := subnet.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("[ERROR] Subnet should be a Hash - was '%+v'", subnet) - } - - networkSecurityGroupId := subnet["security_group"].(string) - if networkSecurityGroupId != "" { - nsgName, err := parseNetworkSecurityGroupName(networkSecurityGroupId) - if err != nil { - return nil, err - } - - nsgNames = append(nsgNames, nsgName) - } - } - } - - return nsgNames, nil -} diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_peering.go b/builtin/providers/azurerm/resource_arm_virtual_network_peering.go deleted file mode 100644 index e783d6cd8..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_network_peering.go +++ /dev/null @@ -1,186 +0,0 @@ -package azurerm - -import ( - "fmt" - "log" - "net/http" - "sync" - - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/hashicorp/terraform/helper/schema" -) - -// peerMutex is used to prevet multiple Peering resources being creaed, updated -// or deleted at the same time -var peerMutex = &sync.Mutex{} - -func resourceArmVirtualNetworkPeering() *schema.Resource { - return &schema.Resource{ - Create: resourceArmVirtualNetworkPeeringCreate, - Read: resourceArmVirtualNetworkPeeringRead, - Update: resourceArmVirtualNetworkPeeringCreate, - Delete: resourceArmVirtualNetworkPeeringDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "resource_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "virtual_network_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "remote_virtual_network_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "allow_virtual_network_access": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "allow_forwarded_traffic": { - Type: 
schema.TypeBool, - Optional: true, - Computed: true, - }, - - "allow_gateway_transit": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "use_remote_gateways": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceArmVirtualNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).vnetPeeringsClient - - log.Printf("[INFO] preparing arguments for Azure ARM virtual network peering creation.") - - name := d.Get("name").(string) - vnetName := d.Get("virtual_network_name").(string) - resGroup := d.Get("resource_group_name").(string) - - peer := network.VirtualNetworkPeering{ - Name: &name, - VirtualNetworkPeeringPropertiesFormat: getVirtualNetworkPeeringProperties(d), - } - - peerMutex.Lock() - defer peerMutex.Unlock() - - _, error := client.CreateOrUpdate(resGroup, vnetName, name, peer, make(chan struct{})) - err := <-error - if err != nil { - return err - } - - read, err := client.Get(resGroup, vnetName, name) - if err != nil { - return err - } - if read.ID == nil { - return fmt.Errorf("Cannot read Virtual Network Peering %s (resource group %s) ID", name, resGroup) - } - - d.SetId(*read.ID) - - return resourceArmVirtualNetworkPeeringRead(d, meta) -} - -func resourceArmVirtualNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).vnetPeeringsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - vnetName := id.Path["virtualNetworks"] - name := id.Path["virtualNetworkPeerings"] - - resp, err := client.Get(resGroup, vnetName, name) - if err != nil { - if resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return fmt.Errorf("Error making Read request on Azure virtual network peering %s: %s", name, err) - } - - peer := *resp.VirtualNetworkPeeringPropertiesFormat - - // update appropriate values - d.Set("resource_group_name", resGroup) - 
d.Set("name", resp.Name) - d.Set("virtual_network_name", vnetName) - d.Set("allow_virtual_network_access", peer.AllowVirtualNetworkAccess) - d.Set("allow_forwarded_traffic", peer.AllowForwardedTraffic) - d.Set("allow_gateway_transit", peer.AllowGatewayTransit) - d.Set("use_remote_gateways", peer.UseRemoteGateways) - d.Set("remote_virtual_network_id", peer.RemoteVirtualNetwork.ID) - - return nil -} - -func resourceArmVirtualNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).vnetPeeringsClient - - id, err := parseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - vnetName := id.Path["virtualNetworks"] - name := id.Path["virtualNetworkPeerings"] - - peerMutex.Lock() - defer peerMutex.Unlock() - - _, error := client.Delete(resGroup, vnetName, name, make(chan struct{})) - err = <-error - - return err -} - -func getVirtualNetworkPeeringProperties(d *schema.ResourceData) *network.VirtualNetworkPeeringPropertiesFormat { - allowVirtualNetworkAccess := d.Get("allow_virtual_network_access").(bool) - allowForwardedTraffic := d.Get("allow_forwarded_traffic").(bool) - allowGatewayTransit := d.Get("allow_gateway_transit").(bool) - useRemoteGateways := d.Get("use_remote_gateways").(bool) - remoteVirtualNetworkID := d.Get("remote_virtual_network_id").(string) - - return &network.VirtualNetworkPeeringPropertiesFormat{ - AllowVirtualNetworkAccess: &allowVirtualNetworkAccess, - AllowForwardedTraffic: &allowForwardedTraffic, - AllowGatewayTransit: &allowGatewayTransit, - UseRemoteGateways: &useRemoteGateways, - RemoteVirtualNetwork: &network.SubResource{ - ID: &remoteVirtualNetworkID, - }, - } -} diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go b/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go deleted file mode 100644 index 3f6832183..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go +++ /dev/null @@ 
-1,266 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccAzureRMVirtualNetworkPeering_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualNetworkPeering_disappears(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", 
"true"), - testCheckAzureRMVirtualNetworkPeeringDisappears("azurerm_virtual_network_peering.test1"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualNetworkPeering_update(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basicUpdate, ri, ri, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_forwarded_traffic", "false"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_forwarded_traffic", "false"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), - testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test1", "allow_forwarded_traffic", "true"), - 
resource.TestCheckResourceAttr( - "azurerm_virtual_network_peering.test2", "allow_forwarded_traffic", "true"), - ), - }, - }, - }) -} - -func testCheckAzureRMVirtualNetworkPeeringExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual network peering: %s", name) - } - - // Ensure resource group/virtual network peering combination exists in API - conn := testAccProvider.Meta().(*ArmClient).vnetPeeringsClient - - resp, err := conn.Get(resourceGroup, vnetName, name) - if err != nil { - return fmt.Errorf("Bad: Get on vnetPeeringsClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Virtual Network Peering %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMVirtualNetworkPeeringDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual network peering: %s", name) - } - - // Ensure resource group/virtual network peering combination exists in API - conn := testAccProvider.Meta().(*ArmClient).vnetPeeringsClient - - _, error := 
conn.Delete(resourceGroup, vnetName, name, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on vnetPeeringsClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMVirtualNetworkPeeringDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).vnetPeeringsClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_virtual_network_peering" { - continue - } - - name := rs.Primary.Attributes["name"] - vnetName := rs.Primary.Attributes["virtual_network_name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, vnetName, name) - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Virtual Network Peering sitll exists:\n%#v", resp.VirtualNetworkPeeringPropertiesFormat) - } - } - - return nil -} - -var testAccAzureRMVirtualNetworkPeering_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test1" { - name = "acctestvirtnet-1-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - address_space = ["10.0.1.0/24"] - location = "${azurerm_resource_group.test.location}" -} - -resource "azurerm_virtual_network" "test2" { - name = "acctestvirtnet-2-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - address_space = ["10.0.2.0/24"] - location = "${azurerm_resource_group.test.location}" -} - -resource "azurerm_virtual_network_peering" "test1" { - name = "acctestpeer-1-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test1.name}" - remote_virtual_network_id = "${azurerm_virtual_network.test2.id}" - allow_virtual_network_access = true -} - -resource "azurerm_virtual_network_peering" "test2" { - name = "acctestpeer-2-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = 
"${azurerm_virtual_network.test2.name}" - remote_virtual_network_id = "${azurerm_virtual_network.test1.id}" - allow_virtual_network_access = true -} -` - -var testAccAzureRMVirtualNetworkPeering_basicUpdate = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test1" { - name = "acctestvirtnet-1-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - address_space = ["10.0.1.0/24"] - location = "${azurerm_resource_group.test.location}" -} - -resource "azurerm_virtual_network" "test2" { - name = "acctestvirtnet-2-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - address_space = ["10.0.2.0/24"] - location = "${azurerm_resource_group.test.location}" -} - -resource "azurerm_virtual_network_peering" "test1" { - name = "acctestpeer-1-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test1.name}" - remote_virtual_network_id = "${azurerm_virtual_network.test2.id}" - allow_forwarded_traffic = true - allow_virtual_network_access = true -} - -resource "azurerm_virtual_network_peering" "test2" { - name = "acctestpeer-2-%d" - resource_group_name = "${azurerm_resource_group.test.name}" - virtual_network_name = "${azurerm_virtual_network.test2.name}" - remote_virtual_network_id = "${azurerm_virtual_network.test1.id}" - allow_forwarded_traffic = true - allow_virtual_network_access = true -} -` diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_test.go b/builtin/providers/azurerm/resource_arm_virtual_network_test.go deleted file mode 100644 index 856d4fe2e..000000000 --- a/builtin/providers/azurerm/resource_arm_virtual_network_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/http" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccAzureRMVirtualNetwork_basic(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetwork_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkExists("azurerm_virtual_network.test"), - ), - }, - }, - }) -} - -func TestAccAzureRMVirtualNetwork_disappears(t *testing.T) { - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccAzureRMVirtualNetwork_basic, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkExists("azurerm_virtual_network.test"), - testCheckAzureRMVirtualNetworkDisappears("azurerm_virtual_network.test"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAzureRMVirtualNetwork_withTags(t *testing.T) { - - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualNetwork_withTags, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVirtualNetwork_withTagsUpdated, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkExists("azurerm_virtual_network.test"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network.test", "tags.%", "2"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network.test", "tags.environment", "Production"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network.test", "tags.cost_center", 
"MSFT"), - ), - }, - - resource.TestStep{ - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualNetworkExists("azurerm_virtual_network.test"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network.test", "tags.%", "1"), - resource.TestCheckResourceAttr( - "azurerm_virtual_network.test", "tags.environment", "staging"), - ), - }, - }, - }) -} - -func testCheckAzureRMVirtualNetworkExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - virtualNetworkName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual network: %s", virtualNetworkName) - } - - // Ensure resource group/virtual network combination exists in API - conn := testAccProvider.Meta().(*ArmClient).vnetClient - - resp, err := conn.Get(resourceGroup, virtualNetworkName, "") - if err != nil { - return fmt.Errorf("Bad: Get on vnetClient: %s", err) - } - - if resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("Bad: Virtual Network %q (resource group: %q) does not exist", name, resourceGroup) - } - - return nil - } -} - -func testCheckAzureRMVirtualNetworkDisappears(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - virtualNetworkName := rs.Primary.Attributes["name"] - resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] - if !hasResourceGroup { - return fmt.Errorf("Bad: no resource group found in state for virtual network: %s", virtualNetworkName) - } - - // Ensure resource group/virtual 
network combination exists in API - conn := testAccProvider.Meta().(*ArmClient).vnetClient - - _, error := conn.Delete(resourceGroup, virtualNetworkName, make(chan struct{})) - err := <-error - if err != nil { - return fmt.Errorf("Bad: Delete on vnetClient: %s", err) - } - - return nil - } -} - -func testCheckAzureRMVirtualNetworkDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*ArmClient).vnetClient - - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_virtual_network" { - continue - } - - name := rs.Primary.Attributes["name"] - resourceGroup := rs.Primary.Attributes["resource_group_name"] - - resp, err := conn.Get(resourceGroup, name, "") - - if err != nil { - return nil - } - - if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Virtual Network sitll exists:\n%#v", resp.VirtualNetworkPropertiesFormat) - } - } - - return nil -} - -var testAccAzureRMVirtualNetwork_basic = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - subnet { - name = "subnet1" - address_prefix = "10.0.1.0/24" - } -} -` - -var testAccAzureRMVirtualNetwork_withTags = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = "acctestvirtnet%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - subnet { - name = "subnet1" - address_prefix = "10.0.1.0/24" - } - - tags { - environment = "Production" - cost_center = "MSFT" - } -} -` - -var testAccAzureRMVirtualNetwork_withTagsUpdated = ` -resource "azurerm_resource_group" "test" { - name = "acctestRG-%d" - location = "West US" -} - -resource "azurerm_virtual_network" "test" { - name = 
"acctestvirtnet%d" - address_space = ["10.0.0.0/16"] - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - subnet { - name = "subnet1" - address_prefix = "10.0.1.0/24" - } - - tags { - environment = "staging" - } -} -` diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go deleted file mode 100644 index bcf5eb45b..000000000 --- a/builtin/providers/azurerm/resourceid.go +++ /dev/null @@ -1,145 +0,0 @@ -package azurerm - -import ( - "fmt" - "net/url" - "strings" -) - -// ResourceID represents a parsed long-form Azure Resource Manager ID -// with the Subscription ID, Resource Group and the Provider as top- -// level fields, and other key-value pairs available via a map in the -// Path field. -type ResourceID struct { - SubscriptionID string - ResourceGroup string - Provider string - Path map[string]string -} - -// parseAzureResourceID converts a long-form Azure Resource Manager ID -// into a ResourceID. We make assumptions about the structure of URLs, -// which is obviously not good, but the best thing available given the -// SDK. -func parseAzureResourceID(id string) (*ResourceID, error) { - idURL, err := url.ParseRequestURI(id) - if err != nil { - return nil, fmt.Errorf("Cannot parse Azure Id: %s", err) - } - - path := idURL.Path - - path = strings.TrimSpace(path) - if strings.HasPrefix(path, "/") { - path = path[1:] - } - - if strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - - components := strings.Split(path, "/") - - // We should have an even number of key-value pairs. 
- if len(components)%2 != 0 { - return nil, fmt.Errorf("The number of path segments is not divisible by 2 in %q", path) - } - - var subscriptionID string - - // Put the constituent key-value pairs into a map - componentMap := make(map[string]string, len(components)/2) - for current := 0; current < len(components); current += 2 { - key := components[current] - value := components[current+1] - - // Check key/value for empty strings. - if key == "" || value == "" { - return nil, fmt.Errorf("Key/Value cannot be empty strings. Key: '%s', Value: '%s'", key, value) - } - - // Catch the subscriptionID before it can be overwritten by another "subscriptions" - // value in the ID which is the case for the Service Bus subscription resource - if key == "subscriptions" && subscriptionID == "" { - subscriptionID = value - } else { - componentMap[key] = value - } - } - - // Build up a ResourceID from the map - idObj := &ResourceID{} - idObj.Path = componentMap - - if subscriptionID != "" { - idObj.SubscriptionID = subscriptionID - } else { - return nil, fmt.Errorf("No subscription ID found in: %q", path) - } - - if resourceGroup, ok := componentMap["resourceGroups"]; ok { - idObj.ResourceGroup = resourceGroup - delete(componentMap, "resourceGroups") - } else { - // Some Azure APIs are weird and provide things in lower case... - // However it's not clear whether the casing of other elements in the URI - // matter, so we explicitly look for that case here. 
- if resourceGroup, ok := componentMap["resourcegroups"]; ok { - idObj.ResourceGroup = resourceGroup - delete(componentMap, "resourcegroups") - } else { - return nil, fmt.Errorf("No resource group name found in: %q", path) - } - } - - // It is OK not to have a provider in the case of a resource group - if provider, ok := componentMap["providers"]; ok { - idObj.Provider = provider - delete(componentMap, "providers") - } - - return idObj, nil -} - -func composeAzureResourceID(idObj *ResourceID) (id string, err error) { - if idObj.SubscriptionID == "" || idObj.ResourceGroup == "" { - return "", fmt.Errorf("SubscriptionID and ResourceGroup cannot be empty") - } - - id = fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", idObj.SubscriptionID, idObj.ResourceGroup) - - if idObj.Provider != "" { - if len(idObj.Path) < 1 { - return "", fmt.Errorf("ResourceID.Path should have at least one item when ResourceID.Provider is specified") - } - - id += fmt.Sprintf("/providers/%s", idObj.Provider) - - for k, v := range idObj.Path { - if k == "" || v == "" { - return "", fmt.Errorf("ResourceID.Path cannot contain empty strings") - } - id += fmt.Sprintf("/%s/%s", k, v) - } - } - - return -} - -func parseNetworkSecurityGroupName(networkSecurityGroupId string) (string, error) { - id, err := parseAzureResourceID(networkSecurityGroupId) - if err != nil { - return "", fmt.Errorf("[ERROR] Unable to Parse Network Security Group ID '%s': %+v", networkSecurityGroupId, err) - } - - return id.Path["networkSecurityGroups"], nil -} - -func parseRouteTableName(routeTableId string) (string, error) { - id, err := parseAzureResourceID(routeTableId) - if err != nil { - return "", fmt.Errorf("[ERROR] Unable to parse Route Table ID '%s': %+v", routeTableId, err) - } - - return id.Path["routeTables"], nil -} diff --git a/builtin/providers/azurerm/resourceid_test.go b/builtin/providers/azurerm/resourceid_test.go deleted file mode 100644 index 69d64be93..000000000 --- 
a/builtin/providers/azurerm/resourceid_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package azurerm - -import ( - "reflect" - "testing" -) - -func TestParseAzureResourceID(t *testing.T) { - testCases := []struct { - id string - expectedResourceID *ResourceID - expectError bool - }{ - { - // Missing "resourceGroups". - "/subscriptions/00000000-0000-0000-0000-000000000000//myResourceGroup/", - nil, - true, - }, - { - // Empty resource group ID. - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//", - nil, - true, - }, - { - "random", - nil, - true, - }, - { - "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - nil, - true, - }, - { - "subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - nil, - true, - }, - { - "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1", - &ResourceID{ - SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - ResourceGroup: "testGroup1", - Provider: "", - Path: map[string]string{}, - }, - false, - }, - { - "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network", - &ResourceID{ - SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - ResourceGroup: "testGroup1", - Provider: "Microsoft.Network", - Path: map[string]string{}, - }, - false, - }, - { - // Missing leading / - "subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1/", - nil, - true, - }, - { - "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1", - &ResourceID{ - SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - ResourceGroup: "testGroup1", - Provider: "Microsoft.Network", - Path: map[string]string{ - "virtualNetworks": "virtualNetwork1", - }, - }, - false, - }, - { - 
"/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1?api-version=2006-01-02-preview", - &ResourceID{ - SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - ResourceGroup: "testGroup1", - Provider: "Microsoft.Network", - Path: map[string]string{ - "virtualNetworks": "virtualNetwork1", - }, - }, - false, - }, - { - "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1/subnets/publicInstances1?api-version=2006-01-02-preview", - &ResourceID{ - SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038", - ResourceGroup: "testGroup1", - Provider: "Microsoft.Network", - Path: map[string]string{ - "virtualNetworks": "virtualNetwork1", - "subnets": "publicInstances1", - }, - }, - false, - }, - { - "/subscriptions/34ca515c-4629-458e-bf7c-738d77e0d0ea/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.Cdn/profiles/acceptanceTestCdnProfile1", - &ResourceID{ - SubscriptionID: "34ca515c-4629-458e-bf7c-738d77e0d0ea", - ResourceGroup: "acceptanceTestResourceGroup1", - Provider: "Microsoft.Cdn", - Path: map[string]string{ - "profiles": "acceptanceTestCdnProfile1", - }, - }, - false, - }, - { - "/subscriptions/34ca515c-4629-458e-bf7c-738d77e0d0ea/resourceGroups/testGroup1/providers/Microsoft.ServiceBus/namespaces/testNamespace1/topics/testTopic1/subscriptions/testSubscription1", - &ResourceID{ - SubscriptionID: "34ca515c-4629-458e-bf7c-738d77e0d0ea", - ResourceGroup: "testGroup1", - Provider: "Microsoft.ServiceBus", - Path: map[string]string{ - "namespaces": "testNamespace1", - "topics": "testTopic1", - "subscriptions": "testSubscription1", - }, - }, - false, - }, - } - - for _, test := range testCases { - parsed, err := parseAzureResourceID(test.id) - if test.expectError && err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - if 
!reflect.DeepEqual(test.expectedResourceID, parsed) { - t.Fatalf("Unexpected resource ID:\nExpected: %+v\nGot: %+v\n", test.expectedResourceID, parsed) - } - } -} - -func TestComposeAzureResourceID(t *testing.T) { - testCases := []struct { - resourceID *ResourceID - expectedID string - expectError bool - }{ - { - &ResourceID{ - SubscriptionID: "00000000-0000-0000-0000-000000000000", - ResourceGroup: "testGroup1", - Provider: "foo.bar", - Path: map[string]string{ - "k1": "v1", - "k2": "v2", - "k3": "v3", - }, - }, - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup1/providers/foo.bar/k1/v1/k2/v2/k3/v3", - false, - }, - { - &ResourceID{ - SubscriptionID: "00000000-0000-0000-0000-000000000000", - ResourceGroup: "testGroup1", - }, - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup1", - false, - }, - { - // If Provider is specified, there must be at least one element in Path. - &ResourceID{ - SubscriptionID: "00000000-0000-0000-0000-000000000000", - ResourceGroup: "testGroup1", - Provider: "foo.bar", - }, - "", - true, - }, - { - // One of the keys in Path is an empty string. - &ResourceID{ - SubscriptionID: "00000000-0000-0000-0000-000000000000", - ResourceGroup: "testGroup1", - Provider: "foo.bar", - Path: map[string]string{ - "k2": "v2", - "": "v1", - }, - }, - "", - true, - }, - { - // One of the values in Path is an empty string. 
- &ResourceID{ - SubscriptionID: "00000000-0000-0000-0000-000000000000", - ResourceGroup: "testGroup1", - Provider: "foo.bar", - Path: map[string]string{ - "k1": "v1", - "k2": "", - }, - }, - "", - true, - }, - } - - for _, test := range testCases { - idString, err := composeAzureResourceID(test.resourceID) - - if test.expectError && err != nil { - continue - } - - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - if test.expectedID != idString { - t.Fatalf("Unexpected resource ID string:\nExpected: %s\nGot: %s\n", test.expectedID, idString) - } - } -} diff --git a/builtin/providers/azurerm/tags.go b/builtin/providers/azurerm/tags.go deleted file mode 100644 index 41a6701fb..000000000 --- a/builtin/providers/azurerm/tags.go +++ /dev/null @@ -1,85 +0,0 @@ -package azurerm - -import ( - "errors" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func tagsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Computed: true, - ValidateFunc: validateAzureRMTags, - } -} - -func tagsForDataSourceSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - } -} - -func tagValueToString(v interface{}) (string, error) { - switch value := v.(type) { - case string: - return value, nil - case int: - return fmt.Sprintf("%d", value), nil - default: - return "", fmt.Errorf("unknown tag type %T in tag value", value) - } -} - -func validateAzureRMTags(v interface{}, k string) (ws []string, es []error) { - tagsMap := v.(map[string]interface{}) - - if len(tagsMap) > 15 { - es = append(es, errors.New("a maximum of 15 tags can be applied to each ARM resource")) - } - - for k, v := range tagsMap { - if len(k) > 512 { - es = append(es, fmt.Errorf("the maximum length for a tag key is 512 characters: %q is %d characters", k, len(k))) - } - - value, err := tagValueToString(v) - if err != nil { - es = append(es, err) - } else if len(value) > 256 { - es = append(es, fmt.Errorf("the maximum 
length for a tag value is 256 characters: the value for %q is %d characters", k, len(value))) - } - } - - return -} - -func expandTags(tagsMap map[string]interface{}) *map[string]*string { - output := make(map[string]*string, len(tagsMap)) - - for i, v := range tagsMap { - //Validate should have ignored this error already - value, _ := tagValueToString(v) - output[i] = &value - } - - return &output -} - -func flattenAndSetTags(d *schema.ResourceData, tagsMap *map[string]*string) { - if tagsMap == nil { - d.Set("tags", make(map[string]interface{})) - return - } - - output := make(map[string]interface{}, len(*tagsMap)) - - for i, v := range *tagsMap { - output[i] = *v - } - - d.Set("tags", output) -} diff --git a/builtin/providers/azurerm/tags_test.go b/builtin/providers/azurerm/tags_test.go deleted file mode 100644 index fb75c04f0..000000000 --- a/builtin/providers/azurerm/tags_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package azurerm - -import ( - "fmt" - "strings" - "testing" -) - -func TestValidateMaximumNumberOfARMTags(t *testing.T) { - tagsMap := make(map[string]interface{}) - for i := 0; i < 16; i++ { - tagsMap[fmt.Sprintf("key%d", i)] = fmt.Sprintf("value%d", i) - } - - _, es := validateAzureRMTags(tagsMap, "tags") - - if len(es) != 1 { - t.Fatal("Expected one validation error for too many tags") - } - - if !strings.Contains(es[0].Error(), "a maximum of 15 tags") { - t.Fatal("Wrong validation error message for too many tags") - } -} - -func TestValidateARMTagMaxKeyLength(t *testing.T) { - tooLongKey := strings.Repeat("long", 128) + "a" - tagsMap := make(map[string]interface{}) - tagsMap[tooLongKey] = "value" - - _, es := validateAzureRMTags(tagsMap, "tags") - if len(es) != 1 { - t.Fatal("Expected one validation error for a key which is > 512 chars") - } - - if !strings.Contains(es[0].Error(), "maximum length for a tag key") { - t.Fatal("Wrong validation error message maximum tag key length") - } - - if !strings.Contains(es[0].Error(), tooLongKey) { - 
t.Fatal("Expected validated error to contain the key name") - } - - if !strings.Contains(es[0].Error(), "513") { - t.Fatal("Expected the length in the validation error for tag key") - } -} - -func TestValidateARMTagMaxValueLength(t *testing.T) { - tagsMap := make(map[string]interface{}) - tagsMap["toolong"] = strings.Repeat("long", 64) + "a" - - _, es := validateAzureRMTags(tagsMap, "tags") - if len(es) != 1 { - t.Fatal("Expected one validation error for a value which is > 256 chars") - } - - if !strings.Contains(es[0].Error(), "maximum length for a tag value") { - t.Fatal("Wrong validation error message for maximum tag value length") - } - - if !strings.Contains(es[0].Error(), "toolong") { - t.Fatal("Expected validated error to contain the key name") - } - - if !strings.Contains(es[0].Error(), "257") { - t.Fatal("Expected the length in the validation error for value") - } -} - -func TestExpandARMTags(t *testing.T) { - testData := make(map[string]interface{}) - testData["key1"] = "value1" - testData["key2"] = 21 - testData["key3"] = "value3" - - tempExpanded := expandTags(testData) - expanded := *tempExpanded - - if len(expanded) != 3 { - t.Fatalf("Expected 3 results in expanded tag map, got %d", len(expanded)) - } - - for k, v := range testData { - var strVal string - switch v.(type) { - case string: - strVal = v.(string) - case int: - strVal = fmt.Sprintf("%d", v.(int)) - } - - if *expanded[k] != strVal { - t.Fatalf("Expanded value %q incorrect: expected %q, got %q", k, strVal, expanded[k]) - } - } -} diff --git a/builtin/providers/azurerm/validators.go b/builtin/providers/azurerm/validators.go deleted file mode 100644 index 2151f09e0..000000000 --- a/builtin/providers/azurerm/validators.go +++ /dev/null @@ -1,14 +0,0 @@ -package azurerm - -import ( - "fmt" - - "github.com/satori/uuid" -) - -func validateUUID(v interface{}, k string) (ws []string, errors []error) { - if _, err := uuid.FromString(v.(string)); err != nil { - errors = append(errors, fmt.Errorf("%q 
is an invalid UUUID: %s", k, err)) - } - return -} diff --git a/builtin/providers/bitbucket/client.go b/builtin/providers/bitbucket/client.go deleted file mode 100644 index bd2cebcce..000000000 --- a/builtin/providers/bitbucket/client.go +++ /dev/null @@ -1,108 +0,0 @@ -package bitbucket - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" -) - -// Error represents a error from the bitbucket api. -type Error struct { - APIError struct { - Message string `json:"message,omitempty"` - } `json:"error,omitempty"` - Type string `json:"type,omitempty"` - StatusCode int - Endpoint string -} - -func (e Error) Error() string { - return fmt.Sprintf("API Error: %d %s %s", e.StatusCode, e.Endpoint, e.APIError.Message) -} - -const ( - // BitbucketEndpoint is the fqdn used to talk to bitbucket - BitbucketEndpoint string = "https://api.bitbucket.org/" -) - -type BitbucketClient struct { - Username string - Password string - HTTPClient *http.Client -} - -func (c *BitbucketClient) Do(method, endpoint string, payload *bytes.Buffer) (*http.Response, error) { - - absoluteendpoint := BitbucketEndpoint + endpoint - log.Printf("[DEBUG] Sending request to %s %s", method, absoluteendpoint) - - var bodyreader io.Reader - - if payload != nil { - log.Printf("[DEBUG] With payload %s", payload.String()) - bodyreader = payload - } - - req, err := http.NewRequest(method, absoluteendpoint, bodyreader) - if err != nil { - return nil, err - } - - req.SetBasicAuth(c.Username, c.Password) - - if payload != nil { - // Can cause bad request when putting default reviews if set. 
- req.Header.Add("Content-Type", "application/json") - } - - req.Close = true - - resp, err := c.HTTPClient.Do(req) - log.Printf("[DEBUG] Resp: %v Err: %v", resp, err) - if resp.StatusCode >= 400 || resp.StatusCode < 200 { - apiError := Error{ - StatusCode: resp.StatusCode, - Endpoint: endpoint, - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - log.Printf("[DEBUG] Resp Body: %s", string(body)) - - err = json.Unmarshal(body, &apiError) - if err != nil { - apiError.APIError.Message = string(body) - } - - return resp, error(apiError) - - } - return resp, err -} - -func (c *BitbucketClient) Get(endpoint string) (*http.Response, error) { - return c.Do("GET", endpoint, nil) -} - -func (c *BitbucketClient) Post(endpoint string, jsonpayload *bytes.Buffer) (*http.Response, error) { - return c.Do("POST", endpoint, jsonpayload) -} - -func (c *BitbucketClient) Put(endpoint string, jsonpayload *bytes.Buffer) (*http.Response, error) { - return c.Do("PUT", endpoint, jsonpayload) -} - -func (c *BitbucketClient) PutOnly(endpoint string) (*http.Response, error) { - return c.Do("PUT", endpoint, nil) -} - -func (c *BitbucketClient) Delete(endpoint string) (*http.Response, error) { - return c.Do("DELETE", endpoint, nil) -} diff --git a/builtin/providers/bitbucket/provider.go b/builtin/providers/bitbucket/provider.go deleted file mode 100644 index e50f9295f..000000000 --- a/builtin/providers/bitbucket/provider.go +++ /dev/null @@ -1,41 +0,0 @@ -package bitbucket - -import ( - "net/http" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": { - Required: true, - Type: schema.TypeString, - DefaultFunc: schema.EnvDefaultFunc("BITBUCKET_USERNAME", nil), - }, - "password": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("BITBUCKET_PASSWORD", nil), 
- }, - }, - ConfigureFunc: providerConfigure, - ResourcesMap: map[string]*schema.Resource{ - "bitbucket_hook": resourceHook(), - "bitbucket_default_reviewers": resourceDefaultReviewers(), - "bitbucket_repository": resourceRepository(), - }, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - client := &BitbucketClient{ - Username: d.Get("username").(string), - Password: d.Get("password").(string), - HTTPClient: &http.Client{}, - } - - return client, nil -} diff --git a/builtin/providers/bitbucket/provider_test.go b/builtin/providers/bitbucket/provider_test.go deleted file mode 100644 index 647b3d8f7..000000000 --- a/builtin/providers/bitbucket/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package bitbucket - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "os" - "testing" -) - -const testRepo string = "test-repo" - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "bitbucket": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("BITBUCKET_USERNAME"); v == "" { - t.Fatal("BITBUCKET_USERNAME must be set for acceptence tests") - } - if v := os.Getenv("BITBUCKET_PASSWORD"); v == "" { - t.Fatal("BITBUCKET_PASSWORD must be set for acceptence tests") - } -} diff --git a/builtin/providers/bitbucket/resource_default_reviewers.go b/builtin/providers/bitbucket/resource_default_reviewers.go deleted file mode 100644 index 9fc5d1e0a..000000000 --- a/builtin/providers/bitbucket/resource_default_reviewers.go +++ /dev/null @@ -1,122 +0,0 @@ -package 
bitbucket - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -type Reviewer struct { - DisplayName string `json:"display_name,omitempty"` - UUID string `json:"uuid,omitempty"` - Username string `json:"username,omitempty"` - Type string `json:"type,omitempty"` -} - -type PaginatedReviewers struct { - Values []Reviewer `json:"values,omitempty"` -} - -func resourceDefaultReviewers() *schema.Resource { - return &schema.Resource{ - Create: resourceDefaultReviewersCreate, - Read: resourceDefaultReviewersRead, - Delete: resourceDefaultReviewersDelete, - - Schema: map[string]*schema.Schema{ - "owner": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "reviewers": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - ForceNew: true, - }, - }, - } -} - -func resourceDefaultReviewersCreate(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - - for _, user := range d.Get("reviewers").(*schema.Set).List() { - reviewerResp, err := client.PutOnly(fmt.Sprintf("2.0/repositories/%s/%s/default-reviewers/%s", - d.Get("owner").(string), - d.Get("repository").(string), - user, - )) - - if err != nil { - return err - } - - if reviewerResp.StatusCode != 200 { - return fmt.Errorf("Failed to create reviewer %s got code %d", user.(string), reviewerResp.StatusCode) - } - - defer reviewerResp.Body.Close() - } - - d.SetId(fmt.Sprintf("%s/%s/reviewers", d.Get("owner").(string), d.Get("repository").(string))) - return resourceDefaultReviewersRead(d, m) -} -func resourceDefaultReviewersRead(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - - reviewersResponse, err := client.Get(fmt.Sprintf("2.0/repositories/%s/%s/default-reviewers", - d.Get("owner").(string), - d.Get("repository").(string), - )) - - var reviewers 
PaginatedReviewers - - decoder := json.NewDecoder(reviewersResponse.Body) - err = decoder.Decode(&reviewers) - if err != nil { - return err - } - - terraformReviewers := make([]string, 0, len(reviewers.Values)) - - for _, reviewer := range reviewers.Values { - terraformReviewers = append(terraformReviewers, reviewer.Username) - } - - d.Set("reviewers", terraformReviewers) - - return nil -} -func resourceDefaultReviewersDelete(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - - for _, user := range d.Get("reviewers").(*schema.Set).List() { - resp, err := client.Delete(fmt.Sprintf("2.0/repositories/%s/%s/default-reviewers/%s", - d.Get("owner").(string), - d.Get("repository").(string), - user.(string), - )) - - if err != nil { - return err - } - - if resp.StatusCode != 204 { - return fmt.Errorf("[%d] Could not delete %s from default reviewer", - resp.StatusCode, - user.(string), - ) - } - defer resp.Body.Close() - } - return nil -} diff --git a/builtin/providers/bitbucket/resource_default_reviewers_test.go b/builtin/providers/bitbucket/resource_default_reviewers_test.go deleted file mode 100644 index 253911127..000000000 --- a/builtin/providers/bitbucket/resource_default_reviewers_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package bitbucket - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccBitbucketDefaultReviewers_basic(t *testing.T) { - - testUser := os.Getenv("BITBUCKET_USERNAME") - testAccBitbucketDefaultReviewersConfig := fmt.Sprintf(` - resource "bitbucket_repository" "test_repo" { - owner = "%s" - name = "test-repo-default-reviewers" - } - - resource "bitbucket_default_reviewers" "test_reviewers" { - owner = "%s" - repository = "${bitbucket_repository.test_repo.name}" - reviewers = [ - "%s", - ] - } - `, testUser, testUser, testUser) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckBitbucketDefaultReviewersDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBitbucketDefaultReviewersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckBitbucketDefaultReviewersExists("bitbucket_default_reviewers.test_reviewers"), - ), - }, - }, - }) -} - -func testAccCheckBitbucketDefaultReviewersDestroy(s *terraform.State) error { - _, ok := s.RootModule().Resources["bitbucket_default_reviewers.test_reviewers"] - if !ok { - return fmt.Errorf("Not found %s", "bitbucket_default_reviewers.test_reviewers") - } - return nil -} - -func testAccCheckBitbucketDefaultReviewersExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No default reviewers ID is set") - } - - return nil - } -} diff --git a/builtin/providers/bitbucket/resource_hook.go b/builtin/providers/bitbucket/resource_hook.go deleted file mode 100644 index 745292ad1..000000000 --- a/builtin/providers/bitbucket/resource_hook.go +++ /dev/null @@ -1,212 +0,0 @@ -package bitbucket - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/url" - - "github.com/hashicorp/terraform/helper/schema" -) - -type Hook struct { - Uuid string `json:"uuid,omitempty"` - Url string `json:"url,omitempty"` - Description string `json:"description,omitempty"` - Active bool `json:"active,omitempty"` - Events []string `json:"events,omitempty"` -} - -func resourceHook() *schema.Resource { - return &schema.Resource{ - Create: resourceHookCreate, - Read: resourceHookRead, - Update: resourceHookUpdate, - Delete: resourceHookDelete, - Exists: resourceHookExists, - - Schema: map[string]*schema.Schema{ - "owner": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "repository": &schema.Schema{ - Type: schema.TypeString, - Required: 
true, - ForceNew: true, - }, - "active": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "uuid": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func createHook(d *schema.ResourceData) *Hook { - - events := make([]string, 0, len(d.Get("events").(*schema.Set).List())) - - for _, item := range d.Get("events").(*schema.Set).List() { - events = append(events, item.(string)) - } - - return &Hook{ - Url: d.Get("url").(string), - Description: d.Get("description").(string), - Active: d.Get("active").(bool), - Events: events, - } -} - -func resourceHookCreate(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - hook := createHook(d) - - payload, err := json.Marshal(hook) - if err != nil { - return err - } - - hook_req, err := client.Post(fmt.Sprintf("2.0/repositories/%s/%s/hooks", - d.Get("owner").(string), - d.Get("repository").(string), - ), bytes.NewBuffer(payload)) - - if err != nil { - return err - } - - body, readerr := ioutil.ReadAll(hook_req.Body) - if readerr != nil { - return readerr - } - - decodeerr := json.Unmarshal(body, &hook) - if decodeerr != nil { - return decodeerr - } - - d.SetId(hook.Uuid) - - return resourceHookRead(d, m) -} -func resourceHookRead(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - - hook_req, _ := client.Get(fmt.Sprintf("2.0/repositories/%s/%s/hooks/%s", - d.Get("owner").(string), - d.Get("repository").(string), - url.PathEscape(d.Id()), - )) - - log.Printf("ID: %s", url.PathEscape(d.Id())) - - if hook_req.StatusCode == 200 { - var hook Hook - - body, readerr := ioutil.ReadAll(hook_req.Body) - if readerr != 
nil { - return readerr - } - - decodeerr := json.Unmarshal(body, &hook) - if decodeerr != nil { - return decodeerr - } - - d.Set("uuid", hook.Uuid) - d.Set("description", hook.Description) - d.Set("active", hook.Active) - d.Set("url", hook.Url) - - eventsList := make([]string, 0, len(hook.Events)) - - for _, event := range hook.Events { - eventsList = append(eventsList, event) - } - - d.Set("events", eventsList) - } - - return nil -} - -func resourceHookUpdate(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - hook := createHook(d) - payload, err := json.Marshal(hook) - if err != nil { - return err - } - - _, err = client.Put(fmt.Sprintf("2.0/repositories/%s/%s/hooks/%s", - d.Get("owner").(string), - d.Get("repository").(string), - url.PathEscape(d.Id()), - ), bytes.NewBuffer(payload)) - - if err != nil { - return err - } - - return resourceHookRead(d, m) -} - -func resourceHookExists(d *schema.ResourceData, m interface{}) (bool, error) { - client := m.(*BitbucketClient) - if _, okay := d.GetOk("uuid"); okay { - hook_req, err := client.Get(fmt.Sprintf("2.0/repositories/%s/%s/hooks/%s", - d.Get("owner").(string), - d.Get("repository").(string), - url.PathEscape(d.Id()), - )) - - if err != nil { - panic(err) - } - - if hook_req.StatusCode != 200 { - return false, err - } - - return true, nil - } - - return false, nil - -} - -func resourceHookDelete(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - _, err := client.Delete(fmt.Sprintf("2.0/repositories/%s/%s/hooks/%s", - d.Get("owner").(string), - d.Get("repository").(string), - url.PathEscape(d.Id()), - )) - - return err - -} diff --git a/builtin/providers/bitbucket/resource_hook_test.go b/builtin/providers/bitbucket/resource_hook_test.go deleted file mode 100644 index 59a719b87..000000000 --- a/builtin/providers/bitbucket/resource_hook_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bitbucket - -import ( - "fmt" - "net/url" - "os" - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccBitbucketHook_basic(t *testing.T) { - var hook Hook - - testUser := os.Getenv("BITBUCKET_USERNAME") - testAccBitbucketHookConfig := fmt.Sprintf(` - resource "bitbucket_repository" "test_repo" { - owner = "%s" - name = "test-repo-for-webhook-test" - } - resource "bitbucket_hook" "test_repo_hook" { - owner = "%s" - repository = "${bitbucket_repository.test_repo.name}" - description = "Test hook for terraform" - url = "https://httpbin.org" - events = [ - "repo:push", - ] - } - `, testUser, testUser) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBitbucketHookDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBitbucketHookConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckBitbucketHookExists("bitbucket_hook.test_repo_hook", &hook), - ), - }, - }, - }) -} - -func testAccCheckBitbucketHookDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*BitbucketClient) - rs, ok := s.RootModule().Resources["bitbucket_hook.test_repo_hook"] - if !ok { - return fmt.Errorf("Not found %s", "bitbucket_hook.test_repo_hook") - } - - response, err := client.Get(fmt.Sprintf("2.0/repositories/%s/%s/hooks/%s", rs.Primary.Attributes["owner"], rs.Primary.Attributes["repository"], url.PathEscape(rs.Primary.Attributes["uuid"]))) - - if err == nil { - return fmt.Errorf("The resource was found should have errored") - } - - if response.StatusCode != 404 { - return fmt.Errorf("Hook still exists") - } - - return nil -} - -func testAccCheckBitbucketHookExists(n string, hook *Hook) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Hook ID is set") - } - return nil - } -} diff --git 
a/builtin/providers/bitbucket/resource_repository.go b/builtin/providers/bitbucket/resource_repository.go deleted file mode 100644 index f57db3ea8..000000000 --- a/builtin/providers/bitbucket/resource_repository.go +++ /dev/null @@ -1,223 +0,0 @@ -package bitbucket - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/hashicorp/terraform/helper/schema" -) - -type CloneUrl struct { - Href string `json:"href,omitempty"` - Name string `json:"name,omitempty"` -} - -type Repository struct { - SCM string `json:"scm,omitempty"` - HasWiki bool `json:"has_wiki,omitempty"` - HasIssues bool `json:"has_issues,omitempty"` - Website string `json:"website,omitempty"` - IsPrivate bool `json:"is_private,omitempty"` - ForkPolicy string `json:"fork_policy,omitempty"` - Language string `json:"language,omitempty"` - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - UUID string `json:"uuid,omitempty"` - Project struct { - Key string `json:"key,omitempty"` - } `json:"project,omitempty"` - Links struct { - Clone []CloneUrl `json:"clone,omitempty"` - } `json:"links,omitempty"` -} - -func resourceRepository() *schema.Resource { - return &schema.Resource{ - Create: resourceRepositoryCreate, - Update: resourceRepositoryUpdate, - Read: resourceRepositoryRead, - Delete: resourceRepositoryDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "scm": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "git", - }, - "has_wiki": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "has_issues": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "website": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "clone_ssh": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "clone_https": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - 
}, - "project_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "is_private": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "fork_policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "allow_forks", - }, - "language": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "owner": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func newRepositoryFromResource(d *schema.ResourceData) *Repository { - repo := &Repository{ - Name: d.Get("name").(string), - Language: d.Get("language").(string), - IsPrivate: d.Get("is_private").(bool), - Description: d.Get("description").(string), - ForkPolicy: d.Get("fork_policy").(string), - HasWiki: d.Get("has_wiki").(bool), - HasIssues: d.Get("has_issues").(bool), - SCM: d.Get("scm").(string), - Website: d.Get("website").(string), - } - - repo.Project.Key = d.Get("project_key").(string) - return repo -} - -func resourceRepositoryUpdate(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - repository := newRepositoryFromResource(d) - - var jsonbuffer []byte - - jsonpayload := bytes.NewBuffer(jsonbuffer) - enc := json.NewEncoder(jsonpayload) - enc.Encode(repository) - - _, err := client.Put(fmt.Sprintf("2.0/repositories/%s/%s", - d.Get("owner").(string), - d.Get("name").(string), - ), jsonpayload) - - if err != nil { - return err - } - - return resourceRepositoryRead(d, m) -} - -func resourceRepositoryCreate(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - repo := newRepositoryFromResource(d) - - bytedata, err := json.Marshal(repo) - - if err != nil { - return err - } - - _, err = client.Post(fmt.Sprintf("2.0/repositories/%s/%s", - d.Get("owner").(string), - d.Get("name").(string), - ), 
bytes.NewBuffer(bytedata)) - - if err != nil { - return err - } - - d.SetId(string(fmt.Sprintf("%s/%s", d.Get("owner").(string), d.Get("name").(string)))) - - return resourceRepositoryRead(d, m) -} -func resourceRepositoryRead(d *schema.ResourceData, m interface{}) error { - - client := m.(*BitbucketClient) - repo_req, _ := client.Get(fmt.Sprintf("2.0/repositories/%s/%s", - d.Get("owner").(string), - d.Get("name").(string), - )) - - if repo_req.StatusCode == 200 { - - var repo Repository - - body, readerr := ioutil.ReadAll(repo_req.Body) - if readerr != nil { - return readerr - } - - decodeerr := json.Unmarshal(body, &repo) - if decodeerr != nil { - return decodeerr - } - - d.Set("scm", repo.SCM) - d.Set("is_private", repo.IsPrivate) - d.Set("has_wiki", repo.HasWiki) - d.Set("has_issues", repo.HasIssues) - d.Set("name", repo.Name) - d.Set("language", repo.Language) - d.Set("fork_policy", repo.ForkPolicy) - d.Set("website", repo.Website) - d.Set("description", repo.Description) - d.Set("project_key", repo.Project.Key) - - for _, clone_url := range repo.Links.Clone { - if clone_url.Name == "https" { - d.Set("clone_https", clone_url.Href) - } else { - d.Set("clone_ssh", clone_url.Href) - } - } - } - - return nil -} - -func resourceRepositoryDelete(d *schema.ResourceData, m interface{}) error { - client := m.(*BitbucketClient) - _, err := client.Delete(fmt.Sprintf("2.0/repositories/%s/%s", - d.Get("owner").(string), - d.Get("name").(string), - )) - - return err -} diff --git a/builtin/providers/bitbucket/resource_repository_test.go b/builtin/providers/bitbucket/resource_repository_test.go deleted file mode 100644 index 1fa47a71f..000000000 --- a/builtin/providers/bitbucket/resource_repository_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package bitbucket - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccBitbucketRepository_basic(t *testing.T) { - var repo Repository - - 
testUser := os.Getenv("BITBUCKET_USERNAME") - testAccBitbucketRepositoryConfig := fmt.Sprintf(` - resource "bitbucket_repository" "test_repo" { - owner = "%s" - name = "test-repo-for-repository-test" - } - `, testUser) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBitbucketRepositoryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBitbucketRepositoryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckBitbucketRepositoryExists("bitbucket_repository.test_repo", &repo), - ), - }, - }, - }) -} - -func testAccCheckBitbucketRepositoryDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*BitbucketClient) - rs, ok := s.RootModule().Resources["bitbucket_repository.test_repo"] - if !ok { - return fmt.Errorf("Not found %s", "bitbucket_repository.test_repo") - } - - response, _ := client.Get(fmt.Sprintf("2.0/repositories/%s/%s", rs.Primary.Attributes["owner"], rs.Primary.Attributes["name"])) - - if response.StatusCode != 404 { - return fmt.Errorf("Repository still exists") - } - - return nil -} - -func testAccCheckBitbucketRepositoryExists(n string, repository *Repository) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No repository ID is set") - } - return nil - } -} diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go deleted file mode 100644 index c1b2d8f46..000000000 --- a/builtin/providers/chef/provider.go +++ /dev/null @@ -1,125 +0,0 @@ -package chef - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - chefc "github.com/go-chef/chef" -) - -func Provider() terraform.ResourceProvider { - return 
&schema.Provider{ - Schema: map[string]*schema.Schema{ - "server_url": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CHEF_SERVER_URL", nil), - Description: "URL of the root of the target Chef server or organization.", - }, - "client_name": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CHEF_CLIENT_NAME", nil), - Description: "Name of a registered client within the Chef server.", - }, - "private_key_pem": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: providerPrivateKeyEnvDefault, - Deprecated: "Please use key_material instead", - Description: "PEM-formatted private key for client authentication.", - }, - "key_material": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CHEF_KEY_MATERIAL", ""), - }, - "allow_unverified_ssl": { - Type: schema.TypeBool, - Optional: true, - Description: "If set, the Chef client will permit unverifiable SSL certificates.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - //"chef_acl": resourceChefAcl(), - //"chef_client": resourceChefClient(), - //"chef_cookbook": resourceChefCookbook(), - "chef_data_bag": resourceChefDataBag(), - "chef_data_bag_item": resourceChefDataBagItem(), - "chef_environment": resourceChefEnvironment(), - "chef_node": resourceChefNode(), - "chef_role": resourceChefRole(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := &chefc.Config{ - Name: d.Get("client_name").(string), - BaseURL: d.Get("server_url").(string), - SkipSSL: d.Get("allow_unverified_ssl").(bool), - Timeout: 10 * time.Second, - } - - if v, ok := d.GetOk("private_key_pem"); ok { - config.Key = v.(string) - } - - if v, ok := d.GetOk("key_material"); ok { - config.Key = v.(string) - } - - return chefc.NewClient(config) -} - -func providerPrivateKeyEnvDefault() (interface{}, error) { - if fn := os.Getenv("CHEF_PRIVATE_KEY_FILE"); fn 
!= "" { - contents, err := ioutil.ReadFile(fn) - if err != nil { - return nil, err - } - return string(contents), nil - } - - return nil, nil -} - -func jsonStateFunc(value interface{}) string { - // Parse and re-stringify the JSON to make sure it's always kept - // in a normalized form. - in, ok := value.(string) - if !ok { - return "null" - } - var tmp map[string]interface{} - - // Assuming the value must be valid JSON since it passed okay through - // our prepareDataBagItemContent function earlier. - json.Unmarshal([]byte(in), &tmp) - - jsonValue, _ := json.Marshal(&tmp) - return string(jsonValue) -} - -func runListEntryStateFunc(value interface{}) string { - // Recipes in run lists can either be naked, like "foo", or can - // be explicitly qualified as "recipe[foo]". Whichever form we use, - // the server will always normalize to the explicit form, - // so we'll normalize too and then we won't generate unnecessary - // diffs when we refresh. - in := value.(string) - if !strings.Contains(in, "[") { - return fmt.Sprintf("recipe[%s]", in) - } - return in -} diff --git a/builtin/providers/chef/provider_test.go b/builtin/providers/chef/provider_test.go deleted file mode 100644 index 53188e314..000000000 --- a/builtin/providers/chef/provider_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package chef - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need access to a Chef server. -// An easy way to get one is to sign up for a hosted Chef server account -// at https://manage.chef.io/signup , after which your base URL will -// be something like https://api.opscode.com/organizations/example/ . -// You will also need to create a "client" and write its private key to -// a file somewhere. -// -// You can then set the following environment variables to make these -// tests work: -// CHEF_SERVER_URL to the base URL as described above. 
-// CHEF_CLIENT_NAME to the name of the client object you created. -// CHEF_KEY_MATERIAL the key file contents. -// -// You will probably need to edit the global permissions on your Chef -// Server account to allow this client (or all clients, if you're lazy) -// to have both List and Create access on all types of object: -// https://manage.chef.io/organizations/yourorg/global_permissions -// -// With all of that done, you can run like this: -// make testacc TEST=./builtin/providers/chef - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "chef": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CHEF_SERVER_URL"); v == "" { - t.Fatal("CHEF_SERVER_URL must be set for acceptance tests") - } - if v := os.Getenv("CHEF_CLIENT_NAME"); v == "" { - t.Fatal("CHEF_CLIENT_NAME must be set for acceptance tests") - } - if v := os.Getenv("CHEF_KEY_MATERIAL"); v == "" { - t.Fatal("CHEF_KEY_MATERIAL must be set for acceptance tests") - } -} diff --git a/builtin/providers/chef/resource_data_bag.go b/builtin/providers/chef/resource_data_bag.go deleted file mode 100644 index a9c08748c..000000000 --- a/builtin/providers/chef/resource_data_bag.go +++ /dev/null @@ -1,77 +0,0 @@ -package chef - -import ( - "github.com/hashicorp/terraform/helper/schema" - - chefc "github.com/go-chef/chef" -) - -func resourceChefDataBag() *schema.Resource { - return &schema.Resource{ - Create: CreateDataBag, - Read: ReadDataBag, - Delete: DeleteDataBag, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - }, - "api_uri": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func CreateDataBag(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - dataBag := &chefc.DataBag{ - Name: d.Get("name").(string), - } - - result, err := client.DataBags.Create(dataBag) - if err != nil { - return err - } - - d.SetId(dataBag.Name) - d.Set("api_uri", result.URI) - return nil -} - -func ReadDataBag(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - // The Chef API provides no API to read a data bag's metadata, - // but we can try to read its items and use that as a proxy for - // whether it still exists. - - name := d.Id() - - _, err := client.DataBags.ListItems(name) - if err != nil { - if errRes, ok := err.(*chefc.ErrorResponse); ok { - if errRes.Response.StatusCode == 404 { - d.SetId("") - return nil - } - } - } - return err -} - -func DeleteDataBag(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - name := d.Id() - - _, err := client.DataBags.Delete(name) - if err == nil { - d.SetId("") - } - return err -} diff --git a/builtin/providers/chef/resource_data_bag_item.go b/builtin/providers/chef/resource_data_bag_item.go deleted file mode 100644 index ff6f7ac67..000000000 --- a/builtin/providers/chef/resource_data_bag_item.go +++ /dev/null @@ -1,120 +0,0 @@ -package chef - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - - chefc "github.com/go-chef/chef" -) - -func resourceChefDataBagItem() *schema.Resource { - return &schema.Resource{ - Create: CreateDataBagItem, - Read: ReadDataBagItem, - Delete: DeleteDataBagItem, - - Schema: map[string]*schema.Schema{ - "data_bag_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "content_json": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: jsonStateFunc, - }, - "id": 
&schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func CreateDataBagItem(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - dataBagName := d.Get("data_bag_name").(string) - itemId, itemContent, err := prepareDataBagItemContent(d.Get("content_json").(string)) - if err != nil { - return err - } - - err = client.DataBags.CreateItem(dataBagName, itemContent) - if err != nil { - return err - } - - d.SetId(itemId) - d.Set("id", itemId) - return nil -} - -func ReadDataBagItem(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - // The Chef API provides no API to read a data bag's metadata, - // but we can try to read its items and use that as a proxy for - // whether it still exists. - - itemId := d.Id() - dataBagName := d.Get("data_bag_name").(string) - - value, err := client.DataBags.GetItem(dataBagName, itemId) - if err != nil { - if errRes, ok := err.(*chefc.ErrorResponse); ok { - if errRes.Response.StatusCode == 404 { - d.SetId("") - return nil - } - } else { - return err - } - } - - jsonContent, err := json.Marshal(value) - if err != nil { - return err - } - - d.Set("content_json", string(jsonContent)) - - return nil -} - -func DeleteDataBagItem(d *schema.ResourceData, meta interface{}) error { - client := meta.(*chefc.Client) - - itemId := d.Id() - dataBagName := d.Get("data_bag_name").(string) - - err := client.DataBags.DeleteItem(dataBagName, itemId) - if err == nil { - d.SetId("") - d.Set("id", "") - } - return err -} - -func prepareDataBagItemContent(contentJson string) (string, interface{}, error) { - var value map[string]interface{} - err := json.Unmarshal([]byte(contentJson), &value) - if err != nil { - return "", nil, err - } - - var itemId string - if itemIdI, ok := value["id"]; ok { - itemId, _ = itemIdI.(string) - } - - if itemId == "" { - return "", nil, fmt.Errorf("content_json must have id attribute, set to a string") - } - - return itemId, value, nil 
-} diff --git a/builtin/providers/chef/resource_data_bag_item_test.go b/builtin/providers/chef/resource_data_bag_item_test.go deleted file mode 100644 index 9630d8b6c..000000000 --- a/builtin/providers/chef/resource_data_bag_item_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package chef - -import ( - "fmt" - "reflect" - "testing" - - chefc "github.com/go-chef/chef" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataBagItem_basic(t *testing.T) { - var dataBagItemName string - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDataBagItemCheckDestroy(dataBagItemName), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataBagItemConfig_basic, - Check: testAccDataBagItemCheck( - "chef_data_bag_item.test", &dataBagItemName, - ), - }, - }, - }) -} - -func testAccDataBagItemCheck(rn string, name *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("data bag item id not set") - } - - client := testAccProvider.Meta().(*chefc.Client) - content, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting data bag item: %s", err) - } - - expectedContent := map[string]interface{}{ - "id": "terraform_acc_test", - "something_else": true, - } - if !reflect.DeepEqual(content, expectedContent) { - return fmt.Errorf("wrong content: expected %#v, got %#v", expectedContent, content) - } - - if expected := "terraform_acc_test"; rs.Primary.Attributes["id"] != expected { - return fmt.Errorf("wrong id; expected %#v, got %#v", expected, rs.Primary.Attributes["id"]) - } - - *name = rs.Primary.ID - - return nil - } -} - -func testAccDataBagItemCheckDestroy(name string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*chefc.Client) - _, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", name) - if err == nil { - return fmt.Errorf("data bag item still exists") - } - if _, ok := err.(*chefc.ErrorResponse); err != nil && !ok { - return fmt.Errorf("got something other than an HTTP error (%v) when getting data bag item", err) - } - - return nil - } -} - -const testAccDataBagItemConfig_basic = ` -resource "chef_data_bag" "test" { - name = "terraform-acc-test-bag-item-basic" -} -resource "chef_data_bag_item" "test" { - data_bag_name = "terraform-acc-test-bag-item-basic" - depends_on = ["chef_data_bag.test"] - content_json = <&1 make -C ../../.. testacc TEST=./builtin/providers/circonus | tee test.log diff --git a/builtin/providers/circonus/check.go b/builtin/providers/circonus/check.go deleted file mode 100644 index 8058b315e..000000000 --- a/builtin/providers/circonus/check.go +++ /dev/null @@ -1,133 +0,0 @@ -package circonus - -import ( - "fmt" - "log" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" -) - -// The circonusCheck type is the backing store of the `circonus_check` resource. 
- -type circonusCheck struct { - api.CheckBundle -} - -type circonusCheckType string - -const ( - // CheckBundle.Status can be one of these values - checkStatusActive = "active" - checkStatusDisabled = "disabled" -) - -const ( - apiCheckTypeCAQL circonusCheckType = "caql" - apiCheckTypeConsul circonusCheckType = "consul" - apiCheckTypeICMPPing circonusCheckType = "ping_icmp" - apiCheckTypeHTTP circonusCheckType = "http" - apiCheckTypeJSON circonusCheckType = "json" - apiCheckTypeMySQL circonusCheckType = "mysql" - apiCheckTypeStatsd circonusCheckType = "statsd" - apiCheckTypePostgreSQL circonusCheckType = "postgres" - apiCheckTypeTCP circonusCheckType = "tcp" -) - -func newCheck() circonusCheck { - return circonusCheck{ - CheckBundle: *api.NewCheckBundle(), - } -} - -func loadCheck(ctxt *providerContext, cid api.CIDType) (circonusCheck, error) { - var c circonusCheck - cb, err := ctxt.client.FetchCheckBundle(cid) - if err != nil { - return circonusCheck{}, err - } - c.CheckBundle = *cb - - return c, nil -} - -func checkAPIStatusToBool(s string) bool { - var active bool - switch s { - case checkStatusActive: - active = true - case checkStatusDisabled: - active = false - default: - log.Printf("[ERROR] PROVIDER BUG: check status %q unsupported", s) - } - - return active -} - -func checkActiveToAPIStatus(active bool) string { - if active { - return checkStatusActive - } - - return checkStatusDisabled -} - -func (c *circonusCheck) Create(ctxt *providerContext) error { - cb, err := ctxt.client.CreateCheckBundle(&c.CheckBundle) - if err != nil { - return err - } - - c.CID = cb.CID - - return nil -} - -func (c *circonusCheck) Update(ctxt *providerContext) error { - _, err := ctxt.client.UpdateCheckBundle(&c.CheckBundle) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to update check bundle %s: {{err}}", c.CID), err) - } - - return nil -} - -func (c *circonusCheck) Fixup() error { - switch apiCheckType(c.Type) { - case apiCheckTypeCloudWatchAttr: - switch 
c.Period { - case 60: - c.Config[config.Granularity] = "1" - case 300: - c.Config[config.Granularity] = "5" - } - } - - return nil -} - -func (c *circonusCheck) Validate() error { - if len(c.Metrics) == 0 { - return fmt.Errorf("At least one %s must be specified", checkMetricAttr) - } - - if c.Timeout > float32(c.Period) { - return fmt.Errorf("Timeout (%f) can not exceed period (%d)", c.Timeout, c.Period) - } - - // Check-type specific validation - switch apiCheckType(c.Type) { - case apiCheckTypeCloudWatchAttr: - if !(c.Period == 60 || c.Period == 300) { - return fmt.Errorf("Period must be either 1m or 5m for a %s check", apiCheckTypeCloudWatchAttr) - } - case apiCheckTypeConsulAttr: - if v, found := c.Config[config.URL]; !found || v == "" { - return fmt.Errorf("%s must have at least one check mode set: %s, %s, or %s must be set", checkConsulAttr, checkConsulServiceAttr, checkConsulNodeAttr, checkConsulStateAttr) - } - } - - return nil -} diff --git a/builtin/providers/circonus/consts.go b/builtin/providers/circonus/consts.go deleted file mode 100644 index 9dd0d248f..000000000 --- a/builtin/providers/circonus/consts.go +++ /dev/null @@ -1,140 +0,0 @@ -package circonus - -const ( - // Provider-level constants - - // defaultAutoTag determines the default behavior of circonus.auto_tag. - defaultAutoTag = false - - // When auto_tag is enabled, the default tag category and value will be set to - // the following value unless overriden. 
- defaultCirconusTag circonusTag = "author:terraform" - - // When hashing a Set, default to a buffer this size - defaultHashBufSize = 512 - - providerAPIURLAttr = "api_url" - providerAutoTagAttr = "auto_tag" - providerKeyAttr = "key" - - apiConsulCheckBlacklist = "check_name_blacklist" - apiConsulDatacenterAttr = "dc" - apiConsulNodeBlacklist = "node_blacklist" - apiConsulServiceBlacklist = "service_blacklist" - apiConsulStaleAttr = "stale" - checkConsulTokenHeader = `X-Consul-Token` - checkConsulV1NodePrefix = "node" - checkConsulV1Prefix = "/v1/health" - checkConsulV1ServicePrefix = "service" - checkConsulV1StatePrefix = "state" - defaultCheckConsulHTTPAddr = "http://consul.service.consul" - defaultCheckConsulPort = "8500" - - defaultCheckJSONMethod = "GET" - defaultCheckJSONPort = "443" - defaultCheckJSONVersion = "1.1" - - defaultCheckICMPPingAvailability = 100.0 - defaultCheckICMPPingCount = 5 - defaultCheckICMPPingInterval = "2s" - - defaultCheckCAQLTarget = "q._caql" - - defaultCheckHTTPCodeRegexp = `^200$` - defaultCheckHTTPMethod = "GET" - defaultCheckHTTPVersion = "1.1" - - defaultCheckHTTPTrapAsync = false - - defaultCheckCloudWatchVersion = "2010-08-01" - - defaultCollectorDetailAttrs = 10 - - defaultGraphDatapoints = 8 - defaultGraphLineStyle = "stepped" - defaultGraphStyle = "line" - defaultGraphFunction = "gauge" - - metricUnit = "" - metricUnitRegexp = `^.*$` - - defaultRuleSetLast = "300s" - defaultRuleSetMetricType = "numeric" - defaultRuleSetRuleLen = 4 - defaultAlertSeverity = 1 - defaultRuleSetWindowFunc = "average" - ruleSetAbsentMin = "70s" -) - -// Consts and their close relative, Go pseudo-consts. 
- -// validMetricTypes: See `type`: https://login.circonus.com/resources/api/calls/check_bundle -var validMetricTypes = validStringValues{ - `caql`, - `composite`, - `histogram`, - `numeric`, - `text`, -} - -// validAggregateFuncs: See `aggregate_function`: https://login.circonus.com/resources/api/calls/graph -var validAggregateFuncs = validStringValues{ - `none`, - `min`, - `max`, - `sum`, - `mean`, - `geometric_mean`, -} - -// validGraphLineStyles: See `line_style`: https://login.circonus.com/resources/api/calls/graph -var validGraphLineStyles = validStringValues{ - `stepped`, - `interpolated`, -} - -// validGraphStyles: See `style`: https://login.circonus.com/resources/api/calls/graph -var validGraphStyles = validStringValues{ - `area`, - `line`, -} - -// validAxisAttrs: See `line_style`: https://login.circonus.com/resources/api/calls/graph -var validAxisAttrs = validStringValues{ - `left`, - `right`, -} - -// validGraphFunctionValues: See `derive`: https://login.circonus.com/resources/api/calls/graph -var validGraphFunctionValues = validStringValues{ - `counter`, - `derive`, - `gauge`, -} - -// validRuleSetWindowFuncs: See `derive` or `windowing_func`: https://login.circonus.com/resources/api/calls/rule_set -var validRuleSetWindowFuncs = validStringValues{ - `average`, - `stddev`, - `derive`, - `derive_stddev`, - `counter`, - `counter_stddev`, - `derive_2`, - `derive_2_stddev`, - `counter_2`, - `counter_2_stddev`, -} - -const ( - // Supported circonus_trigger.metric_types. 
See `metric_type`: - // https://login.circonus.com/resources/api/calls/rule_set - ruleSetMetricTypeNumeric = "numeric" - ruleSetMetricTypeText = "text" -) - -// validRuleSetMetricTypes: See `metric_type`: https://login.circonus.com/resources/api/calls/rule_set -var validRuleSetMetricTypes = validStringValues{ - ruleSetMetricTypeNumeric, - ruleSetMetricTypeText, -} diff --git a/builtin/providers/circonus/data_source_circonus_account.go b/builtin/providers/circonus/data_source_circonus_account.go deleted file mode 100644 index c7a9121bc..000000000 --- a/builtin/providers/circonus/data_source_circonus_account.go +++ /dev/null @@ -1,271 +0,0 @@ -package circonus - -import ( - "fmt" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - accountAddress1Attr = "address1" - accountAddress2Attr = "address2" - accountCCEmailAttr = "cc_email" - accountCityAttr = "city" - accountContactGroupsAttr = "contact_groups" - accountCountryAttr = "country" - accountCurrentAttr = "current" - accountDescriptionAttr = "description" - accountEmailAttr = "email" - accountIDAttr = "id" - accountInvitesAttr = "invites" - accountLimitAttr = "limit" - accountNameAttr = "name" - accountOwnerAttr = "owner" - accountRoleAttr = "role" - accountStateProvAttr = "state" - accountTimezoneAttr = "timezone" - accountTypeAttr = "type" - accountUIBaseURLAttr = "ui_base_url" - accountUsageAttr = "usage" - accountUsedAttr = "used" - accountUserIDAttr = "id" - accountUsersAttr = "users" -) - -var accountDescription = map[schemaAttr]string{ - accountContactGroupsAttr: "Contact Groups in this account", - accountInvitesAttr: "Outstanding invites attached to the account", - accountUsageAttr: "Account's usage limits", - accountUsersAttr: "Users attached to this account", -} - -func dataSourceCirconusAccount() *schema.Resource { - return &schema.Resource{ - 
Read: dataSourceCirconusAccountRead, - - Schema: map[string]*schema.Schema{ - accountAddress1Attr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountAddress1Attr], - }, - accountAddress2Attr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountAddress2Attr], - }, - accountCCEmailAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountCCEmailAttr], - }, - accountIDAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{accountCurrentAttr}, - ValidateFunc: validateFuncs( - validateRegexp(accountIDAttr, config.AccountCIDRegex), - ), - Description: accountDescription[accountIDAttr], - }, - accountCityAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountCityAttr], - }, - accountContactGroupsAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: accountDescription[accountContactGroupsAttr], - }, - accountCountryAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountCountryAttr], - }, - accountCurrentAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ConflictsWith: []string{accountIDAttr}, - Description: accountDescription[accountCurrentAttr], - }, - accountDescriptionAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountDescriptionAttr], - }, - accountInvitesAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: accountDescription[accountInvitesAttr], - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - accountEmailAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountEmailAttr], - }, - accountRoleAttr: 
&schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountRoleAttr], - }, - }, - }, - }, - accountNameAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountNameAttr], - }, - accountOwnerAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountOwnerAttr], - }, - accountStateProvAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountStateProvAttr], - }, - accountTimezoneAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountTimezoneAttr], - }, - accountUIBaseURLAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountUIBaseURLAttr], - }, - accountUsageAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: accountDescription[accountUsageAttr], - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - accountLimitAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: accountDescription[accountLimitAttr], - }, - accountTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountTypeAttr], - }, - accountUsedAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: accountDescription[accountUsedAttr], - }, - }, - }, - }, - accountUsersAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: accountDescription[accountUsersAttr], - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - accountUserIDAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountUserIDAttr], - }, - accountRoleAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: accountDescription[accountRoleAttr], - }, - }, - }, - }, - }, - } -} - -func 
dataSourceCirconusAccountRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*providerContext) - - var cid string - - var a *api.Account - var err error - if v, ok := d.GetOk(accountIDAttr); ok { - cid = v.(string) - } - - if v, ok := d.GetOk(accountCurrentAttr); ok { - if v.(bool) { - cid = "" - } - } - - a, err = c.client.FetchAccount(api.CIDType(&cid)) - if err != nil { - return err - } - - invitesList := make([]interface{}, 0, len(a.Invites)) - for i := range a.Invites { - invitesList = append(invitesList, map[string]interface{}{ - accountEmailAttr: a.Invites[i].Email, - accountRoleAttr: a.Invites[i].Role, - }) - } - - usageList := make([]interface{}, 0, len(a.Usage)) - for i := range a.Usage { - usageList = append(usageList, map[string]interface{}{ - accountLimitAttr: a.Usage[i].Limit, - accountTypeAttr: a.Usage[i].Type, - accountUsedAttr: a.Usage[i].Used, - }) - } - - usersList := make([]interface{}, 0, len(a.Users)) - for i := range a.Users { - usersList = append(usersList, map[string]interface{}{ - accountUserIDAttr: a.Users[i].UserCID, - accountRoleAttr: a.Users[i].Role, - }) - } - - d.SetId(a.CID) - - d.Set(accountAddress1Attr, a.Address1) - d.Set(accountAddress2Attr, a.Address2) - d.Set(accountCCEmailAttr, a.CCEmail) - d.Set(accountIDAttr, a.CID) - d.Set(accountCityAttr, a.City) - d.Set(accountContactGroupsAttr, a.ContactGroups) - d.Set(accountCountryAttr, a.Country) - d.Set(accountDescriptionAttr, a.Description) - - if err := d.Set(accountInvitesAttr, invitesList); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store account %q attribute: {{err}}", accountInvitesAttr), err) - } - - d.Set(accountNameAttr, a.Name) - d.Set(accountOwnerAttr, a.OwnerCID) - d.Set(accountStateProvAttr, a.StateProv) - d.Set(accountTimezoneAttr, a.Timezone) - d.Set(accountUIBaseURLAttr, a.UIBaseURL) - - if err := d.Set(accountUsageAttr, usageList); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store account %q attribute: {{err}}", 
accountUsageAttr), err) - } - - if err := d.Set(accountUsersAttr, usersList); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store account %q attribute: {{err}}", accountUsersAttr), err) - } - - return nil -} diff --git a/builtin/providers/circonus/data_source_circonus_account_test.go b/builtin/providers/circonus/data_source_circonus_account_test.go deleted file mode 100644 index 78b08c52d..000000000 --- a/builtin/providers/circonus/data_source_circonus_account_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceCirconusAccount(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceCirconusAccountCurrentConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceCirconusAccountCheck("data.circonus_account.by_current", "/account/3081"), - ), - }, - }, - }) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceCirconusAccountIDConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceCirconusAccountCheck("data.circonus_account.by_id", "/account/3081"), - ), - }, - }, - }) -} - -func testAccDataSourceCirconusAccountCheck(name, cid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - if attr[accountIDAttr] != cid { - return fmt.Errorf("bad %s %s", accountIDAttr, attr[accountIDAttr]) - } - - return nil - } -} - -const testAccDataSourceCirconusAccountCurrentConfig = ` -data "circonus_account" "by_current" { - current = 
true -} -` - -const testAccDataSourceCirconusAccountIDConfig = ` -data "circonus_account" "by_id" { - id = "/account/3081" -} -` diff --git a/builtin/providers/circonus/data_source_circonus_collector.go b/builtin/providers/circonus/data_source_circonus_collector.go deleted file mode 100644 index 6dda66a82..000000000 --- a/builtin/providers/circonus/data_source_circonus_collector.go +++ /dev/null @@ -1,214 +0,0 @@ -package circonus - -import ( - "fmt" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - collectorCNAttr = "cn" - collectorIDAttr = "id" - collectorDetailsAttr = "details" - collectorExternalHostAttr = "external_host" - collectorExternalPortAttr = "external_port" - collectorIPAttr = "ip" - collectorLatitudeAttr = "latitude" - collectorLongitudeAttr = "longitude" - collectorMinVersionAttr = "min_version" - collectorModulesAttr = "modules" - collectorNameAttr = "name" - collectorPortAttr = "port" - collectorSkewAttr = "skew" - collectorStatusAttr = "status" - collectorTagsAttr = "tags" - collectorTypeAttr = "type" - collectorVersionAttr = "version" -) - -var collectorDescription = map[schemaAttr]string{ - collectorDetailsAttr: "Details associated with individual collectors (a.k.a. 
broker)", - collectorTagsAttr: "Tags assigned to a collector", -} - -func dataSourceCirconusCollector() *schema.Resource { - return &schema.Resource{ - Read: dataSourceCirconusCollectorRead, - - Schema: map[string]*schema.Schema{ - collectorDetailsAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: collectorDescription[collectorDetailsAttr], - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - collectorCNAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorCNAttr], - }, - collectorExternalHostAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorExternalHostAttr], - }, - collectorExternalPortAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: collectorDescription[collectorExternalPortAttr], - }, - collectorIPAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorIPAttr], - }, - collectorMinVersionAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: collectorDescription[collectorMinVersionAttr], - }, - collectorModulesAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: collectorDescription[collectorModulesAttr], - }, - collectorPortAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: collectorDescription[collectorPortAttr], - }, - collectorSkewAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorSkewAttr], - }, - collectorStatusAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorStatusAttr], - }, - collectorVersionAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: collectorDescription[collectorVersionAttr], - }, - }, - }, - }, - collectorIDAttr: 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateRegexp(collectorIDAttr, config.BrokerCIDRegex), - Description: collectorDescription[collectorIDAttr], - }, - collectorLatitudeAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorLatitudeAttr], - }, - collectorLongitudeAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorLongitudeAttr], - }, - collectorNameAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorNameAttr], - }, - collectorTagsAttr: tagMakeConfigSchema(collectorTagsAttr), - collectorTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: collectorDescription[collectorTypeAttr], - }, - }, - } -} - -func dataSourceCirconusCollectorRead(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - var collector *api.Broker - var err error - cid := d.Id() - if cidRaw, ok := d.GetOk(collectorIDAttr); ok { - cid = cidRaw.(string) - } - collector, err = ctxt.client.FetchBroker(api.CIDType(&cid)) - if err != nil { - return err - } - - d.SetId(collector.CID) - - if err := d.Set(collectorDetailsAttr, collectorDetailsToState(collector)); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store collector %q attribute: {{err}}", collectorDetailsAttr), err) - } - - d.Set(collectorIDAttr, collector.CID) - d.Set(collectorLatitudeAttr, collector.Latitude) - d.Set(collectorLongitudeAttr, collector.Longitude) - d.Set(collectorNameAttr, collector.Name) - d.Set(collectorTagsAttr, collector.Tags) - d.Set(collectorTypeAttr, collector.Type) - - return nil -} - -func collectorDetailsToState(c *api.Broker) []interface{} { - details := make([]interface{}, 0, len(c.Details)) - - for _, collector := range c.Details { - collectorDetails := make(map[string]interface{}, 
defaultCollectorDetailAttrs) - - collectorDetails[collectorCNAttr] = collector.CN - - if collector.ExternalHost != nil { - collectorDetails[collectorExternalHostAttr] = *collector.ExternalHost - } - - if collector.ExternalPort != 0 { - collectorDetails[collectorExternalPortAttr] = collector.ExternalPort - } - - if collector.IP != nil { - collectorDetails[collectorIPAttr] = *collector.IP - } - - if collector.MinVer != 0 { - collectorDetails[collectorMinVersionAttr] = collector.MinVer - } - - if len(collector.Modules) > 0 { - collectorDetails[collectorModulesAttr] = collector.Modules - } - - if collector.Port != nil { - collectorDetails[collectorPortAttr] = *collector.Port - } - - if collector.Skew != nil { - collectorDetails[collectorSkewAttr] = *collector.Skew - } - - if collector.Status != "" { - collectorDetails[collectorStatusAttr] = collector.Status - } - - if collector.Version != nil { - collectorDetails[collectorVersionAttr] = *collector.Version - } - - details = append(details, collectorDetails) - } - - return details -} diff --git a/builtin/providers/circonus/data_source_circonus_collector_test.go b/builtin/providers/circonus/data_source_circonus_collector_test.go deleted file mode 100644 index 54d5feba9..000000000 --- a/builtin/providers/circonus/data_source_circonus_collector_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceCirconusCollector(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceCirconusCollectorConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceCirconusCollectorCheck("data.circonus_collector.by_id", "/broker/1"), - ), - }, - }, - }) -} - -func testAccDataSourceCirconusCollectorCheck(name, cid string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - attr := rs.Primary.Attributes - - if attr[collectorIDAttr] != cid { - return fmt.Errorf("bad id %s", attr[collectorIDAttr]) - } - - return nil - } -} - -const testAccDataSourceCirconusCollectorConfig = ` -data "circonus_collector" "by_id" { - id = "/broker/1" -} -` diff --git a/builtin/providers/circonus/interface.go b/builtin/providers/circonus/interface.go deleted file mode 100644 index d5777cf81..000000000 --- a/builtin/providers/circonus/interface.go +++ /dev/null @@ -1,83 +0,0 @@ -package circonus - -import "log" - -type interfaceList []interface{} -type interfaceMap map[string]interface{} - -// newInterfaceMap returns a helper type that has methods for common operations -// for accessing data. -func newInterfaceMap(l interface{}) interfaceMap { - return interfaceMap(l.(map[string]interface{})) -} - -// CollectList returns []string of values that matched the key attrName. -// interfaceList most likely came from a schema.TypeSet. 
-func (l interfaceList) CollectList(attrName schemaAttr) []string { - stringList := make([]string, 0, len(l)) - - for _, mapRaw := range l { - mapAttrs := mapRaw.(map[string]interface{}) - - if v, ok := mapAttrs[string(attrName)]; ok { - stringList = append(stringList, v.(string)) - } - } - - return stringList -} - -// List returns a list of values in a Set as a string slice -func (l interfaceList) List() []string { - stringList := make([]string, 0, len(l)) - for _, e := range l { - switch e.(type) { - case string: - stringList = append(stringList, e.(string)) - case []interface{}: - for _, v := range e.([]interface{}) { - stringList = append(stringList, v.(string)) - } - default: - log.Printf("[ERROR] PROVIDER BUG: unable to convert %#v to list", e) - return nil - } - } - return stringList -} - -// CollectList returns []string of values that matched the key attrName. -// interfaceMap most likely came from a schema.TypeSet. -func (m interfaceMap) CollectList(attrName schemaAttr) []string { - stringList := make([]string, 0, len(m)) - - for _, mapRaw := range m { - mapAttrs := mapRaw.(map[string]interface{}) - - if v, ok := mapAttrs[string(attrName)]; ok { - stringList = append(stringList, v.(string)) - } - } - - return stringList -} - -// CollectMap returns map[string]string of values that matched the key attrName. -// interfaceMap most likely came from a schema.TypeSet. 
-func (m interfaceMap) CollectMap(attrName schemaAttr) map[string]string { - var mergedMap map[string]string - - if attrRaw, ok := m[string(attrName)]; ok { - attrMap := attrRaw.(map[string]interface{}) - mergedMap = make(map[string]string, len(m)) - for k, v := range attrMap { - mergedMap[k] = v.(string) - } - } - - if len(mergedMap) == 0 { - return nil - } - - return mergedMap -} diff --git a/builtin/providers/circonus/metric.go b/builtin/providers/circonus/metric.go deleted file mode 100644 index 78483ec79..000000000 --- a/builtin/providers/circonus/metric.go +++ /dev/null @@ -1,182 +0,0 @@ -package circonus - -// The circonusMetric type is the backing store of the `circonus_metric` resource. - -import ( - "bytes" - "fmt" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/errwrap" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -type circonusMetric struct { - ID metricID - api.CheckBundleMetric -} - -func newMetric() circonusMetric { - return circonusMetric{} -} - -func (m *circonusMetric) Create(d *schema.ResourceData) error { - return m.SaveState(d) -} - -func (m *circonusMetric) ParseConfig(id string, d *schema.ResourceData) error { - m.ID = metricID(id) - - if v, found := d.GetOk(metricNameAttr); found { - m.Name = v.(string) - } - - if v, found := d.GetOk(metricActiveAttr); found { - m.Status = metricActiveToAPIStatus(v.(bool)) - } - - if v, found := d.GetOk(metricTagsAttr); found { - m.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if v, found := d.GetOk(metricTypeAttr); found { - m.Type = v.(string) - } - - if v, found := d.GetOk(metricUnitAttr); found { - s := v.(string) - m.Units = &s - } - - if m.Units != nil && *m.Units == "" { - m.Units = nil - } - - return nil -} - -func (m *circonusMetric) ParseConfigMap(id string, attrMap map[string]interface{}) error { - m.ID = metricID(id) - - if v, found := 
attrMap[metricNameAttr]; found { - m.Name = v.(string) - } - - if v, found := attrMap[metricActiveAttr]; found { - m.Status = metricActiveToAPIStatus(v.(bool)) - } - - if v, found := attrMap[metricTagsAttr]; found { - m.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if v, found := attrMap[metricTypeAttr]; found { - m.Type = v.(string) - } - - if v, found := attrMap[metricUnitAttr]; found { - s := v.(string) - m.Units = &s - } - - if m.Units != nil && *m.Units == "" { - m.Units = nil - } - - return nil -} - -func (m *circonusMetric) SaveState(d *schema.ResourceData) error { - d.SetId(string(m.ID)) - - d.Set(metricActiveAttr, metricAPIStatusToBool(m.Status)) - d.Set(metricNameAttr, m.Name) - d.Set(metricTagsAttr, tagsToState(apiToTags(m.Tags))) - d.Set(metricTypeAttr, m.Type) - d.Set(metricUnitAttr, indirect(m.Units)) - - return nil -} - -func (m *circonusMetric) Update(d *schema.ResourceData) error { - // NOTE: there are no "updates" to be made against an API server, so we just - // pass through a call to SaveState. Keep this method around for API - // symmetry. - return m.SaveState(d) -} - -func metricAPIStatusToBool(s string) bool { - switch s { - case metricStatusActive: - return true - case metricStatusAvailable: - return false - default: - // log.Printf("PROVIDER BUG: metric status %q unsupported", s) - return false - } -} - -func metricActiveToAPIStatus(active bool) string { - if active { - return metricStatusActive - } - - return metricStatusAvailable -} - -func newMetricID() (string, error) { - id, err := uuid.GenerateUUID() - if err != nil { - return "", errwrap.Wrapf("metric ID creation failed: {{err}}", err) - } - - return id, nil -} - -func metricChecksum(m interfaceMap) int { - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. 
- if v, found := m[metricActiveAttr]; found { - fmt.Fprintf(b, "%t", v.(bool)) - } - - if v, found := m[metricNameAttr]; found { - fmt.Fprint(b, v.(string)) - } - - if v, found := m[metricTagsAttr]; found { - tags := derefStringList(flattenSet(v.(*schema.Set))) - for _, tag := range tags { - fmt.Fprint(b, tag) - } - } - - if v, found := m[metricTypeAttr]; found { - fmt.Fprint(b, v.(string)) - } - - if v, found := m[metricUnitAttr]; found { - if v != nil { - var s string - switch v.(type) { - case string: - s = v.(string) - case *string: - s = *v.(*string) - } - - if s != "" { - fmt.Fprint(b, s) - } - } - } - - s := b.String() - return hashcode.String(s) -} diff --git a/builtin/providers/circonus/metric_cluster.go b/builtin/providers/circonus/metric_cluster.go deleted file mode 100644 index 6d06834f4..000000000 --- a/builtin/providers/circonus/metric_cluster.go +++ /dev/null @@ -1,57 +0,0 @@ -package circonus - -import ( - "fmt" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/errwrap" -) - -type circonusMetricCluster struct { - api.MetricCluster -} - -func newMetricCluster() circonusMetricCluster { - return circonusMetricCluster{ - MetricCluster: api.MetricCluster{}, - } -} - -func loadMetricCluster(ctxt *providerContext, cid api.CIDType) (circonusMetricCluster, error) { - var mc circonusMetricCluster - cmc, err := ctxt.client.FetchMetricCluster(cid, "") - if err != nil { - return circonusMetricCluster{}, err - } - mc.MetricCluster = *cmc - - return mc, nil -} - -func (mc *circonusMetricCluster) Create(ctxt *providerContext) error { - cmc, err := ctxt.client.CreateMetricCluster(&mc.MetricCluster) - if err != nil { - return err - } - - mc.CID = cmc.CID - - return nil -} - -func (mc *circonusMetricCluster) Update(ctxt *providerContext) error { - _, err := ctxt.client.UpdateMetricCluster(&mc.MetricCluster) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to update stream group %s: {{err}}", mc.CID), err) - } - - return nil -} - 
-func (mc *circonusMetricCluster) Validate() error { - if len(mc.Queries) < 1 { - return fmt.Errorf("there must be at least one stream group query present") - } - - return nil -} diff --git a/builtin/providers/circonus/metric_test.go b/builtin/providers/circonus/metric_test.go deleted file mode 100644 index b7b28efb8..000000000 --- a/builtin/providers/circonus/metric_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package circonus - -import "testing" - -func Test_MetricChecksum(t *testing.T) { - unit := "qty" - m := interfaceMap{ - string(metricActiveAttr): true, - string(metricNameAttr): "asdf", - string(metricTagsAttr): tagsToState(apiToTags([]string{"foo", "bar"})), - string(metricTypeAttr): "json", - string(metricUnitAttr): &unit, - } - - csum := metricChecksum(m) - if csum != 4250221491 { - t.Fatalf("Checksum mismatch") - } -} diff --git a/builtin/providers/circonus/provider.go b/builtin/providers/circonus/provider.go deleted file mode 100644 index 5ccd2d9a4..000000000 --- a/builtin/providers/circonus/provider.go +++ /dev/null @@ -1,126 +0,0 @@ -package circonus - -import ( - "fmt" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -const ( - defaultCirconus404ErrorString = "API response code 404:" - defaultCirconusAggregationWindow = "300s" - defaultCirconusAlertMinEscalateAfter = "300s" - defaultCirconusCheckPeriodMax = "300s" - defaultCirconusCheckPeriodMin = "30s" - defaultCirconusHTTPFormat = "json" - defaultCirconusHTTPMethod = "POST" - defaultCirconusSlackUsername = "Circonus" - defaultCirconusTimeoutMax = "300s" - defaultCirconusTimeoutMin = "0s" - maxSeverity = 5 - minSeverity = 1 -) - -var providerDescription = map[string]string{ - providerAPIURLAttr: "URL of the Circonus API", - providerAutoTagAttr: "Signals that the provider should automatically add a tag to all API calls denoting that the resource was created by 
Terraform", - providerKeyAttr: "API token used to authenticate with the Circonus API", -} - -// Constants that want to be a constant but can't in Go -var ( - validContactHTTPFormats = validStringValues{"json", "params"} - validContactHTTPMethods = validStringValues{"GET", "POST"} -) - -type contactMethods string - -// globalAutoTag controls whether or not the provider should automatically add a -// tag to each resource. -// -// NOTE(sean): This is done as a global variable because the diff suppress -// functions does not have access to the providerContext, only the key, old, and -// new values. -var globalAutoTag bool - -type providerContext struct { - // Circonus API client - client *api.API - - // autoTag, when true, automatically appends defaultCirconusTag - autoTag bool - - // defaultTag make up the tag to be used when autoTag tags a tag. - defaultTag circonusTag -} - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - providerAPIURLAttr: { - Type: schema.TypeString, - Optional: true, - Default: "https://api.circonus.com/v2", - Description: providerDescription[providerAPIURLAttr], - }, - providerAutoTagAttr: { - Type: schema.TypeBool, - Optional: true, - Default: defaultAutoTag, - Description: providerDescription[providerAutoTagAttr], - }, - providerKeyAttr: { - Type: schema.TypeString, - Required: true, - Sensitive: true, - DefaultFunc: schema.EnvDefaultFunc("CIRCONUS_API_TOKEN", nil), - Description: providerDescription[providerKeyAttr], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "circonus_account": dataSourceCirconusAccount(), - "circonus_collector": dataSourceCirconusCollector(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "circonus_check": resourceCheck(), - "circonus_contact_group": resourceContactGroup(), - "circonus_graph": resourceGraph(), - "circonus_metric": resourceMetric(), - "circonus_metric_cluster": 
resourceMetricCluster(), - "circonus_rule_set": resourceRuleSet(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - globalAutoTag = d.Get(providerAutoTagAttr).(bool) - - config := &api.Config{ - URL: d.Get(providerAPIURLAttr).(string), - TokenKey: d.Get(providerKeyAttr).(string), - TokenApp: tfAppName(), - } - - client, err := api.NewAPI(config) - if err != nil { - return nil, errwrap.Wrapf("Error initializing Circonus: %s", err) - } - - return &providerContext{ - client: client, - autoTag: d.Get(providerAutoTagAttr).(bool), - defaultTag: defaultCirconusTag, - }, nil -} - -func tfAppName() string { - return fmt.Sprintf("Terraform v%s", terraform.VersionString()) -} diff --git a/builtin/providers/circonus/provider_test.go b/builtin/providers/circonus/provider_test.go deleted file mode 100644 index 4a30f4877..000000000 --- a/builtin/providers/circonus/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package circonus - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "circonus": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if apiToken := os.Getenv("CIRCONUS_API_TOKEN"); apiToken == "" { - t.Fatal("CIRCONUS_API_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/circonus/resource_circonus_check.go b/builtin/providers/circonus/resource_circonus_check.go deleted file mode 100644 index 06bf2e9cf..000000000 --- 
a/builtin/providers/circonus/resource_circonus_check.go +++ /dev/null @@ -1,643 +0,0 @@ -package circonus - -/* - * Note to future readers: The `circonus_check` resource is actually a facade for - * the check_bundle call. check_bundle is an implementation detail that we mask - * over and expose just a "check" even though the "check" is actually a - * check_bundle. - * - * Style note: There are three directions that information flows: - * - * 1) Terraform Config file into API Objects. *Attr named objects are Config or - * Schema attribute names. In this file, all config constants should be - * named check*Attr. - * - * 2) API Objects into Statefile data. api*Attr named constants are parameters - * that originate from the API and need to be mapped into the provider's - * vernacular. - */ - -import ( - "fmt" - "time" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.* global resource attribute names - checkActiveAttr = "active" - checkCAQLAttr = "caql" - checkCloudWatchAttr = "cloudwatch" - checkCollectorAttr = "collector" - checkConsulAttr = "consul" - checkHTTPAttr = "http" - checkHTTPTrapAttr = "httptrap" - checkICMPPingAttr = "icmp_ping" - checkJSONAttr = "json" - checkMetricAttr = "metric" - checkMetricLimitAttr = "metric_limit" - checkMySQLAttr = "mysql" - checkNameAttr = "name" - checkNotesAttr = "notes" - checkPeriodAttr = "period" - checkPostgreSQLAttr = "postgresql" - checkStatsdAttr = "statsd" - checkTCPAttr = "tcp" - checkTagsAttr = "tags" - checkTargetAttr = "target" - checkTimeoutAttr = "timeout" - checkTypeAttr = "type" - - // circonus_check.collector.* resource attribute names - checkCollectorIDAttr = "id" - - // circonus_check.metric.* resource attribute names are aliased to - // circonus_metric.* resource attributes. 
- - // circonus_check.metric.* resource attribute names - // metricIDAttr = "id" - - // Out parameters for circonus_check - checkOutByCollectorAttr = "check_by_collector" - checkOutIDAttr = "check_id" - checkOutChecksAttr = "checks" - checkOutCreatedAttr = "created" - checkOutLastModifiedAttr = "last_modified" - checkOutLastModifiedByAttr = "last_modified_by" - checkOutReverseConnectURLsAttr = "reverse_connect_urls" - checkOutCheckUUIDsAttr = "uuids" -) - -const ( - // Circonus API constants from their API endpoints - apiCheckTypeCAQLAttr apiCheckType = "caql" - apiCheckTypeCloudWatchAttr apiCheckType = "cloudwatch" - apiCheckTypeConsulAttr apiCheckType = "consul" - apiCheckTypeHTTPAttr apiCheckType = "http" - apiCheckTypeHTTPTrapAttr apiCheckType = "httptrap" - apiCheckTypeICMPPingAttr apiCheckType = "ping_icmp" - apiCheckTypeJSONAttr apiCheckType = "json" - apiCheckTypeMySQLAttr apiCheckType = "mysql" - apiCheckTypePostgreSQLAttr apiCheckType = "postgres" - apiCheckTypeStatsdAttr apiCheckType = "statsd" - apiCheckTypeTCPAttr apiCheckType = "tcp" -) - -var checkDescriptions = attrDescrs{ - checkActiveAttr: "If the check is activate or disabled", - checkCAQLAttr: "CAQL check configuration", - checkCloudWatchAttr: "CloudWatch check configuration", - checkCollectorAttr: "The collector(s) that are responsible for gathering the metrics", - checkConsulAttr: "Consul check configuration", - checkHTTPAttr: "HTTP check configuration", - checkHTTPTrapAttr: "HTTP Trap check configuration", - checkICMPPingAttr: "ICMP ping check configuration", - checkJSONAttr: "JSON check configuration", - checkMetricAttr: "Configuration for a stream of metrics", - checkMetricLimitAttr: `Setting a metric_limit will enable all (-1), disable (0), or allow up to the specified limit of metrics for this check ("N+", where N is a positive integer)`, - checkMySQLAttr: "MySQL check configuration", - checkNameAttr: "The name of the check bundle that will be displayed in the web interface", - 
checkNotesAttr: "Notes about this check bundle", - checkPeriodAttr: "The period between each time the check is made", - checkPostgreSQLAttr: "PostgreSQL check configuration", - checkStatsdAttr: "statsd check configuration", - checkTCPAttr: "TCP check configuration", - checkTagsAttr: "A list of tags assigned to the check", - checkTargetAttr: "The target of the check (e.g. hostname, URL, IP, etc)", - checkTimeoutAttr: "The length of time in seconds (and fractions of a second) before the check will timeout if no response is returned to the collector", - checkTypeAttr: "The check type", - - checkOutByCollectorAttr: "", - checkOutCheckUUIDsAttr: "", - checkOutChecksAttr: "", - checkOutCreatedAttr: "", - checkOutIDAttr: "", - checkOutLastModifiedAttr: "", - checkOutLastModifiedByAttr: "", - checkOutReverseConnectURLsAttr: "", -} - -var checkCollectorDescriptions = attrDescrs{ - checkCollectorIDAttr: "The ID of the collector", -} - -var checkMetricDescriptions = metricDescriptions - -func resourceCheck() *schema.Resource { - return &schema.Resource{ - Create: checkCreate, - Read: checkRead, - Update: checkUpdate, - Delete: checkDelete, - Exists: checkExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: convertToHelperSchema(checkDescriptions, map[schemaAttr]*schema.Schema{ - checkActiveAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - checkCAQLAttr: schemaCheckCAQL, - checkCloudWatchAttr: schemaCheckCloudWatch, - checkCollectorAttr: &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkCollectorDescriptions, map[schemaAttr]*schema.Schema{ - checkCollectorIDAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(checkCollectorIDAttr, config.BrokerCIDRegex), - }, - }), - }, - }, - checkConsulAttr: schemaCheckConsul, - checkHTTPAttr: schemaCheckHTTP, - 
checkHTTPTrapAttr: schemaCheckHTTPTrap, - checkJSONAttr: schemaCheckJSON, - checkICMPPingAttr: schemaCheckICMPPing, - checkMetricAttr: &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: checkMetricChecksum, - MinItems: 1, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkMetricDescriptions, map[schemaAttr]*schema.Schema{ - metricActiveAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - metricNameAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(metricNameAttr, `[\S]+`), - }, - metricTagsAttr: tagMakeConfigSchema(metricTagsAttr), - metricTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateMetricType, - }, - metricUnitAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: metricUnit, - ValidateFunc: validateRegexp(metricUnitAttr, metricUnitRegexp), - }, - }), - }, - }, - checkMetricLimitAttr: &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validateFuncs( - validateIntMin(checkMetricLimitAttr, -1), - ), - }, - checkMySQLAttr: schemaCheckMySQL, - checkNameAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - checkNotesAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: suppressWhitespace, - }, - checkPeriodAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: normalizeTimeDurationStringToSeconds, - ValidateFunc: validateFuncs( - validateDurationMin(checkPeriodAttr, defaultCirconusCheckPeriodMin), - validateDurationMax(checkPeriodAttr, defaultCirconusCheckPeriodMax), - ), - }, - checkPostgreSQLAttr: schemaCheckPostgreSQL, - checkStatsdAttr: schemaCheckStatsd, - checkTagsAttr: tagMakeConfigSchema(checkTagsAttr), - checkTargetAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: 
validateRegexp(checkTagsAttr, `.+`), - }, - checkTCPAttr: schemaCheckTCP, - checkTimeoutAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: normalizeTimeDurationStringToSeconds, - ValidateFunc: validateFuncs( - validateDurationMin(checkTimeoutAttr, defaultCirconusTimeoutMin), - validateDurationMax(checkTimeoutAttr, defaultCirconusTimeoutMax), - ), - }, - checkTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateCheckType, - }, - - // Out parameters - checkOutIDAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - checkOutByCollectorAttr: &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - checkOutCheckUUIDsAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - checkOutChecksAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - checkOutCreatedAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - checkOutLastModifiedAttr: &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - checkOutLastModifiedByAttr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - checkOutReverseConnectURLsAttr: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }), - } -} - -func checkCreate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - c := newCheck() - if err := c.ParseConfig(d); err != nil { - return errwrap.Wrapf("error parsing check schema during create: {{err}}", err) - } - - if err := c.Create(ctxt); err != nil { - return errwrap.Wrapf("error creating check: {{err}}", err) - } - - d.SetId(c.CID) - - return checkRead(d, meta) -} - -func checkExists(d *schema.ResourceData, meta 
interface{}) (bool, error) { - ctxt := meta.(*providerContext) - - cid := d.Id() - cb, err := ctxt.client.FetchCheckBundle(api.CIDType(&cid)) - if err != nil { - return false, err - } - - if cb.CID == "" { - return false, nil - } - - return true, nil -} - -// checkRead pulls data out of the CheckBundle object and stores it into the -// appropriate place in the statefile. -func checkRead(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - c, err := loadCheck(ctxt, api.CIDType(&cid)) - if err != nil { - return err - } - - d.SetId(c.CID) - - // Global circonus_check attributes are saved first, followed by the check - // type specific attributes handled below in their respective checkRead*(). - - checkIDsByCollector := make(map[string]interface{}, len(c.Checks)) - for i, b := range c.Brokers { - checkIDsByCollector[b] = c.Checks[i] - } - - var checkID string - if len(c.Checks) == 1 { - checkID = c.Checks[0] - } - - metrics := schema.NewSet(checkMetricChecksum, nil) - for _, m := range c.Metrics { - metricAttrs := map[string]interface{}{ - string(metricActiveAttr): metricAPIStatusToBool(m.Status), - string(metricNameAttr): m.Name, - string(metricTagsAttr): tagsToState(apiToTags(m.Tags)), - string(metricTypeAttr): m.Type, - string(metricUnitAttr): indirect(m.Units), - } - - metrics.Add(metricAttrs) - } - - // Write the global circonus_check parameters followed by the check - // type-specific parameters. 
- - d.Set(checkActiveAttr, checkAPIStatusToBool(c.Status)) - - if err := d.Set(checkCollectorAttr, stringListToSet(c.Brokers, checkCollectorIDAttr)); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkCollectorAttr), err) - } - - d.Set(checkMetricLimitAttr, c.MetricLimit) - d.Set(checkNameAttr, c.DisplayName) - d.Set(checkNotesAttr, c.Notes) - d.Set(checkPeriodAttr, fmt.Sprintf("%ds", c.Period)) - - if err := d.Set(checkMetricAttr, metrics); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkMetricAttr), err) - } - - if err := d.Set(checkTagsAttr, c.Tags); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkTagsAttr), err) - } - - d.Set(checkTargetAttr, c.Target) - - { - t, _ := time.ParseDuration(fmt.Sprintf("%fs", c.Timeout)) - d.Set(checkTimeoutAttr, t.String()) - } - - d.Set(checkTypeAttr, c.Type) - - // Last step: parse a check_bundle's config into the statefile. 
- if err := parseCheckTypeConfig(&c, d); err != nil { - return errwrap.Wrapf("Unable to parse check config: {{err}}", err) - } - - // Out parameters - if err := d.Set(checkOutByCollectorAttr, checkIDsByCollector); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkOutByCollectorAttr), err) - } - - if err := d.Set(checkOutCheckUUIDsAttr, c.CheckUUIDs); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkOutCheckUUIDsAttr), err) - } - - if err := d.Set(checkOutChecksAttr, c.Checks); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkOutChecksAttr), err) - } - - if checkID != "" { - d.Set(checkOutIDAttr, checkID) - } - - d.Set(checkOutCreatedAttr, c.Created) - d.Set(checkOutLastModifiedAttr, c.LastModified) - d.Set(checkOutLastModifiedByAttr, c.LastModifedBy) - - if err := d.Set(checkOutReverseConnectURLsAttr, c.ReverseConnectURLs); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkOutReverseConnectURLsAttr), err) - } - - return nil -} - -func checkUpdate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - c := newCheck() - if err := c.ParseConfig(d); err != nil { - return err - } - - c.CID = d.Id() - if err := c.Update(ctxt); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to update check %q: {{err}}", d.Id()), err) - } - - return checkRead(d, meta) -} - -func checkDelete(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - if _, err := ctxt.client.Delete(d.Id()); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to delete check %q: {{err}}", d.Id()), err) - } - - d.SetId("") - - return nil -} - -func checkMetricChecksum(v interface{}) int { - m := v.(map[string]interface{}) - csum := metricChecksum(m) - return csum -} - -// ParseConfig reads Terraform config data and stores the information 
into a -// Circonus CheckBundle object. -func (c *circonusCheck) ParseConfig(d *schema.ResourceData) error { - if v, found := d.GetOk(checkActiveAttr); found { - c.Status = checkActiveToAPIStatus(v.(bool)) - } - - if v, found := d.GetOk(checkCollectorAttr); found { - l := v.(*schema.Set).List() - c.Brokers = make([]string, 0, len(l)) - - for _, mapRaw := range l { - mapAttrs := mapRaw.(map[string]interface{}) - - if mv, mapFound := mapAttrs[checkCollectorIDAttr]; mapFound { - c.Brokers = append(c.Brokers, mv.(string)) - } - } - } - - if v, found := d.GetOk(checkMetricLimitAttr); found { - c.MetricLimit = v.(int) - } - - if v, found := d.GetOk(checkNameAttr); found { - c.DisplayName = v.(string) - } - - if v, found := d.GetOk(checkNotesAttr); found { - s := v.(string) - c.Notes = &s - } - - if v, found := d.GetOk(checkPeriodAttr); found { - d, err := time.ParseDuration(v.(string)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %q as a duration: {{err}}", checkPeriodAttr), err) - } - - c.Period = uint(d.Seconds()) - } - - if v, found := d.GetOk(checkMetricAttr); found { - metricList := v.(*schema.Set).List() - c.Metrics = make([]api.CheckBundleMetric, 0, len(metricList)) - - for _, metricListRaw := range metricList { - metricAttrs := metricListRaw.(map[string]interface{}) - - var id string - if av, found := metricAttrs[metricIDAttr]; found { - id = av.(string) - } else { - var err error - id, err = newMetricID() - if err != nil { - return errwrap.Wrapf("unable to create a new metric ID: {{err}}", err) - } - } - - m := newMetric() - if err := m.ParseConfigMap(id, metricAttrs); err != nil { - return errwrap.Wrapf("unable to parse config: {{err}}", err) - } - - c.Metrics = append(c.Metrics, m.CheckBundleMetric) - } - } - - if v, found := d.GetOk(checkTagsAttr); found { - c.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if v, found := d.GetOk(checkTargetAttr); found { - c.Target = v.(string) - } - - if v, found := 
d.GetOk(checkTimeoutAttr); found { - d, err := time.ParseDuration(v.(string)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %q as a duration: {{err}}", checkTimeoutAttr), err) - } - - t := float32(d.Seconds()) - c.Timeout = t - } - - // Last step: parse the individual check types - if err := checkConfigToAPI(c, d); err != nil { - return errwrap.Wrapf("unable to parse check type: {{err}}", err) - } - - if err := c.Fixup(); err != nil { - return err - } - - if err := c.Validate(); err != nil { - return err - } - - return nil -} - -// checkConfigToAPI parses the Terraform config into the respective per-check -// type api.Config attributes. -func checkConfigToAPI(c *circonusCheck, d *schema.ResourceData) error { - checkTypeParseMap := map[string]func(*circonusCheck, interfaceList) error{ - checkCAQLAttr: checkConfigToAPICAQL, - checkCloudWatchAttr: checkConfigToAPICloudWatch, - checkConsulAttr: checkConfigToAPIConsul, - checkHTTPAttr: checkConfigToAPIHTTP, - checkHTTPTrapAttr: checkConfigToAPIHTTPTrap, - checkICMPPingAttr: checkConfigToAPIICMPPing, - checkJSONAttr: checkConfigToAPIJSON, - checkMySQLAttr: checkConfigToAPIMySQL, - checkPostgreSQLAttr: checkConfigToAPIPostgreSQL, - checkStatsdAttr: checkConfigToAPIStatsd, - checkTCPAttr: checkConfigToAPITCP, - } - - for checkType, fn := range checkTypeParseMap { - if listRaw, found := d.GetOk(checkType); found { - switch u := listRaw.(type) { - case []interface{}: - if err := fn(c, u); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to parse type %q: {{err}}", string(checkType)), err) - } - case *schema.Set: - if err := fn(c, u.List()); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to parse type %q: {{err}}", string(checkType)), err) - } - default: - return fmt.Errorf("PROVIDER BUG: unsupported check type interface: %q", checkType) - } - } - } - - return nil -} - -// parseCheckTypeConfig parses an API Config object and stores the result in the -// statefile. 
-func parseCheckTypeConfig(c *circonusCheck, d *schema.ResourceData) error { - checkTypeConfigHandlers := map[apiCheckType]func(*circonusCheck, *schema.ResourceData) error{ - apiCheckTypeCAQLAttr: checkAPIToStateCAQL, - apiCheckTypeCloudWatchAttr: checkAPIToStateCloudWatch, - apiCheckTypeConsulAttr: checkAPIToStateConsul, - apiCheckTypeHTTPAttr: checkAPIToStateHTTP, - apiCheckTypeHTTPTrapAttr: checkAPIToStateHTTPTrap, - apiCheckTypeICMPPingAttr: checkAPIToStateICMPPing, - apiCheckTypeJSONAttr: checkAPIToStateJSON, - apiCheckTypeMySQLAttr: checkAPIToStateMySQL, - apiCheckTypePostgreSQLAttr: checkAPIToStatePostgreSQL, - apiCheckTypeStatsdAttr: checkAPIToStateStatsd, - apiCheckTypeTCPAttr: checkAPIToStateTCP, - } - - var checkType apiCheckType = apiCheckType(c.Type) - fn, ok := checkTypeConfigHandlers[checkType] - if !ok { - return fmt.Errorf("check type %q not supported", c.Type) - } - - if err := fn(c, d); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse the API config for %q: {{err}}", c.Type), err) - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_caql.go b/builtin/providers/circonus/resource_circonus_check_caql.go deleted file mode 100644 index a3d876b63..000000000 --- a/builtin/providers/circonus/resource_circonus_check_caql.go +++ /dev/null @@ -1,89 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.caql.* resource attribute names - checkCAQLQueryAttr = "query" -) - -var checkCAQLDescriptions = attrDescrs{ - checkCAQLQueryAttr: "The query definition", -} - -var schemaCheckCAQL = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: hashCheckCAQL, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkCAQLDescriptions, 
map[schemaAttr]*schema.Schema{ - checkCAQLQueryAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(checkCAQLQueryAttr, `.+`), - }, - }), - }, -} - -// checkAPIToStateCAQL reads the Config data out of circonusCheck.CheckBundle -// into the statefile. -func checkAPIToStateCAQL(c *circonusCheck, d *schema.ResourceData) error { - caqlConfig := make(map[string]interface{}, len(c.Config)) - - caqlConfig[string(checkCAQLQueryAttr)] = c.Config[config.Query] - - if err := d.Set(checkCAQLAttr, schema.NewSet(hashCheckCAQL, []interface{}{caqlConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkCAQLAttr), err) - } - - return nil -} - -// hashCheckCAQL creates a stable hash of the normalized values -func hashCheckCAQL(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeString := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - fmt.Fprint(b, strings.TrimSpace(v.(string))) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. - writeString(checkCAQLQueryAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPICAQL(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeCAQL) - c.Target = defaultCheckCAQLTarget - - // Iterate over all `caql` attributes, even though we have a max of 1 in the - // schema. 
- for _, mapRaw := range l { - caqlConfig := newInterfaceMap(mapRaw) - - if v, found := caqlConfig[checkCAQLQueryAttr]; found { - c.Config[config.Query] = v.(string) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_caql_test.go b/builtin/providers/circonus/resource_circonus_check_caql_test.go deleted file mode 100644 index 5efcb6ad9..000000000 --- a/builtin/providers/circonus/resource_circonus_check_caql_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckCAQL_basic(t *testing.T) { - checkName := fmt.Sprintf("Consul's Go GC latency (Merged Histogram) - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckCAQLConfigFmt, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "active", "true"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "collector.36214388.id", "/broker/1490"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "caql.#", "1"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "caql.4060628048.query", `search:metric:histogram("*consul*runtime`+"`"+`gc_pause_ns* (active:1)") | histogram:merge() | histogram:percentile(99)`), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "metric.#", "1"), - - 
resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "tags.3728194417", "app:consul"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "tags.3480593708", "source:goruntime"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "target", "q._caql"), - resource.TestCheckResourceAttr("circonus_check.go_gc_latency", "type", "caql"), - ), - }, - }, - }) -} - -const testAccCirconusCheckCAQLConfigFmt = ` -variable "test_tags" { - type = "list" - default = [ "app:consul", "author:terraform", "lifecycle:unittest", "source:goruntime" ] -} - -resource "circonus_check" "go_gc_latency" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/1490" - } - - caql { - query = < 0 { - consulConfig[string(checkConsulAllowStaleAttr)] = true - } - - if dc := queryArgs.Get(apiConsulDatacenterAttr); dc != "" { - consulConfig[string(checkConsulDatacenterAttr)] = dc - } - - httpAddrURL.Host = u.Host - httpAddrURL.Scheme = u.Scheme - - md := consulHealthCheckRE.FindStringSubmatch(u.EscapedPath()) - if md == nil { - return fmt.Errorf("config %q failed to match the health regexp", config.URL) - } - - checkMode := md[1] - checkArg := md[2] - switch checkMode { - case checkConsulV1NodePrefix: - consulConfig[string(checkConsulNodeAttr)] = checkArg - case checkConsulV1ServicePrefix: - consulConfig[string(checkConsulServiceAttr)] = checkArg - case checkConsulV1StatePrefix: - consulConfig[string(checkConsulStateAttr)] = checkArg - default: - return fmt.Errorf("PROVIDER BUG: unsupported check mode %q from %q", checkMode, u.EscapedPath()) - } - - delete(swamp, config.URL) - } - - if v, found := c.Config[config.Port]; found { - hostInfo := 
strings.SplitN(httpAddrURL.Host, ":", 2) - switch { - case len(hostInfo) == 1 && v != defaultCheckConsulPort, len(hostInfo) > 1: - httpAddrURL.Host = net.JoinHostPort(hostInfo[0], v) - } - - delete(swamp, config.Port) - } - - if v, found := c.Config[apiConsulCheckBlacklist]; found { - consulConfig[checkConsulCheckNameBlacklistAttr] = strings.Split(v, ",") - } - - if v, found := c.Config[apiConsulNodeBlacklist]; found { - consulConfig[checkConsulNodeBlacklistAttr] = strings.Split(v, ",") - } - - if v, found := c.Config[apiConsulServiceBlacklist]; found { - consulConfig[checkConsulServiceNameBlacklistAttr] = strings.Split(v, ",") - } - - // NOTE(sean@): headers attribute processed last. See below. - - consulConfig[string(checkConsulHTTPAddrAttr)] = httpAddrURL.String() - - saveStringConfigToState(config.KeyFile, checkConsulKeyFileAttr) - - // Process the headers last in order to provide an escape hatch capible of - // overriding any other derived value above. - for k, v := range c.Config { - if len(k) <= headerPrefixLen { - continue - } - - // Handle all of the prefix variable headers, like `header_` - if strings.Compare(string(k[:headerPrefixLen]), string(config.HeaderPrefix)) == 0 { - key := k[headerPrefixLen:] - switch key { - case checkConsulTokenHeader: - consulConfig[checkConsulACLTokenAttr] = v - default: - headers[string(key)] = v - } - } - - delete(swamp, k) - } - consulConfig[string(checkConsulHeadersAttr)] = headers - - whitelistedConfigKeys := map[config.Key]struct{}{ - config.Port: struct{}{}, - config.ReverseSecretKey: struct{}{}, - config.SubmissionURL: struct{}{}, - config.URL: struct{}{}, - } - - for k := range swamp { - if _, ok := whitelistedConfigKeys[k]; ok { - delete(c.Config, k) - } - - if _, ok := whitelistedConfigKeys[k]; !ok { - return fmt.Errorf("PROVIDER BUG: API Config not empty: %#v", swamp) - } - } - - if err := d.Set(checkConsulAttr, []interface{}{consulConfig}); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q 
attribute: {{err}}", checkConsulAttr), err) - } - - return nil -} - -func checkConfigToAPIConsul(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeConsul) - - // Iterate over all `consul` attributes, even though we have a max of 1 in the - // schema. - for _, mapRaw := range l { - consulConfig := newInterfaceMap(mapRaw) - if v, found := consulConfig[checkConsulCAChainAttr]; found { - c.Config[config.CAChain] = v.(string) - } - - if v, found := consulConfig[checkConsulCertFileAttr]; found { - c.Config[config.CertFile] = v.(string) - } - - if v, found := consulConfig[checkConsulCheckNameBlacklistAttr]; found { - listRaw := v.([]interface{}) - checks := make([]string, 0, len(listRaw)) - for _, v := range listRaw { - checks = append(checks, v.(string)) - } - c.Config[apiConsulCheckBlacklist] = strings.Join(checks, ",") - } - - if v, found := consulConfig[checkConsulCiphersAttr]; found { - c.Config[config.Ciphers] = v.(string) - } - - if headers := consulConfig.CollectMap(checkConsulHeadersAttr); headers != nil { - for k, v := range headers { - h := config.HeaderPrefix + config.Key(k) - c.Config[h] = v - } - } - - if v, found := consulConfig[checkConsulKeyFileAttr]; found { - c.Config[config.KeyFile] = v.(string) - } - - { - // Extract all of the input attributes necessary to construct the - // Consul agent's URL. 
- - httpAddr := consulConfig[checkConsulHTTPAddrAttr].(string) - checkURL, err := url.Parse(httpAddr) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to parse %s's attribute %q: {{err}}", checkConsulAttr, httpAddr), err) - } - - hostInfo := strings.SplitN(checkURL.Host, ":", 2) - if len(c.Target) == 0 { - c.Target = hostInfo[0] - } - - if len(hostInfo) > 1 { - c.Config[config.Port] = hostInfo[1] - } - - if v, found := consulConfig[checkConsulNodeAttr]; found && v.(string) != "" { - checkURL.Path = strings.Join([]string{checkConsulV1Prefix, checkConsulV1NodePrefix, v.(string)}, "/") - } - - if v, found := consulConfig[checkConsulServiceAttr]; found && v.(string) != "" { - checkURL.Path = strings.Join([]string{checkConsulV1Prefix, checkConsulV1ServicePrefix, v.(string)}, "/") - } - - if v, found := consulConfig[checkConsulStateAttr]; found && v.(string) != "" { - checkURL.Path = strings.Join([]string{checkConsulV1Prefix, checkConsulV1StatePrefix, v.(string)}, "/") - } - - q := checkURL.Query() - - if v, found := consulConfig[checkConsulAllowStaleAttr]; found && v.(bool) { - q.Set(apiConsulStaleAttr, "") - } - - if v, found := consulConfig[checkConsulDatacenterAttr]; found && v.(string) != "" { - q.Set(apiConsulDatacenterAttr, v.(string)) - } - - checkURL.RawQuery = q.Encode() - - c.Config[config.URL] = checkURL.String() - } - - if v, found := consulConfig[checkConsulNodeBlacklistAttr]; found { - listRaw := v.([]interface{}) - checks := make([]string, 0, len(listRaw)) - for _, v := range listRaw { - checks = append(checks, v.(string)) - } - c.Config[apiConsulNodeBlacklist] = strings.Join(checks, ",") - } - - if v, found := consulConfig[checkConsulServiceNameBlacklistAttr]; found { - listRaw := v.([]interface{}) - checks := make([]string, 0, len(listRaw)) - for _, v := range listRaw { - checks = append(checks, v.(string)) - } - c.Config[apiConsulServiceBlacklist] = strings.Join(checks, ",") - } - } - - return nil -} diff --git 
a/builtin/providers/circonus/resource_circonus_check_consul_test.go b/builtin/providers/circonus/resource_circonus_check_consul_test.go deleted file mode 100644 index f7ca7993d..000000000 --- a/builtin/providers/circonus/resource_circonus_check_consul_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package circonus - -import ( - "fmt" - "regexp" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckConsul_node(t *testing.T) { - checkName := fmt.Sprintf("Terraform test: consul.service.consul mode=state check - %s", acctest.RandString(5)) - - checkNode := fmt.Sprintf("my-node-name-or-node-id-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckConsulConfigV1HealthNodeFmt, checkName, checkNode), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.consul_server", "active", "true"), - resource.TestMatchResourceAttr("circonus_check.consul_server", "check_id", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.2084916526.id", "/broker/2110"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.#", "1"), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.certificate_file", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ciphers", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.key_file", ""), - 
resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.dc", "dc2"), - resource.TestCheckNoResourceAttr("circonus_check.consul_server", "consul.0.headers"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.http_addr", "http://consul.service.consul:8501"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.node", checkNode), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.node_blacklist.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.node_blacklist.0", "a"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.node_blacklist.1", "bad"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.node_blacklist.2", "node"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "notes", ""), - resource.TestCheckResourceAttr("circonus_check.consul_server", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.name", "KnownLeader"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.type", "text"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.name", "LastContact"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.#", 
"2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.unit", "seconds"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "target", "consul.service.consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "type", "consul"), - ), - }, - }, - }) -} - -func TestAccCirconusCheckConsul_service(t *testing.T) { - checkName := fmt.Sprintf("Terraform test: consul.service.consul mode=service check - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckConsulConfigV1HealthServiceFmt, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.consul_server", "active", "true"), - resource.TestMatchResourceAttr("circonus_check.consul_server", "check_id", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.2084916526.id", "/broker/2110"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.#", "1"), - // 
resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.certificate_file", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ciphers", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.key_file", ""), - resource.TestCheckNoResourceAttr("circonus_check.consul_server", "consul.0.headers"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.http_addr", "http://consul.service.consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.service", "consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.service_blacklist.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.service_blacklist.0", "bad"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.service_blacklist.1", "hombre"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.service_blacklist.2", "service"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.consul_server", "notes", ""), - resource.TestCheckResourceAttr("circonus_check.consul_server", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.name", "KnownLeader"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.2058715988", "source:consul"), - 
resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.type", "text"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.name", "LastContact"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.unit", "seconds"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "target", "consul.service.consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "type", "consul"), - ), - }, - }, - }) -} - -func TestAccCirconusCheckConsul_state(t *testing.T) { - checkName := fmt.Sprintf("Terraform test: consul.service.consul mode=state check - %s", acctest.RandString(5)) - - checkState := "critical" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckConsulConfigV1HealthStateFmt, checkName, checkState), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.consul_server", "active", "true"), - 
resource.TestMatchResourceAttr("circonus_check.consul_server", "check_id", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "collector.2084916526.id", "/broker/2110"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.#", "1"), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.certificate_file", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.ciphers", ""), - // resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.key_file", ""), - resource.TestCheckNoResourceAttr("circonus_check.consul_server", "consul.0.headers"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.http_addr", "http://consul.service.consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.state", checkState), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.check_blacklist.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.check_blacklist.0", "worthless"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "consul.0.check_blacklist.1", "check"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.consul_server", "notes", ""), - resource.TestCheckResourceAttr("circonus_check.consul_server", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.name", "KnownLeader"), - 
resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3333874791.type", "text"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.name", "LastContact"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "metric.3148913305.unit", "seconds"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "target", "consul.service.consul"), - resource.TestCheckResourceAttr("circonus_check.consul_server", "type", "consul"), - ), - }, - }, - }) -} - -const testAccCirconusCheckConsulConfigV1HealthNodeFmt = ` -resource "circonus_check" "consul_server" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/2110" - } - - consul { - dc = "dc2" - http_addr = "http://consul.service.consul:8501" - node = "%s" - 
node_blacklist = ["a","bad","node"] - } - - metric { - name = "LastContact" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "numeric" - unit = "seconds" - } - - metric { - name = "KnownLeader" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "text" - } - - tags = [ "source:consul", "lifecycle:unittest" ] - - target = "consul.service.consul" -} -` - -const testAccCirconusCheckConsulConfigV1HealthServiceFmt = ` -resource "circonus_check" "consul_server" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/2110" - } - - consul { - service = "consul" - service_blacklist = ["bad","hombre","service"] - } - - metric { - name = "LastContact" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "numeric" - unit = "seconds" - } - - metric { - name = "KnownLeader" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "text" - } - - tags = [ "source:consul", "lifecycle:unittest" ] - - target = "consul.service.consul" -} -` - -const testAccCirconusCheckConsulConfigV1HealthStateFmt = ` -resource "circonus_check" "consul_server" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/2110" - } - - consul { - state = "%s" - check_blacklist = ["worthless","check"] - } - - metric { - name = "LastContact" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "numeric" - unit = "seconds" - } - - metric { - name = "KnownLeader" - tags = [ "source:consul", "lifecycle:unittest" ] - type = "text" - } - - tags = [ "source:consul", "lifecycle:unittest" ] - - target = "consul.service.consul" -} -` diff --git a/builtin/providers/circonus/resource_circonus_check_http.go b/builtin/providers/circonus/resource_circonus_check_http.go deleted file mode 100644 index 7b6d68b33..000000000 --- a/builtin/providers/circonus/resource_circonus_check_http.go +++ /dev/null @@ -1,387 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "log" - "net/url" - "sort" - "strconv" - "strings" - - 
"github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.http.* resource attribute names - checkHTTPAuthMethodAttr = "auth_method" - checkHTTPAuthPasswordAttr = "auth_password" - checkHTTPAuthUserAttr = "auth_user" - checkHTTPBodyRegexpAttr = "body_regexp" - checkHTTPCAChainAttr = "ca_chain" - checkHTTPCertFileAttr = "certificate_file" - checkHTTPCiphersAttr = "ciphers" - checkHTTPCodeRegexpAttr = "code" - checkHTTPExtractAttr = "extract" - checkHTTPHeadersAttr = "headers" - checkHTTPKeyFileAttr = "key_file" - checkHTTPMethodAttr = "method" - checkHTTPPayloadAttr = "payload" - checkHTTPReadLimitAttr = "read_limit" - checkHTTPURLAttr = "url" - checkHTTPVersionAttr = "version" -) - -var checkHTTPDescriptions = attrDescrs{ - checkHTTPAuthMethodAttr: "The HTTP Authentication method", - checkHTTPAuthPasswordAttr: "The HTTP Authentication user password", - checkHTTPAuthUserAttr: "The HTTP Authentication user name", - checkHTTPBodyRegexpAttr: `This regular expression is matched against the body of the response. If a match is not found, the check will be marked as "bad.`, - checkHTTPCAChainAttr: "A path to a file containing all the certificate authorities that should be loaded to validate the remote certificate (for TLS checks)", - checkHTTPCodeRegexpAttr: `The HTTP code that is expected. If the code received does not match this regular expression, the check is marked as "bad."`, - checkHTTPCiphersAttr: "A list of ciphers to be used in the TLS protocol (for HTTPS checks)", - checkHTTPCertFileAttr: "A path to a file containing the client certificate that will be presented to the remote server (for TLS-enabled checks)", - checkHTTPExtractAttr: "This regular expression is matched against the body of the response globally. The first capturing match is the key and the second capturing match is the value. 
Each key/value extracted is registered as a metric for the check.", - checkHTTPHeadersAttr: "Map of HTTP Headers to send along with HTTP Requests", - checkHTTPKeyFileAttr: "A path to a file containing key to be used in conjunction with the cilent certificate (for TLS checks)", - checkHTTPMethodAttr: "The HTTP method to use", - checkHTTPPayloadAttr: "The information transferred as the payload of an HTTP request", - checkHTTPReadLimitAttr: "Sets an approximate limit on the data read (0 means no limit)", - checkHTTPURLAttr: "The URL to use as the target of the check", - checkHTTPVersionAttr: "Sets the HTTP version for the check to use", -} - -var schemaCheckHTTP = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: hashCheckHTTP, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkHTTPDescriptions, map[schemaAttr]*schema.Schema{ - checkHTTPAuthMethodAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPAuthMethodAttr, `^(?:Basic|Digest|Auto)$`), - }, - checkHTTPAuthPasswordAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ValidateFunc: validateRegexp(checkHTTPAuthPasswordAttr, `^.*`), - }, - checkHTTPAuthUserAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPAuthUserAttr, `[^:]+`), - }, - checkHTTPBodyRegexpAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPBodyRegexpAttr, `.+`), - }, - checkHTTPCAChainAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPCAChainAttr, `.+`), - }, - checkHTTPCertFileAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPCertFileAttr, `.+`), - }, - checkHTTPCiphersAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPCiphersAttr, 
`.+`), - }, - checkHTTPCodeRegexpAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckHTTPCodeRegexp, - ValidateFunc: validateRegexp(checkHTTPCodeRegexpAttr, `.+`), - }, - checkHTTPExtractAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPExtractAttr, `.+`), - }, - checkHTTPHeadersAttr: &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeString, - Optional: true, - ValidateFunc: validateHTTPHeaders, - }, - checkHTTPKeyFileAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPKeyFileAttr, `.+`), - }, - checkHTTPMethodAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckHTTPMethod, - ValidateFunc: validateRegexp(checkHTTPMethodAttr, `\S+`), - }, - checkHTTPPayloadAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkHTTPPayloadAttr, `\S+`), - }, - checkHTTPReadLimitAttr: &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validateFuncs( - validateIntMin(checkHTTPReadLimitAttr, 0), - ), - }, - checkHTTPURLAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateFuncs( - validateHTTPURL(checkHTTPURLAttr, urlIsAbs), - ), - }, - checkHTTPVersionAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckHTTPVersion, - ValidateFunc: validateStringIn(checkHTTPVersionAttr, supportedHTTPVersions), - }, - }), - }, -} - -// checkAPIToStateHTTP reads the Config data out of circonusCheck.CheckBundle into the -// statefile. 
-func checkAPIToStateHTTP(c *circonusCheck, d *schema.ResourceData) error { - httpConfig := make(map[string]interface{}, len(c.Config)) - - // swamp is a sanity check: it must be empty by the time this method returns - swamp := make(map[config.Key]string, len(c.Config)) - for k, v := range c.Config { - swamp[k] = v - } - - saveStringConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if v, ok := c.Config[apiKey]; ok { - httpConfig[string(attrName)] = v - } - - delete(swamp, apiKey) - } - - saveIntConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if v, ok := c.Config[apiKey]; ok { - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - log.Printf("[ERROR]: Unable to convert %s to an integer: %v", apiKey, err) - return - } - - httpConfig[string(attrName)] = int(i) - } - - delete(swamp, apiKey) - } - - saveStringConfigToState(config.AuthMethod, checkHTTPAuthMethodAttr) - saveStringConfigToState(config.AuthPassword, checkHTTPAuthPasswordAttr) - saveStringConfigToState(config.AuthUser, checkHTTPAuthUserAttr) - saveStringConfigToState(config.Body, checkHTTPBodyRegexpAttr) - saveStringConfigToState(config.CAChain, checkHTTPCAChainAttr) - saveStringConfigToState(config.CertFile, checkHTTPCertFileAttr) - saveStringConfigToState(config.Ciphers, checkHTTPCiphersAttr) - saveStringConfigToState(config.Code, checkHTTPCodeRegexpAttr) - saveStringConfigToState(config.Extract, checkHTTPExtractAttr) - - headers := make(map[string]interface{}, len(c.Config)) - headerPrefixLen := len(config.HeaderPrefix) - for k, v := range c.Config { - if len(k) <= headerPrefixLen { - continue - } - - if strings.Compare(string(k[:headerPrefixLen]), string(config.HeaderPrefix)) == 0 { - key := k[headerPrefixLen:] - headers[string(key)] = v - } - delete(swamp, k) - } - httpConfig[string(checkHTTPHeadersAttr)] = headers - - saveStringConfigToState(config.KeyFile, checkHTTPKeyFileAttr) - saveStringConfigToState(config.Method, checkHTTPMethodAttr) - 
saveStringConfigToState(config.Payload, checkHTTPPayloadAttr) - saveIntConfigToState(config.ReadLimit, checkHTTPReadLimitAttr) - saveStringConfigToState(config.URL, checkHTTPURLAttr) - saveStringConfigToState(config.HTTPVersion, checkHTTPVersionAttr) - - whitelistedConfigKeys := map[config.Key]struct{}{ - config.ReverseSecretKey: struct{}{}, - config.SubmissionURL: struct{}{}, - } - - for k := range swamp { - if _, ok := whitelistedConfigKeys[k]; ok { - delete(c.Config, k) - } - - if _, ok := whitelistedConfigKeys[k]; !ok { - return fmt.Errorf("PROVIDER BUG: API Config not empty: %#v", swamp) - } - } - - if err := d.Set(checkHTTPAttr, schema.NewSet(hashCheckHTTP, []interface{}{httpConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkHTTPAttr), err) - } - - return nil -} - -// hashCheckHTTP creates a stable hash of the normalized values -func hashCheckHTTP(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeInt := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok { - fmt.Fprintf(b, "%x", v.(int)) - } - } - - writeString := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - fmt.Fprint(b, strings.TrimSpace(v.(string))) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. 
- writeString(checkHTTPAuthMethodAttr) - writeString(checkHTTPAuthPasswordAttr) - writeString(checkHTTPAuthUserAttr) - writeString(checkHTTPBodyRegexpAttr) - writeString(checkHTTPCAChainAttr) - writeString(checkHTTPCertFileAttr) - writeString(checkHTTPCiphersAttr) - writeString(checkHTTPCodeRegexpAttr) - writeString(checkHTTPExtractAttr) - - if headersRaw, ok := m[string(checkHTTPHeadersAttr)]; ok { - headerMap := headersRaw.(map[string]interface{}) - headers := make([]string, 0, len(headerMap)) - for k := range headerMap { - headers = append(headers, k) - } - - sort.Strings(headers) - for i := range headers { - fmt.Fprint(b, headers[i]) - fmt.Fprint(b, headerMap[headers[i]].(string)) - } - } - - writeString(checkHTTPKeyFileAttr) - writeString(checkHTTPMethodAttr) - writeString(checkHTTPPayloadAttr) - writeInt(checkHTTPReadLimitAttr) - writeString(checkHTTPURLAttr) - writeString(checkHTTPVersionAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPIHTTP(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeHTTP) - - // Iterate over all `http` attributes, even though we have a max of 1 in the - // schema. 
- for _, mapRaw := range l { - httpConfig := newInterfaceMap(mapRaw) - - if v, found := httpConfig[checkHTTPAuthMethodAttr]; found { - c.Config[config.AuthMethod] = v.(string) - } - - if v, found := httpConfig[checkHTTPAuthPasswordAttr]; found { - c.Config[config.AuthPassword] = v.(string) - } - - if v, found := httpConfig[checkHTTPAuthUserAttr]; found { - c.Config[config.AuthUser] = v.(string) - } - - if v, found := httpConfig[checkHTTPBodyRegexpAttr]; found { - c.Config[config.Body] = v.(string) - } - - if v, found := httpConfig[checkHTTPCAChainAttr]; found { - c.Config[config.CAChain] = v.(string) - } - - if v, found := httpConfig[checkHTTPCertFileAttr]; found { - c.Config[config.CertFile] = v.(string) - } - - if v, found := httpConfig[checkHTTPCiphersAttr]; found { - c.Config[config.Ciphers] = v.(string) - } - - if v, found := httpConfig[checkHTTPCodeRegexpAttr]; found { - c.Config[config.Code] = v.(string) - } - - if v, found := httpConfig[checkHTTPExtractAttr]; found { - c.Config[config.Extract] = v.(string) - } - - if headers := httpConfig.CollectMap(checkHTTPHeadersAttr); headers != nil { - for k, v := range headers { - h := config.HeaderPrefix + config.Key(k) - c.Config[h] = v - } - } - - if v, found := httpConfig[checkHTTPKeyFileAttr]; found { - c.Config[config.KeyFile] = v.(string) - } - - if v, found := httpConfig[checkHTTPMethodAttr]; found { - c.Config[config.Method] = v.(string) - } - - if v, found := httpConfig[checkHTTPPayloadAttr]; found { - c.Config[config.Payload] = v.(string) - } - - if v, found := httpConfig[checkHTTPReadLimitAttr]; found { - c.Config[config.ReadLimit] = fmt.Sprintf("%d", v.(int)) - } - - if v, found := httpConfig[checkHTTPURLAttr]; found { - c.Config[config.URL] = v.(string) - - u, _ := url.Parse(v.(string)) - hostInfo := strings.SplitN(u.Host, ":", 2) - if len(c.Target) == 0 { - c.Target = hostInfo[0] - } - - if len(hostInfo) > 1 && c.Config[config.Port] == "" { - c.Config[config.Port] = hostInfo[1] - } - } - - if v, found 
:= httpConfig[checkHTTPVersionAttr]; found { - c.Config[config.HTTPVersion] = v.(string) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_http_test.go b/builtin/providers/circonus/resource_circonus_check_http_test.go deleted file mode 100644 index ce916c111..000000000 --- a/builtin/providers/circonus/resource_circonus_check_http_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckHTTP_basic(t *testing.T) { - checkName := fmt.Sprintf("Terraform test: noit's jezebel availability check - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckHTTPConfigFmt, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.jezebel", "active", "true"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "collector.2388330941.id", "/broker/1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.#", "1"), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.auth_method", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.auth_password", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.auth_user", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.body_regexp", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.certificate_file", ""), - // 
resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.ciphers", ""), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.code", `^200$`), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.extract", `HTTP/1.1 200 OK`), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.key_file", ""), - // resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.payload", ""), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.headers.%", "1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.headers.Host", "127.0.0.1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.version", "1.1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.method", "GET"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.read_limit", "1048576"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "http.4213422905.url", "http://127.0.0.1:8083/resmon"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.jezebel", "notes", "Check to make sure jezebel is working as expected"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.#", "4"), - - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.active", "true"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.name", "code"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.tags.30226350", "app:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.tags.3219687752", "app:jezebel"), - 
resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2380257438.type", "text"), - - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.active", "true"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.name", "duration"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.tags.30226350", "app:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.tags.3219687752", "app:jezebel"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.3634949264.unit", "seconds"), - - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.active", "true"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.name", "tt_connect"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.tags.30226350", "app:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.tags.3219687752", "app:jezebel"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.tags.3241999189", 
"source:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.1717167158.unit", "milliseconds"), - - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.active", "true"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.name", "tt_firstbyte"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.tags.30226350", "app:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.tags.3219687752", "app:jezebel"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "metric.2305894402.unit", "milliseconds"), - - resource.TestCheckResourceAttr("circonus_check.jezebel", "tags.#", "4"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "tags.30226350", "app:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "tags.3219687752", "app:jezebel"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "target", "127.0.0.1"), - resource.TestCheckResourceAttr("circonus_check.jezebel", "type", "http"), - ), - }, - }, - }) -} - -const testAccCirconusCheckHTTPConfigFmt = ` -variable "http_check_tags" { - type = "list" - default = [ "app:circonus", "app:jezebel", "lifecycle:unittest", "source:circonus" ] -} - 
-resource "circonus_metric" "status_code" { - name = "code" - tags = [ "${var.http_check_tags}" ] - type = "text" -} - -resource "circonus_metric" "request_duration" { - name = "duration" - tags = [ "${var.http_check_tags}" ] - type = "numeric" - unit = "seconds" -} - -resource "circonus_metric" "request_ttconnect" { - name = "tt_connect" - tags = [ "${var.http_check_tags}" ] - type = "numeric" - unit = "milliseconds" -} - -resource "circonus_metric" "request_ttfb" { - name = "tt_firstbyte" - tags = [ "${var.http_check_tags}" ] - type = "numeric" - unit = "milliseconds" -} - -resource "circonus_check" "jezebel" { - active = true - name = "%s" - notes = "Check to make sure jezebel is working as expected" - period = "60s" - - collector { - id = "/broker/1" - } - - http { - code = "^200$" - extract = "HTTP/1.1 200 OK" - headers = { - Host = "127.0.0.1", - } - version = "1.1" - method = "GET" - read_limit = 1048576 - url = "http://127.0.0.1:8083/resmon" - } - - metric { - name = "${circonus_metric.status_code.name}" - tags = [ "${circonus_metric.status_code.tags}" ] - type = "${circonus_metric.status_code.type}" - } - - metric { - name = "${circonus_metric.request_duration.name}" - tags = [ "${circonus_metric.request_duration.tags}" ] - type = "${circonus_metric.request_duration.type}" - unit = "${circonus_metric.request_duration.unit}" - } - - metric { - name = "${circonus_metric.request_ttconnect.name}" - tags = [ "${circonus_metric.request_ttconnect.tags}" ] - type = "${circonus_metric.request_ttconnect.type}" - unit = "${circonus_metric.request_ttconnect.unit}" - } - - metric { - name = "${circonus_metric.request_ttfb.name}" - tags = [ "${circonus_metric.request_ttfb.tags}" ] - type = "${circonus_metric.request_ttfb.type}" - unit = "${circonus_metric.request_ttfb.unit}" - } - - tags = [ "${var.http_check_tags}" ] -} -` diff --git a/builtin/providers/circonus/resource_circonus_check_httptrap.go b/builtin/providers/circonus/resource_circonus_check_httptrap.go deleted 
file mode 100644 index e91807b6c..000000000 --- a/builtin/providers/circonus/resource_circonus_check_httptrap.go +++ /dev/null @@ -1,156 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "log" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.httptrap.* resource attribute names - checkHTTPTrapAsyncMetricsAttr = "async_metrics" - checkHTTPTrapSecretAttr = "secret" -) - -var checkHTTPTrapDescriptions = attrDescrs{ - checkHTTPTrapAsyncMetricsAttr: "Specify whether httptrap metrics are logged immediately or held until the status message is emitted", - checkHTTPTrapSecretAttr: "", -} - -var schemaCheckHTTPTrap = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: hashCheckHTTPTrap, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkHTTPTrapDescriptions, map[schemaAttr]*schema.Schema{ - checkHTTPTrapAsyncMetricsAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: defaultCheckHTTPTrapAsync, - }, - checkHTTPTrapSecretAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ValidateFunc: validateRegexp(checkHTTPTrapSecretAttr, `^[a-zA-Z0-9_]+$`), - }, - }), - }, -} - -// checkAPIToStateHTTPTrap reads the Config data out of circonusCheck.CheckBundle into -// the statefile. 
-func checkAPIToStateHTTPTrap(c *circonusCheck, d *schema.ResourceData) error { - httpTrapConfig := make(map[string]interface{}, len(c.Config)) - - // swamp is a sanity check: it must be empty by the time this method returns - swamp := make(map[config.Key]string, len(c.Config)) - for k, v := range c.Config { - swamp[k] = v - } - - saveBoolConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if s, ok := c.Config[apiKey]; ok { - switch s { - case "true", "on": - httpTrapConfig[string(attrName)] = true - case "false", "off": - httpTrapConfig[string(attrName)] = false - default: - log.Printf("PROVIDER BUG: unsupported value %q returned in key %q", s, apiKey) - } - } - - delete(swamp, apiKey) - } - - saveStringConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if s, ok := c.Config[apiKey]; ok { - httpTrapConfig[string(attrName)] = s - } - - delete(swamp, apiKey) - } - - saveBoolConfigToState(config.AsyncMetrics, checkHTTPTrapAsyncMetricsAttr) - saveStringConfigToState(config.Secret, checkHTTPTrapSecretAttr) - - whitelistedConfigKeys := map[config.Key]struct{}{ - config.ReverseSecretKey: struct{}{}, - config.SubmissionURL: struct{}{}, - } - - for k := range swamp { - if _, ok := whitelistedConfigKeys[k]; ok { - delete(c.Config, k) - } - - if _, ok := whitelistedConfigKeys[k]; !ok { - log.Printf("[ERROR]: PROVIDER BUG: API Config not empty: %#v", swamp) - } - } - - if err := d.Set(checkHTTPTrapAttr, schema.NewSet(hashCheckHTTPTrap, []interface{}{httpTrapConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkHTTPTrapAttr), err) - } - - return nil -} - -// hashCheckHTTPTrap creates a stable hash of the normalized values -func hashCheckHTTPTrap(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeBool := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok { - fmt.Fprintf(b, "%t", v.(bool)) - } - } - - writeString := 
func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - fmt.Fprint(b, strings.TrimSpace(v.(string))) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. - writeBool(checkHTTPTrapAsyncMetricsAttr) - writeString(checkHTTPTrapSecretAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPIHTTPTrap(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeHTTPTrapAttr) - - // Iterate over all `httptrap` attributes, even though we have a max of 1 in the - // schema. - for _, mapRaw := range l { - httpTrapConfig := newInterfaceMap(mapRaw) - - if v, found := httpTrapConfig[checkHTTPTrapAsyncMetricsAttr]; found { - b := v.(bool) - if b { - c.Config[config.AsyncMetrics] = fmt.Sprintf("%t", b) - } - } - - if v, found := httpTrapConfig[checkHTTPTrapSecretAttr]; found { - c.Config[config.Secret] = v.(string) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_httptrap_test.go b/builtin/providers/circonus/resource_circonus_check_httptrap_test.go deleted file mode 100644 index 1890b809c..000000000 --- a/builtin/providers/circonus/resource_circonus_check_httptrap_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckHTTPTrap_basic(t *testing.T) { - checkName := fmt.Sprintf("Terraform test: consul server httptrap check- %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckHTTPTrapConfigFmt, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.consul", "active", "true"), - 
resource.TestCheckResourceAttr("circonus_check.consul", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.consul", "collector.2084916526.id", "/broker/2110"), - resource.TestCheckResourceAttr("circonus_check.consul", "httptrap.#", "1"), - resource.TestCheckResourceAttr("circonus_check.consul", "httptrap.2067899660.async_metrics", "false"), - resource.TestCheckResourceAttr("circonus_check.consul", "httptrap.2067899660.secret", "12345"), - resource.TestCheckResourceAttr("circonus_check.consul", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.consul", "notes", "Check to receive consul server telemetry"), - resource.TestCheckResourceAttr("circonus_check.consul", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.#", "3"), - - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.name", "consul`consul-server-10-151-2-8`consul`session_ttl`active"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.tags.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.tags.3728194417", "app:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.1608647530.type", "numeric"), - - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.name", "consul`consul-server-10-151-2-8`runtime`alloc_bytes"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.tags.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.tags.3728194417", 
"app:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2293914935.unit", "bytes"), - - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.active", "true"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.name", "consul`consul`http`GET`v1`kv`_"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.tags.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.tags.3728194417", "app:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.type", "histogram"), - resource.TestCheckResourceAttr("circonus_check.consul", "metric.2489694876.unit", "nanoseconds"), - - resource.TestCheckResourceAttr("circonus_check.consul", "tags.#", "3"), - resource.TestCheckResourceAttr("circonus_check.consul", "tags.3728194417", "app:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.consul", "tags.2058715988", "source:consul"), - resource.TestCheckResourceAttr("circonus_check.consul", "target", "consul-server-10-151-2-8"), - resource.TestCheckResourceAttr("circonus_check.consul", "type", "httptrap"), - ), - }, - }, - }) -} - -const testAccCirconusCheckHTTPTrapConfigFmt = ` -variable "httptrap_check_tags" { - type = "list" - default = [ "app:consul", "lifecycle:unittest", 
"source:consul" ] -} - -variable "consul_hostname" { - type = "string" - default = "consul-server-10-151-2-8" -} - -resource "circonus_check" "consul" { - active = true - name = "%s" - notes = "Check to receive consul server telemetry" - period = "60s" - - collector { - id = "/broker/2110" - } - - httptrap { - async_metrics = "false" - secret = "12345" - } - - metric { - name = "consul` + "`" + `${var.consul_hostname}` + "`" + `consul` + "`" + `session_ttl` + "`" + `active" - tags = [ "${var.httptrap_check_tags}" ] - type = "numeric" - } - - metric { - name = "consul` + "`" + `${var.consul_hostname}` + "`" + `runtime` + "`" + `alloc_bytes" - tags = [ "${var.httptrap_check_tags}" ] - type = "numeric" - unit = "bytes" - } - - metric { - name = "consul` + "`" + `consul` + "`" + `http` + "`" + `GET` + "`" + `v1` + "`" + `kv` + "`" + `_" - tags = [ "${var.httptrap_check_tags}" ] - type = "histogram" - unit = "nanoseconds" - } - - tags = [ "${var.httptrap_check_tags}" ] - target = "${var.consul_hostname}" -} -` diff --git a/builtin/providers/circonus/resource_circonus_check_icmp_ping.go b/builtin/providers/circonus/resource_circonus_check_icmp_ping.go deleted file mode 100644 index c4597fcdf..000000000 --- a/builtin/providers/circonus/resource_circonus_check_icmp_ping.go +++ /dev/null @@ -1,157 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "strconv" - "time" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.icmp_ping.* resource attribute names - checkICMPPingAvailabilityAttr = "availability" - checkICMPPingCountAttr = "count" - checkICMPPingIntervalAttr = "interval" -) - -var checkICMPPingDescriptions = attrDescrs{ - checkICMPPingAvailabilityAttr: `The percentage of ICMP available required for the check to be considered "good."`, - checkICMPPingCountAttr: "The number of ICMP requests 
to send during a single check.", - checkICMPPingIntervalAttr: "The number of milliseconds between ICMP requests.", -} - -var schemaCheckICMPPing = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: hashCheckICMPPing, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkICMPPingDescriptions, map[schemaAttr]*schema.Schema{ - checkICMPPingAvailabilityAttr: &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - Default: defaultCheckICMPPingAvailability, - ValidateFunc: validateFuncs( - validateFloatMin(checkICMPPingAvailabilityAttr, 0.0), - validateFloatMax(checkICMPPingAvailabilityAttr, 100.0), - ), - }, - checkICMPPingCountAttr: &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: defaultCheckICMPPingCount, - ValidateFunc: validateFuncs( - validateIntMin(checkICMPPingCountAttr, 0), - validateIntMax(checkICMPPingCountAttr, 20), - ), - }, - checkICMPPingIntervalAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckICMPPingInterval, - ValidateFunc: validateFuncs( - validateDurationMin(checkICMPPingIntervalAttr, "100µs"), - validateDurationMax(checkICMPPingIntervalAttr, "5m"), - ), - }, - }), - }, -} - -// checkAPIToStateICMPPing reads the Config data out of circonusCheck.CheckBundle -// into the statefile. 
-func checkAPIToStateICMPPing(c *circonusCheck, d *schema.ResourceData) error { - icmpPingConfig := make(map[string]interface{}, len(c.Config)) - - availNeeded, err := strconv.ParseFloat(c.Config[config.AvailNeeded], 64) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %s: {{err}}", config.AvailNeeded), err) - } - - count, err := strconv.ParseInt(c.Config[config.Count], 10, 64) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %s: {{err}}", config.Count), err) - } - - interval, err := time.ParseDuration(fmt.Sprintf("%sms", c.Config[config.Interval])) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %s: {{err}}", config.Interval), err) - } - - icmpPingConfig[string(checkICMPPingAvailabilityAttr)] = availNeeded - icmpPingConfig[string(checkICMPPingCountAttr)] = int(count) - icmpPingConfig[string(checkICMPPingIntervalAttr)] = interval.String() - - if err := d.Set(checkICMPPingAttr, schema.NewSet(hashCheckICMPPing, []interface{}{icmpPingConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkICMPPingAttr), err) - } - - return nil -} - -// hashCheckICMPPing creates a stable hash of the normalized values -func hashCheckICMPPing(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeFloat64 := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok { - fmt.Fprintf(b, "%f", v.(float64)) - } - } - - writeInt := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok { - fmt.Fprintf(b, "%x", v.(int)) - } - } - - writeDuration := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - d, _ := time.ParseDuration(v.(string)) - fmt.Fprint(b, d.String()) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. 
- writeFloat64(checkICMPPingAvailabilityAttr) - writeInt(checkICMPPingCountAttr) - writeDuration(checkICMPPingIntervalAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPIICMPPing(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeICMPPing) - - // Iterate over all `icmp_ping` attributes, even though we have a max of 1 in - // the schema. - for _, mapRaw := range l { - icmpPingConfig := newInterfaceMap(mapRaw) - - if v, found := icmpPingConfig[checkICMPPingAvailabilityAttr]; found { - f := v.(float64) - c.Config[config.AvailNeeded] = fmt.Sprintf("%d", int(f)) - } - - if v, found := icmpPingConfig[checkICMPPingCountAttr]; found { - c.Config[config.Count] = fmt.Sprintf("%d", v.(int)) - } - - if v, found := icmpPingConfig[checkICMPPingIntervalAttr]; found { - d, _ := time.ParseDuration(v.(string)) - c.Config[config.Interval] = fmt.Sprintf("%d", int64(d/time.Millisecond)) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_icmp_ping_test.go b/builtin/providers/circonus/resource_circonus_check_icmp_ping_test.go deleted file mode 100644 index 89d68cbd5..000000000 --- a/builtin/providers/circonus/resource_circonus_check_icmp_ping_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package circonus - -import ( - "fmt" - "regexp" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckICMPPing_basic(t *testing.T) { - checkName := fmt.Sprintf("ICMP Ping check - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckICMPPingConfigFmt, checkName), - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr("circonus_check.loopback_latency", "active", "true"), - resource.TestCheckNoResourceAttr("circonus_check.loopback_latency", "check_id"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "checks.#", "2"), - resource.TestMatchResourceAttr("circonus_check.loopback_latency", "checks.0", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestMatchResourceAttr("circonus_check.loopback_latency", "checks.1", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestCheckNoResourceAttr("circonus_check.loopback_latency", "check_id"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "check_by_collector.%", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "collector.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "collector.2388330941.id", "/broker/1"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "icmp_ping.#", "1"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "icmp_ping.979664239.availability", "100"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "icmp_ping.979664239.count", "5"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "icmp_ping.979664239.interval", "500ms"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "name", checkName), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "period", "300s"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.#", "5"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.name", "available"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.tags.1401442048", 
"lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.784357201.unit", "%"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.name", "average"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.3166992875.unit", "seconds"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.name", "count"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.809361245.unit", "packets"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.839816201.name", "maximum"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.839816201.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.839816201.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", 
"metric.839816201.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.839816201.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.839816201.unit", "seconds"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.name", "minimum"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "metric.1657693034.unit", "seconds"), - - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "target", "api.circonus.com"), - resource.TestCheckResourceAttr("circonus_check.loopback_latency", "type", "ping_icmp"), - ), - }, - }, - }) -} - -const testAccCirconusCheckICMPPingConfigFmt = ` -variable "test_tags" { - type = "list" - default = [ "author:terraform", "lifecycle:unittest" ] -} -resource "circonus_check" "loopback_latency" { - active = true - name = "%s" - period = "300s" - - collector { - id = "/broker/1" - } - - collector { - id = "/broker/275" - } - - icmp_ping { - availability = "100.0" - count = 5 - interval = "500ms" - } - - metric { - name = "available" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "%%" - } - - metric { - name = 
"average" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - metric { - name = "count" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "packets" - } - - metric { - name = "maximum" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - metric { - name = "minimum" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - tags = [ "${var.test_tags}" ] - target = "api.circonus.com" -} -` diff --git a/builtin/providers/circonus/resource_circonus_check_json.go b/builtin/providers/circonus/resource_circonus_check_json.go deleted file mode 100644 index e377d08f3..000000000 --- a/builtin/providers/circonus/resource_circonus_check_json.go +++ /dev/null @@ -1,370 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "log" - "net/url" - "sort" - "strconv" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.json.* resource attribute names - checkJSONAuthMethodAttr = "auth_method" - checkJSONAuthPasswordAttr = "auth_password" - checkJSONAuthUserAttr = "auth_user" - checkJSONCAChainAttr = "ca_chain" - checkJSONCertFileAttr = "certificate_file" - checkJSONCiphersAttr = "ciphers" - checkJSONHeadersAttr = "headers" - checkJSONKeyFileAttr = "key_file" - checkJSONMethodAttr = "method" - checkJSONPayloadAttr = "payload" - checkJSONPortAttr = "port" - checkJSONReadLimitAttr = "read_limit" - checkJSONURLAttr = "url" - checkJSONVersionAttr = "version" -) - -var checkJSONDescriptions = attrDescrs{ - checkJSONAuthMethodAttr: "The HTTP Authentication method", - checkJSONAuthPasswordAttr: "The HTTP Authentication user password", - checkJSONAuthUserAttr: "The HTTP Authentication user name", - checkJSONCAChainAttr: "A path to a file containing all the certificate authorities that should be loaded to validate the remote 
certificate (for TLS checks)", - checkJSONCertFileAttr: "A path to a file containing the client certificate that will be presented to the remote server (for TLS-enabled checks)", - checkJSONCiphersAttr: "A list of ciphers to be used in the TLS protocol (for HTTPS checks)", - checkJSONHeadersAttr: "Map of HTTP Headers to send along with HTTP Requests", - checkJSONKeyFileAttr: "A path to a file containing key to be used in conjunction with the cilent certificate (for TLS checks)", - checkJSONMethodAttr: "The HTTP method to use", - checkJSONPayloadAttr: "The information transferred as the payload of an HTTP request", - checkJSONPortAttr: "Specifies the port on which the management interface can be reached", - checkJSONReadLimitAttr: "Sets an approximate limit on the data read (0 means no limit)", - checkJSONURLAttr: "The URL to use as the target of the check", - checkJSONVersionAttr: "Sets the HTTP version for the check to use", -} - -var schemaCheckJSON = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: checkJSONConfigChecksum, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkJSONDescriptions, map[schemaAttr]*schema.Schema{ - checkJSONAuthMethodAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONAuthMethodAttr, `^(?:Basic|Digest|Auto)$`), - }, - checkJSONAuthPasswordAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ValidateFunc: validateRegexp(checkJSONAuthPasswordAttr, `^.*`), - }, - checkJSONAuthUserAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONAuthUserAttr, `[^:]+`), - }, - checkJSONCAChainAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONCAChainAttr, `.+`), - }, - checkJSONCertFileAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validateRegexp(checkJSONCertFileAttr, `.+`), - }, - checkJSONCiphersAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONCiphersAttr, `.+`), - }, - checkJSONHeadersAttr: &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeString, - Optional: true, - ValidateFunc: validateHTTPHeaders, - }, - checkJSONKeyFileAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONKeyFileAttr, `.+`), - }, - checkJSONMethodAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckJSONMethod, - ValidateFunc: validateRegexp(checkJSONMethodAttr, `\S+`), - }, - checkJSONPayloadAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(checkJSONPayloadAttr, `\S+`), - }, - checkJSONPortAttr: &schema.Schema{ - Type: schema.TypeInt, - Default: defaultCheckJSONPort, - Optional: true, - ValidateFunc: validateFuncs( - validateIntMin(checkJSONPortAttr, 0), - validateIntMax(checkJSONPortAttr, 65535), - ), - }, - checkJSONReadLimitAttr: &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validateFuncs( - validateIntMin(checkJSONReadLimitAttr, 0), - ), - }, - checkJSONURLAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateFuncs( - validateHTTPURL(checkJSONURLAttr, urlIsAbs), - ), - }, - checkJSONVersionAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultCheckJSONVersion, - ValidateFunc: validateStringIn(checkJSONVersionAttr, supportedHTTPVersions), - }, - }), - }, -} - -// checkAPIToStateJSON reads the Config data out of circonusCheck.CheckBundle into -// the statefile. 
-func checkAPIToStateJSON(c *circonusCheck, d *schema.ResourceData) error { - jsonConfig := make(map[string]interface{}, len(c.Config)) - - // swamp is a sanity check: it must be empty by the time this method returns - swamp := make(map[config.Key]string, len(c.Config)) - for k, s := range c.Config { - swamp[k] = s - } - - saveStringConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if s, ok := c.Config[apiKey]; ok && s != "" { - jsonConfig[string(attrName)] = s - } - - delete(swamp, apiKey) - } - - saveIntConfigToState := func(apiKey config.Key, attrName schemaAttr) { - if s, ok := c.Config[apiKey]; ok && s != "0" { - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - log.Printf("[ERROR]: Unable to convert %s to an integer: %v", apiKey, err) - return - } - jsonConfig[string(attrName)] = int(i) - } - - delete(swamp, apiKey) - } - - saveStringConfigToState(config.AuthMethod, checkJSONAuthMethodAttr) - saveStringConfigToState(config.AuthPassword, checkJSONAuthPasswordAttr) - saveStringConfigToState(config.AuthUser, checkJSONAuthUserAttr) - saveStringConfigToState(config.CAChain, checkJSONCAChainAttr) - saveStringConfigToState(config.CertFile, checkJSONCertFileAttr) - saveStringConfigToState(config.Ciphers, checkJSONCiphersAttr) - - headers := make(map[string]interface{}, len(c.Config)) - headerPrefixLen := len(config.HeaderPrefix) - for k, v := range c.Config { - if len(k) <= headerPrefixLen { - continue - } - - if strings.Compare(string(k[:headerPrefixLen]), string(config.HeaderPrefix)) == 0 { - key := k[headerPrefixLen:] - headers[string(key)] = v - } - delete(swamp, k) - } - jsonConfig[string(checkJSONHeadersAttr)] = headers - - saveStringConfigToState(config.KeyFile, checkJSONKeyFileAttr) - saveStringConfigToState(config.Method, checkJSONMethodAttr) - saveStringConfigToState(config.Payload, checkJSONPayloadAttr) - saveIntConfigToState(config.Port, checkJSONPortAttr) - saveIntConfigToState(config.ReadLimit, checkJSONReadLimitAttr) - 
saveStringConfigToState(config.URL, checkJSONURLAttr) - saveStringConfigToState(config.HTTPVersion, checkJSONVersionAttr) - - whitelistedConfigKeys := map[config.Key]struct{}{ - config.ReverseSecretKey: struct{}{}, - config.SubmissionURL: struct{}{}, - } - - for k := range swamp { - if _, ok := whitelistedConfigKeys[k]; ok { - delete(c.Config, k) - } - - if _, ok := whitelistedConfigKeys[k]; !ok { - return fmt.Errorf("PROVIDER BUG: API Config not empty: %#v", swamp) - } - } - - if err := d.Set(checkJSONAttr, schema.NewSet(checkJSONConfigChecksum, []interface{}{jsonConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkJSONAttr), err) - } - - return nil -} - -// checkJSONConfigChecksum creates a stable hash of the normalized values found -// in a user's Terraform config. -func checkJSONConfigChecksum(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeInt := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(int) != 0 { - fmt.Fprintf(b, "%x", v.(int)) - } - } - - writeString := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - fmt.Fprint(b, strings.TrimSpace(v.(string))) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. 
- writeString(checkJSONAuthMethodAttr) - writeString(checkJSONAuthPasswordAttr) - writeString(checkJSONAuthUserAttr) - writeString(checkJSONCAChainAttr) - writeString(checkJSONCertFileAttr) - writeString(checkJSONCiphersAttr) - - if headersRaw, ok := m[string(checkJSONHeadersAttr)]; ok { - headerMap := headersRaw.(map[string]interface{}) - headers := make([]string, 0, len(headerMap)) - for k := range headerMap { - headers = append(headers, k) - } - - sort.Strings(headers) - for i := range headers { - fmt.Fprint(b, headers[i]) - fmt.Fprint(b, headerMap[headers[i]].(string)) - } - } - - writeString(checkJSONKeyFileAttr) - writeString(checkJSONMethodAttr) - writeString(checkJSONPayloadAttr) - writeInt(checkJSONPortAttr) - writeInt(checkJSONReadLimitAttr) - writeString(checkJSONURLAttr) - writeString(checkJSONVersionAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPIJSON(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeJSON) - - // Iterate over all `json` attributes, even though we have a max of 1 in the - // schema. 
- for _, mapRaw := range l { - jsonConfig := newInterfaceMap(mapRaw) - - if v, found := jsonConfig[checkJSONAuthMethodAttr]; found { - c.Config[config.AuthMethod] = v.(string) - } - - if v, found := jsonConfig[checkJSONAuthPasswordAttr]; found { - c.Config[config.AuthPassword] = v.(string) - } - - if v, found := jsonConfig[checkJSONAuthUserAttr]; found { - c.Config[config.AuthUser] = v.(string) - } - - if v, found := jsonConfig[checkJSONCAChainAttr]; found { - c.Config[config.CAChain] = v.(string) - } - - if v, found := jsonConfig[checkJSONCertFileAttr]; found { - c.Config[config.CertFile] = v.(string) - } - - if v, found := jsonConfig[checkJSONCiphersAttr]; found { - c.Config[config.Ciphers] = v.(string) - } - - if headers := jsonConfig.CollectMap(checkJSONHeadersAttr); headers != nil { - for k, v := range headers { - h := config.HeaderPrefix + config.Key(k) - c.Config[h] = v - } - } - - if v, found := jsonConfig[checkJSONKeyFileAttr]; found { - c.Config[config.KeyFile] = v.(string) - } - - if v, found := jsonConfig[checkJSONMethodAttr]; found { - c.Config[config.Method] = v.(string) - } - - if v, found := jsonConfig[checkJSONPayloadAttr]; found { - c.Config[config.Payload] = v.(string) - } - - if v, found := jsonConfig[checkJSONPortAttr]; found { - i := v.(int) - if i != 0 { - c.Config[config.Port] = fmt.Sprintf("%d", i) - } - } - - if v, found := jsonConfig[checkJSONReadLimitAttr]; found { - i := v.(int) - if i != 0 { - c.Config[config.ReadLimit] = fmt.Sprintf("%d", i) - } - } - - if v, found := jsonConfig[checkJSONURLAttr]; found { - c.Config[config.URL] = v.(string) - - u, _ := url.Parse(v.(string)) - hostInfo := strings.SplitN(u.Host, ":", 2) - if len(c.Target) == 0 { - c.Target = hostInfo[0] - } - - if len(hostInfo) > 1 && c.Config[config.Port] == "" { - c.Config[config.Port] = hostInfo[1] - } - } - - if v, found := jsonConfig[checkJSONVersionAttr]; found { - c.Config[config.HTTPVersion] = v.(string) - } - } - - return nil -} diff --git 
a/builtin/providers/circonus/resource_circonus_check_json_test.go b/builtin/providers/circonus/resource_circonus_check_json_test.go deleted file mode 100644 index a2b40b3f0..000000000 --- a/builtin/providers/circonus/resource_circonus_check_json_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package circonus - -import ( - "regexp" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckJSON_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: testAccCirconusCheckJSONConfig1, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.usage", "active", "true"), - resource.TestMatchResourceAttr("circonus_check.usage", "check_id", regexp.MustCompile(config.CheckCIDRegex)), - resource.TestCheckResourceAttr("circonus_check.usage", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "collector.2388330941.id", "/broker/1"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.#", "1"), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.auth_method", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.auth_password", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.auth_user", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.certificate_file", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.ciphers", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.key_file", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.payload", ""), - 
resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.headers.%", "3"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.headers.Accept", "application/json"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.headers.X-Circonus-App-Name", "TerraformCheck"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.headers.X-Circonus-Auth-Token", ""), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.version", "1.0"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.method", "GET"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.port", "443"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.read_limit", "1048576"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.2626248092.url", "https://api.circonus.com/account/current"), - resource.TestCheckResourceAttr("circonus_check.usage", "name", "Terraform test: api.circonus.com metric usage check"), - resource.TestCheckResourceAttr("circonus_check.usage", "notes", ""), - resource.TestCheckResourceAttr("circonus_check.usage", "period", "60s"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.#", "2"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.active", "true"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.name", "_usage`0`_limit"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.tags.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.unit", "qty"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.active", "true"), - 
resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.name", "_usage`0`_used"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.tags.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.unit", "qty"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.usage", "target", "api.circonus.com"), - resource.TestCheckResourceAttr("circonus_check.usage", "type", "json"), - ), - }, - { - Config: testAccCirconusCheckJSONConfig2, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.usage", "active", "true"), - resource.TestCheckResourceAttr("circonus_check.usage", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "collector.2388330941.id", "/broker/1"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.#", "1"), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.auth_method", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.auth_password", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.auth_user", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.ca_chain", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.certificate_file", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.ciphers", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", 
"json.3951979786.key_file", ""), - // resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.payload", ""), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.headers.%", "3"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.headers.Accept", "application/json"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.headers.X-Circonus-App-Name", "TerraformCheck"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.headers.X-Circonus-Auth-Token", ""), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.version", "1.1"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.method", "GET"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.port", "443"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.read_limit", "1048576"), - resource.TestCheckResourceAttr("circonus_check.usage", "json.3951979786.url", "https://api.circonus.com/account/current"), - resource.TestCheckResourceAttr("circonus_check.usage", "name", "Terraform test: api.circonus.com metric usage check"), - resource.TestCheckResourceAttr("circonus_check.usage", "notes", "notes!"), - resource.TestCheckResourceAttr("circonus_check.usage", "period", "300s"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.#", "2"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.active", "true"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.name", "_usage`0`_limit"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.tags.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.1992097900.unit", "qty"), - 
resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.active", "true"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.name", "_usage`0`_used"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.tags.#", "1"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.type", "numeric"), - resource.TestCheckResourceAttr("circonus_check.usage", "metric.3280673139.unit", "qty"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.3241999189", "source:circonus"), - resource.TestCheckResourceAttr("circonus_check.usage", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.usage", "target", "api.circonus.com"), - resource.TestCheckResourceAttr("circonus_check.usage", "type", "json"), - ), - }, - }, - }) -} - -const testAccCirconusCheckJSONConfig1 = ` -variable "usage_default_unit" { - default = "qty" -} - -resource "circonus_metric" "limit" { - name = "_usage` + "`0`" + `_limit" - tags = [ "source:circonus" ] - type = "numeric" - unit = "${var.usage_default_unit}" -} - -resource "circonus_metric" "used" { - name = "_usage` + "`0`" + `_used" - tags = [ "source:circonus" ] - type = "numeric" - unit = "${var.usage_default_unit}" -} - -resource "circonus_check" "usage" { - active = true - name = "Terraform test: api.circonus.com metric usage check" - period = "60s" - - collector { - id = "/broker/1" - } - - json { - url = "https://api.circonus.com/account/current" - headers = { - Accept = "application/json", - X-Circonus-App-Name = "TerraformCheck", - X-Circonus-Auth-Token = "", - } - version = "1.0" - method = "GET" - port = 443 - read_limit = 1048576 - } - - metric { - name = "${circonus_metric.used.name}" - tags = [ "${circonus_metric.used.tags}" ] - 
type = "${circonus_metric.used.type}" - unit = "${coalesce(circonus_metric.used.unit, var.usage_default_unit)}" - } - - metric { - name = "${circonus_metric.limit.name}" - tags = [ "${circonus_metric.limit.tags}" ] - type = "${circonus_metric.limit.type}" - unit = "${coalesce(circonus_metric.limit.unit, var.usage_default_unit)}" - } - - tags = [ "source:circonus", "lifecycle:unittest" ] -} -` - -const testAccCirconusCheckJSONConfig2 = ` -variable "usage_default_unit" { - default = "qty" -} - -resource "circonus_metric" "limit" { - name = "_usage` + "`0`" + `_limit" - tags = [ "source:circonus" ] - type = "numeric" - unit = "${var.usage_default_unit}" -} - -resource "circonus_metric" "used" { - name = "_usage` + "`0`" + `_used" - tags = [ "source:circonus" ] - type = "numeric" - unit = "${var.usage_default_unit}" -} - -resource "circonus_check" "usage" { - active = true - name = "Terraform test: api.circonus.com metric usage check" - notes = "notes!" - period = "300s" - - collector { - id = "/broker/1" - } - - json { - url = "https://api.circonus.com/account/current" - headers = { - Accept = "application/json", - X-Circonus-App-Name = "TerraformCheck", - X-Circonus-Auth-Token = "", - } - version = "1.1" - method = "GET" - port = 443 - read_limit = 1048576 - } - - metric { - name = "${circonus_metric.used.name}" - tags = [ "${circonus_metric.used.tags}" ] - type = "${circonus_metric.used.type}" - unit = "${coalesce(circonus_metric.used.unit, var.usage_default_unit)}" - } - - metric { - name = "${circonus_metric.limit.name}" - tags = [ "${circonus_metric.limit.tags}" ] - type = "${circonus_metric.limit.type}" - unit = "${coalesce(circonus_metric.limit.unit, var.usage_default_unit)}" - } - - tags = [ "source:circonus", "lifecycle:unittest" ] -} -` diff --git a/builtin/providers/circonus/resource_circonus_check_mysql.go b/builtin/providers/circonus/resource_circonus_check_mysql.go deleted file mode 100644 index 3fe2094ed..000000000 --- 
a/builtin/providers/circonus/resource_circonus_check_mysql.go +++ /dev/null @@ -1,102 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_check.mysql.* resource attribute names - checkMySQLDSNAttr = "dsn" - checkMySQLQueryAttr = "query" -) - -var checkMySQLDescriptions = attrDescrs{ - checkMySQLDSNAttr: "The connect DSN for the MySQL instance", - checkMySQLQueryAttr: "The SQL to use as the query", -} - -var schemaCheckMySQL = &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - MinItems: 1, - Set: hashCheckMySQL, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(checkMySQLDescriptions, map[schemaAttr]*schema.Schema{ - checkMySQLDSNAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(checkMySQLDSNAttr, `^.+$`), - }, - checkMySQLQueryAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - StateFunc: func(v interface{}) string { return strings.TrimSpace(v.(string)) }, - ValidateFunc: validateRegexp(checkMySQLQueryAttr, `.+`), - }, - }), - }, -} - -// checkAPIToStateMySQL reads the Config data out of circonusCheck.CheckBundle into the -// statefile. 
-func checkAPIToStateMySQL(c *circonusCheck, d *schema.ResourceData) error { - MySQLConfig := make(map[string]interface{}, len(c.Config)) - - MySQLConfig[string(checkMySQLDSNAttr)] = c.Config[config.DSN] - MySQLConfig[string(checkMySQLQueryAttr)] = c.Config[config.SQL] - - if err := d.Set(checkMySQLAttr, schema.NewSet(hashCheckMySQL, []interface{}{MySQLConfig})); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store check %q attribute: {{err}}", checkMySQLAttr), err) - } - - return nil -} - -// hashCheckMySQL creates a stable hash of the normalized values -func hashCheckMySQL(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeString := func(attrName schemaAttr) { - if v, ok := m[string(attrName)]; ok && v.(string) != "" { - fmt.Fprint(b, strings.TrimSpace(v.(string))) - } - } - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. - writeString(checkMySQLDSNAttr) - writeString(checkMySQLQueryAttr) - - s := b.String() - return hashcode.String(s) -} - -func checkConfigToAPIMySQL(c *circonusCheck, l interfaceList) error { - c.Type = string(apiCheckTypeMySQL) - - // Iterate over all `mysql` attributes, even though we have a max of 1 in the - // schema. 
- for _, mapRaw := range l { - mysqlConfig := newInterfaceMap(mapRaw) - - if v, found := mysqlConfig[checkMySQLDSNAttr]; found { - c.Config[config.DSN] = v.(string) - } - - if v, found := mysqlConfig[checkMySQLQueryAttr]; found { - c.Config[config.SQL] = v.(string) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_check_mysql_test.go b/builtin/providers/circonus/resource_circonus_check_mysql_test.go deleted file mode 100644 index 063cc54b4..000000000 --- a/builtin/providers/circonus/resource_circonus_check_mysql_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package circonus - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccCirconusCheckMySQL_basic(t *testing.T) { - checkName := fmt.Sprintf("MySQL binlog total - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusCheckBundle, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusCheckMySQLConfigFmt, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_check.table_ops", "active", "true"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "collector.#", "1"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "collector.2388330941.id", "/broker/1"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "mysql.#", "1"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "mysql.3110376931.dsn", "user=mysql host=mydb1.example.org port=3306 password=12345 sslmode=require"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "mysql.3110376931.query", `select 'binlog', total from (select variable_value as total from information_schema.global_status where variable_name='BINLOG_CACHE_USE') total`), - resource.TestCheckResourceAttr("circonus_check.table_ops", 
"name", checkName), - resource.TestCheckResourceAttr("circonus_check.table_ops", "period", "300s"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.#", "1"), - - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.885029470.name", "binlog`total"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.885029470.tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.885029470.tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.885029470.tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "metric.885029470.type", "numeric"), - - resource.TestCheckResourceAttr("circonus_check.table_ops", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "tags.1401442048", "lifecycle:unittest"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "target", "mydb.example.org"), - resource.TestCheckResourceAttr("circonus_check.table_ops", "type", "mysql"), - ), - }, - }, - }) -} - -const testAccCirconusCheckMySQLConfigFmt = ` -variable "test_tags" { - type = "list" - default = [ "author:terraform", "lifecycle:unittest" ] -} - -resource "circonus_check" "table_ops" { - active = true - name = "%s" - period = "300s" - - collector { - id = "/broker/1" - } - - mysql { - dsn = "user=mysql host=mydb1.example.org port=3306 password=12345 sslmode=require" - query = < 1 { - alertOptionsList = append(alertOptionsList, *alertOptions[i]) - } - } - - return alertOptionsList -} - -func contactGroupEmailToState(cg *api.ContactGroup) []interface{} { - emailContacts := make([]interface{}, 0, len(cg.Contacts.Users)+len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodEmail: - emailContacts = append(emailContacts, 
map[string]interface{}{ - contactEmailAddressAttr: ext.Info, - }) - } - } - - for _, user := range cg.Contacts.Users { - switch user.Method { - case circonusMethodEmail: - emailContacts = append(emailContacts, map[string]interface{}{ - contactUserCIDAttr: user.UserCID, - }) - } - } - - return emailContacts -} - -func contactGroupHTTPToState(cg *api.ContactGroup) ([]interface{}, error) { - httpContacts := make([]interface{}, 0, len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodHTTP: - url := contactHTTPInfo{} - if err := json.Unmarshal([]byte(ext.Info), &url); err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to decode external %s JSON (%q): {{err}}", contactHTTPAttr, ext.Info), err) - } - - httpContacts = append(httpContacts, map[string]interface{}{ - string(contactHTTPAddressAttr): url.Address, - string(contactHTTPFormatAttr): url.Format, - string(contactHTTPMethodAttr): url.Method, - }) - } - } - - return httpContacts, nil -} - -func getContactGroupInput(d *schema.ResourceData) (*api.ContactGroup, error) { - cg := api.NewContactGroup() - if v, ok := d.GetOk(contactAggregationWindowAttr); ok { - aggWindow, _ := time.ParseDuration(v.(string)) - cg.AggregationWindow = uint(aggWindow.Seconds()) - } - - if v, ok := d.GetOk(contactAlertOptionAttr); ok { - alertOptionsRaw := v.(*schema.Set).List() - - ensureEscalationSeverity := func(severity int) { - if cg.Escalations[severity] == nil { - cg.Escalations[severity] = &api.ContactGroupEscalation{} - } - } - - for _, alertOptionRaw := range alertOptionsRaw { - alertOptionsMap := alertOptionRaw.(map[string]interface{}) - - severityIndex := -1 - - if optRaw, ok := alertOptionsMap[contactSeverityAttr]; ok { - severityIndex = optRaw.(int) - 1 - } - - if optRaw, ok := alertOptionsMap[contactEscalateAfterAttr]; ok { - if optRaw.(string) != "" { - d, _ := time.ParseDuration(optRaw.(string)) - if d != 0 { - ensureEscalationSeverity(severityIndex) - 
cg.Escalations[severityIndex].After = uint(d.Seconds()) - } - } - } - - if optRaw, ok := alertOptionsMap[contactEscalateToAttr]; ok && optRaw.(string) != "" { - ensureEscalationSeverity(severityIndex) - cg.Escalations[severityIndex].ContactGroupCID = optRaw.(string) - } - - if optRaw, ok := alertOptionsMap[contactReminderAttr]; ok { - if optRaw.(string) == "" { - optRaw = "0s" - } - - d, _ := time.ParseDuration(optRaw.(string)) - cg.Reminders[severityIndex] = uint(d.Seconds()) - } - } - } - - if v, ok := d.GetOk(contactNameAttr); ok { - cg.Name = v.(string) - } - - if v, ok := d.GetOk(contactEmailAttr); ok { - emailListRaw := v.(*schema.Set).List() - for _, emailMapRaw := range emailListRaw { - emailMap := emailMapRaw.(map[string]interface{}) - - var requiredAttrFound bool - if v, ok := emailMap[contactEmailAddressAttr]; ok && v.(string) != "" { - requiredAttrFound = true - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: v.(string), - Method: circonusMethodEmail, - }) - } - - if v, ok := emailMap[contactUserCIDAttr]; ok && v.(string) != "" { - requiredAttrFound = true - cg.Contacts.Users = append(cg.Contacts.Users, api.ContactGroupContactsUser{ - Method: circonusMethodEmail, - UserCID: v.(string), - }) - } - - // Can't mark two attributes that are conflicting as required so we do our - // own validation check here. 
- if !requiredAttrFound { - return nil, fmt.Errorf("In type %s, either %s or %s must be specified", contactEmailAttr, contactEmailAddressAttr, contactUserCIDAttr) - } - } - } - - if v, ok := d.GetOk(contactHTTPAttr); ok { - httpListRaw := v.(*schema.Set).List() - for _, httpMapRaw := range httpListRaw { - httpMap := httpMapRaw.(map[string]interface{}) - - httpInfo := contactHTTPInfo{} - - if v, ok := httpMap[string(contactHTTPAddressAttr)]; ok { - httpInfo.Address = v.(string) - } - - if v, ok := httpMap[string(contactHTTPFormatAttr)]; ok { - httpInfo.Format = v.(string) - } - - if v, ok := httpMap[string(contactHTTPMethodAttr)]; ok { - httpInfo.Method = v.(string) - } - - js, err := json.Marshal(httpInfo) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error marshalling %s JSON config string: {{err}}", contactHTTPAttr), err) - } - - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: string(js), - Method: circonusMethodHTTP, - }) - } - } - - if v, ok := d.GetOk(contactIRCAttr); ok { - ircListRaw := v.(*schema.Set).List() - for _, ircMapRaw := range ircListRaw { - ircMap := ircMapRaw.(map[string]interface{}) - - if v, ok := ircMap[contactUserCIDAttr]; ok && v.(string) != "" { - cg.Contacts.Users = append(cg.Contacts.Users, api.ContactGroupContactsUser{ - Method: circonusMethodIRC, - UserCID: v.(string), - }) - } - } - } - - if v, ok := d.GetOk(contactPagerDutyAttr); ok { - pagerDutyListRaw := v.(*schema.Set).List() - for _, pagerDutyMapRaw := range pagerDutyListRaw { - pagerDutyMap := pagerDutyMapRaw.(map[string]interface{}) - - pagerDutyInfo := contactPagerDutyInfo{} - - if v, ok := pagerDutyMap[contactContactGroupFallbackAttr]; ok && v.(string) != "" { - cid := v.(string) - contactGroupID, err := failoverGroupCIDToID(api.CIDType(&cid)) - if err != nil { - return nil, errwrap.Wrapf("error reading contact group CID: {{err}}", err) - } - pagerDutyInfo.FallbackGroupCID = contactGroupID - } - - if v, ok := 
pagerDutyMap[string(contactPagerDutyServiceKeyAttr)]; ok { - pagerDutyInfo.ServiceKey = v.(string) - } - - if v, ok := pagerDutyMap[string(contactPagerDutyWebhookURLAttr)]; ok { - pagerDutyInfo.WebhookURL = v.(string) - } - - js, err := json.Marshal(pagerDutyInfo) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error marshalling %s JSON config string: {{err}}", contactPagerDutyAttr), err) - } - - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: string(js), - Method: circonusMethodPagerDuty, - }) - } - } - - if v, ok := d.GetOk(contactSlackAttr); ok { - slackListRaw := v.(*schema.Set).List() - for _, slackMapRaw := range slackListRaw { - slackMap := slackMapRaw.(map[string]interface{}) - - slackInfo := contactSlackInfo{} - - var buttons int - if v, ok := slackMap[contactSlackButtonsAttr]; ok { - if v.(bool) { - buttons = 1 - } - slackInfo.Buttons = buttons - } - - if v, ok := slackMap[contactSlackChannelAttr]; ok { - slackInfo.Channel = v.(string) - } - - if v, ok := slackMap[contactContactGroupFallbackAttr]; ok && v.(string) != "" { - cid := v.(string) - contactGroupID, err := failoverGroupCIDToID(api.CIDType(&cid)) - if err != nil { - return nil, errwrap.Wrapf("error reading contact group CID: {{err}}", err) - } - slackInfo.FallbackGroupCID = contactGroupID - } - - if v, ok := slackMap[contactSlackTeamAttr]; ok { - slackInfo.Team = v.(string) - } - - if v, ok := slackMap[contactSlackUsernameAttr]; ok { - slackInfo.Username = v.(string) - } - - js, err := json.Marshal(slackInfo) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error marshalling %s JSON config string: {{err}}", contactSlackAttr), err) - } - - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: string(js), - Method: circonusMethodSlack, - }) - } - } - - if v, ok := d.GetOk(contactSMSAttr); ok { - smsListRaw := v.(*schema.Set).List() - for _, smsMapRaw := range smsListRaw { - smsMap := 
smsMapRaw.(map[string]interface{}) - - var requiredAttrFound bool - if v, ok := smsMap[contactSMSAddressAttr]; ok && v.(string) != "" { - requiredAttrFound = true - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: v.(string), - Method: circonusMethodSMS, - }) - } - - if v, ok := smsMap[contactUserCIDAttr]; ok && v.(string) != "" { - requiredAttrFound = true - cg.Contacts.Users = append(cg.Contacts.Users, api.ContactGroupContactsUser{ - Method: circonusMethodSMS, - UserCID: v.(string), - }) - } - - // Can't mark two attributes that are conflicting as required so we do our - // own validation check here. - if !requiredAttrFound { - return nil, fmt.Errorf("In type %s, either %s or %s must be specified", contactEmailAttr, contactEmailAddressAttr, contactUserCIDAttr) - } - } - } - - if v, ok := d.GetOk(contactVictorOpsAttr); ok { - victorOpsListRaw := v.(*schema.Set).List() - for _, victorOpsMapRaw := range victorOpsListRaw { - victorOpsMap := victorOpsMapRaw.(map[string]interface{}) - - victorOpsInfo := contactVictorOpsInfo{} - - if v, ok := victorOpsMap[contactContactGroupFallbackAttr]; ok && v.(string) != "" { - cid := v.(string) - contactGroupID, err := failoverGroupCIDToID(api.CIDType(&cid)) - if err != nil { - return nil, errwrap.Wrapf("error reading contact group CID: {{err}}", err) - } - victorOpsInfo.FallbackGroupCID = contactGroupID - } - - if v, ok := victorOpsMap[contactVictorOpsAPIKeyAttr]; ok { - victorOpsInfo.APIKey = v.(string) - } - - if v, ok := victorOpsMap[contactVictorOpsCriticalAttr]; ok { - victorOpsInfo.Critical = v.(int) - } - - if v, ok := victorOpsMap[contactVictorOpsInfoAttr]; ok { - victorOpsInfo.Info = v.(int) - } - - if v, ok := victorOpsMap[contactVictorOpsTeamAttr]; ok { - victorOpsInfo.Team = v.(string) - } - - if v, ok := victorOpsMap[contactVictorOpsWarningAttr]; ok { - victorOpsInfo.Warning = v.(int) - } - - js, err := json.Marshal(victorOpsInfo) - if err != nil { - return nil, 
errwrap.Wrapf(fmt.Sprintf("error marshalling %s JSON config string: {{err}}", contactVictorOpsAttr), err) - } - - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: string(js), - Method: circonusMethodVictorOps, - }) - } - } - - if v, ok := d.GetOk(contactXMPPAttr); ok { - xmppListRaw := v.(*schema.Set).List() - for _, xmppMapRaw := range xmppListRaw { - xmppMap := xmppMapRaw.(map[string]interface{}) - - if v, ok := xmppMap[contactXMPPAddressAttr]; ok && v.(string) != "" { - cg.Contacts.External = append(cg.Contacts.External, api.ContactGroupContactsExternal{ - Info: v.(string), - Method: circonusMethodXMPP, - }) - } - - if v, ok := xmppMap[contactUserCIDAttr]; ok && v.(string) != "" { - cg.Contacts.Users = append(cg.Contacts.Users, api.ContactGroupContactsUser{ - Method: circonusMethodXMPP, - UserCID: v.(string), - }) - } - } - } - - if v, ok := d.GetOk(contactLongMessageAttr); ok { - msg := v.(string) - cg.AlertFormats.LongMessage = &msg - } - - if v, ok := d.GetOk(contactLongSubjectAttr); ok { - msg := v.(string) - cg.AlertFormats.LongSubject = &msg - } - - if v, ok := d.GetOk(contactLongSummaryAttr); ok { - msg := v.(string) - cg.AlertFormats.LongSummary = &msg - } - - if v, ok := d.GetOk(contactShortMessageAttr); ok { - msg := v.(string) - cg.AlertFormats.ShortMessage = &msg - } - - if v, ok := d.GetOk(contactShortSummaryAttr); ok { - msg := v.(string) - cg.AlertFormats.ShortSummary = &msg - } - - if v, ok := d.GetOk(contactShortMessageAttr); ok { - msg := v.(string) - cg.AlertFormats.ShortMessage = &msg - } - - if v, found := d.GetOk(checkTagsAttr); found { - cg.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if err := validateContactGroup(cg); err != nil { - return nil, err - } - - return cg, nil -} - -func contactGroupIRCToState(cg *api.ContactGroup) []interface{} { - ircContacts := make([]interface{}, 0, len(cg.Contacts.Users)) - - for _, user := range cg.Contacts.Users { - switch user.Method { - case 
circonusMethodIRC: - ircContacts = append(ircContacts, map[string]interface{}{ - contactUserCIDAttr: user.UserCID, - }) - } - } - - return ircContacts -} - -func contactGroupPagerDutyToState(cg *api.ContactGroup) ([]interface{}, error) { - pdContacts := make([]interface{}, 0, len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodPagerDuty: - pdInfo := contactPagerDutyInfo{} - if err := json.Unmarshal([]byte(ext.Info), &pdInfo); err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to decode external %s JSON (%q): {{err}}", contactPagerDutyAttr, ext.Info), err) - } - - pdContacts = append(pdContacts, map[string]interface{}{ - string(contactContactGroupFallbackAttr): failoverGroupIDToCID(pdInfo.FallbackGroupCID), - string(contactPagerDutyServiceKeyAttr): pdInfo.ServiceKey, - string(contactPagerDutyWebhookURLAttr): pdInfo.WebhookURL, - }) - } - } - - return pdContacts, nil -} - -func contactGroupSlackToState(cg *api.ContactGroup) ([]interface{}, error) { - slackContacts := make([]interface{}, 0, len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodSlack: - slackInfo := contactSlackInfo{} - if err := json.Unmarshal([]byte(ext.Info), &slackInfo); err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to decode external %s JSON (%q): {{err}}", contactSlackAttr, ext.Info), err) - } - - slackContacts = append(slackContacts, map[string]interface{}{ - contactContactGroupFallbackAttr: failoverGroupIDToCID(slackInfo.FallbackGroupCID), - contactSlackButtonsAttr: int(slackInfo.Buttons) == int(1), - contactSlackChannelAttr: slackInfo.Channel, - contactSlackTeamAttr: slackInfo.Team, - contactSlackUsernameAttr: slackInfo.Username, - }) - } - } - - return slackContacts, nil -} - -func contactGroupSMSToState(cg *api.ContactGroup) ([]interface{}, error) { - smsContacts := make([]interface{}, 0, len(cg.Contacts.Users)+len(cg.Contacts.External)) - - 
for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodSMS: - smsContacts = append(smsContacts, map[string]interface{}{ - contactSMSAddressAttr: ext.Info, - }) - } - } - - for _, user := range cg.Contacts.Users { - switch user.Method { - case circonusMethodSMS: - smsContacts = append(smsContacts, map[string]interface{}{ - contactUserCIDAttr: user.UserCID, - }) - } - } - - return smsContacts, nil -} - -func contactGroupVictorOpsToState(cg *api.ContactGroup) ([]interface{}, error) { - victorOpsContacts := make([]interface{}, 0, len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodVictorOps: - victorOpsInfo := contactVictorOpsInfo{} - if err := json.Unmarshal([]byte(ext.Info), &victorOpsInfo); err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to decode external %s JSON (%q): {{err}}", contactVictorOpsInfoAttr, ext.Info), err) - } - - victorOpsContacts = append(victorOpsContacts, map[string]interface{}{ - contactContactGroupFallbackAttr: failoverGroupIDToCID(victorOpsInfo.FallbackGroupCID), - contactVictorOpsAPIKeyAttr: victorOpsInfo.APIKey, - contactVictorOpsCriticalAttr: victorOpsInfo.Critical, - contactVictorOpsInfoAttr: victorOpsInfo.Info, - contactVictorOpsTeamAttr: victorOpsInfo.Team, - contactVictorOpsWarningAttr: victorOpsInfo.Warning, - }) - } - } - - return victorOpsContacts, nil -} - -func contactGroupXMPPToState(cg *api.ContactGroup) ([]interface{}, error) { - xmppContacts := make([]interface{}, 0, len(cg.Contacts.Users)+len(cg.Contacts.External)) - - for _, ext := range cg.Contacts.External { - switch ext.Method { - case circonusMethodXMPP: - xmppContacts = append(xmppContacts, map[string]interface{}{ - contactXMPPAddressAttr: ext.Info, - }) - } - } - - for _, user := range cg.Contacts.Users { - switch user.Method { - case circonusMethodXMPP: - xmppContacts = append(xmppContacts, map[string]interface{}{ - contactUserCIDAttr: user.UserCID, - }) - } - } 
- - return xmppContacts, nil -} - -// contactGroupAlertOptionsChecksum creates a stable hash of the normalized values -func contactGroupAlertOptionsChecksum(v interface{}) int { - m := v.(map[string]interface{}) - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - fmt.Fprintf(b, "%x", m[contactSeverityAttr].(int)) - fmt.Fprint(b, normalizeTimeDurationStringToSeconds(m[contactEscalateAfterAttr])) - fmt.Fprint(b, m[contactEscalateToAttr]) - fmt.Fprint(b, normalizeTimeDurationStringToSeconds(m[contactReminderAttr])) - return hashcode.String(b.String()) -} diff --git a/builtin/providers/circonus/resource_circonus_contact_test.go b/builtin/providers/circonus/resource_circonus_contact_test.go deleted file mode 100644 index 64186f27d..000000000 --- a/builtin/providers/circonus/resource_circonus_contact_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package circonus - -import ( - "fmt" - "strings" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccCirconusContactGroup_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusContactGroup, - Steps: []resource.TestStep{ - { - Config: testAccCirconusContactGroupConfig, - Check: resource.ComposeTestCheckFunc( - // testAccContactGroupExists("circonus_contact_group.staging-sev3", "foo"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "name", "ops-staging-sev3"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.#", "3"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.1119127802.address", ""), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.1119127802.user", "/user/5469"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.1456570992.address", ""), 
- resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.1456570992.user", "/user/6331"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.343263208.address", "user@example.com"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "email.343263208.user", ""), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "http.#", "1"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "http.1287846151.address", "https://www.example.org/post/endpoint"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "http.1287846151.format", "json"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "http.1287846151.method", "POST"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "irc.#", "0"), - // resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "irc.918937268.user", "/user/6331"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "slack.#", "1"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "slack.274933206.channel", "#ops-staging"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "slack.274933206.team", "T123UT98F"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "slack.274933206.username", "Circonus"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "slack.274933206.buttons", "true"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "sms.#", "1"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "sms.1119127802.user", "/user/5469"), - - // xmpp.# will be 0 for user faux user accounts that don't have an - // XMPP address setup. 
- resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "xmpp.#", "0"), - // resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "xmpp.1119127802.user", "/user/5469"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.#", "1"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.2029434450.api_key", "123"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.2029434450.critical", "2"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.2029434450.info", "5"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.2029434450.team", "bender"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "victorops.2029434450.warning", "3"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "aggregation_window", "60s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.#", "5"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.689365425.severity", "1"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.689365425.reminder", "60s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.689365425.escalate_after", "3600s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.689365425.escalate_to", "/contact_group/2913"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.551050940.severity", "2"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.551050940.reminder", "120s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.551050940.escalate_after", "7200s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.551050940.escalate_to", 
"/contact_group/2913"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1292974544.severity", "3"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1292974544.reminder", "180s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1292974544.escalate_after", "10800s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1292974544.escalate_to", "/contact_group/2913"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1183354841.severity", "4"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1183354841.reminder", "240s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1183354841.escalate_after", "14400s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.1183354841.escalate_to", "/contact_group/2913"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.2942620849.severity", "5"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.2942620849.reminder", "300s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.2942620849.escalate_after", "18000s"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "alert_option.2942620849.escalate_to", "/contact_group/2913"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "long_message", "a long message"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "long_subject", "long subject"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "long_summary", "long summary"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "short_message", "short message"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", 
"short_summary", "short summary"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_contact_group.staging-sev3", "tags.393923453", "other:foo"), - ), - }, - }, - }) -} - -func testAccCheckDestroyCirconusContactGroup(s *terraform.State) error { - c := testAccProvider.Meta().(*providerContext) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "circonus_contact_group" { - continue - } - - cid := rs.Primary.ID - exists, err := checkContactGroupExists(c, api.CIDType(&cid)) - switch { - case !exists: - // noop - case exists: - return fmt.Errorf("contact group still exists after destroy") - case err != nil: - return fmt.Errorf("Error checking contact group %s", err) - } - } - - return nil -} - -func checkContactGroupExists(c *providerContext, contactGroupCID api.CIDType) (bool, error) { - cb, err := c.client.FetchContactGroup(contactGroupCID) - if err != nil { - if strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if api.CIDType(&cb.CID) == contactGroupCID { - return true, nil - } - - return false, nil -} - -const testAccCirconusContactGroupConfig = ` -resource "circonus_contact_group" "staging-sev3" { - name = "ops-staging-sev3" - - email { - user = "/user/5469" - } - - email { - address = "user@example.com" - } - - email { - user = "/user/6331" - } - - http { - address = "https://www.example.org/post/endpoint" - format = "json" - method = "POST" - } - -/* - // Account needs to be setup with IRC before this can work. 
- irc { - user = "/user/6331" - } -*/ - -/* - pager_duty { - // NOTE(sean@): needs to be filled in - } -*/ - - slack { - channel = "#ops-staging" - team = "T123UT98F" - username = "Circonus" - buttons = true - } - - sms { - user = "/user/5469" - } - - victorops { - api_key = "123" - critical = 2 - info = 5 - team = "bender" - warning = 3 - } - - // Faux user accounts that don't have an XMPP address setup will not return a - // valid response in the future. - // - // xmpp { - // user = "/user/5469" - // } - - aggregation_window = "1m" - - alert_option { - severity = 1 - reminder = "60s" - escalate_after = "3600s" - escalate_to = "/contact_group/2913" - } - - alert_option { - severity = 2 - reminder = "2m" - escalate_after = "2h" - escalate_to = "/contact_group/2913" - } - - alert_option { - severity = 3 - reminder = "3m" - escalate_after = "3h" - escalate_to = "/contact_group/2913" - } - - alert_option { - severity = 4 - reminder = "4m" - escalate_after = "4h" - escalate_to = "/contact_group/2913" - } - - alert_option { - severity = 5 - reminder = "5m" - escalate_after = "5h" - escalate_to = "/contact_group/2913" - } - - // alert_formats: omit to use defaults - long_message = "a long message" - long_subject = "long subject" - long_summary = "long summary" - short_message = "short message" - short_summary = "short summary" - - tags = [ - "author:terraform", - "other:foo", - ] -} -` diff --git a/builtin/providers/circonus/resource_circonus_graph.go b/builtin/providers/circonus/resource_circonus_graph.go deleted file mode 100644 index 836e42263..000000000 --- a/builtin/providers/circonus/resource_circonus_graph.go +++ /dev/null @@ -1,930 +0,0 @@ -package circonus - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_graph.* resource attribute names 
- graphDescriptionAttr = "description" - graphLeftAttr = "left" - graphLineStyleAttr = "line_style" - graphMetricClusterAttr = "metric_cluster" - graphNameAttr = "name" - graphNotesAttr = "notes" - graphRightAttr = "right" - graphMetricAttr = "metric" - graphStyleAttr = "graph_style" - graphTagsAttr = "tags" - - // circonus_graph.metric.* resource attribute names - graphMetricActiveAttr = "active" - graphMetricAlphaAttr = "alpha" - graphMetricAxisAttr = "axis" - graphMetricCAQLAttr = "caql" - graphMetricCheckAttr = "check" - graphMetricColorAttr = "color" - graphMetricFormulaAttr = "formula" - graphMetricFormulaLegendAttr = "legend_formula" - graphMetricFunctionAttr = "function" - graphMetricHumanNameAttr = "name" - graphMetricMetricTypeAttr = "metric_type" - graphMetricNameAttr = "metric_name" - graphMetricStackAttr = "stack" - - // circonus_graph.metric_cluster.* resource attribute names - graphMetricClusterActiveAttr = "active" - graphMetricClusterAggregateAttr = "aggregate" - graphMetricClusterAxisAttr = "axis" - graphMetricClusterColorAttr = "color" - graphMetricClusterQueryAttr = "query" - graphMetricClusterHumanNameAttr = "name" - - // circonus_graph.{left,right}.* resource attribute names - graphAxisLogarithmicAttr = "logarithmic" - graphAxisMaxAttr = "max" - graphAxisMinAttr = "min" -) - -const ( - apiGraphStyleLine = "line" -) - -var graphDescriptions = attrDescrs{ - // circonus_graph.* resource attribute names - graphDescriptionAttr: "", - graphLeftAttr: "", - graphLineStyleAttr: "How the line should change between point. 
A string containing either 'stepped', 'interpolated' or null.", - graphNameAttr: "", - graphNotesAttr: "", - graphRightAttr: "", - graphMetricAttr: "", - graphMetricClusterAttr: "", - graphStyleAttr: "", - graphTagsAttr: "", -} - -var graphMetricDescriptions = attrDescrs{ - // circonus_graph.metric.* resource attribute names - graphMetricActiveAttr: "", - graphMetricAlphaAttr: "", - graphMetricAxisAttr: "", - graphMetricCAQLAttr: "", - graphMetricCheckAttr: "", - graphMetricColorAttr: "", - graphMetricFormulaAttr: "", - graphMetricFormulaLegendAttr: "", - graphMetricFunctionAttr: "", - graphMetricMetricTypeAttr: "", - graphMetricHumanNameAttr: "", - graphMetricNameAttr: "", - graphMetricStackAttr: "", -} - -var graphMetricClusterDescriptions = attrDescrs{ - // circonus_graph.metric_cluster.* resource attribute names - graphMetricClusterActiveAttr: "", - graphMetricClusterAggregateAttr: "", - graphMetricClusterAxisAttr: "", - graphMetricClusterColorAttr: "", - graphMetricClusterQueryAttr: "", - graphMetricClusterHumanNameAttr: "", -} - -// NOTE(sean@): There is no way to set a description on map inputs, but if that -// does happen: -// -// var graphMetricAxisOptionDescriptions = attrDescrs{ -// // circonus_graph.if.value.over.* resource attribute names -// graphAxisLogarithmicAttr: "", -// graphAxisMaxAttr: "", -// graphAxisMinAttr: "", -// } - -func resourceGraph() *schema.Resource { - makeConflictsWith := func(in ...schemaAttr) []string { - out := make([]string, 0, len(in)) - for _, attr := range in { - out = append(out, string(graphMetricAttr)+"."+string(attr)) - } - return out - } - - return &schema.Resource{ - Create: graphCreate, - Read: graphRead, - Update: graphUpdate, - Delete: graphDelete, - Exists: graphExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: convertToHelperSchema(graphDescriptions, map[schemaAttr]*schema.Schema{ - graphDescriptionAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - StateFunc: suppressWhitespace, - }, - graphLeftAttr: &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeString, - Optional: true, - ValidateFunc: validateGraphAxisOptions, - }, - graphLineStyleAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultGraphLineStyle, - ValidateFunc: validateStringIn(graphLineStyleAttr, validGraphLineStyles), - }, - graphNameAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(graphNameAttr, `.+`), - }, - graphNotesAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - graphRightAttr: &schema.Schema{ - Type: schema.TypeMap, - Elem: schema.TypeString, - Optional: true, - ValidateFunc: validateGraphAxisOptions, - }, - graphMetricAttr: &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(graphMetricDescriptions, map[schemaAttr]*schema.Schema{ - graphMetricActiveAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - graphMetricAlphaAttr: &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - ValidateFunc: validateFuncs( - validateFloatMin(graphMetricAlphaAttr, 0.0), - validateFloatMax(graphMetricAlphaAttr, 1.0), - ), - }, - graphMetricAxisAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "left", - ValidateFunc: validateStringIn(graphMetricAxisAttr, validAxisAttrs), - }, - graphMetricCAQLAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricCAQLAttr, `.+`), - ConflictsWith: makeConflictsWith(graphMetricCheckAttr, graphMetricNameAttr), - }, - graphMetricCheckAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricCheckAttr, config.CheckCIDRegex), - ConflictsWith: makeConflictsWith(graphMetricCAQLAttr), - }, - graphMetricColorAttr: &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - ValidateFunc: validateRegexp(graphMetricColorAttr, `^#[0-9a-fA-F]{6}$`), - }, - graphMetricFormulaAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricFormulaAttr, `^.+$`), - }, - graphMetricFormulaLegendAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricFormulaLegendAttr, `^.+$`), - }, - graphMetricFunctionAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultGraphFunction, - ValidateFunc: validateStringIn(graphMetricFunctionAttr, validGraphFunctionValues), - }, - graphMetricMetricTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateStringIn(graphMetricMetricTypeAttr, validMetricTypes), - }, - graphMetricHumanNameAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricHumanNameAttr, `.+`), - }, - graphMetricNameAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricNameAttr, `^[\S]+$`), - }, - graphMetricStackAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricStackAttr, `^[\d]*$`), - }, - }), - }, - }, - graphMetricClusterAttr: &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(graphMetricClusterDescriptions, map[schemaAttr]*schema.Schema{ - graphMetricClusterActiveAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - graphMetricClusterAggregateAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "none", - ValidateFunc: validateStringIn(graphMetricClusterAggregateAttr, validAggregateFuncs), - }, - graphMetricClusterAxisAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "left", - ValidateFunc: validateStringIn(graphMetricClusterAttr, 
validAxisAttrs), - }, - graphMetricClusterColorAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricClusterColorAttr, `^#[0-9a-fA-F]{6}$`), - }, - graphMetricClusterQueryAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(graphMetricClusterQueryAttr, config.MetricClusterCIDRegex), - }, - graphMetricClusterHumanNameAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(graphMetricHumanNameAttr, `.+`), - }, - }), - }, - }, - graphStyleAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultGraphStyle, - ValidateFunc: validateStringIn(graphStyleAttr, validGraphStyles), - }, - graphTagsAttr: tagMakeConfigSchema(graphTagsAttr), - }), - } -} - -func graphCreate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - g := newGraph() - if err := g.ParseConfig(d); err != nil { - return errwrap.Wrapf("error parsing graph schema during create: {{err}}", err) - } - - if err := g.Create(ctxt); err != nil { - return errwrap.Wrapf("error creating graph: {{err}}", err) - } - - d.SetId(g.CID) - - return graphRead(d, meta) -} - -func graphExists(d *schema.ResourceData, meta interface{}) (bool, error) { - ctxt := meta.(*providerContext) - - cid := d.Id() - g, err := ctxt.client.FetchGraph(api.CIDType(&cid)) - if err != nil { - if strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if g.CID == "" { - return false, nil - } - - return true, nil -} - -// graphRead pulls data out of the Graph object and stores it into the -// appropriate place in the statefile. 
-func graphRead(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - g, err := loadGraph(ctxt, api.CIDType(&cid)) - if err != nil { - return err - } - - d.SetId(g.CID) - - metrics := make([]interface{}, 0, len(g.Datapoints)) - for _, datapoint := range g.Datapoints { - dataPointAttrs := make(map[string]interface{}, 13) // 13 == len(members in api.GraphDatapoint) - - dataPointAttrs[string(graphMetricActiveAttr)] = !datapoint.Hidden - - if datapoint.Alpha != nil && *datapoint.Alpha != 0 { - dataPointAttrs[string(graphMetricAlphaAttr)] = *datapoint.Alpha - } - - switch datapoint.Axis { - case "l", "": - dataPointAttrs[string(graphMetricAxisAttr)] = "left" - case "r": - dataPointAttrs[string(graphMetricAxisAttr)] = "right" - default: - return fmt.Errorf("PROVIDER BUG: Unsupported axis type %q", datapoint.Axis) - } - - if datapoint.CAQL != nil { - dataPointAttrs[string(graphMetricCAQLAttr)] = *datapoint.CAQL - } - - if datapoint.CheckID != 0 { - dataPointAttrs[string(graphMetricCheckAttr)] = fmt.Sprintf("%s/%d", config.CheckPrefix, datapoint.CheckID) - } - - if datapoint.Color != nil { - dataPointAttrs[string(graphMetricColorAttr)] = *datapoint.Color - } - - if datapoint.DataFormula != nil { - dataPointAttrs[string(graphMetricFormulaAttr)] = *datapoint.DataFormula - } - - switch datapoint.Derive.(type) { - case bool: - case string: - dataPointAttrs[string(graphMetricFunctionAttr)] = datapoint.Derive.(string) - default: - return fmt.Errorf("PROVIDER BUG: Unsupported type for derive: %T", datapoint.Derive) - } - - if datapoint.LegendFormula != nil { - dataPointAttrs[string(graphMetricFormulaLegendAttr)] = *datapoint.LegendFormula - } - - if datapoint.MetricName != "" { - dataPointAttrs[string(graphMetricNameAttr)] = datapoint.MetricName - } - - if datapoint.MetricType != "" { - dataPointAttrs[string(graphMetricMetricTypeAttr)] = datapoint.MetricType - } - - if datapoint.Name != "" { - 
dataPointAttrs[string(graphMetricHumanNameAttr)] = datapoint.Name - } - - if datapoint.Stack != nil { - dataPointAttrs[string(graphMetricStackAttr)] = fmt.Sprintf("%d", *datapoint.Stack) - } - - metrics = append(metrics, dataPointAttrs) - } - - metricClusters := make([]interface{}, 0, len(g.MetricClusters)) - for _, metricCluster := range g.MetricClusters { - metricClusterAttrs := make(map[string]interface{}, 8) // 8 == len(num struct attrs in api.GraphMetricCluster) - - metricClusterAttrs[string(graphMetricClusterActiveAttr)] = !metricCluster.Hidden - - if metricCluster.AggregateFunc != "" { - metricClusterAttrs[string(graphMetricClusterAggregateAttr)] = metricCluster.AggregateFunc - } - - switch metricCluster.Axis { - case "l", "": - metricClusterAttrs[string(graphMetricClusterAxisAttr)] = "left" - case "r": - metricClusterAttrs[string(graphMetricClusterAxisAttr)] = "right" - default: - return fmt.Errorf("PROVIDER BUG: Unsupported axis type %q", metricCluster.Axis) - } - - if metricCluster.Color != nil { - metricClusterAttrs[string(graphMetricClusterColorAttr)] = *metricCluster.Color - } - - if metricCluster.DataFormula != nil { - metricClusterAttrs[string(graphMetricFormulaAttr)] = *metricCluster.DataFormula - } - - if metricCluster.LegendFormula != nil { - metricClusterAttrs[string(graphMetricFormulaLegendAttr)] = *metricCluster.LegendFormula - } - - if metricCluster.MetricCluster != "" { - metricClusterAttrs[string(graphMetricClusterQueryAttr)] = metricCluster.MetricCluster - } - - if metricCluster.Name != "" { - metricClusterAttrs[string(graphMetricHumanNameAttr)] = metricCluster.Name - } - - if metricCluster.Stack != nil { - metricClusterAttrs[string(graphMetricStackAttr)] = fmt.Sprintf("%d", *metricCluster.Stack) - } - - metricClusters = append(metricClusters, metricClusterAttrs) - } - - leftAxisMap := make(map[string]interface{}, 3) - if g.LogLeftY != nil { - leftAxisMap[string(graphAxisLogarithmicAttr)] = fmt.Sprintf("%d", *g.LogLeftY) - } - - if 
g.MaxLeftY != nil { - leftAxisMap[string(graphAxisMaxAttr)] = strconv.FormatFloat(*g.MaxLeftY, 'f', -1, 64) - } - - if g.MinLeftY != nil { - leftAxisMap[string(graphAxisMinAttr)] = strconv.FormatFloat(*g.MinLeftY, 'f', -1, 64) - } - - rightAxisMap := make(map[string]interface{}, 3) - if g.LogRightY != nil { - rightAxisMap[string(graphAxisLogarithmicAttr)] = fmt.Sprintf("%d", *g.LogRightY) - } - - if g.MaxRightY != nil { - rightAxisMap[string(graphAxisMaxAttr)] = strconv.FormatFloat(*g.MaxRightY, 'f', -1, 64) - } - - if g.MinRightY != nil { - rightAxisMap[string(graphAxisMinAttr)] = strconv.FormatFloat(*g.MinRightY, 'f', -1, 64) - } - - d.Set(graphDescriptionAttr, g.Description) - - if err := d.Set(graphLeftAttr, leftAxisMap); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store graph %q attribute: {{err}}", graphLeftAttr), err) - } - - d.Set(graphLineStyleAttr, g.LineStyle) - d.Set(graphNameAttr, g.Title) - d.Set(graphNotesAttr, indirect(g.Notes)) - - if err := d.Set(graphRightAttr, rightAxisMap); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store graph %q attribute: {{err}}", graphRightAttr), err) - } - - if err := d.Set(graphMetricAttr, metrics); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store graph %q attribute: {{err}}", graphMetricAttr), err) - } - - if err := d.Set(graphMetricClusterAttr, metricClusters); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store graph %q attribute: {{err}}", graphMetricClusterAttr), err) - } - - d.Set(graphStyleAttr, g.Style) - - if err := d.Set(graphTagsAttr, tagsToState(apiToTags(g.Tags))); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store graph %q attribute: {{err}}", graphTagsAttr), err) - } - - return nil -} - -func graphUpdate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - g := newGraph() - if err := g.ParseConfig(d); err != nil { - return err - } - - g.CID = d.Id() - if err := g.Update(ctxt); err != nil { - return 
errwrap.Wrapf(fmt.Sprintf("unable to update graph %q: {{err}}", d.Id()), err) - } - - return graphRead(d, meta) -} - -func graphDelete(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - if _, err := ctxt.client.DeleteGraphByCID(api.CIDType(&cid)); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to delete graph %q: {{err}}", d.Id()), err) - } - - d.SetId("") - - return nil -} - -type circonusGraph struct { - api.Graph -} - -func newGraph() circonusGraph { - g := circonusGraph{ - Graph: *api.NewGraph(), - } - - return g -} - -func loadGraph(ctxt *providerContext, cid api.CIDType) (circonusGraph, error) { - var g circonusGraph - ng, err := ctxt.client.FetchGraph(cid) - if err != nil { - return circonusGraph{}, err - } - g.Graph = *ng - - return g, nil -} - -// ParseConfig reads Terraform config data and stores the information into a -// Circonus Graph object. ParseConfig and graphRead() must be kept in sync. -func (g *circonusGraph) ParseConfig(d *schema.ResourceData) error { - g.Datapoints = make([]api.GraphDatapoint, 0, defaultGraphDatapoints) - - if v, found := d.GetOk(graphLeftAttr); found { - listRaw := v.(map[string]interface{}) - leftAxisMap := make(map[string]interface{}, len(listRaw)) - for k, v := range listRaw { - leftAxisMap[k] = v - } - - if v, ok := leftAxisMap[string(graphAxisLogarithmicAttr)]; ok { - i64, _ := strconv.ParseInt(v.(string), 10, 64) - i := int(i64) - g.LogLeftY = &i - } - - if v, ok := leftAxisMap[string(graphAxisMaxAttr)]; ok && v.(string) != "" { - f, _ := strconv.ParseFloat(v.(string), 64) - g.MaxLeftY = &f - } - - if v, ok := leftAxisMap[string(graphAxisMinAttr)]; ok && v.(string) != "" { - f, _ := strconv.ParseFloat(v.(string), 64) - g.MinLeftY = &f - } - } - - if v, found := d.GetOk(graphRightAttr); found { - listRaw := v.(map[string]interface{}) - rightAxisMap := make(map[string]interface{}, len(listRaw)) - for k, v := range listRaw { - rightAxisMap[k] = v - } - - if v, 
ok := rightAxisMap[string(graphAxisLogarithmicAttr)]; ok { - i64, _ := strconv.ParseInt(v.(string), 10, 64) - i := int(i64) - g.LogRightY = &i - } - - if v, ok := rightAxisMap[string(graphAxisMaxAttr)]; ok && v.(string) != "" { - f, _ := strconv.ParseFloat(v.(string), 64) - g.MaxRightY = &f - } - - if v, ok := rightAxisMap[string(graphAxisMinAttr)]; ok && v.(string) != "" { - f, _ := strconv.ParseFloat(v.(string), 64) - g.MinRightY = &f - } - } - - if v, found := d.GetOk(graphDescriptionAttr); found { - g.Description = v.(string) - } - - if v, found := d.GetOk(graphLineStyleAttr); found { - switch v.(type) { - case string: - s := v.(string) - g.LineStyle = &s - case *string: - g.LineStyle = v.(*string) - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphLineStyleAttr, v) - } - } - - if v, found := d.GetOk(graphNameAttr); found { - g.Title = v.(string) - } - - if v, found := d.GetOk(graphNotesAttr); found { - s := v.(string) - g.Notes = &s - } - - if listRaw, found := d.GetOk(graphMetricAttr); found { - metricList := listRaw.([]interface{}) - for _, metricListElem := range metricList { - metricAttrs := newInterfaceMap(metricListElem.(map[string]interface{})) - datapoint := api.GraphDatapoint{} - - if v, found := metricAttrs[graphMetricActiveAttr]; found { - datapoint.Hidden = !(v.(bool)) - } - - if v, found := metricAttrs[graphMetricAlphaAttr]; found { - f := v.(float64) - if f != 0 { - datapoint.Alpha = &f - } - } - - if v, found := metricAttrs[graphMetricAxisAttr]; found { - switch v.(string) { - case "left", "": - datapoint.Axis = "l" - case "right": - datapoint.Axis = "r" - default: - return fmt.Errorf("PROVIDER BUG: Unsupported axis attribute %q: %q", graphMetricAxisAttr, v.(string)) - } - } - - if v, found := metricAttrs[graphMetricCheckAttr]; found { - re := regexp.MustCompile(config.CheckCIDRegex) - matches := re.FindStringSubmatch(v.(string)) - if len(matches) == 3 { - checkID, _ := strconv.ParseUint(matches[2], 10, 64) - 
datapoint.CheckID = uint(checkID) - } - } - - if v, found := metricAttrs[graphMetricColorAttr]; found { - s := v.(string) - datapoint.Color = &s - } - - if v, found := metricAttrs[graphMetricFormulaAttr]; found { - switch v.(type) { - case string: - s := v.(string) - datapoint.DataFormula = &s - case *string: - datapoint.DataFormula = v.(*string) - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricAttr, v) - } - } - - if v, found := metricAttrs[graphMetricFunctionAttr]; found { - s := v.(string) - if s != "" { - datapoint.Derive = s - } else { - datapoint.Derive = false - } - } else { - datapoint.Derive = false - } - - if v, found := metricAttrs[graphMetricFormulaLegendAttr]; found { - switch u := v.(type) { - case string: - datapoint.LegendFormula = &u - case *string: - datapoint.LegendFormula = u - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricAttr, v) - } - } - - if v, found := metricAttrs[graphMetricNameAttr]; found { - s := v.(string) - if s != "" { - datapoint.MetricName = s - } - } - - if v, found := metricAttrs[graphMetricMetricTypeAttr]; found { - s := v.(string) - if s != "" { - datapoint.MetricType = s - } - } - - if v, found := metricAttrs[graphMetricHumanNameAttr]; found { - s := v.(string) - if s != "" { - datapoint.Name = s - } - } - - if v, found := metricAttrs[graphMetricStackAttr]; found { - var stackStr string - switch u := v.(type) { - case string: - stackStr = u - case *string: - if u != nil { - stackStr = *u - } - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricStackAttr, v) - } - - if stackStr != "" { - u64, _ := strconv.ParseUint(stackStr, 10, 64) - u := uint(u64) - datapoint.Stack = &u - } - } - - g.Datapoints = append(g.Datapoints, datapoint) - } - } - - if listRaw, found := d.GetOk(graphMetricClusterAttr); found { - metricClusterList := listRaw.([]interface{}) - - for _, metricClusterListRaw := range metricClusterList { - 
metricClusterAttrs := newInterfaceMap(metricClusterListRaw.(map[string]interface{})) - - metricCluster := api.GraphMetricCluster{} - - if v, found := metricClusterAttrs[graphMetricClusterActiveAttr]; found { - metricCluster.Hidden = !(v.(bool)) - } - - if v, found := metricClusterAttrs[graphMetricClusterAggregateAttr]; found { - metricCluster.AggregateFunc = v.(string) - } - - if v, found := metricClusterAttrs[graphMetricClusterAxisAttr]; found { - switch v.(string) { - case "left", "": - metricCluster.Axis = "l" - case "right": - metricCluster.Axis = "r" - default: - return fmt.Errorf("PROVIDER BUG: Unsupported axis attribute %q: %q", graphMetricClusterAxisAttr, v.(string)) - } - } - - if v, found := metricClusterAttrs[graphMetricClusterColorAttr]; found { - s := v.(string) - if s != "" { - metricCluster.Color = &s - } - } - - if v, found := metricClusterAttrs[graphMetricFormulaAttr]; found { - switch v.(type) { - case string: - s := v.(string) - metricCluster.DataFormula = &s - case *string: - metricCluster.DataFormula = v.(*string) - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricFormulaAttr, v) - } - } - - if v, found := metricClusterAttrs[graphMetricFormulaLegendAttr]; found { - switch v.(type) { - case string: - s := v.(string) - metricCluster.LegendFormula = &s - case *string: - metricCluster.LegendFormula = v.(*string) - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricFormulaLegendAttr, v) - } - } - - if v, found := metricClusterAttrs[graphMetricClusterQueryAttr]; found { - s := v.(string) - if s != "" { - metricCluster.MetricCluster = s - } - } - - if v, found := metricClusterAttrs[graphMetricHumanNameAttr]; found { - s := v.(string) - if s != "" { - metricCluster.Name = s - } - } - - if v, found := metricClusterAttrs[graphMetricStackAttr]; found { - var stackStr string - switch u := v.(type) { - case string: - stackStr = u - case *string: - if u != nil { - stackStr = *u - } - 
default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphMetricStackAttr, v) - } - - if stackStr != "" { - u64, _ := strconv.ParseUint(stackStr, 10, 64) - u := uint(u64) - metricCluster.Stack = &u - } - } - - g.MetricClusters = append(g.MetricClusters, metricCluster) - } - } - - if v, found := d.GetOk(graphStyleAttr); found { - switch v.(type) { - case string: - s := v.(string) - g.Style = &s - case *string: - g.Style = v.(*string) - default: - return fmt.Errorf("PROVIDER BUG: unsupported type for %q: %T", graphStyleAttr, v) - } - } - - if v, found := d.GetOk(graphTagsAttr); found { - g.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if err := g.Validate(); err != nil { - return err - } - - return nil -} - -func (g *circonusGraph) Create(ctxt *providerContext) error { - ng, err := ctxt.client.CreateGraph(&g.Graph) - if err != nil { - return err - } - - g.CID = ng.CID - - return nil -} - -func (g *circonusGraph) Update(ctxt *providerContext) error { - _, err := ctxt.client.UpdateGraph(&g.Graph) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to update graph %s: {{err}}", g.CID), err) - } - - return nil -} - -func (g *circonusGraph) Validate() error { - for i, datapoint := range g.Datapoints { - if *g.Style == apiGraphStyleLine && datapoint.Alpha != nil && *datapoint.Alpha != 0 { - return fmt.Errorf("%s can not be set on graphs with style %s", graphMetricAlphaAttr, apiGraphStyleLine) - } - - if datapoint.CheckID != 0 && datapoint.MetricName == "" { - return fmt.Errorf("Error with %s[%d] name=%q: %s is set, missing attribute %s must also be set", graphMetricAttr, i, datapoint.Name, graphMetricCheckAttr, graphMetricNameAttr) - } - - if datapoint.CheckID == 0 && datapoint.MetricName != "" { - return fmt.Errorf("Error with %s[%d] name=%q: %s is set, missing attribute %s must also be set", graphMetricAttr, i, datapoint.Name, graphMetricNameAttr, graphMetricCheckAttr) - } - - if datapoint.CAQL != nil && (datapoint.CheckID != 0 
|| datapoint.MetricName != "") { - return fmt.Errorf("Error with %s[%d] name=%q: %q attribute is mutually exclusive with attributes %s or %s", graphMetricAttr, i, datapoint.Name, graphMetricCAQLAttr, graphMetricNameAttr, graphMetricCheckAttr) - } - } - - for i, mc := range g.MetricClusters { - if mc.AggregateFunc != "" && (mc.Color == nil || *mc.Color == "") { - return fmt.Errorf("Error with %s[%d] name=%q: %s is a required attribute for graphs with %s set", graphMetricClusterAttr, i, mc.Name, graphMetricClusterColorAttr, graphMetricClusterAggregateAttr) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_graph_test.go b/builtin/providers/circonus/resource_circonus_graph_test.go deleted file mode 100644 index d51d00fc8..000000000 --- a/builtin/providers/circonus/resource_circonus_graph_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package circonus - -import ( - "fmt" - "strings" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccCirconusGraph_basic(t *testing.T) { - graphName := fmt.Sprintf("Test Graph - %s", acctest.RandString(5)) - checkName := fmt.Sprintf("ICMP Ping check - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusGraph, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusGraphConfigFmt, checkName, graphName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "name", graphName), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "description", "Terraform Test: mixed graph"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "notes", "test notes"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", 
"graph_style", "line"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "left.%", "1"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "left.max", "11"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "right.%", "3"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "right.logarithmic", "10"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "right.max", "20"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "right.min", "-1"), - - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "line_style", "stepped"), - - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.#", "2"), - - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.caql", ""), - resource.TestCheckResourceAttrSet("circonus_graph.mixed-points", "metric.0.check"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.metric_name", "maximum"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.metric_type", "numeric"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.name", "Maximum Latency"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.axis", "left"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.color", "#657aa6"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.function", "gauge"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.0.active", "true"), - - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.caql", ""), - resource.TestCheckResourceAttrSet("circonus_graph.mixed-points", "metric.1.check"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.metric_name", "minimum"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.metric_type", "numeric"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", 
"metric.1.name", "Minimum Latency"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.axis", "right"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.color", "#657aa6"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.function", "gauge"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "metric.1.active", "true"), - - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_graph.mixed-points", "tags.1401442048", "lifecycle:unittest"), - ), - }, - }, - }) -} - -func testAccCheckDestroyCirconusGraph(s *terraform.State) error { - ctxt := testAccProvider.Meta().(*providerContext) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "circonus_graph" { - continue - } - - cid := rs.Primary.ID - exists, err := checkGraphExists(ctxt, api.CIDType(&cid)) - switch { - case !exists: - // noop - case exists: - return fmt.Errorf("graph still exists after destroy") - case err != nil: - return fmt.Errorf("Error checking graph %s", err) - } - } - - return nil -} - -func checkGraphExists(c *providerContext, graphID api.CIDType) (bool, error) { - g, err := c.client.FetchGraph(graphID) - if err != nil { - if strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if api.CIDType(&g.CID) == graphID { - return true, nil - } - - return false, nil -} - -const testAccCirconusGraphConfigFmt = ` -variable "test_tags" { - type = "list" - default = [ "author:terraform", "lifecycle:unittest" ] -} - -resource "circonus_check" "api_latency" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/1" - } - - icmp_ping { - count = 5 - } - - metric { - name = "maximum" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - 
metric { - name = "minimum" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - tags = [ "${var.test_tags}" ] - target = "api.circonus.com" -} - -resource "circonus_graph" "mixed-points" { - name = "%s" - description = "Terraform Test: mixed graph" - notes = "test notes" - graph_style = "line" - line_style = "stepped" - - metric { - # caql = "" # conflicts with metric_name/check - check = "${circonus_check.api_latency.checks[0]}" - metric_name = "maximum" - metric_type = "numeric" - name = "Maximum Latency" - axis = "left" # right - color = "#657aa6" - function = "gauge" - active = true - } - - metric { - # caql = "" # conflicts with metric_name/check - check = "${circonus_check.api_latency.checks[0]}" - metric_name = "minimum" - metric_type = "numeric" - name = "Minimum Latency" - axis = "right" # left - color = "#657aa6" - function = "gauge" - active = true - } - - // metric_cluster { - // active = true - // aggregate = "average" - // axis = "left" # right - // color = "#657aa6" - // group = "${circonus_check.api_latency.checks[0]}" - // name = "Metrics Used" - // } - - left { - max = 11 - } - - right { - logarithmic = 10 - max = 20 - min = -1 - } - - tags = [ "${var.test_tags}" ] -} -` diff --git a/builtin/providers/circonus/resource_circonus_metric.go b/builtin/providers/circonus/resource_circonus_metric.go deleted file mode 100644 index 0b9bed1f2..000000000 --- a/builtin/providers/circonus/resource_circonus_metric.go +++ /dev/null @@ -1,138 +0,0 @@ -package circonus - -// The `circonus_metric` type is a synthetic, top-level resource that doesn't -// actually exist within Circonus. The `circonus_check` resource uses -// `circonus_metric` as input to its `metric` attribute. The `circonus_check` -// resource can, if configured, override various parameters in the -// `circonus_metric` resource if no value was set (e.g. the `icmp_ping` will -// implicitly set the `unit` metric to `seconds`). 
- -import ( - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_metric.* resource attribute names - metricActiveAttr = "active" - metricIDAttr = "id" - metricNameAttr = "name" - metricTypeAttr = "type" - metricTagsAttr = "tags" - metricUnitAttr = "unit" - - // CheckBundle.Metric.Status can be one of these values - metricStatusActive = "active" - metricStatusAvailable = "available" -) - -var metricDescriptions = attrDescrs{ - metricActiveAttr: "Enables or disables the metric", - metricNameAttr: "Name of the metric", - metricTypeAttr: "Type of metric (e.g. numeric, histogram, text)", - metricTagsAttr: "Tags assigned to the metric", - metricUnitAttr: "The unit of measurement for a metric", -} - -func resourceMetric() *schema.Resource { - return &schema.Resource{ - Create: metricCreate, - Read: metricRead, - Update: metricUpdate, - Delete: metricDelete, - Exists: metricExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: convertToHelperSchema(metricDescriptions, map[schemaAttr]*schema.Schema{ - metricActiveAttr: &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - metricNameAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(metricNameAttr, `[\S]+`), - }, - metricTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateStringIn(metricTypeAttr, validMetricTypes), - }, - metricTagsAttr: tagMakeConfigSchema(metricTagsAttr), - metricUnitAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: metricUnit, - ValidateFunc: validateRegexp(metricUnitAttr, metricUnitRegexp), - }, - }), - } -} - -func metricCreate(d *schema.ResourceData, meta interface{}) error { - m := newMetric() - - id := d.Id() - if id == "" { - var err error - id, err = newMetricID() - if err != nil { - return errwrap.Wrapf("metric ID creation failed: {{err}}", err) - } 
- } - - if err := m.ParseConfig(id, d); err != nil { - return errwrap.Wrapf("error parsing metric schema during create: {{err}}", err) - } - - if err := m.Create(d); err != nil { - return errwrap.Wrapf("error creating metric: {{err}}", err) - } - - return metricRead(d, meta) -} - -func metricRead(d *schema.ResourceData, meta interface{}) error { - m := newMetric() - - if err := m.ParseConfig(d.Id(), d); err != nil { - return errwrap.Wrapf("error parsing metric schema during read: {{err}}", err) - } - - if err := m.SaveState(d); err != nil { - return errwrap.Wrapf("error saving metric during read: {{err}}", err) - } - - return nil -} - -func metricUpdate(d *schema.ResourceData, meta interface{}) error { - m := newMetric() - - if err := m.ParseConfig(d.Id(), d); err != nil { - return errwrap.Wrapf("error parsing metric schema during update: {{err}}", err) - } - - if err := m.Update(d); err != nil { - return errwrap.Wrapf("error updating metric: {{err}}", err) - } - - return nil -} - -func metricDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - - return nil -} - -func metricExists(d *schema.ResourceData, meta interface{}) (bool, error) { - if id := d.Id(); id != "" { - return true, nil - } - - return false, nil -} diff --git a/builtin/providers/circonus/resource_circonus_metric_cluster.go b/builtin/providers/circonus/resource_circonus_metric_cluster.go deleted file mode 100644 index 77fde410a..000000000 --- a/builtin/providers/circonus/resource_circonus_metric_cluster.go +++ /dev/null @@ -1,260 +0,0 @@ -package circonus - -import ( - "bytes" - "fmt" - "strings" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // circonus_metric_cluster.* resource attribute names - metricClusterDescriptionAttr = "description" - metricClusterNameAttr = "name" - metricClusterQueryAttr = "query" - 
metricClusterTagsAttr = "tags" - - // circonus_metric_cluster.* out parameters - metricClusterIDAttr = "id" - - // circonus_metric_cluster.query.* resource attribute names - metricClusterDefinitionAttr = "definition" - metricClusterTypeAttr = "type" -) - -var metricClusterDescriptions = attrDescrs{ - metricClusterDescriptionAttr: "A description of the metric cluster", - metricClusterIDAttr: "The ID of this metric cluster", - metricClusterNameAttr: "The name of the metric cluster", - metricClusterQueryAttr: "A metric cluster query definition", - metricClusterTagsAttr: "A list of tags assigned to the metric cluster", -} - -var metricClusterQueryDescriptions = attrDescrs{ - metricClusterDefinitionAttr: "A query to select a collection of metric streams", - metricClusterTypeAttr: "The operation to perform on the matching metric streams", -} - -func resourceMetricCluster() *schema.Resource { - return &schema.Resource{ - Create: metricClusterCreate, - Read: metricClusterRead, - Update: metricClusterUpdate, - Delete: metricClusterDelete, - Exists: metricClusterExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: convertToHelperSchema(metricClusterDescriptions, map[schemaAttr]*schema.Schema{ - metricClusterDescriptionAttr: &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: suppressWhitespace, - }, - metricClusterNameAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - metricClusterQueryAttr: &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: convertToHelperSchema(metricClusterQueryDescriptions, map[schemaAttr]*schema.Schema{ - metricClusterDefinitionAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(metricClusterDefinitionAttr, `.+`), - }, - metricClusterTypeAttr: &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validateStringIn(metricClusterTypeAttr, supportedMetricClusterTypes), - }, - }), - }, - }, - metricClusterTagsAttr: tagMakeConfigSchema(metricClusterTagsAttr), - - // Out parameters - metricClusterIDAttr: &schema.Schema{ - Computed: true, - Type: schema.TypeString, - }, - }), - } -} - -func metricClusterCreate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - mc := newMetricCluster() - - if err := mc.ParseConfig(d); err != nil { - return errwrap.Wrapf("error parsing metric cluster schema during create: {{err}}", err) - } - - if err := mc.Create(ctxt); err != nil { - return errwrap.Wrapf("error creating metric cluster: {{err}}", err) - } - - d.SetId(mc.CID) - - return metricClusterRead(d, meta) -} - -func metricClusterExists(d *schema.ResourceData, meta interface{}) (bool, error) { - ctxt := meta.(*providerContext) - - cid := d.Id() - mc, err := ctxt.client.FetchMetricCluster(api.CIDType(&cid), "") - if err != nil { - if strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if mc.CID == "" { - return false, nil - } - - return true, nil -} - -// metricClusterRead pulls data out of the MetricCluster object and stores it -// into the appropriate place in the statefile. 
-func metricClusterRead(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - mc, err := loadMetricCluster(ctxt, api.CIDType(&cid)) - if err != nil { - return err - } - - d.SetId(mc.CID) - - queries := schema.NewSet(metricClusterQueryChecksum, nil) - for _, query := range mc.Queries { - queryAttrs := map[string]interface{}{ - string(metricClusterDefinitionAttr): query.Query, - string(metricClusterTypeAttr): query.Type, - } - - queries.Add(queryAttrs) - } - - d.Set(metricClusterDescriptionAttr, mc.Description) - d.Set(metricClusterNameAttr, mc.Name) - - if err := d.Set(metricClusterTagsAttr, tagsToState(apiToTags(mc.Tags))); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store metric cluster %q attribute: {{err}}", metricClusterTagsAttr), err) - } - - d.Set(metricClusterIDAttr, mc.CID) - - return nil -} - -func metricClusterUpdate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - mc := newMetricCluster() - - if err := mc.ParseConfig(d); err != nil { - return err - } - - mc.CID = d.Id() - if err := mc.Update(ctxt); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to update metric cluster %q: {{err}}", d.Id()), err) - } - - return metricClusterRead(d, meta) -} - -func metricClusterDelete(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - if _, err := ctxt.client.DeleteMetricClusterByCID(api.CIDType(&cid)); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to delete metric cluster %q: {{err}}", d.Id()), err) - } - - d.SetId("") - - return nil -} - -func metricClusterQueryChecksum(v interface{}) int { - m := v.(map[string]interface{}) - - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - // Order writes to the buffer using lexically sorted list for easy visual - // reconciliation with other lists. 
- if v, found := m[metricClusterDefinitionAttr]; found { - fmt.Fprint(b, v.(string)) - } - - if v, found := m[metricClusterTypeAttr]; found { - fmt.Fprint(b, v.(string)) - } - - s := b.String() - return hashcode.String(s) -} - -// ParseConfig reads Terraform config data and stores the information into a -// Circonus MetricCluster object. -func (mc *circonusMetricCluster) ParseConfig(d *schema.ResourceData) error { - if v, found := d.GetOk(metricClusterDescriptionAttr); found { - mc.Description = v.(string) - } - - if v, found := d.GetOk(metricClusterNameAttr); found { - mc.Name = v.(string) - } - - if queryListRaw, found := d.GetOk(metricClusterQueryAttr); found { - queryList := queryListRaw.(*schema.Set).List() - - mc.Queries = make([]api.MetricQuery, 0, len(queryList)) - - for _, queryRaw := range queryList { - queryAttrs := newInterfaceMap(queryRaw) - - var query string - if v, found := queryAttrs[metricClusterDefinitionAttr]; found { - query = v.(string) - } - - var queryType string - if v, found := queryAttrs[metricClusterTypeAttr]; found { - queryType = v.(string) - } - - mc.Queries = append(mc.Queries, api.MetricQuery{ - Query: query, - Type: queryType, - }) - } - } - - if v, found := d.GetOk(metricClusterTagsAttr); found { - mc.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if err := mc.Validate(); err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_metric_cluster_test.go b/builtin/providers/circonus/resource_circonus_metric_cluster_test.go deleted file mode 100644 index 8c501041d..000000000 --- a/builtin/providers/circonus/resource_circonus_metric_cluster_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package circonus - -import ( - "fmt" - "strings" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccCirconusMetricCluster_basic(t *testing.T) { - metricClusterName := fmt.Sprintf("job1-stream-agg - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusMetricCluster, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusMetricClusterConfigFmt, metricClusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "description", `Metric Cluster Description`), - resource.TestCheckResourceAttrSet("circonus_metric_cluster.nomad-job1", "id"), - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "name", metricClusterName), - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "query.236803225.definition", "*`nomad-jobname`memory`rss"), - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "query.236803225.type", "average"), - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_metric_cluster.nomad-job1", "tags.3354173695", "source:nomad"), - ), - }, - }, - }) -} - -func testAccCheckDestroyCirconusMetricCluster(s *terraform.State) error { - ctxt := testAccProvider.Meta().(*providerContext) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "circonus_metric_cluster" { - continue - } - - cid := rs.Primary.ID - exists, err := checkMetricClusterExists(ctxt, api.CIDType(&cid)) - switch { - case !exists: - // noop - case exists: - return fmt.Errorf("metric cluster still exists after destroy") - case err != nil: - return fmt.Errorf("Error checking metric cluster: %v", err) - } - } - - return nil -} - -func checkMetricClusterExists(c *providerContext, metricClusterCID api.CIDType) (bool, error) { - cmc, err := c.client.FetchMetricCluster(metricClusterCID, "") - if err != nil { - if 
strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if api.CIDType(&cmc.CID) == metricClusterCID { - return true, nil - } - - return false, nil -} - -const testAccCirconusMetricClusterConfigFmt = ` -resource "circonus_metric_cluster" "nomad-job1" { - description = < 0 { - thenAttrs[string(ruleSetAfterAttr)] = fmt.Sprintf("%ds", 60*rule.Wait) - } - thenAttrs[string(ruleSetSeverityAttr)] = int(rule.Severity) - - if rule.WindowingFunction != nil { - valueOverAttrs[string(ruleSetUsingAttr)] = *rule.WindowingFunction - - // NOTE: Only save the window duration if a function was specified - valueOverAttrs[string(ruleSetLastAttr)] = fmt.Sprintf("%ds", rule.WindowingDuration) - } - valueOverSet := schema.NewSet(ruleSetValueOverChecksum, nil) - valueOverSet.Add(valueOverAttrs) - valueAttrs[string(ruleSetOverAttr)] = valueOverSet - - if contactGroups, ok := rs.ContactGroups[uint8(rule.Severity)]; ok { - sort.Strings(contactGroups) - thenAttrs[string(ruleSetNotifyAttr)] = contactGroups - } - thenSet := schema.NewSet(ruleSetThenChecksum, nil) - thenSet.Add(thenAttrs) - - valueSet := schema.NewSet(ruleSetValueChecksum, nil) - valueSet.Add(valueAttrs) - ifAttrs[string(ruleSetThenAttr)] = thenSet - ifAttrs[string(ruleSetValueAttr)] = valueSet - - ifRules = append(ifRules, ifAttrs) - } - - d.Set(ruleSetCheckAttr, rs.CheckCID) - - if err := d.Set(ruleSetIfAttr, ifRules); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store rule set %q attribute: {{err}}", ruleSetIfAttr), err) - } - - d.Set(ruleSetLinkAttr, indirect(rs.Link)) - d.Set(ruleSetMetricNameAttr, rs.MetricName) - d.Set(ruleSetMetricTypeAttr, rs.MetricType) - d.Set(ruleSetNotesAttr, indirect(rs.Notes)) - d.Set(ruleSetParentAttr, indirect(rs.Parent)) - - if err := d.Set(ruleSetTagsAttr, tagsToState(apiToTags(rs.Tags))); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to store rule set %q attribute: {{err}}", ruleSetTagsAttr), err) - } - - 
return nil -} - -func ruleSetUpdate(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - rs := newRuleSet() - - if err := rs.ParseConfig(d); err != nil { - return err - } - - rs.CID = d.Id() - if err := rs.Update(ctxt); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to update rule set %q: {{err}}", d.Id()), err) - } - - return ruleSetRead(d, meta) -} - -func ruleSetDelete(d *schema.ResourceData, meta interface{}) error { - ctxt := meta.(*providerContext) - - cid := d.Id() - if _, err := ctxt.client.DeleteRuleSetByCID(api.CIDType(&cid)); err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to delete rule set %q: {{err}}", d.Id()), err) - } - - d.SetId("") - - return nil -} - -type circonusRuleSet struct { - api.RuleSet -} - -func newRuleSet() circonusRuleSet { - rs := circonusRuleSet{ - RuleSet: *api.NewRuleSet(), - } - - rs.ContactGroups = make(map[uint8][]string, config.NumSeverityLevels) - for i := uint8(0); i < config.NumSeverityLevels; i++ { - rs.ContactGroups[i+1] = make([]string, 0, 1) - } - - rs.Rules = make([]api.RuleSetRule, 0, 1) - - return rs -} - -func loadRuleSet(ctxt *providerContext, cid api.CIDType) (circonusRuleSet, error) { - var rs circonusRuleSet - crs, err := ctxt.client.FetchRuleSet(cid) - if err != nil { - return circonusRuleSet{}, err - } - rs.RuleSet = *crs - - return rs, nil -} - -func ruleSetThenChecksum(v interface{}) int { - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeInt := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - i := v.(int) - if i != 0 { - fmt.Fprintf(b, "%x", i) - } - } - } - - writeString := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - s := strings.TrimSpace(v.(string)) - if s != "" { - fmt.Fprint(b, s) - } - } - } - - writeStringArray := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - a := v.([]string) - if a != nil { - sort.Strings(a) - 
for _, s := range a { - fmt.Fprint(b, strings.TrimSpace(s)) - } - } - } - } - - m := v.(map[string]interface{}) - - writeString(m, ruleSetAfterAttr) - writeStringArray(m, ruleSetNotifyAttr) - writeInt(m, ruleSetSeverityAttr) - - s := b.String() - return hashcode.String(s) -} - -func ruleSetValueChecksum(v interface{}) int { - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeBool := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - fmt.Fprintf(b, "%t", v.(bool)) - } - } - - writeDuration := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - s := v.(string) - if s != "" { - d, _ := time.ParseDuration(s) - fmt.Fprint(b, d.String()) - } - } - } - - writeString := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - s := strings.TrimSpace(v.(string)) - if s != "" { - fmt.Fprint(b, s) - } - } - } - - m := v.(map[string]interface{}) - - if v, found := m[ruleSetValueAttr]; found { - valueMap := v.(map[string]interface{}) - if valueMap != nil { - writeDuration(valueMap, ruleSetAbsentAttr) - writeBool(valueMap, ruleSetChangedAttr) - writeString(valueMap, ruleSetContainsAttr) - writeString(valueMap, ruleSetMatchAttr) - writeString(valueMap, ruleSetNotMatchAttr) - writeString(valueMap, ruleSetMinValueAttr) - writeString(valueMap, ruleSetNotContainAttr) - writeString(valueMap, ruleSetMaxValueAttr) - - if v, found := valueMap[ruleSetOverAttr]; found { - overMap := v.(map[string]interface{}) - writeDuration(overMap, ruleSetLastAttr) - writeString(overMap, ruleSetUsingAttr) - } - } - } - - s := b.String() - return hashcode.String(s) -} - -func ruleSetValueOverChecksum(v interface{}) int { - b := &bytes.Buffer{} - b.Grow(defaultHashBufSize) - - writeString := func(m map[string]interface{}, attrName string) { - if v, found := m[attrName]; found { - s := strings.TrimSpace(v.(string)) - if s != "" { - fmt.Fprint(b, s) - } - } - } - - m := 
v.(map[string]interface{}) - - writeString(m, ruleSetLastAttr) - writeString(m, ruleSetUsingAttr) - - s := b.String() - return hashcode.String(s) -} - -// ParseConfig reads Terraform config data and stores the information into a -// Circonus RuleSet object. ParseConfig, ruleSetRead(), and ruleSetChecksum -// must be kept in sync. -func (rs *circonusRuleSet) ParseConfig(d *schema.ResourceData) error { - if v, found := d.GetOk(ruleSetCheckAttr); found { - rs.CheckCID = v.(string) - } - - if v, found := d.GetOk(ruleSetLinkAttr); found { - s := v.(string) - rs.Link = &s - } - - if v, found := d.GetOk(ruleSetMetricTypeAttr); found { - rs.MetricType = v.(string) - } - - if v, found := d.GetOk(ruleSetNotesAttr); found { - s := v.(string) - rs.Notes = &s - } - - if v, found := d.GetOk(ruleSetParentAttr); found { - s := v.(string) - rs.Parent = &s - } - - if v, found := d.GetOk(ruleSetMetricNameAttr); found { - rs.MetricName = v.(string) - } - - rs.Rules = make([]api.RuleSetRule, 0, defaultRuleSetRuleLen) - if ifListRaw, found := d.GetOk(ruleSetIfAttr); found { - ifList := ifListRaw.([]interface{}) - for _, ifListElem := range ifList { - ifAttrs := newInterfaceMap(ifListElem.(map[string]interface{})) - - rule := api.RuleSetRule{} - - if thenListRaw, found := ifAttrs[ruleSetThenAttr]; found { - thenList := thenListRaw.(*schema.Set).List() - - for _, thenListRaw := range thenList { - thenAttrs := newInterfaceMap(thenListRaw) - - if v, found := thenAttrs[ruleSetAfterAttr]; found { - s := v.(string) - if s != "" { - d, err := time.ParseDuration(v.(string)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse %q duration %q: {{err}}", ruleSetAfterAttr, v.(string)), err) - } - rule.Wait = uint(d.Minutes()) - } - } - - // NOTE: break from convention of alpha sorting attributes and handle Notify after Severity - - if i, found := thenAttrs[ruleSetSeverityAttr]; found { - rule.Severity = uint(i.(int)) - } - - if notifyListRaw, found := thenAttrs[ruleSetNotifyAttr]; 
found { - notifyList := interfaceList(notifyListRaw.([]interface{})) - - sev := uint8(rule.Severity) - for _, contactGroupCID := range notifyList.List() { - var found bool - if contactGroups, ok := rs.ContactGroups[sev]; ok { - for _, contactGroup := range contactGroups { - if contactGroup == contactGroupCID { - found = true - break - } - } - } - if !found { - rs.ContactGroups[sev] = append(rs.ContactGroups[sev], contactGroupCID) - } - } - } - } - } - - if ruleSetValueListRaw, found := ifAttrs[ruleSetValueAttr]; found { - ruleSetValueList := ruleSetValueListRaw.(*schema.Set).List() - - for _, valueListRaw := range ruleSetValueList { - valueAttrs := newInterfaceMap(valueListRaw) - - METRIC_TYPE: - switch rs.MetricType { - case ruleSetMetricTypeNumeric: - if v, found := valueAttrs[ruleSetAbsentAttr]; found { - s := v.(string) - if s != "" { - d, _ := time.ParseDuration(s) - rule.Criteria = apiRuleSetAbsent - rule.Value = float64(d.Seconds()) - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetChangedAttr]; found { - b := v.(bool) - if b { - rule.Criteria = apiRuleSetChanged - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetMinValueAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = apiRuleSetMinValue - rule.Value = s - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetMaxValueAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = apiRuleSetMaxValue - rule.Value = s - break METRIC_TYPE - } - } - case ruleSetMetricTypeText: - if v, found := valueAttrs[ruleSetAbsentAttr]; found { - s := v.(string) - if s != "" { - d, _ := time.ParseDuration(s) - rule.Criteria = apiRuleSetAbsent - rule.Value = float64(d.Seconds()) - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetChangedAttr]; found { - b := v.(bool) - if b { - rule.Criteria = apiRuleSetChanged - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetContainsAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = 
apiRuleSetContains - rule.Value = s - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetMatchAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = apiRuleSetMatch - rule.Value = s - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetNotMatchAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = apiRuleSetNotMatch - rule.Value = s - break METRIC_TYPE - } - } - - if v, found := valueAttrs[ruleSetNotContainAttr]; found { - s := v.(string) - if s != "" { - rule.Criteria = apiRuleSetNotContains - rule.Value = s - break METRIC_TYPE - } - } - default: - return fmt.Errorf("PROVIDER BUG: unsupported rule set metric type: %q", rs.MetricType) - } - - if ruleSetOverListRaw, found := valueAttrs[ruleSetOverAttr]; found { - overList := ruleSetOverListRaw.(*schema.Set).List() - - for _, overListRaw := range overList { - overAttrs := newInterfaceMap(overListRaw) - - if v, found := overAttrs[ruleSetLastAttr]; found { - last, err := time.ParseDuration(v.(string)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("unable to parse duration %s attribute", ruleSetLastAttr), err) - } - rule.WindowingDuration = uint(last.Seconds()) - } - - if v, found := overAttrs[ruleSetUsingAttr]; found { - s := v.(string) - rule.WindowingFunction = &s - } - } - } - } - } - rs.Rules = append(rs.Rules, rule) - } - } - - if v, found := d.GetOk(ruleSetTagsAttr); found { - rs.Tags = derefStringList(flattenSet(v.(*schema.Set))) - } - - if err := rs.Validate(); err != nil { - return err - } - - return nil -} - -func (rs *circonusRuleSet) Create(ctxt *providerContext) error { - crs, err := ctxt.client.CreateRuleSet(&rs.RuleSet) - if err != nil { - return err - } - - rs.CID = crs.CID - - return nil -} - -func (rs *circonusRuleSet) Update(ctxt *providerContext) error { - _, err := ctxt.client.UpdateRuleSet(&rs.RuleSet) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to update rule set %s: {{err}}", rs.CID), err) - } - - return nil -} - -func (rs 
*circonusRuleSet) Validate() error { - // TODO(sean@): From https://login.circonus.com/resources/api/calls/rule_set - // under `value`: - // - // For an 'on absence' rule this is the number of seconds the metric must not - // have been collected for, and should not be lower than either the period or - // timeout of the metric being collected. - - for i, rule := range rs.Rules { - if rule.Criteria == "" { - return fmt.Errorf("rule %d for check ID %s has an empty criteria", i, rs.CheckCID) - } - } - - return nil -} diff --git a/builtin/providers/circonus/resource_circonus_rule_set_test.go b/builtin/providers/circonus/resource_circonus_rule_set_test.go deleted file mode 100644 index 71cf94ceb..000000000 --- a/builtin/providers/circonus/resource_circonus_rule_set_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package circonus - -import ( - "fmt" - "strings" - "testing" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccCirconusRuleSet_basic(t *testing.T) { - checkName := fmt.Sprintf("ICMP Ping check - %s", acctest.RandString(5)) - contactGroupName := fmt.Sprintf("ops-staging-sev3 - %s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDestroyCirconusRuleSet, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCirconusRuleSetConfigFmt, contactGroupName, checkName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("circonus_rule_set.icmp-latency-alarm", "check"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "metric_name", "maximum"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "metric_type", "numeric"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "notes", "Simple check to 
create notifications based on ICMP performance."), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "link", "https://wiki.example.org/playbook/what-to-do-when-high-latency-strikes"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "parent", "some check ID"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.#", "4"), - - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.value.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.value.360613670.absent", "70s"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.value.360613670.over.#", "0"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.then.#", "1"), - // Computed: - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.then..notify.#", "1"), - // resource.TestCheckResourceAttrSet("circonus_rule_set.icmp-latency-alarm", "if.0.then..notify.0"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.0.then..severity", "1"), - - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.value.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.value.2300199732.over.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.value.2300199732.over.689776960.last", "120s"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.value.2300199732.over.689776960.using", "average"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.value.2300199732.min_value", "2"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.then.#", "1"), - // Computed: - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.then..notify.#", "1"), - // 
resource.TestCheckResourceAttrSet("circonus_rule_set.icmp-latency-alarm", "if.1.then..notify.0"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.1.then..severity", "2"), - - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.value.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.value.2842654150.over.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.value.2842654150.over.999877839.last", "180s"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.value.2842654150.over.999877839.using", "average"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.value.2842654150.max_value", "300"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.then.#", "1"), - // Computed: - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.then..notify.#", "1"), - // resource.TestCheckResourceAttrSet("circonus_rule_set.icmp-latency-alarm", "if.2.then..notify.0"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.2.then..severity", "3"), - - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.value.#", "1"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.value.803690187.over.#", "0"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.value.803690187.max_value", "400"), - // Computed: - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.then..notify.#", "1"), - // resource.TestCheckResourceAttrSet("circonus_rule_set.icmp-latency-alarm", "if.3.then..notify.0"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.then..after", "2400s"), - // resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "if.3.then..severity", "4"), - - 
resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "tags.#", "2"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "tags.2087084518", "author:terraform"), - resource.TestCheckResourceAttr("circonus_rule_set.icmp-latency-alarm", "tags.1401442048", "lifecycle:unittest"), - ), - }, - }, - }) -} - -func testAccCheckDestroyCirconusRuleSet(s *terraform.State) error { - ctxt := testAccProvider.Meta().(*providerContext) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "circonus_rule_set" { - continue - } - - cid := rs.Primary.ID - exists, err := checkRuleSetExists(ctxt, api.CIDType(&cid)) - switch { - case !exists: - // noop - case exists: - return fmt.Errorf("rule set still exists after destroy") - case err != nil: - return fmt.Errorf("Error checking rule set: %v", err) - } - } - - return nil -} - -func checkRuleSetExists(c *providerContext, ruleSetCID api.CIDType) (bool, error) { - rs, err := c.client.FetchRuleSet(ruleSetCID) - if err != nil { - if strings.Contains(err.Error(), defaultCirconus404ErrorString) { - return false, nil - } - - return false, err - } - - if api.CIDType(&rs.CID) == ruleSetCID { - return true, nil - } - - return false, nil -} - -const testAccCirconusRuleSetConfigFmt = ` -variable "test_tags" { - type = "list" - default = [ "author:terraform", "lifecycle:unittest" ] -} - -resource "circonus_contact_group" "test-trigger" { - name = "%s" - tags = [ "${var.test_tags}" ] -} - -resource "circonus_check" "api_latency" { - active = true - name = "%s" - period = "60s" - - collector { - id = "/broker/1" - } - - icmp_ping { - count = 1 - } - - metric { - name = "maximum" - tags = [ "${var.test_tags}" ] - type = "numeric" - unit = "seconds" - } - - tags = [ "${var.test_tags}" ] - target = "api.circonus.com" -} - -resource "circonus_rule_set" "icmp-latency-alarm" { - check = "${circonus_check.api_latency.checks[0]}" - metric_name = "maximum" - // metric_name = 
"${circonus_check.api_latency.metric["maximum"].name}" - // metric_type = "${circonus_check.api_latency.metric["maximum"].type}" - notes = <", v.(string), err) - } - - return fmt.Sprintf("%ds", int(d.Seconds())) - default: - return fmt.Sprintf("", v) - } -} - -func indirect(v interface{}) interface{} { - switch v.(type) { - case string: - return v - case *string: - p := v.(*string) - if p == nil { - return nil - } - return *p - default: - return v - } -} - -func suppressEquivalentTimeDurations(k, old, new string, d *schema.ResourceData) bool { - d1, err := time.ParseDuration(old) - if err != nil { - return false - } - - d2, err := time.ParseDuration(new) - if err != nil { - return false - } - - return d1 == d2 -} - -func suppressWhitespace(v interface{}) string { - return strings.TrimSpace(v.(string)) -} diff --git a/builtin/providers/circonus/validators.go b/builtin/providers/circonus/validators.go deleted file mode 100644 index c98ec2799..000000000 --- a/builtin/providers/circonus/validators.go +++ /dev/null @@ -1,381 +0,0 @@ -package circonus - -import ( - "fmt" - "net/url" - "regexp" - "strings" - "time" - - "github.com/circonus-labs/circonus-gometrics/api" - "github.com/circonus-labs/circonus-gometrics/api/config" - "github.com/hashicorp/errwrap" -) - -var knownCheckTypes map[circonusCheckType]struct{} -var knownContactMethods map[contactMethods]struct{} - -var userContactMethods map[contactMethods]struct{} -var externalContactMethods map[contactMethods]struct{} -var supportedHTTPVersions = validStringValues{"0.9", "1.0", "1.1", "2.0"} -var supportedMetricClusterTypes = validStringValues{ - "average", "count", "counter", "counter2", "counter2_stddev", - "counter_stddev", "derive", "derive2", "derive2_stddev", "derive_stddev", - "histogram", "stddev", "text", -} - -func init() { - checkTypes := []circonusCheckType{ - "caql", "cim", "circonuswindowsagent", "circonuswindowsagent,nad", - "collectd", "composite", "dcm", "dhcp", "dns", "elasticsearch", - "external", 
"ganglia", "googleanalytics", "haproxy", "http", - "http,apache", "httptrap", "imap", "jmx", "json", "json,couchdb", - "json,mongodb", "json,nad", "json,riak", "ldap", "memcached", - "munin", "mysql", "newrelic_rpm", "nginx", "nrpe", "ntp", - "oracle", "ping_icmp", "pop3", "postgres", "redis", "resmon", - "smtp", "snmp", "snmp,momentum", "sqlserver", "ssh2", "statsd", - "tcp", "varnish", "keynote", "keynote_pulse", "cloudwatch", - "ec_console", "mongodb", - } - - knownCheckTypes = make(map[circonusCheckType]struct{}, len(checkTypes)) - for _, k := range checkTypes { - knownCheckTypes[k] = struct{}{} - } - - userMethods := []contactMethods{"email", "sms", "xmpp"} - externalMethods := []contactMethods{"slack"} - - knownContactMethods = make(map[contactMethods]struct{}, len(externalContactMethods)+len(userContactMethods)) - - externalContactMethods = make(map[contactMethods]struct{}, len(externalMethods)) - for _, k := range externalMethods { - knownContactMethods[k] = struct{}{} - externalContactMethods[k] = struct{}{} - } - - userContactMethods = make(map[contactMethods]struct{}, len(userMethods)) - for _, k := range userMethods { - knownContactMethods[k] = struct{}{} - userContactMethods[k] = struct{}{} - } -} - -func validateCheckType(v interface{}, key string) (warnings []string, errors []error) { - if _, ok := knownCheckTypes[circonusCheckType(v.(string))]; !ok { - warnings = append(warnings, fmt.Sprintf("Possibly unsupported check type: %s", v.(string))) - } - - return warnings, errors -} - -func validateCheckCloudWatchDimmensions(v interface{}, key string) (warnings []string, errors []error) { - validDimmensionName := regexp.MustCompile(`^[\S]+$`) - validDimmensionValue := regexp.MustCompile(`^[\S]+$`) - - dimmensions := v.(map[string]interface{}) - for k, vRaw := range dimmensions { - if !validDimmensionName.MatchString(k) { - errors = append(errors, fmt.Errorf("Invalid CloudWatch Dimmension Name specified: %q", k)) - continue - } - - v := vRaw.(string) - if 
!validDimmensionValue.MatchString(v) { - errors = append(errors, fmt.Errorf("Invalid value for CloudWatch Dimmension %q specified: %q", k, v)) - } - } - - return warnings, errors -} - -func validateContactGroup(cg *api.ContactGroup) error { - for i := range cg.Reminders { - if cg.Reminders[i] != 0 && cg.AggregationWindow > cg.Reminders[i] { - return fmt.Errorf("severity %d reminder (%ds) is shorter than the aggregation window (%ds)", i+1, cg.Reminders[i], cg.AggregationWindow) - } - } - - for severityIndex := range cg.Escalations { - switch { - case cg.Escalations[severityIndex] == nil: - continue - case cg.Escalations[severityIndex].After > 0 && cg.Escalations[severityIndex].ContactGroupCID == "", - cg.Escalations[severityIndex].After == 0 && cg.Escalations[severityIndex].ContactGroupCID != "": - return fmt.Errorf("severity %d escalation requires both and %s and %s be set", severityIndex+1, contactEscalateToAttr, contactEscalateAfterAttr) - } - } - - return nil -} - -func validateContactGroupCID(attrName schemaAttr) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - validContactGroupCID := regexp.MustCompile(config.ContactGroupCIDRegex) - - if !validContactGroupCID.MatchString(v.(string)) { - errors = append(errors, fmt.Errorf("Invalid %s specified (%q)", attrName, v.(string))) - } - - return warnings, errors - } -} - -func validateDurationMin(attrName schemaAttr, minDuration string) func(v interface{}, key string) (warnings []string, errors []error) { - var min time.Duration - { - var err error - min, err = time.ParseDuration(minDuration) - if err != nil { - return func(interface{}, string) (warnings []string, errors []error) { - errors = []error{errwrap.Wrapf(fmt.Sprintf("Invalid time +%q: {{err}}", minDuration), err)} - return warnings, errors - } - } - } - - return func(v interface{}, key string) (warnings []string, errors []error) { - d, err := 
time.ParseDuration(v.(string)) - switch { - case err != nil: - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("Invalid %s specified (%q): {{err}}", attrName, v.(string)), err)) - case d < min: - errors = append(errors, fmt.Errorf("Invalid %s specified (%q): minimum value must be %s", attrName, v.(string), min)) - } - - return warnings, errors - } -} - -func validateDurationMax(attrName schemaAttr, maxDuration string) func(v interface{}, key string) (warnings []string, errors []error) { - var max time.Duration - { - var err error - max, err = time.ParseDuration(maxDuration) - if err != nil { - return func(interface{}, string) (warnings []string, errors []error) { - errors = []error{errwrap.Wrapf(fmt.Sprintf("Invalid time +%q: {{err}}", maxDuration), err)} - return warnings, errors - } - } - } - - return func(v interface{}, key string) (warnings []string, errors []error) { - d, err := time.ParseDuration(v.(string)) - switch { - case err != nil: - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("Invalid %s specified (%q): {{err}}", attrName, v.(string)), err)) - case d > max: - errors = append(errors, fmt.Errorf("Invalid %s specified (%q): maximum value must be less than or equal to %s", attrName, v.(string), max)) - } - - return warnings, errors - } -} - -func validateFloatMin(attrName schemaAttr, min float64) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - if v.(float64) < min { - errors = append(errors, fmt.Errorf("Invalid %s specified (%f): minimum value must be %f", attrName, v.(float64), min)) - } - - return warnings, errors - } -} - -func validateFloatMax(attrName schemaAttr, max float64) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - if v.(float64) > max { - errors = append(errors, fmt.Errorf("Invalid %s specified (%f): maximum value must be %f", 
attrName, v.(float64), max)) - } - - return warnings, errors - } -} - -// validateFuncs takes a list of functions and runs them in serial until either -// a warning or error is returned from the first validation function argument. -func validateFuncs(fns ...func(v interface{}, key string) (warnings []string, errors []error)) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - for _, fn := range fns { - warnings, errors = fn(v, key) - if len(warnings) > 0 || len(errors) > 0 { - break - } - } - return warnings, errors - } -} - -func validateHTTPHeaders(v interface{}, key string) (warnings []string, errors []error) { - validHTTPHeader := regexp.MustCompile(`.+`) - validHTTPValue := regexp.MustCompile(`.+`) - - headers := v.(map[string]interface{}) - for k, vRaw := range headers { - if !validHTTPHeader.MatchString(k) { - errors = append(errors, fmt.Errorf("Invalid HTTP Header specified: %q", k)) - continue - } - - v := vRaw.(string) - if !validHTTPValue.MatchString(v) { - errors = append(errors, fmt.Errorf("Invalid value for HTTP Header %q specified: %q", k, v)) - } - } - - return warnings, errors -} - -func validateGraphAxisOptions(v interface{}, key string) (warnings []string, errors []error) { - axisOptionsMap := v.(map[string]interface{}) - validOpts := map[schemaAttr]struct{}{ - graphAxisLogarithmicAttr: struct{}{}, - graphAxisMaxAttr: struct{}{}, - graphAxisMinAttr: struct{}{}, - } - - for k := range axisOptionsMap { - if _, ok := validOpts[schemaAttr(k)]; !ok { - errors = append(errors, fmt.Errorf("Invalid axis option specified: %q", k)) - continue - } - } - - return warnings, errors -} - -func validateIntMin(attrName schemaAttr, min int) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - if v.(int) < min { - errors = append(errors, fmt.Errorf("Invalid %s specified 
(%d): minimum value must be %d", attrName, v.(int), min)) - } - - return warnings, errors - } -} - -func validateIntMax(attrName schemaAttr, max int) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - if v.(int) > max { - errors = append(errors, fmt.Errorf("Invalid %s specified (%d): maximum value must be %d", attrName, v.(int), max)) - } - - return warnings, errors - } -} - -func validateMetricType(v interface{}, key string) (warnings []string, errors []error) { - value := v.(string) - switch value { - case "caql", "composite", "histogram", "numeric", "text": - default: - errors = append(errors, fmt.Errorf("unsupported metric type %s", value)) - } - - return warnings, errors -} - -func validateRegexp(attrName schemaAttr, reString string) func(v interface{}, key string) (warnings []string, errors []error) { - re := regexp.MustCompile(reString) - - return func(v interface{}, key string) (warnings []string, errors []error) { - if !re.MatchString(v.(string)) { - errors = append(errors, fmt.Errorf("Invalid %s specified (%q): regexp failed to match string", attrName, v.(string))) - } - - return warnings, errors - } -} - -func validateTag(v interface{}, key string) (warnings []string, errors []error) { - tag := v.(string) - if !strings.ContainsRune(tag, ':') { - errors = append(errors, fmt.Errorf("tag %q is missing a category", tag)) - } - - return warnings, errors -} - -func validateUserCID(attrName string) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - valid := regexp.MustCompile(config.UserCIDRegex) - - if !valid.MatchString(v.(string)) { - errors = append(errors, fmt.Errorf("Invalid %s specified (%q)", attrName, v.(string))) - } - - return warnings, errors - } -} - -type urlParseFlags int - -const ( - urlIsAbs urlParseFlags = 1 << iota - urlOptional - urlWithoutPath 
- urlWithoutPort - urlWithoutSchema -) - -const urlBasicCheck urlParseFlags = 0 - -func validateHTTPURL(attrName schemaAttr, checkFlags urlParseFlags) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - s := v.(string) - if checkFlags&urlOptional != 0 && s == "" { - return warnings, errors - } - - u, err := url.Parse(v.(string)) - switch { - case err != nil: - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("Invalid %s specified (%q): {{err}}", attrName, v.(string)), err)) - case u.Host == "": - errors = append(errors, fmt.Errorf("Invalid %s specified: host can not be empty", attrName)) - case !(u.Scheme == "http" || u.Scheme == "https"): - errors = append(errors, fmt.Errorf("Invalid %s specified: scheme unsupported (only support http and https)", attrName)) - } - - if checkFlags&urlIsAbs != 0 && !u.IsAbs() { - errors = append(errors, fmt.Errorf("Schema is missing from URL %q (HINT: https://%s)", v.(string), v.(string))) - } - - if checkFlags&urlWithoutSchema != 0 && u.IsAbs() { - errors = append(errors, fmt.Errorf("Schema is present on URL %q (HINT: drop the https://%s)", v.(string), v.(string))) - } - - if checkFlags&urlWithoutPath != 0 && u.Path != "" { - errors = append(errors, fmt.Errorf("Path is present on URL %q (HINT: drop the %s)", v.(string), u.Path)) - } - - if checkFlags&urlWithoutPort != 0 { - hostParts := strings.SplitN(u.Host, ":", 2) - if len(hostParts) != 1 { - errors = append(errors, fmt.Errorf("Port is present on URL %q (HINT: drop the :%s)", v.(string), hostParts[1])) - } - } - - return warnings, errors - } -} - -func validateStringIn(attrName schemaAttr, valid validStringValues) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - s := v.(string) - var found bool - for i := range valid { - if s == string(valid[i]) { - found = true - break - } - } - 
- if !found { - errors = append(errors, fmt.Errorf("Invalid %q specified: %q not found in list %#v", string(attrName), s, valid)) - } - - return warnings, errors - } -} diff --git a/builtin/providers/clc/provider.go b/builtin/providers/clc/provider.go deleted file mode 100644 index 7febf2a62..000000000 --- a/builtin/providers/clc/provider.go +++ /dev/null @@ -1,228 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "strconv" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/api" - "github.com/CenturyLinkCloud/clc-sdk/group" - "github.com/CenturyLinkCloud/clc-sdk/server" - "github.com/CenturyLinkCloud/clc-sdk/status" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider implements ResourceProvider for CLC -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLC_USERNAME", nil), - Description: "Your CLC username", - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLC_PASSWORD", nil), - Description: "Your CLC password", - }, - "account": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CLC_ACCOUNT", ""), - Description: "Account alias override", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "clc_server": resourceCLCServer(), - "clc_group": resourceCLCGroup(), - "clc_public_ip": resourceCLCPublicIP(), - "clc_load_balancer": resourceCLCLoadBalancer(), - "clc_load_balancer_pool": resourceCLCLoadBalancerPool(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - un := d.Get("username").(string) - pw := d.Get("password").(string) - - config, err := api.NewConfig(un, pw) - if err != nil { - return nil, 
fmt.Errorf("Failed to create CLC config with provided details: %v", err) - } - config.UserAgent = fmt.Sprintf("terraform-clc terraform/%s", terraform.Version) - // user requested alias override or sub-account - if al := d.Get("account").(string); al != "" { - config.Alias = al - } - - client := clc.New(config) - if err := client.Authenticate(); err != nil { - return nil, fmt.Errorf("Failed authenticated with provided credentials: %v", err) - } - - alerts, err := client.Alert.GetAll() - if err != nil { - return nil, fmt.Errorf("Failed to connect to the CLC api because %s", err) - } - for _, a := range alerts.Items { - log.Printf("[WARN] Received alert: %v", a) - } - return client, nil -} - -// package utility functions - -func waitStatus(client *clc.Client, id string) error { - // block until queue is processed and server is up - poll := make(chan *status.Response, 1) - err := client.Status.Poll(id, poll) - if err != nil { - return nil - } - status := <-poll - log.Printf("[DEBUG] status %v", status) - if status.Failed() { - return fmt.Errorf("unsuccessful job %v failed with status: %v", id, status.Status) - } - return nil -} - -func dcGroups(dcname string, client *clc.Client) (map[string]string, error) { - dc, _ := client.DC.Get(dcname) - _, id := dc.Links.GetID("group") - m := map[string]string{} - resp, _ := client.Group.Get(id) - m[resp.Name] = resp.ID // top - m[resp.ID] = resp.ID - for _, x := range resp.Groups { - deepGroups(x, &m) - } - return m, nil -} - -func deepGroups(g group.Groups, m *map[string]string) { - (*m)[g.Name] = g.ID - (*m)[g.ID] = g.ID - for _, sg := range g.Groups { - deepGroups(sg, m) - } -} - -// resolveGroupByNameOrId takes a reference to a group (either name or guid) -// and returns the guid of the group -func resolveGroupByNameOrId(ref, dc string, client *clc.Client) (string, error) { - m, err := dcGroups(dc, client) - if err != nil { - return "", fmt.Errorf("Failed pulling groups in location %v - %v", dc, err) - } - if id, ok := 
m[ref]; ok { - return id, nil - } - return "", fmt.Errorf("Failed resolving group '%v' in location %v", ref, dc) -} - -func stateFromString(st string) server.PowerState { - switch st { - case "on", "started": - return server.On - case "off", "stopped": - return server.Off - case "pause", "paused": - return server.Pause - case "reboot": - return server.Reboot - case "reset": - return server.Reset - case "shutdown": - return server.ShutDown - case "start_maintenance": - return server.StartMaintenance - case "stop_maintenance": - return server.StopMaintenance - } - return -1 -} - -func parseCustomFields(d *schema.ResourceData) ([]api.Customfields, error) { - var fields []api.Customfields - if v := d.Get("custom_fields"); v != nil { - for _, v := range v.([]interface{}) { - m := v.(map[string]interface{}) - f := api.Customfields{ - ID: m["id"].(string), - Value: m["value"].(string), - } - fields = append(fields, f) - } - } - return fields, nil -} - -func parseAdditionalDisks(d *schema.ResourceData) ([]server.Disk, error) { - // some complexity here: create has a different format than update - // on-create: { path, sizeGB, type } - // on-update: { diskId, sizeGB, (path), (type=partitioned) } - var disks []server.Disk - if v := d.Get("additional_disks"); v != nil { - for _, v := range v.([]interface{}) { - m := v.(map[string]interface{}) - ty := m["type"].(string) - var pa string - if nil != m["path"] { - pa = m["path"].(string) - } - sz, err := strconv.Atoi(m["size_gb"].(string)) - if err != nil { - log.Printf("[WARN] Failed parsing size '%v'. skipping", m["size_gb"]) - return nil, fmt.Errorf("Unable to parse %v as int", m["size_gb"]) - } - if ty != "raw" && ty != "partitioned" { - return nil, fmt.Errorf("Expected type of { raw | partitioned }. 
received %v", ty) - } - if ty == "raw" && pa != "" { - return nil, fmt.Errorf("Path can not be specified for raw disks") - } - disk := server.Disk{ - SizeGB: sz, - Type: ty, - } - if pa != "" { - disk.Path = pa - } - disks = append(disks, disk) - } - } - return disks, nil -} - -func parsePackages(d *schema.ResourceData) ([]server.Package, error) { - var pkgs []server.Package - if e := d.Get("packages"); e != nil { - for _, e := range e.([]interface{}) { - m := e.(map[string]interface{}) - id := m["id"].(string) - delete(m, "id") - ms := make(map[string]string) - for k, v := range m { - if s, ok := v.(string); ok { - ms[k] = s - } - } - p := server.Package{ - ID: id, - Params: ms, - } - pkgs = append(pkgs, p) - } - } - return pkgs, nil -} diff --git a/builtin/providers/clc/provider_test.go b/builtin/providers/clc/provider_test.go deleted file mode 100644 index 35c0b6c8a..000000000 --- a/builtin/providers/clc/provider_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package clc - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -const testAccDC = "IL1" - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "clc": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CLC_USERNAME"); v == "" { - t.Fatal("CLC_USERNAME must be set for acceptance tests") - } - if v := os.Getenv("CLC_PASSWORD"); v == "" { - t.Fatal("CLC_PASSWORD must be set for acceptance tests") - } -} diff --git a/builtin/providers/clc/resource_clc_group.go 
b/builtin/providers/clc/resource_clc_group.go deleted file mode 100644 index dff3bc7e9..000000000 --- a/builtin/providers/clc/resource_clc_group.go +++ /dev/null @@ -1,163 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "time" - - "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/api" - "github.com/CenturyLinkCloud/clc-sdk/group" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCLCGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceCLCGroupCreate, - Read: resourceCLCGroupRead, - Update: resourceCLCGroupUpdate, - Delete: resourceCLCGroupDelete, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - "parent": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "location_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "parent_group_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "custom_fields": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - }, - } -} - -func resourceCLCGroupCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - name := d.Get("name").(string) - desc := d.Get("description").(string) - parent := d.Get("parent").(string) - dc := d.Get("location_id").(string) - - // clc doesn't enforce uniqueness by name - // so skip the trad'l error we'd raise - e, err := resolveGroupByNameOrId(name, dc, client) - if e != "" { - log.Printf("[INFO] Resolved existing group: %v => %v", name, e) - d.SetId(e) - return nil - } - - var pgid string - p, err := resolveGroupByNameOrId(parent, dc, client) - if p != "" { - log.Printf("[INFO] Resolved parent group: %v => %v", parent, p) - pgid = p - } else { - return fmt.Errorf("Failed resolving 
parent group %s - %s err:%s", parent, p, err) - } - - d.Set("parent_group_id", pgid) - spec := group.Group{ - Name: name, - Description: desc, - ParentGroupID: pgid, - } - resp, err := client.Group.Create(spec) - if err != nil { - return fmt.Errorf("Failed creating group: %s", err) - } - log.Println("[INFO] Group created") - d.SetId(resp.ID) - return nil -} - -func resourceCLCGroupRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - id := d.Id() - g, err := client.Group.Get(id) - if err != nil { - log.Printf("[INFO] Failed finding group: %s - %s. Marking destroyed", id, err) - d.SetId("") - return nil - } - d.Set("name", g.Name) - d.Set("description", g.Description) - d.Set("parent_group_id", g.ParentGroupID()) - return nil -} - -func resourceCLCGroupUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - id := d.Id() - var err error - var patches []api.Update - - g, err := client.Group.Get(id) - if err != nil { - return fmt.Errorf("Failed fetching group: %v - %v", id, err) - } - - if delta, orig := d.Get("name").(string), g.Name; delta != orig { - patches = append(patches, group.UpdateName(delta)) - } - if delta, orig := d.Get("description").(string), g.Description; delta != orig { - patches = append(patches, group.UpdateDescription(delta)) - } - newParent := d.Get("parent").(string) - pgid, err := resolveGroupByNameOrId(newParent, g.Locationid, client) - log.Printf("[DEBUG] PARENT current:%v new:%v resolved:%v", g.ParentGroupID(), newParent, pgid) - if pgid == "" { - return fmt.Errorf("Unable to resolve parent group %v: %v", newParent, err) - } else if newParent != g.ParentGroupID() { - patches = append(patches, group.UpdateParentGroupID(pgid)) - } - - if len(patches) == 0 { - return nil - } - err = client.Group.Update(id, patches...) 
- if err != nil { - return fmt.Errorf("Failed updating group %v: %v", id, err) - } - return resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := client.Group.Get(id) - if err != nil { - return resource.RetryableError(err) - } - err = resourceCLCGroupRead(d, meta) - if err != nil { - return resource.NonRetryableError(err) - } - return nil - }) -} - -func resourceCLCGroupDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - id := d.Id() - log.Printf("[INFO] Deleting group %v", id) - st, err := client.Group.Delete(id) - if err != nil { - return fmt.Errorf("Failed deleting group: %v with err: %v", id, err) - } - waitStatus(client, st.ID) - return nil -} diff --git a/builtin/providers/clc/resource_clc_group_test.go b/builtin/providers/clc/resource_clc_group_test.go deleted file mode 100644 index 20120fe4d..000000000 --- a/builtin/providers/clc/resource_clc_group_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package clc - -import ( - "fmt" - "testing" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/group" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// things to test: -// resolves to existing group -// does not nuke a group w/ no parents (root group) -// change a name on a group - -func TestAccGroupBasic(t *testing.T) { - var resp group.Response - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckGroupConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckGroupExists("clc_group.acc_test_group", &resp), - testAccCheckGroupParent(&resp, "Default Group"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "name", "okcomputer"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "location_id", testAccDC), - ), - }, - 
resource.TestStep{ - Config: testAccCheckGroupConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckGroupExists("clc_group.acc_test_group", &resp), - testAccCheckGroupParent(&resp, "Default Group"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "name", "foobar"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "location_id", testAccDC), - ), - }, - resource.TestStep{ - Config: testAccCheckGroupConfigReparent, - Check: resource.ComposeTestCheckFunc( - testAccCheckGroupExists("clc_group.acc_test_group", &resp), - testAccCheckGroupParent(&resp, "reparent"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "name", "foobar"), - resource.TestCheckResourceAttr( - "clc_group.acc_test_group", "location_id", testAccDC), - ), - }, - }, - }) -} - -func testAccCheckGroupDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*clc.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "clc_group" { - continue - } - _, err := client.Group.Get(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Group still exists") - } - } - return nil -} - -func testAccCheckGroupParent(resp *group.Response, expectedName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*clc.Client) - ok, l := resp.Links.GetLink("parentGroup") - if !ok { - return fmt.Errorf("Missing parent group: %v", resp) - } - parent, err := client.Group.Get(l.ID) - if err != nil { - return fmt.Errorf("Failed fetching parent %v: %v", l.ID, err) - } - if parent.Name != expectedName { - return fmt.Errorf("Incorrect parent found:'%v' expected:'%v'", parent.Name, expectedName) - } - // would be good to test parent but we'd have to make a bunch of calls - return nil - } -} - -func testAccCheckGroupExists(n string, resp *group.Response) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not 
found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Group ID is set") - } - - client := testAccProvider.Meta().(*clc.Client) - g, err := client.Group.Get(rs.Primary.ID) - if err != nil { - return err - } - - if g.ID != rs.Primary.ID { - return fmt.Errorf("Group not found") - } - *resp = *g - return nil - } -} - -const testAccCheckGroupConfigBasic = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group" { - location_id = "${var.dc}" - name = "okcomputer" - description = "mishaps happening" - parent = "Default Group" -}` - -const testAccCheckGroupConfigUpdate = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group" { - location_id = "${var.dc}" - name = "foobar" - description = "update test" - parent = "Default Group" -}` - -const testAccCheckGroupConfigReparent = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_reparent" { - location_id = "${var.dc}" - name = "reparent" - description = "introduce a parent group in place" - parent = "Default Group" -} - -resource "clc_group" "acc_test_group" { - location_id = "${var.dc}" - name = "foobar" - description = "update test" - parent = "${clc_group.acc_test_group_reparent.id}" -} -` diff --git a/builtin/providers/clc/resource_clc_load_balancer.go b/builtin/providers/clc/resource_clc_load_balancer.go deleted file mode 100644 index 0f0017506..000000000 --- a/builtin/providers/clc/resource_clc_load_balancer.go +++ /dev/null @@ -1,130 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "time" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/lb" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCLCLoadBalancer() *schema.Resource { - return &schema.Resource{ - Create: resourceCLCLoadBalancerCreate, - Read: resourceCLCLoadBalancerRead, - Update: resourceCLCLoadBalancerUpdate, - Delete: resourceCLCLoadBalancerDelete, - Schema: 
map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "data_center": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - // optional - "status": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "enabled", - }, - // computed - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCLCLoadBalancerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - name := d.Get("name").(string) - desc := d.Get("description").(string) - status := d.Get("status").(string) - r1 := lb.LoadBalancer{ - Name: name, - Description: desc, - Status: status, - } - l, err := client.LB.Create(dc, r1) - if err != nil { - return fmt.Errorf("Failed creating load balancer under %v/%v: %v", dc, name, err) - } - d.SetId(l.ID) - return resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := client.LB.Get(dc, l.ID) - if err != nil { - return resource.RetryableError(err) - } - err = resourceCLCLoadBalancerRead(d, meta) - if err != nil { - return resource.NonRetryableError(err) - } - return nil - }) -} - -func resourceCLCLoadBalancerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - id := d.Id() - resp, err := client.LB.Get(dc, id) - if err != nil { - log.Printf("[INFO] Failed finding load balancer %v/%v. 
Marking destroyed", dc, id) - d.SetId("") - return nil - } - d.Set("description", resp.Description) - d.Set("ip_address", resp.IPaddress) - d.Set("status", resp.Status) - d.Set("pools", resp.Pools) - d.Set("links", resp.Links) - return nil -} - -func resourceCLCLoadBalancerUpdate(d *schema.ResourceData, meta interface{}) error { - update := lb.LoadBalancer{} - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - id := d.Id() - - if d.HasChange("name") { - update.Name = d.Get("name").(string) - } - if d.HasChange("description") { - update.Description = d.Get("description").(string) - } - if d.HasChange("status") { - update.Status = d.Get("status").(string) - } - if update.Name != "" || update.Description != "" || update.Status != "" { - update.Name = d.Get("name").(string) // required on every PUT - err := client.LB.Update(dc, id, update) - if err != nil { - return fmt.Errorf("Failed updating load balancer under %v/%v: %v", dc, id, err) - } - } - return resourceCLCLoadBalancerRead(d, meta) -} - -func resourceCLCLoadBalancerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - id := d.Id() - err := client.LB.Delete(dc, id) - if err != nil { - return fmt.Errorf("Failed deleting loadbalancer %v: %v", id, err) - } - return nil -} diff --git a/builtin/providers/clc/resource_clc_load_balancer_pool.go b/builtin/providers/clc/resource_clc_load_balancer_pool.go deleted file mode 100644 index 4e5936cde..000000000 --- a/builtin/providers/clc/resource_clc_load_balancer_pool.go +++ /dev/null @@ -1,179 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "strconv" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/lb" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCLCLoadBalancerPool() *schema.Resource { - return &schema.Resource{ - Create: resourceCLCLoadBalancerPoolCreate, - Read: resourceCLCLoadBalancerPoolRead, - Update: 
resourceCLCLoadBalancerPoolUpdate, - Delete: resourceCLCLoadBalancerPoolDelete, - Schema: map[string]*schema.Schema{ - // pool args - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "data_center": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "load_balancer": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "roundRobin", - }, - "persistence": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "standard", - }, - "nodes": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - }, - } -} - -func resourceCLCLoadBalancerPoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - lbid := d.Get("load_balancer").(string) - - s1 := d.Get("method").(string) - m := lb.LeastConn - if s1 == string(lb.RoundRobin) { - m = lb.RoundRobin - } - s2 := d.Get("persistence").(string) - p := lb.Standard - if s2 == string(lb.Sticky) { - p = lb.Sticky - } - r2 := lb.Pool{ - Port: d.Get("port").(int), - Method: m, - Persistence: p, - } - lbp, err := client.LB.CreatePool(dc, lbid, r2) - if err != nil { - return fmt.Errorf("Failed creating pool under %v/%v: %v", dc, lbid, err) - } - d.SetId(lbp.ID) - return resourceCLCLoadBalancerPoolUpdate(d, meta) -} - -func resourceCLCLoadBalancerPoolRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - lbid := d.Get("load_balancer").(string) - id := d.Id() - pool, err := client.LB.GetPool(dc, lbid, id) - if err != nil { - log.Printf("[INFO] Failed fetching pool %v/%v. 
Marking destroyed", lbid, d.Id()) - d.SetId("") - return nil - } - nodes, err := client.LB.GetAllNodes(dc, lbid, id) - nodes2 := make([]lb.Node, len(nodes)) - for i, n := range nodes { - nodes2[i] = *n - } - pool.Nodes = nodes2 - d.Set("port", pool.Port) - d.Set("method", pool.Method) - d.Set("persistence", pool.Persistence) - d.Set("nodes", pool.Nodes) - d.Set("links", pool.Links) - return nil -} - -func resourceCLCLoadBalancerPoolUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - lbid := d.Get("load_balancer").(string) - id := d.Id() - pool, err := client.LB.GetPool(dc, lbid, d.Id()) - pool.Port = 0 // triggers empty value => omission from POST - - if d.HasChange("method") { - d.SetPartial("method") - pool.Method = lb.Method(d.Get("method").(string)) - } - if d.HasChange("persistence") { - d.SetPartial("persistence") - pool.Persistence = lb.Persistence(d.Get("persistence").(string)) - } - err = client.LB.UpdatePool(dc, lbid, id, *pool) - if err != nil { - return fmt.Errorf("Failed updating pool %v: %v", id, err) - } - - if d.HasChange("nodes") { - d.SetPartial("nodes") - nodes, err := parseNodes(d) - if err != nil { - return err - } - err = client.LB.UpdateNodes(dc, lbid, id, nodes...) 
- if err != nil { - return fmt.Errorf("Failed updating pool nodes %v: %v", id, err) - } - } - return resourceCLCLoadBalancerPoolRead(d, meta) -} - -func resourceCLCLoadBalancerPoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - dc := d.Get("data_center").(string) - lbid := d.Get("load_balancer").(string) - id := d.Id() - err := client.LB.DeletePool(dc, lbid, id) - if err != nil { - return fmt.Errorf("Failed deleting pool %v: %v", id, err) - } - return nil -} - -func parseNodes(d *schema.ResourceData) ([]lb.Node, error) { - var nodes []lb.Node - raw := d.Get("nodes") - if raw == nil { - log.Println("WARNING: pool missing nodes") - return nil, nil - } - if arr, ok := raw.([]interface{}); ok { - for _, v := range arr { - m := v.(map[string]interface{}) - p, err := strconv.Atoi(m["privatePort"].(string)) - if err != nil { - log.Printf("[WARN] Failed parsing port '%v'. skipping", m["privatePort"]) - continue - } - n := lb.Node{ - Status: m["status"].(string), - IPaddress: m["ipAddress"].(string), - PrivatePort: p, - } - nodes = append(nodes, n) - } - } else { - return nil, fmt.Errorf("Failed parsing nodes from pool spec: %v", raw) - } - return nodes, nil -} diff --git a/builtin/providers/clc/resource_clc_load_balancer_pool_test.go b/builtin/providers/clc/resource_clc_load_balancer_pool_test.go deleted file mode 100644 index e231e1f93..000000000 --- a/builtin/providers/clc/resource_clc_load_balancer_pool_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package clc - -import ( - "fmt" - "testing" - - clc "github.com/CenturyLinkCloud/clc-sdk" - lb "github.com/CenturyLinkCloud/clc-sdk/lb" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// things to test: -// basic create/delete -// update nodes -// works for 80 and 443 together - -func TestAccLBPoolBasic(t *testing.T) { - var pool lb.Pool - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckLBPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckLBPConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBPExists("clc_load_balancer_pool.acc_test_pool", &pool), - resource.TestCheckResourceAttr("clc_load_balancer_pool.acc_test_pool", "port", "80"), - ), - }, - resource.TestStep{ - Config: testAccCheckLBPConfigUpdates, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBPExists("clc_load_balancer_pool.acc_test_pool", &pool), - resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lbp", "description", "description modified"), - resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lbp", "status", "disabled"), - resource.TestCheckResourceAttr("clc_load_balancer_pool.acc_test_pool", "nodes.0.privatePort", "8080"), - ), - }, - }, - }) -} - -func testAccCheckLBPDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*clc.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "clc_load_balancer_pool" { - continue - } - lbid := rs.Primary.Attributes["load_balancer"] - if _, err := client.LB.Get(testAccDC, rs.Primary.ID); err != nil { - return nil // parent LB already gone - } - if _, err := client.LB.GetPool(testAccDC, lbid, rs.Primary.ID); err == nil { - return fmt.Errorf("LB still exists") - } - } - return nil -} - -func testAccCheckLBPExists(n string, resp *lb.Pool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - lbid := rs.Primary.Attributes["load_balancer"] - client := testAccProvider.Meta().(*clc.Client) - p, err := client.LB.GetPool(testAccDC, lbid, rs.Primary.ID) - if err != nil { - return err - } - if p.ID != rs.Primary.ID { - return fmt.Errorf("Pool not found") - } - *resp = *p - return nil - } -} - -const testAccCheckLBPConfigBasic = ` 
-variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_lbp_group" { - location_id = "${var.dc}" - name = "acc_test_lbp_group" - parent = "Default Group" -} - -# need a server here because we need to reference an ip owned by this account -resource "clc_server" "acc_test_lbp_server" { - name_template = "node" - description = "load balanced" - source_server_id = "UBUNTU-14-64-TEMPLATE" - type = "standard" - group_id = "${clc_group.acc_test_lbp_group.id}" - cpu = 1 - memory_mb = 1024 - password = "Green123$" - power_state = "started" - -} - -resource "clc_load_balancer" "acc_test_lbp" { - data_center = "${var.dc}" - name = "acc_test_lb" - description = "load balancer test" - status = "enabled" - depends_on = ["clc_server.acc_test_lbp_server"] -} - -resource "clc_load_balancer_pool" "acc_test_pool" { - port = 80 - data_center = "${var.dc}" - load_balancer = "${clc_load_balancer.acc_test_lbp.id}" - nodes - { - status = "enabled" - ipAddress = "${clc_server.acc_test_lbp_server.private_ip_address}" - privatePort = 80 - } - depends_on = ["clc_server.acc_test_lbp_server"] -} -` - -const testAccCheckLBPConfigUpdates = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_lbp_group" { - location_id = "${var.dc}" - name = "acc_test_lbp_group" - parent = "Default Group" -} - -# need a server here because we need to reference an ip owned by this account -resource "clc_server" "acc_test_lbp_server" { - name_template = "node" - description = "load balanced" - source_server_id = "UBUNTU-14-64-TEMPLATE" - type = "standard" - group_id = "${clc_group.acc_test_lbp_group.id}" - cpu = 1 - memory_mb = 1024 - password = "Green123$" - power_state = "started" - -} - -resource "clc_load_balancer" "acc_test_lbp" { - data_center = "${var.dc}" - name = "acc_test_lb" - description = "description modified" - status = "disabled" - depends_on = ["clc_server.acc_test_lbp_server"] -} - -resource "clc_load_balancer_pool" "acc_test_pool" { - port = 80 - data_center = 
"${var.dc}" - load_balancer = "${clc_load_balancer.acc_test_lbp.id}" - nodes - { - status = "enabled" - ipAddress = "${clc_server.acc_test_lbp_server.private_ip_address}" - privatePort = 8080 - } - depends_on = ["clc_server.acc_test_lbp_server"] -} -` diff --git a/builtin/providers/clc/resource_clc_load_balancer_test.go b/builtin/providers/clc/resource_clc_load_balancer_test.go deleted file mode 100644 index 9bfc9d335..000000000 --- a/builtin/providers/clc/resource_clc_load_balancer_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package clc - -// clc_load_balancer covered by clc_load_balancer_pool tests: resource_clc_load_balancer_pool_test.go diff --git a/builtin/providers/clc/resource_clc_public_ip.go b/builtin/providers/clc/resource_clc_public_ip.go deleted file mode 100644 index fa54bc650..000000000 --- a/builtin/providers/clc/resource_clc_public_ip.go +++ /dev/null @@ -1,193 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "strconv" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/server" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCLCPublicIP() *schema.Resource { - return &schema.Resource{ - Create: resourceCLCPublicIPCreate, - Read: resourceCLCPublicIPRead, - Update: resourceCLCPublicIPUpdate, - Delete: resourceCLCPublicIPDelete, - Schema: map[string]*schema.Schema{ - "server_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "internal_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Default: nil, - }, - "ports": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - "source_restrictions": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - }, - } -} - -func resourceCLCPublicIPCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - sid := d.Get("server_id").(string) - 
priv := d.Get("internal_ip_address").(string) - ports, sources := parseIPSpec(d) - req := server.PublicIP{ - Ports: *ports, - SourceRestrictions: *sources, - } - - // since the API doesn't tell us the public IP it allocated, - // track what was added after the call. - ips := make(map[string]string) - prev, err := client.Server.Get(sid) - if err != nil { - return fmt.Errorf("Failed finding server %v: %v", sid, err) - } - for _, i := range prev.Details.IPaddresses { - ips[i.Internal] = i.Public - } - - if priv != "" { - // use existing private ip - if _, present := ips[priv]; !present { - return fmt.Errorf("Failed finding internal ip to use %v", priv) - } - req.InternalIP = priv - } - // execute the request - resp, err := client.Server.AddPublicIP(sid, req) - if err != nil { - return fmt.Errorf("Failed reserving public ip: %v", err) - } - err = waitStatus(client, resp.ID) - if err != nil { - return err - } - - server, err := client.Server.Get(sid) - if err != nil { - return fmt.Errorf("Failed refreshing server for public ip: %v", err) - } - for _, i := range server.Details.IPaddresses { - if priv != "" && i.Internal == priv { - // bind - log.Printf("[DEBUG] Public IP bound on existing internal:%v - %v", i.Internal, i.Public) - d.SetId(i.Public) - break - } else if ips[i.Internal] == "" && i.Public != "" { - // allocate - log.Printf("[DEBUG] Public IP allocated on new internal:%v - %v", i.Internal, i.Public) - d.SetId(i.Public) - break - } - } - return resourceCLCPublicIPRead(d, meta) -} - -func resourceCLCPublicIPRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - pip := d.Id() - s := d.Get("server_id").(string) - resp, err := client.Server.GetPublicIP(s, pip) - if err != nil { - log.Printf("[INFO] Failed finding public ip: %v. 
Marking destroyed", d.Id()) - d.SetId("") - return nil - } - - d.Set("internal_ip_address", resp.InternalIP) - d.Set("ports", resp.Ports) - d.Set("source_restrictions", resp.SourceRestrictions) - return nil -} - -func resourceCLCPublicIPUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - ip := d.Id() - sid := d.Get("server_id").(string) - if d.HasChange("ports") || d.HasChange("source_restrictions") { - ports, sources := parseIPSpec(d) - req := server.PublicIP{ - Ports: *ports, - SourceRestrictions: *sources, - } - resp, err := client.Server.UpdatePublicIP(sid, ip, req) - if err != nil { - return fmt.Errorf("Failed updating public ip: %v", err) - } - err = waitStatus(client, resp.ID) - if err != nil { - return err - } - log.Printf("[INFO] Successfully updated %v with %v", ip, req) - } - return nil -} - -func resourceCLCPublicIPDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - s := d.Get("server_id").(string) - ip := d.Id() - log.Printf("[INFO] Deleting public ip %v", ip) - resp, err := client.Server.DeletePublicIP(s, ip) - if err != nil { - return fmt.Errorf("Failed deleting public ip: %v", err) - } - err = waitStatus(client, resp.ID) - if err != nil { - return err - } - log.Printf("[INFO] Public IP sucessfully deleted: %v", ip) - return nil -} - -func parseIPSpec(d *schema.ResourceData) (*[]server.Port, *[]server.SourceRestriction) { - var ports []server.Port - var sources []server.SourceRestriction - if v := d.Get("ports"); v != nil { - for _, v := range v.([]interface{}) { - m := v.(map[string]interface{}) - p := server.Port{} - port, err := strconv.Atoi(m["port"].(string)) - if err != nil { - log.Printf("[WARN] Failed parsing port '%v'. 
skipping", m["port"]) - continue - } - p.Protocol = m["protocol"].(string) - p.Port = port - through := -1 - if to := m["port_to"]; to != nil { - through, _ = strconv.Atoi(to.(string)) - log.Printf("[DEBUG] port range: %v-%v", port, through) - p.PortTo = through - } - ports = append(ports, p) - } - } - if v := d.Get("source_restrictions"); v != nil { - for _, v := range v.([]interface{}) { - m := v.(map[string]interface{}) - r := server.SourceRestriction{} - r.CIDR = m["cidr"].(string) - sources = append(sources, r) - } - } - return &ports, &sources -} diff --git a/builtin/providers/clc/resource_clc_public_ip_test.go b/builtin/providers/clc/resource_clc_public_ip_test.go deleted file mode 100644 index 3599c3007..000000000 --- a/builtin/providers/clc/resource_clc_public_ip_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package clc - -import ( - "fmt" - "testing" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/server" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// things to test: -// maps to internal specified ip -// port range -// update existing rule -// CIDR restriction - -func TestAccPublicIPBasic(t *testing.T) { - var resp server.PublicIP - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPublicIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckPublicIPConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckPublicIPExists("clc_public_ip.acc_test_public_ip", &resp), - testAccCheckPublicIPNIC("clc_public_ip.acc_test_public_ip", &resp), - testAccCheckPublicIPPortRange("clc_public_ip.acc_test_public_ip", &resp), - testAccCheckPublicIPBlockCIDR("clc_public_ip.acc_test_public_ip", &resp), - //testAccCheckPublicIPUpdated("clc_public_ip.eip", &resp), - ), - }, - }, - }) -} - -func testAccCheckPublicIPDestroy(s *terraform.State) error { - client := 
testAccProvider.Meta().(*clc.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "clc_public_ip" { - continue - } - sid := rs.Primary.Attributes["server_id"] - _, err := client.Server.GetPublicIP(sid, rs.Primary.ID) - if err == nil { - return fmt.Errorf("IP still exists") - } - } - return nil - -} - -func testAccCheckPublicIPExists(n string, resp *server.PublicIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No PublicIP ID is set") - } - client := testAccProvider.Meta().(*clc.Client) - sid := rs.Primary.Attributes["server_id"] - p, err := client.Server.GetPublicIP(sid, rs.Primary.ID) - if err != nil { - return err - } - *resp = *p - return nil - } -} - -func testAccCheckPublicIPPortRange(n string, resp *server.PublicIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - // check the passed port range made it through - var spec server.Port - for _, p := range resp.Ports { - if p.Protocol == "UDP" { - spec = p - break - } - } - if spec.Port != 53 || spec.PortTo != 55 { - return fmt.Errorf("Expected udp ports from 53-55 but found: %v", spec) - } - return nil - } -} -func testAccCheckPublicIPBlockCIDR(n string, resp *server.PublicIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - // check the passed port range made it through - spec := resp.SourceRestrictions[0] - if spec.CIDR != "108.19.67.15/32" { - return fmt.Errorf("Expected cidr restriction but found: %v", spec) - } - return nil - } -} - -func testAccCheckPublicIPNIC(n string, resp *server.PublicIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - sid := rs.Primary.Attributes["server_id"] - nic := rs.Primary.Attributes["internal_ip_address"] - - client := 
testAccProvider.Meta().(*clc.Client) - srv, err := client.Server.Get(sid) - if err != nil { - return fmt.Errorf("Failed fetching server? %v", err) - } - first := srv.Details.IPaddresses[0].Internal - if nic != first { - return fmt.Errorf("Expected public ip to be mapped to %s but found: %s", first, nic) - } - return nil - } -} - -var testAccCheckPublicIPConfigBasic = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_ip" { - location_id = "${var.dc}" - name = "acc_test_group_ip" - parent = "Default Group" -} - -resource "clc_server" "acc_test_server" { - name_template = "test" - source_server_id = "UBUNTU-14-64-TEMPLATE" - group_id = "${clc_group.acc_test_group_ip.id}" - cpu = 1 - memory_mb = 1024 - password = "Green123$" -} - -resource "clc_public_ip" "acc_test_public_ip" { - server_id = "${clc_server.acc_test_server.id}" - internal_ip_address = "${clc_server.acc_test_server.private_ip_address}" - source_restrictions - { cidr = "108.19.67.15/32" } - ports - { - protocol = "TCP" - port = 80 - } - ports - { - protocol = "UDP" - port = 53 - port_to = 55 - } -} -` diff --git a/builtin/providers/clc/resource_clc_server.go b/builtin/providers/clc/resource_clc_server.go deleted file mode 100644 index bf1ae11af..000000000 --- a/builtin/providers/clc/resource_clc_server.go +++ /dev/null @@ -1,375 +0,0 @@ -package clc - -import ( - "fmt" - "log" - "strings" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/api" - "github.com/CenturyLinkCloud/clc-sdk/server" - "github.com/CenturyLinkCloud/clc-sdk/status" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCLCServer() *schema.Resource { - return &schema.Resource{ - Create: resourceCLCServerCreate, - Read: resourceCLCServerRead, - Update: resourceCLCServerUpdate, - Delete: resourceCLCServerDelete, - Schema: map[string]*schema.Schema{ - "name_template": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ - 
Type: schema.TypeString, - Computed: true, - }, - "group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "source_server_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cpu": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "memory_mb": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - // optional - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "standard", - ForceNew: true, - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "custom_fields": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - "additional_disks": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - "packages": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - - // optional: misc state storage. 
non-CLC field - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - - // optional - "storage_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "standard", - }, - "aa_policy_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - // optional fields for bareMetal - "configuration_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "os_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // sorta computed - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Default: nil, - }, - "private_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Default: nil, - }, - "power_state": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Default: nil, - }, - - // computed - "created_date": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "modified_date": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "public_ip_address": &schema.Schema{ - // RO: if a public_ip is on this server, populate it - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCLCServerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - spec := server.Server{ - Name: d.Get("name_template").(string), - Password: d.Get("password").(string), - Description: d.Get("description").(string), - GroupID: d.Get("group_id").(string), - CPU: d.Get("cpu").(int), - MemoryGB: d.Get("memory_mb").(int) / 1024, - SourceServerID: d.Get("source_server_id").(string), - Type: d.Get("type").(string), - IPaddress: d.Get("private_ip_address").(string), - NetworkID: d.Get("network_id").(string), - Storagetype: d.Get("storage_type").(string), - AntiAffinityPolicyID: d.Get("aa_policy_id").(string), - } - - var err error - disks, err := parseAdditionalDisks(d) - if err != nil { - 
return fmt.Errorf("Failed parsing disks: %v", err) - } - spec.Additionaldisks = disks - fields, err := parseCustomFields(d) - if err != nil { - return fmt.Errorf("Failed setting customfields: %v", err) - } - spec.Customfields = fields - - pkgs, err := parsePackages(d) - if err != nil { - return fmt.Errorf("Failed setting packages: %v", err) - } - spec.Packages = pkgs - - if spec.Type == "bareMetal" { - // additional bareMetal fields - if conf_id := d.Get("configuration_id").(string); conf_id != "" { - spec.ConfigurationID = conf_id - } - if os_type := d.Get("os_type").(string); os_type != "" { - spec.OSType = os_type - } - } - - resp, err := client.Server.Create(spec) - if err != nil || !resp.IsQueued { - return fmt.Errorf("Failed creating server: %v", err) - } - // server's UUID returned under rel=self link - _, uuid := resp.Links.GetID("self") - - ok, st := resp.GetStatusID() - if !ok { - return fmt.Errorf("Failed extracting status to poll on %v: %v", resp, err) - } - err = waitStatus(client, st) - if err != nil { - return err - } - - s, err := client.Server.Get(uuid) - d.SetId(strings.ToUpper(s.Name)) - log.Printf("[INFO] Server created. id: %v", s.Name) - return resourceCLCServerRead(d, meta) -} - -func resourceCLCServerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - s, err := client.Server.Get(d.Id()) - if err != nil { - log.Printf("[INFO] Failed finding server: %v. 
Marking destroyed", d.Id()) - d.SetId("") - return nil - } - if len(s.Details.IPaddresses) > 0 { - d.Set("private_ip_address", s.Details.IPaddresses[0].Internal) - if "" != s.Details.IPaddresses[0].Public { - d.Set("public_ip_address", s.Details.IPaddresses[0].Public) - } - } - - d.Set("name", s.Name) - d.Set("groupId", s.GroupID) - d.Set("status", s.Status) - d.Set("power_state", s.Details.Powerstate) - d.Set("cpu", s.Details.CPU) - d.Set("memory_mb", s.Details.MemoryMB) - d.Set("disk_gb", s.Details.Storagegb) - d.Set("status", s.Status) - d.Set("storage_type", s.Storagetype) - d.Set("created_date", s.ChangeInfo.CreatedDate) - d.Set("modified_date", s.ChangeInfo.ModifiedDate) - - creds, err := client.Server.GetCredentials(d.Id()) - if err != nil { - return err - } - d.Set("password", creds.Password) - return nil -} - -func resourceCLCServerUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - id := d.Id() - - var err error - var edits []api.Update - var updates []api.Update - var i int - - poll := make(chan *status.Response, 1) - d.Partial(true) - s, err := client.Server.Get(id) - if err != nil { - return fmt.Errorf("Failed fetching server: %v - %v", d.Id(), err) - } - // edits happen synchronously - if delta, orig := d.Get("description").(string), s.Description; delta != orig { - d.SetPartial("description") - edits = append(edits, server.UpdateDescription(delta)) - } - if delta, orig := d.Get("group_id").(string), s.GroupID; delta != orig { - d.SetPartial("group_id") - edits = append(edits, server.UpdateGroup(delta)) - } - if len(edits) > 0 { - err = client.Server.Edit(id, edits...) 
- if err != nil { - return fmt.Errorf("Failed saving edits: %v", err) - } - } - // updates are queue processed - if d.HasChange("password") { - d.SetPartial("password") - creds, _ := client.Server.GetCredentials(id) - old := creds.Password - pass := d.Get("password").(string) - updates = append(updates, server.UpdateCredentials(old, pass)) - } - if i = d.Get("cpu").(int); i != s.Details.CPU { - d.SetPartial("cpu") - updates = append(updates, server.UpdateCPU(i)) - } - if i = d.Get("memory_mb").(int); i != s.Details.MemoryMB { - d.SetPartial("memory_mb") - updates = append(updates, server.UpdateMemory(i/1024)) // takes GB - } - - if d.HasChange("custom_fields") { - d.SetPartial("custom_fields") - fields, err := parseCustomFields(d) - if err != nil { - return fmt.Errorf("Failed setting customfields: %v", err) - } - updates = append(updates, server.UpdateCustomfields(fields)) - } - if d.HasChange("additional_disks") { - d.SetPartial("additional_disks") - disks, err := parseAdditionalDisks(d) - if err != nil { - return fmt.Errorf("Failed parsing disks: %v", err) - } - updates = append(updates, server.UpdateAdditionaldisks(disks)) - } - - if len(updates) > 0 { - resp, err := client.Server.Update(id, updates...) - if err != nil { - return fmt.Errorf("Failed saving updates: %v", err) - } - - err = client.Status.Poll(resp.ID, poll) - if err != nil { - return err - } - status := <-poll - if status.Failed() { - return fmt.Errorf("Update failed") - } - log.Printf("[INFO] Server updated! 
status: %v", status.Status) - } - - if d.HasChange("power_state") { - st := d.Get("power_state").(string) - log.Printf("[DEBUG] POWER: %v => %v", s.Details.Powerstate, st) - newst := stateFromString(st) - servers, err := client.Server.PowerState(newst, s.Name) - if err != nil { - return fmt.Errorf("Failed setting power state to: %v", newst) - } - ok, id := servers[0].GetStatusID() - if !ok { - return fmt.Errorf("Failed extracting power state queue status from: %v", servers[0]) - } - err = client.Status.Poll(id, poll) - if err != nil { - return err - } - status := <-poll - if status.Failed() { - return fmt.Errorf("Update failed") - } - log.Printf("[INFO] state updated: %v", status) - } - - d.Partial(false) - return resourceCLCServerRead(d, meta) -} - -func resourceCLCServerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clc.Client) - id := d.Id() - resp, err := client.Server.Delete(id) - if err != nil || !resp.IsQueued { - return fmt.Errorf("Failed queueing delete of %v - %v", id, err) - } - - ok, st := resp.GetStatusID() - if !ok { - return fmt.Errorf("Failed extracting status to poll on %v: %v", resp, err) - } - err = waitStatus(client, st) - if err != nil { - return err - } - log.Printf("[INFO] Server sucessfully deleted: %v", st) - return nil -} diff --git a/builtin/providers/clc/resource_clc_server_test.go b/builtin/providers/clc/resource_clc_server_test.go deleted file mode 100644 index 60ce8b08a..000000000 --- a/builtin/providers/clc/resource_clc_server_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package clc - -import ( - "fmt" - "strings" - "testing" - - clc "github.com/CenturyLinkCloud/clc-sdk" - "github.com/CenturyLinkCloud/clc-sdk/server" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// things to test: -// basic crud -// modify specs -// power operations -// add'l disks -// custom fields? 
(skip) - -func TestAccServerBasic(t *testing.T) { - var resp server.Response - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckServerConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckServerExists("clc_server.acc_test_server", &resp), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "name_template", "test"), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "cpu", "1"), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "memory_mb", "1024"), - ), - }, - // update simple attrs - resource.TestStep{ - Config: testAccCheckServerConfigCPUMEM, - Check: resource.ComposeTestCheckFunc( - testAccCheckServerExists("clc_server.acc_test_server", &resp), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "cpu", "2"), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "memory_mb", "2048"), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "password", "Green123$"), - testAccCheckServerUpdatedSpec("clc_server.acc_test_server", &resp), - ), - }, - // toggle power - resource.TestStep{ - Config: testAccCheckServerConfigPower, - Check: resource.ComposeTestCheckFunc( - testAccCheckServerExists("clc_server.acc_test_server", &resp), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "power_state", "stopped"), - ), - }, - /* // currently broken since disk updates require diskId - // add disks - resource.TestStep{ - Config: testAccCheckServerConfig_disks, - Check: resource.ComposeTestCheckFunc( - testAccCheckServerExists("clc_server.acc_test_server", &resp), - // power still off - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "power_state", "stopped"), - testAccCheckServerUpdatedDisks("clc_server.acc_test_server", &resp), - ), - }, - */ - /* // broken since network id is a 
(account-specific) guid - // set network id - resource.TestStep{ - Config: testAccCheckServerConfigNetwork, - Check: resource.ComposeTestCheckFunc( - testAccCheckServerExists("clc_server.acc_test_server", &resp), - resource.TestCheckResourceAttr( - "clc_server.acc_test_server", "network_id", "15a0f669c332435ebf375e010ac79fbb"), - testAccCheckServerUpdatedSpec("clc_server.acc_test_server", &resp), - ), - }, - */ - }, - }) -} - -func testAccCheckServerDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*clc.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "clc_server" { - continue - } - - _, err := client.Server.Get(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Server still exists") - } - } - - return nil -} - -func testAccCheckServerExists(n string, resp *server.Response) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No server ID is set") - } - - client := testAccProvider.Meta().(*clc.Client) - srv, err := client.Server.Get(rs.Primary.ID) - if err != nil { - return err - } - - if strings.ToUpper(srv.ID) != rs.Primary.ID { - return fmt.Errorf("Server not found") - } - *resp = *srv - return nil - } -} - -func testAccCheckServerUpdatedSpec(n string, resp *server.Response) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - client := testAccProvider.Meta().(*clc.Client) - srv, err := client.Server.Get(rs.Primary.ID) - if err != nil { - return err - } - cpu := srv.Details.CPU - mem := srv.Details.MemoryMB - scpu := fmt.Sprintf("%v", cpu) - smem := fmt.Sprintf("%v", mem) - excpu := rs.Primary.Attributes["cpu"] - exmem := rs.Primary.Attributes["memory_mb"] - if scpu != excpu { - return fmt.Errorf("Expected CPU to be %v but found %v", excpu, scpu) - 
} - if smem != exmem { - return fmt.Errorf("Expected MEM to be %v but found %v", exmem, smem) - } - return nil - } -} - -func testAccCheckServerUpdatedDisks(n string, resp *server.Response) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - client := testAccProvider.Meta().(*clc.Client) - srv, err := client.Server.Get(rs.Primary.ID) - if err != nil { - return err - } - - if len(srv.Details.Disks) <= 3 { - return fmt.Errorf("Expected total of > 3 drives. found: %v", len(srv.Details.Disks)) - } - - return nil - } -} - -const testAccCheckServerConfigBasic = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_server" { - location_id = "${var.dc}" - name = "acc_test_group_server" - parent = "Default Group" -} - -resource "clc_server" "acc_test_server" { - name_template = "test" - source_server_id = "UBUNTU-14-64-TEMPLATE" - group_id = "${clc_group.acc_test_group_server.id}" - cpu = 1 - memory_mb = 1024 - password = "Green123$" -} -` - -const testAccCheckServerConfigCPUMEM = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_server" { - location_id = "${var.dc}" - name = "acc_test_group_server" - parent = "Default Group" -} - -resource "clc_server" "acc_test_server" { - name_template = "test" - source_server_id = "UBUNTU-14-64-TEMPLATE" - group_id = "${clc_group.acc_test_group_server.id}" - cpu = 2 - memory_mb = 2048 - password = "Green123$" - power_state = "started" -} -` - -const testAccCheckServerConfigPower = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_server" { - location_id = "${var.dc}" - name = "acc_test_group_server" - parent = "Default Group" -} - -resource "clc_server" "acc_test_server" { - name_template = "test" - source_server_id = "UBUNTU-14-64-TEMPLATE" - group_id = "${clc_group.acc_test_group_server.id}" - cpu = 2 - memory_mb = 2048 - password = "Green123$" - 
power_state = "stopped" -} -` - -const testAccCheckServerConfigDisks = ` -variable "dc" { default = "IL1" } - -resource "clc_group" "acc_test_group_server" { - location_id = "${var.dc}" - name = "acc_test_group_server" - parent = "Default Group" -} - -resource "clc_server" "acc_test_server" { - name_template = "test" - source_server_id = "UBUNTU-14-64-TEMPLATE" - group_id = "${clc_group.acc_test_group_server.id}" - cpu = 2 - memory_mb = 2048 - password = "Green123$" - power_state = "stopped" - # network_id = "15a0f669c332435ebf375e010ac79fbb" - additional_disks - { - path = "/data1" - size_gb = 100 - type = "partitioned" - } - -} -` diff --git a/builtin/providers/cloudflare/config.go b/builtin/providers/cloudflare/config.go deleted file mode 100644 index 8fabbed77..000000000 --- a/builtin/providers/cloudflare/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package cloudflare - -import ( - "fmt" - "log" - - "github.com/cloudflare/cloudflare-go" -) - -type Config struct { - Email string - Token string -} - -// Client() returns a new client for accessing cloudflare. -func (c *Config) Client() (*cloudflare.API, error) { - client, err := cloudflare.New(c.Token, c.Email) - if err != nil { - return nil, fmt.Errorf("Error creating new CloudFlare client: %s", err) - } - log.Printf("[INFO] CloudFlare Client configured for user: %s", c.Email) - return client, nil -} diff --git a/builtin/providers/cloudflare/provider.go b/builtin/providers/cloudflare/provider.go deleted file mode 100644 index 5dae2005b..000000000 --- a/builtin/providers/cloudflare/provider.go +++ /dev/null @@ -1,42 +0,0 @@ -package cloudflare - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDFLARE_EMAIL", nil), - Description: "A registered CloudFlare email address.", - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDFLARE_TOKEN", nil), - Description: "The token key for API operations.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "cloudflare_record": resourceCloudFlareRecord(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Email: d.Get("email").(string), - Token: d.Get("token").(string), - } - - return config.Client() -} diff --git a/builtin/providers/cloudflare/provider_test.go b/builtin/providers/cloudflare/provider_test.go deleted file mode 100644 index e8cd4ffaf..000000000 --- a/builtin/providers/cloudflare/provider_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package cloudflare - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "cloudflare": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CLOUDFLARE_EMAIL"); v == "" { - t.Fatal("CLOUDFLARE_EMAIL must be set for acceptance tests") - } - - if v := os.Getenv("CLOUDFLARE_TOKEN"); v == "" { - t.Fatal("CLOUDFLARE_TOKEN must be set 
for acceptance tests") - } - - if v := os.Getenv("CLOUDFLARE_DOMAIN"); v == "" { - t.Fatal("CLOUDFLARE_DOMAIN must be set for acceptance tests. The domain is used to create and destroy record against.") - } -} diff --git a/builtin/providers/cloudflare/resource_cloudflare_record.go b/builtin/providers/cloudflare/resource_cloudflare_record.go deleted file mode 100644 index b0000fe4a..000000000 --- a/builtin/providers/cloudflare/resource_cloudflare_record.go +++ /dev/null @@ -1,212 +0,0 @@ -package cloudflare - -import ( - "fmt" - "log" - - "github.com/cloudflare/cloudflare-go" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceCloudFlareRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudFlareRecordCreate, - Read: resourceCloudFlareRecordRead, - Update: resourceCloudFlareRecordUpdate, - Delete: resourceCloudFlareRecordDelete, - - SchemaVersion: 1, - MigrateState: resourceCloudFlareRecordMigrateState, - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Required: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "value": { - Type: schema.TypeString, - Required: true, - }, - - "ttl": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "priority": { - Type: schema.TypeInt, - Optional: true, - }, - - "proxied": { - Default: false, - Optional: true, - Type: schema.TypeBool, - }, - - "zone_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCloudFlareRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*cloudflare.API) - - newRecord := cloudflare.DNSRecord{ - Type: d.Get("type").(string), - Name: d.Get("name").(string), - Content: d.Get("value").(string), - Proxied: d.Get("proxied").(bool), - ZoneName: d.Get("domain").(string), - } - - if priority, 
ok := d.GetOk("priority"); ok { - newRecord.Priority = priority.(int) - } - - if ttl, ok := d.GetOk("ttl"); ok { - newRecord.TTL = ttl.(int) - } - - // Validate value based on type - if err := validateRecordName(newRecord.Type, newRecord.Content); err != nil { - return fmt.Errorf("Error validating record name %q: %s", newRecord.Name, err) - } - - // Validate type - if err := validateRecordType(newRecord.Type, newRecord.Proxied); err != nil { - return fmt.Errorf("Error validating record type %q: %s", newRecord.Type, err) - } - - zoneId, err := client.ZoneIDByName(newRecord.ZoneName) - if err != nil { - return fmt.Errorf("Error finding zone %q: %s", newRecord.ZoneName, err) - } - - d.Set("zone_id", zoneId) - newRecord.ZoneID = zoneId - - log.Printf("[DEBUG] CloudFlare Record create configuration: %#v", newRecord) - - r, err := client.CreateDNSRecord(zoneId, newRecord) - if err != nil { - return fmt.Errorf("Failed to create record: %s", err) - } - - // In the Event that the API returns an empty DNS Record, we verify that the - // ID returned is not the default "" - if r.Result.ID == "" { - return fmt.Errorf("Failed to find record in Creat response; Record was empty") - } - - d.SetId(r.Result.ID) - - log.Printf("[INFO] CloudFlare Record ID: %s", d.Id()) - - return resourceCloudFlareRecordRead(d, meta) -} - -func resourceCloudFlareRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*cloudflare.API) - domain := d.Get("domain").(string) - - zoneId, err := client.ZoneIDByName(domain) - if err != nil { - return fmt.Errorf("Error finding zone %q: %s", domain, err) - } - - record, err := client.DNSRecord(zoneId, d.Id()) - if err != nil { - return err - } - - d.SetId(record.ID) - d.Set("hostname", record.Name) - d.Set("type", record.Type) - d.Set("value", record.Content) - d.Set("ttl", record.TTL) - d.Set("priority", record.Priority) - d.Set("proxied", record.Proxied) - d.Set("zone_id", zoneId) - - return nil -} - -func 
resourceCloudFlareRecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*cloudflare.API) - - updateRecord := cloudflare.DNSRecord{ - ID: d.Id(), - Type: d.Get("type").(string), - Name: d.Get("name").(string), - Content: d.Get("value").(string), - ZoneName: d.Get("domain").(string), - Proxied: false, - } - - if priority, ok := d.GetOk("priority"); ok { - updateRecord.Priority = priority.(int) - } - - if proxied, ok := d.GetOk("proxied"); ok { - updateRecord.Proxied = proxied.(bool) - } - - if ttl, ok := d.GetOk("ttl"); ok { - updateRecord.TTL = ttl.(int) - } - - zoneId, err := client.ZoneIDByName(updateRecord.ZoneName) - if err != nil { - return fmt.Errorf("Error finding zone %q: %s", updateRecord.ZoneName, err) - } - - updateRecord.ZoneID = zoneId - - log.Printf("[DEBUG] CloudFlare Record update configuration: %#v", updateRecord) - err = client.UpdateDNSRecord(zoneId, d.Id(), updateRecord) - if err != nil { - return fmt.Errorf("Failed to update CloudFlare Record: %s", err) - } - - return resourceCloudFlareRecordRead(d, meta) -} - -func resourceCloudFlareRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*cloudflare.API) - domain := d.Get("domain").(string) - - zoneId, err := client.ZoneIDByName(domain) - if err != nil { - return fmt.Errorf("Error finding zone %q: %s", domain, err) - } - - log.Printf("[INFO] Deleting CloudFlare Record: %s, %s", domain, d.Id()) - - err = client.DeleteDNSRecord(zoneId, d.Id()) - if err != nil { - return fmt.Errorf("Error deleting CloudFlare Record: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudflare/resource_cloudflare_record_migrate.go b/builtin/providers/cloudflare/resource_cloudflare_record_migrate.go deleted file mode 100644 index d71d3fa28..000000000 --- a/builtin/providers/cloudflare/resource_cloudflare_record_migrate.go +++ /dev/null @@ -1,95 +0,0 @@ -package cloudflare - -import ( - "fmt" - "log" - "strconv" - - "github.com/cloudflare/cloudflare-go" 
- "github.com/hashicorp/terraform/terraform" -) - -func resourceCloudFlareRecordMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found CloudFlare Record State v0; migrating to v1") - return migrateCloudFlareRecordStateV0toV1(is, meta) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateCloudFlareRecordStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - client := meta.(*cloudflare.API) - - // look up new id based on attributes - domain := is.Attributes["domain"] - zoneId, err := client.ZoneIDByName(domain) - if err != nil { - return is, fmt.Errorf("Error finding zone %q: %s", domain, err) - } - - // all other information is ignored in the DNSRecords call - searchRecord := cloudflare.DNSRecord{ - Type: is.Attributes["type"], - Name: is.Attributes["hostname"], - Content: is.Attributes["value"], - } - - records, err := client.DNSRecords(zoneId, searchRecord) - if err != nil { - return is, err - } - - for _, r := range records { - if is.Attributes["ttl"] != "" { - v, err := strconv.Atoi(is.Attributes["ttl"]) - if err != nil { - return is, fmt.Errorf("Error converting ttl to int in CloudFlare Record Migration") - } - - if v != r.TTL { - continue - } - } - - if is.Attributes["proxied"] != "" { - b, err := strconv.ParseBool(is.Attributes["proxied"]) - if err != nil { - return is, fmt.Errorf("Error converting proxied to bool in CloudFlare Record Migration") - } - - if b != r.Proxied { - continue - } - } - - if is.Attributes["priority"] != "" { - v, err := strconv.Atoi(is.Attributes["priority"]) - if err != nil { - return is, fmt.Errorf("Error converting priority to int in CloudFlare Record Migration") - } - - if v 
!= r.Priority { - continue - } - } - - // assume record found - is.Attributes["id"] = r.ID - is.ID = r.ID - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil - } - - // assume no record found - log.Printf("[DEBUG] Attributes after no migration: %#v", is.Attributes) - return is, fmt.Errorf("No matching Record found") -} diff --git a/builtin/providers/cloudflare/resource_cloudflare_record_migrate_test.go b/builtin/providers/cloudflare/resource_cloudflare_record_migrate_test.go deleted file mode 100644 index 639ee9f5b..000000000 --- a/builtin/providers/cloudflare/resource_cloudflare_record_migrate_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package cloudflare - -import ( - "fmt" - "log" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/cloudflare/cloudflare-go" - "github.com/hashicorp/terraform/terraform" -) - -func TestCloudFlareRecordMigrateState(t *testing.T) { - // create the test server for mocking the API calls - ts := mockCloudFlareEnv() - defer ts.Close() - - // Create a CloudFlare client, overriding the BaseURL - cfMeta, err := cloudflare.New( - "sometoken", - "someemail", - mockHTTPClient(ts.URL), - ) - - if err != nil { - t.Fatalf("Error building CloudFlare API: %s", err) - } - - cases := map[string]struct { - StateVersion int - ID string - Attributes map[string]string - Expected string - ShouldFail bool - }{ - "ttl_120": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "name": "notthesub", - "hostname": "notthesub.hashicorptest.com", - "type": "A", - "content": "10.0.2.5", - "ttl": "120", - "zone_id": "1234567890", - "domain": "hashicorptest.com", - }, - Expected: "7778f8766e583af8de0abfcd76c5dAAA", - }, - "ttl_121": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "name": "notthesub", - "hostname": "notthesub.hashicorptest.com", - "type": "A", - "content": "10.0.2.5", - "ttl": "121", - "zone_id": "1234567890", - 
"domain": "hashicorptest.com", - }, - Expected: "5558f8766e583af8de0abfcd76c5dBBB", - }, - "mx_priority": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "name": "hashicorptest.com", - "type": "MX", - "content": "some.registrar-servers.com", - "ttl": "1", - "priority": "20", - "zone_id": "1234567890", - "domain": "hashicorptest.com", - }, - Expected: "12342092cbc4c391be33ce548713bba3", - }, - "mx_priority_mismatch": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "type": "MX", - "name": "hashicorptest.com", - "content": "some.registrar-servers.com", - "ttl": "1", - "priority": "10", - "zone_id": "1234567890", - "domain": "hashicorptest.com", - }, - Expected: "12342092cbc4c391be33ce548713bba3", - ShouldFail: true, - }, - "proxied": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "name": "tftestingsubv616", - "hostname": "tftestingsubv616.hashicorptest.com", - "type": "A", - "content": "52.39.212.111", - "proxied": "true", - "ttl": "1", - "zone_id": "1234567890", - "domain": "hashicorptest.com", - }, - Expected: "888ffe3f93a31231ad6b0c6d09185eee", - }, - "not_proxied": { - StateVersion: 0, - ID: "123456", - Attributes: map[string]string{ - "id": "123456", - "name": "tftestingsubv616", - "hostname": "tftestingsubv616.hashicorptest.com", - "type": "A", - "content": "52.39.212.111", - "proxied": "false", - "ttl": "1", - "zone_id": "1234567890", - "domain": "hashicorptest.com", - }, - Expected: "222ffe3f93a31231ad6b0c6d09185jjj", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceCloudFlareRecordMigrateState( - tc.StateVersion, is, cfMeta) - - if err != nil { - if tc.ShouldFail { - // expected error - continue - } - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.ID != tc.Expected { - t.Fatalf("bad sg rule id: %s\n\n expected: %s", is.ID, tc.Expected) - 
} - } -} - -// cloudflareEnv establishes a httptest server to mock out the CloudFlare API -// endpoints that we'll be calling. -func mockCloudFlareEnv() *httptest.Server { - endpoints := mockEndpoints() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - log.Printf("[DEBUG] Mocker server received request to %q", r.RequestURI) - rBase, err := url.ParseRequestURI(r.RequestURI) - if err != nil { - log.Fatalf("Failed to find the base path: %s", err) - } - for _, e := range endpoints { - if rBase.Path == e.BasePath { - fmt.Fprintln(w, e.Body) - w.WriteHeader(200) - return - } - } - w.WriteHeader(400) - })) - - return ts -} - -// Stub out the two CloudFlare API routes that will be called -func mockEndpoints() []*endpoint { - return []*endpoint{ - &endpoint{ - BasePath: "/zones", - Body: zoneResponse, - }, - &endpoint{ - BasePath: "/zones/1234567890/dns_records", - Body: dnsResponse, - }, - } -} - -type routes struct { - Endpoints []*endpoint -} -type endpoint struct { - BasePath string - Body string -} - -// HTTPClient accepts a custom *http.Client for making API calls. 
-// This function is used as a callback of sorts to override any of the client -// options that you can't directly set on the struct -func mockHTTPClient(testURL string) cloudflare.Option { - return func(api *cloudflare.API) error { - api.BaseURL = testURL - return nil - } -} - -const zoneResponse = ` -{ - "result": [ - { - "id": "1234567890", - "name": "hashicorptest.com", - "status": "active", - "paused": false, - "type": "full", - "development_mode": 0 - } - ], - "result_info": { - "page": 1, - "per_page": 20, - "total_pages": 1, - "count": 1, - "total_count": 1 - }, - "success": true, - "errors": [], - "messages": [] -} -` - -const dnsResponse = ` -{ - "result": [ - { - "id": "7778f8766e583af8de0abfcd76c5dAAA", - "type": "A", - "name": "notthesub.hashicorptest.com", - "content": "10.0.2.5", - "proxiable": false, - "proxied": false, - "ttl": 120, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "5558f8766e583af8de0abfcd76c5dBBB", - "type": "A", - "name": "notthesub.hashicorptest.com", - "content": "10.0.2.5", - "proxiable": false, - "proxied": false, - "ttl": 121, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "2220a9593ab869199b65c89bddf72ddd", - "type": "A", - "name": "maybethesub.hashicorptest.com", - "content": "10.0.3.5", - "proxiable": false, - "proxied": false, - "ttl": 120, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "222ffe3f93a31231ad6b0c6d09185jjj", - "type": "A", - "name": "tftestingsubv616.hashicorptest.com", - "content": "52.39.212.111", - "proxiable": true, - "proxied": false, - "ttl": 1, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "888ffe3f93a31231ad6b0c6d09185eee", - "type": "A", - "name": "tftestingsubv616.hashicorptest.com", - "content": "52.39.212.111", - "proxiable": true, - "proxied": true, - "ttl": 1, - "locked": false, - "zone_id": 
"1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "98y6t9ba87e6ee3e6aeba8f3dc52c81b", - "type": "CNAME", - "name": "somecname.hashicorptest.com", - "content": "some.us-west-2.elb.amazonaws.com", - "proxiable": true, - "proxied": false, - "ttl": 120, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - }, - { - "id": "12342092cbc4c391be33ce548713bba3", - "type": "MX", - "name": "hashicorptest.com", - "content": "some.registrar-servers.com", - "proxiable": false, - "proxied": false, - "ttl": 1, - "priority": 20, - "locked": false, - "zone_id": "1234567890", - "zone_name": "hashicorptest.com" - } - ], - "result_info": { - "page": 1, - "per_page": 20, - "total_pages": 2, - "count": 20, - "total_count": 4 - }, - "success": true, - "errors": [], - "messages": [] -}` diff --git a/builtin/providers/cloudflare/resource_cloudflare_record_test.go b/builtin/providers/cloudflare/resource_cloudflare_record_test.go deleted file mode 100644 index 81a16eb1e..000000000 --- a/builtin/providers/cloudflare/resource_cloudflare_record_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package cloudflare - -import ( - "fmt" - "os" - "testing" - - "github.com/cloudflare/cloudflare-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccCloudFlareRecord_Basic(t *testing.T) { - var record cloudflare.DNSRecord - domain := os.Getenv("CLOUDFLARE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFlareRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigBasic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &record), - testAccCheckCloudFlareRecordAttributes(&record), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "name", "terraform"), - 
resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "value", "192.168.0.10"), - ), - }, - }, - }) -} - -func TestAccCloudFlareRecord_Apex(t *testing.T) { - var record cloudflare.DNSRecord - domain := os.Getenv("CLOUDFLARE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFlareRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigApex, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &record), - testAccCheckCloudFlareRecordAttributes(&record), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "name", "@"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "value", "192.168.0.10"), - ), - }, - }, - }) -} - -func TestAccCloudFlareRecord_Proxied(t *testing.T) { - var record cloudflare.DNSRecord - domain := os.Getenv("CLOUDFLARE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFlareRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigProxied, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &record), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "proxied", "true"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "type", "CNAME"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", 
"value", domain), - ), - }, - }, - }) -} - -func TestAccCloudFlareRecord_Updated(t *testing.T) { - var record cloudflare.DNSRecord - domain := os.Getenv("CLOUDFLARE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFlareRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigBasic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &record), - testAccCheckCloudFlareRecordAttributes(&record), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "value", "192.168.0.10"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigNewValue, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &record), - testAccCheckCloudFlareRecordAttributesUpdated(&record), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "cloudflare_record.foobar", "value", "192.168.0.11"), - ), - }, - }, - }) -} - -func TestAccCloudFlareRecord_forceNewRecord(t *testing.T) { - var afterCreate, afterUpdate cloudflare.DNSRecord - domain := os.Getenv("CLOUDFLARE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudFlareRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigBasic, domain), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &afterCreate), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckCloudFlareRecordConfigForceNew, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFlareRecordExists("cloudflare_record.foobar", &afterUpdate), - testAccCheckCloudFlareRecordRecreated(t, &afterCreate, &afterUpdate), - ), - }, - }, - }) -} - -func testAccCheckCloudFlareRecordRecreated(t *testing.T, - before, after *cloudflare.DNSRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - if before.ID == after.ID { - t.Fatalf("Expected change of Record Ids, but both were %v", before.ID) - } - return nil - } -} - -func testAccCheckCloudFlareRecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*cloudflare.API) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudflare_record" { - continue - } - - _, err := client.DNSRecord(rs.Primary.Attributes["zone_id"], rs.Primary.ID) - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckCloudFlareRecordAttributes(record *cloudflare.DNSRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Content != "192.168.0.10" { - return fmt.Errorf("Bad content: %s", record.Content) - } - - return nil - } -} - -func testAccCheckCloudFlareRecordAttributesUpdated(record *cloudflare.DNSRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Content != "192.168.0.11" { - return fmt.Errorf("Bad content: %s", record.Content) - } - - return nil - } -} - -func testAccCheckCloudFlareRecordExists(n string, record *cloudflare.DNSRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := 
testAccProvider.Meta().(*cloudflare.API) - foundRecord, err := client.DNSRecord(rs.Primary.Attributes["zone_id"], rs.Primary.ID) - if err != nil { - return err - } - - if foundRecord.ID != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *record = foundRecord - - return nil - } -} - -const testAccCheckCloudFlareRecordConfigBasic = ` -resource "cloudflare_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "192.168.0.10" - type = "A" - ttl = 3600 -}` - -const testAccCheckCloudFlareRecordConfigApex = ` -resource "cloudflare_record" "foobar" { - domain = "%s" - name = "@" - value = "192.168.0.10" - type = "A" - ttl = 3600 -}` - -const testAccCheckCloudFlareRecordConfigProxied = ` -resource "cloudflare_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "%s" - type = "CNAME" - proxied = true -}` - -const testAccCheckCloudFlareRecordConfigNewValue = ` -resource "cloudflare_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "192.168.0.11" - type = "A" - ttl = 3600 -}` - -const testAccCheckCloudFlareRecordConfigForceNew = ` -resource "cloudflare_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "%s" - type = "CNAME" - ttl = 3600 -}` diff --git a/builtin/providers/cloudflare/validators.go b/builtin/providers/cloudflare/validators.go deleted file mode 100644 index 696a45f4f..000000000 --- a/builtin/providers/cloudflare/validators.go +++ /dev/null @@ -1,69 +0,0 @@ -package cloudflare - -import ( - "fmt" - "net" - "strings" -) - -// validateRecordType ensures that the cloudflare record type is valid -func validateRecordType(t string, proxied bool) error { - switch t { - case "A": - return nil - case "AAAA": - return nil - case "CNAME": - return nil - case "TXT": - if !proxied { - return nil - } - case "SRV": - if !proxied { - return nil - } - case "LOC": - if !proxied { - return nil - } - case "MX": - if !proxied { - return nil - } - case "NS": - if !proxied { - return nil - } - case "SPF": - if 
!proxied { - return nil - } - default: - return fmt.Errorf( - `Invalid type %q. Valid types are "A", "AAAA", "CNAME", "TXT", "SRV", "LOC", "MX", "NS" or "SPF"`, t) - } - - return fmt.Errorf("Type %q cannot be proxied", t) -} - -// validateRecordName ensures that based on supplied record type, the name content matches -// Currently only validates A and AAAA types -func validateRecordName(t string, value string) error { - switch t { - case "A": - // Must be ipv4 addr - addr := net.ParseIP(value) - if addr == nil || !strings.Contains(value, ".") { - return fmt.Errorf("A record must be a valid IPv4 address, got: %q", value) - } - case "AAAA": - // Must be ipv6 addr - addr := net.ParseIP(value) - if addr == nil || !strings.Contains(value, ":") { - return fmt.Errorf("AAAA record must be a valid IPv6 address, got: %q", value) - } - } - - return nil -} diff --git a/builtin/providers/cloudflare/validators_test.go b/builtin/providers/cloudflare/validators_test.go deleted file mode 100644 index 6e3976099..000000000 --- a/builtin/providers/cloudflare/validators_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package cloudflare - -import "testing" - -func TestValidateRecordType(t *testing.T) { - validTypes := map[string]bool{ - "A": true, - "AAAA": true, - "CNAME": true, - "TXT": false, - "SRV": false, - "LOC": false, - "MX": false, - "NS": false, - "SPF": false, - } - for k, v := range validTypes { - err := validateRecordType(k, v) - if err != nil { - t.Fatalf("%s should be a valid record type: %s", k, err) - } - } - - invalidTypes := map[string]bool{ - "a": false, - "cName": false, - "txt": false, - "SRv": false, - "foo": false, - "bar": false, - "TXT": true, - "SRV": true, - "SPF": true, - } - for k, v := range invalidTypes { - if err := validateRecordType(k, v); err == nil { - t.Fatalf("%s should be an invalid record type", k) - } - } -} - -func TestValidateRecordName(t *testing.T) { - validNames := map[string]string{ - "A": "192.168.0.1", - "AAAA": 
"2001:0db8:0000:0042:0000:8a2e:0370:7334", - } - - for k, v := range validNames { - if err := validateRecordName(k, v); err != nil { - t.Fatalf("%q should be a valid name for type %q: %v", v, k, err) - } - } - - invalidNames := map[string]string{ - "A": "terraform.io", - "AAAA": "192.168.0.1", - } - for k, v := range invalidNames { - if err := validateRecordName(k, v); err == nil { - t.Fatalf("%q should be an invalid name for type %q", v, k) - } - } -} diff --git a/builtin/providers/cloudstack/config.go b/builtin/providers/cloudstack/config.go deleted file mode 100644 index 8a2ad813b..000000000 --- a/builtin/providers/cloudstack/config.go +++ /dev/null @@ -1,21 +0,0 @@ -package cloudstack - -import "github.com/xanzy/go-cloudstack/cloudstack" - -// Config is the configuration structure used to instantiate a -// new CloudStack client. -type Config struct { - APIURL string - APIKey string - SecretKey string - HTTPGETOnly bool - Timeout int64 -} - -// NewClient returns a new CloudStack client. -func (c *Config) NewClient() (*cloudstack.CloudStackClient, error) { - cs := cloudstack.NewAsyncClient(c.APIURL, c.APIKey, c.SecretKey, false) - cs.HTTPGETOnly = c.HTTPGETOnly - cs.AsyncTimeout(c.Timeout) - return cs, nil -} diff --git a/builtin/providers/cloudstack/provider.go b/builtin/providers/cloudstack/provider.go deleted file mode 100644 index ebf6670f1..000000000 --- a/builtin/providers/cloudstack/provider.go +++ /dev/null @@ -1,138 +0,0 @@ -package cloudstack - -import ( - "errors" - - "github.com/go-ini/ini" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_API_URL", nil), - ConflictsWith: []string{"config", "profile"}, - }, - - "api_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_API_KEY", nil), - ConflictsWith: []string{"config", "profile"}, - }, - - "secret_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_SECRET_KEY", nil), - ConflictsWith: []string{"config", "profile"}, - }, - - "config": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"api_url", "api_key", "secret_key"}, - }, - - "profile": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"api_url", "api_key", "secret_key"}, - }, - - "http_get_only": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_HTTP_GET_ONLY", false), - }, - - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_TIMEOUT", 900), - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "cloudstack_affinity_group": resourceCloudStackAffinityGroup(), - "cloudstack_disk": resourceCloudStackDisk(), - "cloudstack_egress_firewall": resourceCloudStackEgressFirewall(), - "cloudstack_firewall": resourceCloudStackFirewall(), - "cloudstack_instance": resourceCloudStackInstance(), - "cloudstack_ipaddress": resourceCloudStackIPAddress(), - "cloudstack_loadbalancer_rule": resourceCloudStackLoadBalancerRule(), - "cloudstack_network": resourceCloudStackNetwork(), - "cloudstack_network_acl": resourceCloudStackNetworkACL(), - "cloudstack_network_acl_rule": resourceCloudStackNetworkACLRule(), - "cloudstack_nic": resourceCloudStackNIC(), - 
"cloudstack_port_forward": resourceCloudStackPortForward(), - "cloudstack_private_gateway": resourceCloudStackPrivateGateway(), - "cloudstack_secondary_ipaddress": resourceCloudStackSecondaryIPAddress(), - "cloudstack_security_group": resourceCloudStackSecurityGroup(), - "cloudstack_security_group_rule": resourceCloudStackSecurityGroupRule(), - "cloudstack_ssh_keypair": resourceCloudStackSSHKeyPair(), - "cloudstack_static_nat": resourceCloudStackStaticNAT(), - "cloudstack_static_route": resourceCloudStackStaticRoute(), - "cloudstack_template": resourceCloudStackTemplate(), - "cloudstack_vpc": resourceCloudStackVPC(), - "cloudstack_vpn_connection": resourceCloudStackVPNConnection(), - "cloudstack_vpn_customer_gateway": resourceCloudStackVPNCustomerGateway(), - "cloudstack_vpn_gateway": resourceCloudStackVPNGateway(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - apiURL, apiURLOK := d.GetOk("api_url") - apiKey, apiKeyOK := d.GetOk("api_key") - secretKey, secretKeyOK := d.GetOk("secret_key") - config, configOK := d.GetOk("config") - profile, profileOK := d.GetOk("profile") - - switch { - case apiURLOK, apiKeyOK, secretKeyOK: - if !(apiURLOK && apiKeyOK && secretKeyOK) { - return nil, errors.New("'api_url', 'api_key' and 'secret_key' should all have values") - } - case configOK, profileOK: - if !(configOK && profileOK) { - return nil, errors.New("'config' and 'profile' should both have a value") - } - default: - return nil, errors.New( - "either 'api_url', 'api_key' and 'secret_key' or 'config' and 'profile' should have values") - } - - if configOK && profileOK { - cfg, err := ini.Load(config.(string)) - if err != nil { - return nil, err - } - - section, err := cfg.GetSection(profile.(string)) - if err != nil { - return nil, err - } - - apiURL = section.Key("url").String() - apiKey = section.Key("apikey").String() - secretKey = section.Key("secretkey").String() - } - - cfg := Config{ - 
APIURL: apiURL.(string), - APIKey: apiKey.(string), - SecretKey: secretKey.(string), - HTTPGETOnly: d.Get("http_get_only").(bool), - Timeout: int64(d.Get("timeout").(int)), - } - - return cfg.NewClient() -} diff --git a/builtin/providers/cloudstack/provider_test.go b/builtin/providers/cloudstack/provider_test.go deleted file mode 100644 index 2e177d4e8..000000000 --- a/builtin/providers/cloudstack/provider_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package cloudstack - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "cloudstack": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testSetValueOnResourceData(t *testing.T) { - d := schema.ResourceData{} - d.Set("id", "name") - - setValueOrID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7") - - if d.Get("id").(string) != "name" { - t.Fatal("err: 'id' does not match 'name'") - } -} - -func testSetIDOnResourceData(t *testing.T) { - d := schema.ResourceData{} - d.Set("id", "54711781-274e-41b2-83c0-17194d0108f7") - - setValueOrID(&d, "id", "name", "54711781-274e-41b2-83c0-17194d0108f7") - - if d.Get("id").(string) != "54711781-274e-41b2-83c0-17194d0108f7" { - t.Fatal("err: 'id' doest not match '54711781-274e-41b2-83c0-17194d0108f7'") - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CLOUDSTACK_API_URL"); v == "" { - t.Fatal("CLOUDSTACK_API_URL must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_API_KEY"); v == "" { - t.Fatal("CLOUDSTACK_API_KEY must be set for 
acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_SECRET_KEY"); v == "" { - t.Fatal("CLOUDSTACK_SECRET_KEY must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_2ND_NIC_IPADDRESS"); v == "" { - t.Fatal("CLOUDSTACK_2ND_NIC_IPADDRESS must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_2ND_NIC_NETWORK"); v == "" { - t.Fatal("CLOUDSTACK_2ND_NIC_NETWORK must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_DISK_OFFERING_1"); v == "" { - t.Fatal("CLOUDSTACK_DISK_OFFERING_1 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_DISK_OFFERING_2"); v == "" { - t.Fatal("CLOUDSTACK_DISK_OFFERING_2 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_HYPERVISOR"); v == "" { - t.Fatal("CLOUDSTACK_HYPERVISOR must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_SERVICE_OFFERING_1"); v == "" { - t.Fatal("CLOUDSTACK_SERVICE_OFFERING_1 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_SERVICE_OFFERING_2"); v == "" { - t.Fatal("CLOUDSTACK_SERVICE_OFFERING_2 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_1"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_1 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS1"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_1_IPADDRESS1 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS2"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_1_IPADDRESS2 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_2"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_2 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_2_CIDR"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_2_CIDR must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_NETWORK_2_OFFERING"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_2_OFFERING must be set for acceptance tests") - } - if v := 
os.Getenv("CLOUDSTACK_NETWORK_2_IPADDRESS"); v == "" { - t.Fatal("CLOUDSTACK_NETWORK_2_IPADDRESS must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_VPC_CIDR_1"); v == "" { - t.Fatal("CLOUDSTACK_VPC_CIDR_1 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_VPC_CIDR_2"); v == "" { - t.Fatal("CLOUDSTACK_VPC_CIDR_2 must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_VPC_OFFERING"); v == "" { - t.Fatal("CLOUDSTACK_VPC_OFFERING must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_VPC_NETWORK_CIDR"); v == "" { - t.Fatal("CLOUDSTACK_VPC_NETWORK_CIDR must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_VPC_NETWORK_OFFERING"); v == "" { - t.Fatal("CLOUDSTACK_VPC_NETWORK_OFFERING must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PUBLIC_IPADDRESS"); v == "" { - t.Fatal("CLOUDSTACK_PUBLIC_IPADDRESS must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_SSH_PUBLIC_KEY"); v == "" { - t.Fatal("CLOUDSTACK_SSH_PUBLIC_KEY must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_TEMPLATE"); v == "" { - t.Fatal("CLOUDSTACK_TEMPLATE must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_TEMPLATE_FORMAT"); v == "" { - t.Fatal("CLOUDSTACK_TEMPLATE_FORMAT must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_TEMPLATE_URL"); v == "" { - t.Fatal("CLOUDSTACK_TEMPLATE_URL must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_TEMPLATE_OS_TYPE"); v == "" { - t.Fatal("CLOUDSTACK_TEMPLATE_OS_TYPE must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PROJECT_NAME"); v == "" { - t.Fatal("CLOUDSTACK_PROJECT_NAME must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PROJECT_NETWORK"); v == "" { - t.Fatal("CLOUDSTACK_PROJECT_NETWORK must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_ZONE"); v == "" { - t.Fatal("CLOUDSTACK_ZONE must be set for acceptance 
tests") - } - if v := os.Getenv("CLOUDSTACK_PRIVGW_GATEWAY"); v == "" { - t.Fatal("CLOUDSTACK_PRIVGW_GATEWAY must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PRIVGW_IPADDRESS"); v == "" { - t.Fatal("CLOUDSTACK_PRIVGW_IPADDRESS must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PRIVGW_NETMASK"); v == "" { - t.Fatal("CLOUDSTACK_PRIVGW_NETMASK must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_PRIVGW_VLAN"); v == "" { - t.Fatal("CLOUDSTACK_PRIVGW_VLAN must be set for acceptance tests") - } - if v := os.Getenv("CLOUDSTACK_STATIC_ROUTE_CIDR"); v == "" { - t.Fatal("CLOUDSTACK_STATIC_ROUTE_CIDR must be set for acceptance tests") - } -} - -// Name of a valid disk offering -var CLOUDSTACK_DISK_OFFERING_1 = os.Getenv("CLOUDSTACK_DISK_OFFERING_1") - -// Name of a disk offering that CLOUDSTACK_DISK_OFFERING_1 can resize to -var CLOUDSTACK_DISK_OFFERING_2 = os.Getenv("CLOUDSTACK_DISK_OFFERING_2") - -// Name of a valid service offering -var CLOUDSTACK_SERVICE_OFFERING_1 = os.Getenv("CLOUDSTACK_SERVICE_OFFERING_1") - -// Name of a service offering that CLOUDSTACK_SERVICE_OFFERING_1 can resize to -var CLOUDSTACK_SERVICE_OFFERING_2 = os.Getenv("CLOUDSTACK_SERVICE_OFFERING_2") - -// Name of a network that already exists -var CLOUDSTACK_NETWORK_1 = os.Getenv("CLOUDSTACK_NETWORK_1") - -// A valid IP address in CLOUDSTACK_NETWORK_1 -var CLOUDSTACK_NETWORK_1_IPADDRESS1 = os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS1") - -// A valid IP address in CLOUDSTACK_NETWORK_1 -var CLOUDSTACK_NETWORK_1_IPADDRESS2 = os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS2") - -// Name for a network that will be created -var CLOUDSTACK_NETWORK_2 = os.Getenv("CLOUDSTACK_NETWORK_2") - -// Any range -var CLOUDSTACK_NETWORK_2_CIDR = os.Getenv("CLOUDSTACK_NETWORK_2_CIDR") - -// Name of an available network offering with specifyvlan=false -var CLOUDSTACK_NETWORK_2_OFFERING = os.Getenv("CLOUDSTACK_NETWORK_2_OFFERING") - -// An IP address in 
CLOUDSTACK_NETWORK_2_CIDR -var CLOUDSTACK_NETWORK_2_IPADDRESS = os.Getenv("CLOUDSTACK_NETWORK_2_IPADDRESS") - -// A network that already exists and isn't CLOUDSTACK_NETWORK_1 -var CLOUDSTACK_2ND_NIC_NETWORK = os.Getenv("CLOUDSTACK_2ND_NIC_NETWORK") - -// An IP address in CLOUDSTACK_2ND_NIC_NETWORK -var CLOUDSTACK_2ND_NIC_IPADDRESS = os.Getenv("CLOUDSTACK_2ND_NIC_IPADDRESS") - -// Any range -var CLOUDSTACK_VPC_CIDR_1 = os.Getenv("CLOUDSTACK_VPC_CIDR_1") - -// Any range that doesn't overlap to CLOUDSTACK_VPC_CIDR_1, will be VPNed -var CLOUDSTACK_VPC_CIDR_2 = os.Getenv("CLOUDSTACK_VPC_CIDR_2") - -// An available VPC offering -var CLOUDSTACK_VPC_OFFERING = os.Getenv("CLOUDSTACK_VPC_OFFERING") - -// A sub-range of CLOUDSTACK_VPC_CIDR_1 with same starting point -var CLOUDSTACK_VPC_NETWORK_CIDR = os.Getenv("CLOUDSTACK_VPC_NETWORK_CIDR") - -// Name of an available network offering with forvpc=true -var CLOUDSTACK_VPC_NETWORK_OFFERING = os.Getenv("CLOUDSTACK_VPC_NETWORK_OFFERING") - -// Path to a public IP that exists for CLOUDSTACK_NETWORK_1 -var CLOUDSTACK_PUBLIC_IPADDRESS = os.Getenv("CLOUDSTACK_PUBLIC_IPADDRESS") - -// Path to a public key on local disk -var CLOUDSTACK_SSH_PUBLIC_KEY = os.Getenv("CLOUDSTACK_SSH_PUBLIC_KEY") - -// Name of a template that exists already for building VMs -var CLOUDSTACK_TEMPLATE = os.Getenv("CLOUDSTACK_TEMPLATE") - -// Details of a template that will be added -var CLOUDSTACK_TEMPLATE_FORMAT = os.Getenv("CLOUDSTACK_TEMPLATE_FORMAT") -var CLOUDSTACK_HYPERVISOR = os.Getenv("CLOUDSTACK_HYPERVISOR") -var CLOUDSTACK_TEMPLATE_URL = os.Getenv("CLOUDSTACK_TEMPLATE_URL") -var CLOUDSTACK_TEMPLATE_OS_TYPE = os.Getenv("CLOUDSTACK_TEMPLATE_OS_TYPE") - -// Name of a project that exists already -var CLOUDSTACK_PROJECT_NAME = os.Getenv("CLOUDSTACK_PROJECT_NAME") - -// Name of a network that exists already in CLOUDSTACK_PROJECT_NAME -var CLOUDSTACK_PROJECT_NETWORK = os.Getenv("CLOUDSTACK_PROJECT_NETWORK") - -// Name of a zone that exists already -var 
CLOUDSTACK_ZONE = os.Getenv("CLOUDSTACK_ZONE") - -// Details of the private gateway that will be added to VPC testing this, should be done using ROOT keys -var CLOUDSTACK_PRIVGW_GATEWAY = os.Getenv("CLOUDSTACK_PRIVGW_GATEWAY") -var CLOUDSTACK_PRIVGW_IPADDRESS = os.Getenv("CLOUDSTACK_PRIVGW_IPADDRESS") -var CLOUDSTACK_PRIVGW_NETMASK = os.Getenv("CLOUDSTACK_PRIVGW_NETMASK") -var CLOUDSTACK_PRIVGW_VLAN = os.Getenv("CLOUDSTACK_PRIVGW_VLAN") - -// Details of the static route that will be added to private gateway testing this. -var CLOUDSTACK_STATIC_ROUTE_CIDR = os.Getenv("CLOUDSTACK_STATIC_ROUTE_CIDR") diff --git a/builtin/providers/cloudstack/resource_cloudstack_affinity_group.go b/builtin/providers/cloudstack/resource_cloudstack_affinity_group.go deleted file mode 100644 index 60bfeb60c..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_affinity_group.go +++ /dev/null @@ -1,134 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackAffinityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackAffinityGroupCreate, - Read: resourceCloudStackAffinityGroupRead, - Delete: resourceCloudStackAffinityGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackAffinityGroupCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - affinityGroupType := d.Get("type").(string) - - // Create a 
new parameter struct - p := cs.AffinityGroup.NewCreateAffinityGroupParams(name, affinityGroupType) - - // Set the description - if description, ok := d.GetOk("description"); ok { - p.SetDescription(description.(string)) - } else { - p.SetDescription(name) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - log.Printf("[DEBUG] Creating affinity group %s", name) - r, err := cs.AffinityGroup.CreateAffinityGroup(p) - if err != nil { - return err - } - - log.Printf("[DEBUG] Affinity group %s successfully created", name) - d.SetId(r.Id) - - return resourceCloudStackAffinityGroupRead(d, meta) -} - -func resourceCloudStackAffinityGroupRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - log.Printf("[DEBUG] Rerieving affinity group %s", d.Get("name").(string)) - - // Get the affinity group details - ag, count, err := cs.AffinityGroup.GetAffinityGroupByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Affinity group %s does not longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - // Update the config - d.Set("name", ag.Name) - d.Set("description", ag.Description) - d.Set("type", ag.Type) - - return nil -} - -func resourceCloudStackAffinityGroupDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.AffinityGroup.NewDeleteAffinityGroupParams() - p.SetId(d.Id()) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Delete the affinity group - _, err := cs.AffinityGroup.DeleteAffinityGroup(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - 
"Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting affinity group: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_affinity_group_test.go b/builtin/providers/cloudstack/resource_cloudstack_affinity_group_test.go deleted file mode 100644 index dda3ffef8..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_affinity_group_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackAffinityGroup_basic(t *testing.T) { - var affinityGroup cloudstack.AffinityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackAffinityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackAffinityGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackAffinityGroupExists("cloudstack_affinity_group.foo", &affinityGroup), - testAccCheckCloudStackAffinityGroupAttributes(&affinityGroup), - ), - }, - }, - }) -} - -func testAccCheckCloudStackAffinityGroupExists( - n string, affinityGroup *cloudstack.AffinityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No affinity group ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - ag, _, err := cs.AffinityGroup.GetAffinityGroupByID(rs.Primary.ID) - - if err != nil { - return err - } - - if ag.Id != rs.Primary.ID { - return fmt.Errorf("Affinity group not found") - } - - *affinityGroup = *ag - - return nil - } -} - -func 
testAccCheckCloudStackAffinityGroupAttributes( - affinityGroup *cloudstack.AffinityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if affinityGroup.Name != "terraform-affinity-group" { - return fmt.Errorf("Bad name: %s", affinityGroup.Name) - } - - if affinityGroup.Description != "terraform-affinity-group" { - return fmt.Errorf("Bad description: %s", affinityGroup.Description) - } - - if affinityGroup.Type != "host anti-affinity" { - return fmt.Errorf("Bad type: %s", affinityGroup.Type) - } - - return nil - } -} - -func testAccCheckCloudStackAffinityGroupDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_affinity_group" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No affinity group ID is set") - } - - _, _, err := cs.AffinityGroup.GetAffinityGroupByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Affinity group %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackAffinityGroup = fmt.Sprintf(` -resource "cloudstack_affinity_group" "foo" { - name = "terraform-affinity-group" - type = "host anti-affinity" -}`) diff --git a/builtin/providers/cloudstack/resource_cloudstack_disk.go b/builtin/providers/cloudstack/resource_cloudstack_disk.go deleted file mode 100644 index f9d472e30..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_disk.go +++ /dev/null @@ -1,372 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackDiskCreate, - Read: resourceCloudStackDiskRead, - Update: resourceCloudStackDiskUpdate, - Delete: resourceCloudStackDiskDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - 
Required: true, - ForceNew: true, - }, - - "attach": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "device_id": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "disk_offering": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "shrink_ok": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "virtual_machine_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - d.Partial(true) - - name := d.Get("name").(string) - - // Create a new parameter struct - p := cs.Volume.NewCreateVolumeParams() - p.SetName(name) - - // Retrieve the disk_offering ID - diskofferingid, e := retrieveID(cs, "disk_offering", d.Get("disk_offering").(string)) - if e != nil { - return e.Error() - } - // Set the disk_offering ID - p.SetDiskofferingid(diskofferingid) - - if d.Get("size").(int) != 0 { - // Set the volume size - p.SetSize(int64(d.Get("size").(int))) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Retrieve the zone ID - zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string)) - if e != nil { - return e.Error() - } - // Set the zone ID - p.SetZoneid(zoneid) - - // Create the new volume - r, err := cs.Volume.CreateVolume(p) - if err != nil { - return fmt.Errorf("Error creating the new disk %s: %s", name, err) - } - - // Set the volume ID and partials - d.SetId(r.Id) - d.SetPartial("name") - 
d.SetPartial("device_id") - d.SetPartial("disk_offering") - d.SetPartial("size") - d.SetPartial("virtual_machine_id") - d.SetPartial("project") - d.SetPartial("zone") - - if d.Get("attach").(bool) { - err := resourceCloudStackDiskAttach(d, meta) - if err != nil { - return fmt.Errorf("Error attaching the new disk %s to virtual machine: %s", name, err) - } - - // Set the additional partial - d.SetPartial("attach") - } - - d.Partial(false) - return resourceCloudStackDiskRead(d, meta) -} - -func resourceCloudStackDiskRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the volume details - v, count, err := cs.Volume.GetVolumeByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - d.SetId("") - return nil - } - - return err - } - - d.Set("name", v.Name) - d.Set("attach", v.Attached != "") // If attached this contains a timestamp when attached - d.Set("size", int(v.Size/(1024*1024*1024))) // Needed to get GB's again - - setValueOrID(d, "disk_offering", v.Diskofferingname, v.Diskofferingid) - setValueOrID(d, "project", v.Project, v.Projectid) - setValueOrID(d, "zone", v.Zonename, v.Zoneid) - - if v.Attached != "" { - d.Set("device_id", int(v.Deviceid)) - d.Set("virtual_machine_id", v.Virtualmachineid) - } - - return nil -} - -func resourceCloudStackDiskUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - d.Partial(true) - - name := d.Get("name").(string) - - if d.HasChange("disk_offering") || d.HasChange("size") { - // Detach the volume (re-attach is done at the end of this function) - if err := resourceCloudStackDiskDetach(d, meta); err != nil { - return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err) - } - - // Create a new parameter struct - p := cs.Volume.NewResizeVolumeParams(d.Id()) - - // Retrieve the disk_offering ID - diskofferingid, e := retrieveID(cs, "disk_offering", 
d.Get("disk_offering").(string)) - if e != nil { - return e.Error() - } - - // Set the disk_offering ID - p.SetDiskofferingid(diskofferingid) - - if d.Get("size").(int) != 0 { - // Set the size - p.SetSize(int64(d.Get("size").(int))) - } - - // Set the shrink bit - p.SetShrinkok(d.Get("shrink_ok").(bool)) - - // Change the disk_offering - r, err := cs.Volume.ResizeVolume(p) - if err != nil { - return fmt.Errorf("Error changing disk offering/size for disk %s: %s", name, err) - } - - // Update the volume ID and set partials - d.SetId(r.Id) - d.SetPartial("disk_offering") - d.SetPartial("size") - } - - // If the device ID changed, just detach here so we can re-attach the - // volume at the end of this function - if d.HasChange("device_id") || d.HasChange("virtual_machine") { - // Detach the volume - if err := resourceCloudStackDiskDetach(d, meta); err != nil { - return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err) - } - } - - if d.Get("attach").(bool) { - // Attach the volume - err := resourceCloudStackDiskAttach(d, meta) - if err != nil { - return fmt.Errorf("Error attaching disk %s to virtual machine: %s", name, err) - } - - // Set the additional partials - d.SetPartial("attach") - d.SetPartial("device_id") - d.SetPartial("virtual_machine_id") - } else { - // Detach the volume - if err := resourceCloudStackDiskDetach(d, meta); err != nil { - return fmt.Errorf("Error detaching disk %s from virtual machine: %s", name, err) - } - } - - d.Partial(false) - return resourceCloudStackDiskRead(d, meta) -} - -func resourceCloudStackDiskDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Detach the volume - if err := resourceCloudStackDiskDetach(d, meta); err != nil { - return err - } - - // Create a new parameter struct - p := cs.Volume.NewDeleteVolumeParams(d.Id()) - - // Delete the voluem - if _, err := cs.Volume.DeleteVolume(p); err != nil { - // This is a very poor way to be told the ID 
does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return err - } - - return nil -} - -func resourceCloudStackDiskAttach(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - if virtualmachineid, ok := d.GetOk("virtual_machine_id"); ok { - // First check if the disk isn't already attached - if attached, err := isAttached(d, meta); err != nil || attached { - return err - } - - // Create a new parameter struct - p := cs.Volume.NewAttachVolumeParams(d.Id(), virtualmachineid.(string)) - - if deviceid, ok := d.GetOk("device_id"); ok { - p.SetDeviceid(int64(deviceid.(int))) - } - - // Attach the new volume - r, err := Retry(10, retryableAttachVolumeFunc(cs, p)) - if err != nil { - return fmt.Errorf("Error attaching volume to VM: %s", err) - } - - d.SetId(r.(*cloudstack.AttachVolumeResponse).Id) - } - - return nil -} - -func resourceCloudStackDiskDetach(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Check if the volume is actually attached, before detaching - if attached, err := isAttached(d, meta); err != nil || !attached { - return err - } - - // Create a new parameter struct - p := cs.Volume.NewDetachVolumeParams() - - // Set the volume ID - p.SetId(d.Id()) - - // Detach the currently attached volume - _, err := cs.Volume.DetachVolume(p) - if err != nil { - if virtualmachineid, ok := d.GetOk("virtual_machine_id"); ok { - // Create a new parameter struct - pd := cs.VirtualMachine.NewStopVirtualMachineParams(virtualmachineid.(string)) - - // Stop the virtual machine in order to be able to detach the disk - if _, err := cs.VirtualMachine.StopVirtualMachine(pd); err != nil { - return err - } - - // Try again to detach the currently attached volume - if _, err := cs.Volume.DetachVolume(p); err != nil { - return err - } - - // 
Create a new parameter struct - pu := cs.VirtualMachine.NewStartVirtualMachineParams(virtualmachineid.(string)) - - // Start the virtual machine again - if _, err := cs.VirtualMachine.StartVirtualMachine(pu); err != nil { - return err - } - } - } - - return err -} - -func isAttached(d *schema.ResourceData, meta interface{}) (bool, error) { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the volume details - v, _, err := cs.Volume.GetVolumeByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - return false, err - } - - return v.Attached != "", nil -} - -func retryableAttachVolumeFunc( - cs *cloudstack.CloudStackClient, - p *cloudstack.AttachVolumeParams) func() (interface{}, error) { - return func() (interface{}, error) { - r, err := cs.Volume.AttachVolume(p) - if err != nil { - return nil, err - } - return r, nil - } -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_disk_test.go b/builtin/providers/cloudstack/resource_cloudstack_disk_test.go deleted file mode 100644 index 2484534fd..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_disk_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackDisk_basic(t *testing.T) { - var disk cloudstack.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackDisk_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackDiskExists( - "cloudstack_disk.foo", &disk), - testAccCheckCloudStackDiskAttributes(&disk), - ), - }, - }, - }) -} - -func TestAccCloudStackDisk_deviceID(t *testing.T) { - var disk cloudstack.Volume - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackDisk_deviceID, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackDiskExists( - "cloudstack_disk.foo", &disk), - testAccCheckCloudStackDiskAttributes(&disk), - resource.TestCheckResourceAttr( - "cloudstack_disk.foo", "device_id", "4"), - ), - }, - }, - }) -} - -func TestAccCloudStackDisk_update(t *testing.T) { - var disk cloudstack.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackDisk_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackDiskExists( - "cloudstack_disk.foo", &disk), - testAccCheckCloudStackDiskAttributes(&disk), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackDisk_resize, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackDiskExists( - "cloudstack_disk.foo", &disk), - testAccCheckCloudStackDiskResized(&disk), - resource.TestCheckResourceAttr( - "cloudstack_disk.foo", "disk_offering", CLOUDSTACK_DISK_OFFERING_2), - ), - }, - }, - }) -} - -func testAccCheckCloudStackDiskExists( - n string, disk *cloudstack.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No disk ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - volume, _, err := cs.Volume.GetVolumeByID(rs.Primary.ID) - - if err != nil { - return err - } - - if volume.Id != rs.Primary.ID { - return fmt.Errorf("Disk not found") - } - - *disk = *volume - - return nil - } -} - -func testAccCheckCloudStackDiskAttributes( - disk 
*cloudstack.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if disk.Name != "terraform-disk" { - return fmt.Errorf("Bad name: %s", disk.Name) - } - - if disk.Diskofferingname != CLOUDSTACK_DISK_OFFERING_1 { - return fmt.Errorf("Bad disk offering: %s", disk.Diskofferingname) - } - - return nil - } -} - -func testAccCheckCloudStackDiskResized( - disk *cloudstack.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if disk.Diskofferingname != CLOUDSTACK_DISK_OFFERING_2 { - return fmt.Errorf("Bad disk offering: %s", disk.Diskofferingname) - } - - return nil - } -} - -func testAccCheckCloudStackDiskDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_disk" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No disk ID is set") - } - - _, _, err := cs.Volume.GetVolumeByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Disk %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackDisk_basic = fmt.Sprintf(` -resource "cloudstack_disk" "foo" { - name = "terraform-disk" - attach = false - disk_offering = "%s" - zone = "%s" -}`, - CLOUDSTACK_DISK_OFFERING_1, - CLOUDSTACK_ZONE) - -var testAccCloudStackDisk_deviceID = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_disk" "foo" { - name = "terraform-disk" - attach = true - device_id = 4 - disk_offering = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" - zone = "${cloudstack_instance.foobar.zone}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_DISK_OFFERING_1) - -var testAccCloudStackDisk_update = fmt.Sprintf(` -resource 
"cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_disk" "foo" { - name = "terraform-disk" - attach = true - disk_offering = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" - zone = "${cloudstack_instance.foobar.zone}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_DISK_OFFERING_1) - -var testAccCloudStackDisk_resize = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_disk" "foo" { - name = "terraform-disk" - attach = true - disk_offering = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" - zone = "${cloudstack_instance.foobar.zone}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_DISK_OFFERING_2) diff --git a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go deleted file mode 100644 index cd76d10b7..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go +++ /dev/null @@ -1,546 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackEgressFirewall() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackEgressFirewallCreate, - Read: resourceCloudStackEgressFirewallRead, - Update: resourceCloudStackEgressFirewallUpdate, - Delete: resourceCloudStackEgressFirewallDelete, - - Schema: map[string]*schema.Schema{ - "network_id": &schema.Schema{ 
- Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "managed": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_list": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "icmp_code": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "uuids": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - }, - }, - - "parallelism": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceCloudStackEgressFirewallCreate(d *schema.ResourceData, meta interface{}) error { - // Make sure all required parameters are there - if err := verifyEgressFirewallParams(d); err != nil { - return err - } - - // We need to set this upfront in order to be able to save a partial state - d.SetId(d.Get("network_id").(string)) - - // Create all rules that are configured - if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 { - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - err := createEgressFirewallRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return resourceCloudStackEgressFirewallRead(d, meta) -} - -func createEgressFirewallRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, 
nrs *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(nrs.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range nrs.List() { - // Put in a tiny sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Create a single rule - err := createEgressFirewallRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} -func createEgressFirewallRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - // Make sure all required rule parameters are there - if err := verifyEgressFirewallRuleParams(d, rule); err != nil { - return err - } - - // Create a new parameter struct - p := cs.Firewall.NewCreateEgressFirewallRuleParams(d.Id(), rule["protocol"].(string)) - - // Set the CIDR list - var cidrList []string - for _, cidr := range rule["cidr_list"].(*schema.Set).List() { - cidrList = append(cidrList, cidr.(string)) - } - p.SetCidrlist(cidrList) - - // If the protocol is ICMP set the needed ICMP parameters - if rule["protocol"].(string) == "icmp" { - p.SetIcmptype(rule["icmp_type"].(int)) - p.SetIcmpcode(rule["icmp_code"].(int)) - - r, err := cs.Firewall.CreateEgressFirewallRule(p) - if err != nil { - return err - } - uuids["icmp"] = r.Id - rule["uuids"] = uuids - } - - // If protocol is not ICMP, loop through all ports - if rule["protocol"].(string) != "icmp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all processed ports - ports := &schema.Set{F: schema.HashString} - - for _, port 
:= range ps.List() { - if _, ok := uuids[port.(string)]; ok { - ports.Add(port) - rule["ports"] = ports - continue - } - - m := splitPorts.FindStringSubmatch(port.(string)) - - startPort, err := strconv.Atoi(m[1]) - if err != nil { - return err - } - - endPort := startPort - if m[2] != "" { - endPort, err = strconv.Atoi(m[2]) - if err != nil { - return err - } - } - - p.SetStartport(startPort) - p.SetEndport(endPort) - - r, err := cs.Firewall.CreateEgressFirewallRule(p) - if err != nil { - return err - } - - ports.Add(port) - rule["ports"] = ports - - uuids[port.(string)] = r.Id - rule["uuids"] = uuids - } - } - } - - return nil -} - -func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get all the rules from the running environment - p := cs.Firewall.NewListEgressFirewallRulesParams() - p.SetNetworkid(d.Id()) - p.SetListall(true) - - l, err := cs.Firewall.ListEgressFirewallRules(p) - if err != nil { - return err - } - - // Make a map of all the rules so we can easily find a rule - ruleMap := make(map[string]*cloudstack.EgressFirewallRule, l.Count) - for _, r := range l.EgressFirewallRules { - ruleMap[r.Id] = r - } - - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - // Read all rules that are configured - if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 { - for _, rule := range rs.List() { - rule := rule.(map[string]interface{}) - uuids := rule["uuids"].(map[string]interface{}) - - if rule["protocol"].(string) == "icmp" { - id, ok := uuids["icmp"] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, "icmp") - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range 
strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["protocol"] = r.Protocol - rule["icmp_type"] = r.Icmptype - rule["icmp_code"] = r.Icmpcode - rule["cidr_list"] = cidrs - rules.Add(rule) - } - - // If protocol is not ICMP, loop through all ports - if rule["protocol"].(string) != "icmp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all ports - ports := &schema.Set{F: schema.HashString} - - // Loop through all ports and retrieve their info - for _, port := range ps.List() { - id, ok := uuids[port.(string)] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, port.(string)) - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["protocol"] = r.Protocol - rule["cidr_list"] = cidrs - ports.Add(port) - } - - // If there is at least one port found, add this rule to the rules set - if ports.Len() > 0 { - rule["ports"] = ports - rules.Add(rule) - } - } - } - } - } - - // If this is a managed firewall, add all unknown rules into a single dummy rule - managed := d.Get("managed").(bool) - if managed && len(ruleMap) > 0 { - for uuid := range ruleMap { - // We need to create and add a dummy value to a schema.Set as the - // cidr_list is a required field and thus needs a value - cidrs := &schema.Set{F: schema.HashString} - cidrs.Add(uuid) - - // Make a dummy rule to hold the unknown UUID - rule := map[string]interface{}{ - "cidr_list": uuid, - "protocol": uuid, - "uuids": map[string]interface{}{uuid: uuid}, - } - - // Add the dummy rule to the rules set - rules.Add(rule) - } - } - - if rules.Len() > 0 { - d.Set("rule", rules) - } else if !managed { - d.SetId("") - } - - return nil -} - 
-func resourceCloudStackEgressFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - // Make sure all required parameters are there - if err := verifyEgressFirewallParams(d); err != nil { - return err - } - - // Check if the rule set as a whole has changed - if d.HasChange("rule") { - o, n := d.GetChange("rule") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // We need to start with a rule set containing all the rules we - // already have and want to keep. Any rules that are not deleted - // correctly and any newly created rules, will be added to this - // set to make sure we end up in a consistent state - rules := o.(*schema.Set).Intersection(n.(*schema.Set)) - - // First loop through all the old rules and delete them - if ors.Len() > 0 { - err := deleteEgressFirewallRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - // Then loop through all the new rules and create them - if nrs.Len() > 0 { - err := createEgressFirewallRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - } - - return resourceCloudStackEgressFirewallRead(d, meta) -} - -func resourceCloudStackEgressFirewallDelete(d *schema.ResourceData, meta interface{}) error { - // Create an empty rule set to hold all rules that where - // not deleted correctly - rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - // Delete all rules - if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 { - err := deleteEgressFirewallRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return nil -} - -func deleteEgressFirewallRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, ors 
*schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(ors.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range ors.List() { - // Put a sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Delete a single rule - err := deleteEgressFirewallRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func deleteEgressFirewallRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - for k, id := range uuids { - // We don't care about the count here, so just continue - if k == "%" { - continue - } - - // Create the parameter struct - p := cs.Firewall.NewDeleteEgressFirewallRuleParams(id.(string)) - - // Delete the rule - if _, err := cs.Firewall.DeleteEgressFirewallRule(p); err != nil { - - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", id.(string))) { - delete(uuids, k) - continue - } - - return err - } - - // Delete the UUID of this rule - delete(uuids, k) - rule["uuids"] = uuids - } - - return nil -} - -func verifyEgressFirewallParams(d *schema.ResourceData) error { - managed := d.Get("managed").(bool) - _, rules := d.GetOk("rule") - - if !rules && !managed { - return fmt.Errorf( - "You must supply at least one 'rule' when not using the 'managed' firewall feature") - } - - return nil -} - -func verifyEgressFirewallRuleParams(d *schema.ResourceData, 
rule map[string]interface{}) error { - protocol := rule["protocol"].(string) - if protocol != "tcp" && protocol != "udp" && protocol != "icmp" { - return fmt.Errorf( - "%q is not a valid protocol. Valid options are 'tcp', 'udp' and 'icmp'", protocol) - } - - if protocol == "icmp" { - if _, ok := rule["icmp_type"]; !ok { - return fmt.Errorf( - "Parameter icmp_type is a required parameter when using protocol 'icmp'") - } - if _, ok := rule["icmp_code"]; !ok { - return fmt.Errorf( - "Parameter icmp_code is a required parameter when using protocol 'icmp'") - } - } else { - if ports, ok := rule["ports"].(*schema.Set); ok { - for _, port := range ports.List() { - m := splitPorts.FindStringSubmatch(port.(string)) - if m == nil { - return fmt.Errorf( - "%q is not a valid port value. Valid options are '80' or '80-90'", port.(string)) - } - } - } else { - return fmt.Errorf( - "Parameter ports is a required parameter when *not* using protocol 'icmp'") - } - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go deleted file mode 100644 index 67e6a4b09..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackEgressFirewall_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackEgressFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackEgressFirewall_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackEgressFirewallRulesExist("cloudstack_egress_firewall.foo"), - 
resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "network_id", CLOUDSTACK_NETWORK_1), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", - "rule.2905891128.cidr_list.3378711023", - CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.2905891128.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.2905891128.ports.32925333", "8080"), - ), - }, - }, - }) -} - -func TestAccCloudStackEgressFirewall_update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackEgressFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackEgressFirewall_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackEgressFirewallRulesExist("cloudstack_egress_firewall.foo"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "network_id", CLOUDSTACK_NETWORK_1), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", - "rule.2905891128.cidr_list.3378711023", - CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.2905891128.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.2905891128.ports.32925333", "8080"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackEgressFirewall_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackEgressFirewallRulesExist("cloudstack_egress_firewall.foo"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "network_id", CLOUDSTACK_NETWORK_1), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", 
"rule.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", - "rule.3593527682.cidr_list.1910468234", - CLOUDSTACK_NETWORK_1_IPADDRESS2+"/32"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", - "rule.3593527682.cidr_list.3378711023", - CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.3593527682.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.3593527682.ports.32925333", "8080"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", - "rule.739924765.cidr_list.3378711023", - CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.739924765.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_egress_firewall.foo", "rule.739924765.ports.1889509032", "80"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackEgressFirewallRulesExist(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No firewall ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - _, count, err := cs.Firewall.GetEgressFirewallRuleByID(id) - - if err != nil { - return err - } - - if count == 0 { - return fmt.Errorf("Firewall rule for %s not found", k) - } - } - - return nil - } -} - -func testAccCheckCloudStackEgressFirewallDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_egress_firewall" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - for k, id := 
range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - _, _, err := cs.Firewall.GetEgressFirewallRuleByID(id) - if err == nil { - return fmt.Errorf("Egress rule %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackEgressFirewall_basic = fmt.Sprintf(` -resource "cloudstack_egress_firewall" "foo" { - network_id = "%s" - - rule { - cidr_list = ["%s/32"] - protocol = "tcp" - ports = ["8080"] - } -}`, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_NETWORK_1_IPADDRESS1) - -var testAccCloudStackEgressFirewall_update = fmt.Sprintf(` -resource "cloudstack_egress_firewall" "foo" { - network_id = "%s" - - rule { - cidr_list = ["%s/32", "%s/32"] - protocol = "tcp" - ports = ["8080"] - } - - rule { - cidr_list = ["%s/32"] - protocol = "tcp" - ports = ["80", "1000-2000"] - } -}`, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_NETWORK_1_IPADDRESS1, - CLOUDSTACK_NETWORK_1_IPADDRESS2, - CLOUDSTACK_NETWORK_1_IPADDRESS1) diff --git a/builtin/providers/cloudstack/resource_cloudstack_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_firewall.go deleted file mode 100644 index 4d4626a9c..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_firewall.go +++ /dev/null @@ -1,547 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackFirewall() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackFirewallCreate, - Read: resourceCloudStackFirewallRead, - Update: resourceCloudStackFirewallUpdate, - Delete: resourceCloudStackFirewallDelete, - - Schema: map[string]*schema.Schema{ - "ip_address_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "managed": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - 
}, - - "rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_list": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "icmp_code": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "uuids": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - }, - }, - - "parallelism": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceCloudStackFirewallCreate(d *schema.ResourceData, meta interface{}) error { - // Make sure all required parameters are there - if err := verifyFirewallParams(d); err != nil { - return err - } - - // We need to set this upfront in order to be able to save a partial state - d.SetId(d.Get("ip_address_id").(string)) - - // Create all rules that are configured - if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 { - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - err := createFirewallRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return resourceCloudStackFirewallRead(d, meta) -} -func createFirewallRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, nrs *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(nrs.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range nrs.List() { - 
// Put in a tiny sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Create a single rule - err := createFirewallRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func createFirewallRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - // Make sure all required rule parameters are there - if err := verifyFirewallRuleParams(d, rule); err != nil { - return err - } - - // Create a new parameter struct - p := cs.Firewall.NewCreateFirewallRuleParams(d.Id(), rule["protocol"].(string)) - - // Set the CIDR list - var cidrList []string - for _, cidr := range rule["cidr_list"].(*schema.Set).List() { - cidrList = append(cidrList, cidr.(string)) - } - p.SetCidrlist(cidrList) - - // If the protocol is ICMP set the needed ICMP parameters - if rule["protocol"].(string) == "icmp" { - p.SetIcmptype(rule["icmp_type"].(int)) - p.SetIcmpcode(rule["icmp_code"].(int)) - - r, err := cs.Firewall.CreateFirewallRule(p) - if err != nil { - return err - } - - uuids["icmp"] = r.Id - rule["uuids"] = uuids - } - - // If protocol is not ICMP, loop through all ports - if rule["protocol"].(string) != "icmp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all processed ports - ports := &schema.Set{F: schema.HashString} - - for _, port := range ps.List() { - if _, ok := uuids[port.(string)]; ok { - ports.Add(port) - rule["ports"] = ports - continue - } - - m := splitPorts.FindStringSubmatch(port.(string)) - - startPort, err := strconv.Atoi(m[1]) - if err 
!= nil { - return err - } - - endPort := startPort - if m[2] != "" { - endPort, err = strconv.Atoi(m[2]) - if err != nil { - return err - } - } - - p.SetStartport(startPort) - p.SetEndport(endPort) - - r, err := cs.Firewall.CreateFirewallRule(p) - if err != nil { - return err - } - - ports.Add(port) - rule["ports"] = ports - - uuids[port.(string)] = r.Id - rule["uuids"] = uuids - } - } - } - - return nil -} - -func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get all the rules from the running environment - p := cs.Firewall.NewListFirewallRulesParams() - p.SetIpaddressid(d.Id()) - p.SetListall(true) - - l, err := cs.Firewall.ListFirewallRules(p) - if err != nil { - return err - } - - // Make a map of all the rules so we can easily find a rule - ruleMap := make(map[string]*cloudstack.FirewallRule, l.Count) - for _, r := range l.FirewallRules { - ruleMap[r.Id] = r - } - - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - // Read all rules that are configured - if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 { - for _, rule := range rs.List() { - rule := rule.(map[string]interface{}) - uuids := rule["uuids"].(map[string]interface{}) - - if rule["protocol"].(string) == "icmp" { - id, ok := uuids["icmp"] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, "icmp") - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["protocol"] = r.Protocol - rule["icmp_type"] = r.Icmptype - rule["icmp_code"] = r.Icmpcode - rule["cidr_list"] = cidrs - rules.Add(rule) - } - - // If protocol is not ICMP, loop 
through all ports - if rule["protocol"].(string) != "icmp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all ports - ports := &schema.Set{F: schema.HashString} - - // Loop through all ports and retrieve their info - for _, port := range ps.List() { - id, ok := uuids[port.(string)] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, port.(string)) - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["protocol"] = r.Protocol - rule["cidr_list"] = cidrs - ports.Add(port) - } - - // If there is at least one port found, add this rule to the rules set - if ports.Len() > 0 { - rule["ports"] = ports - rules.Add(rule) - } - } - } - } - } - - // If this is a managed firewall, add all unknown rules into a single dummy rule - managed := d.Get("managed").(bool) - if managed && len(ruleMap) > 0 { - for uuid := range ruleMap { - // We need to create and add a dummy value to a schema.Set as the - // cidr_list is a required field and thus needs a value - cidrs := &schema.Set{F: schema.HashString} - cidrs.Add(uuid) - - // Make a dummy rule to hold the unknown UUID - rule := map[string]interface{}{ - "cidr_list": cidrs, - "protocol": uuid, - "uuids": map[string]interface{}{uuid: uuid}, - } - - // Add the dummy rule to the rules set - rules.Add(rule) - } - } - - if rules.Len() > 0 { - d.Set("rule", rules) - } else if !managed { - d.SetId("") - } - - return nil -} - -func resourceCloudStackFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - // Make sure all required parameters are there - if err := verifyFirewallParams(d); err != nil { - return err - } - - // Check if the rule set as a whole has changed - if 
d.HasChange("rule") { - o, n := d.GetChange("rule") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // We need to start with a rule set containing all the rules we - // already have and want to keep. Any rules that are not deleted - // correctly and any newly created rules, will be added to this - // set to make sure we end up in a consistent state - rules := o.(*schema.Set).Intersection(n.(*schema.Set)) - - // First loop through all the old rules and delete them - if ors.Len() > 0 { - err := deleteFirewallRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - // Then loop through all the new rules and create them - if nrs.Len() > 0 { - err := createFirewallRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - } - - return resourceCloudStackFirewallRead(d, meta) -} - -func resourceCloudStackFirewallDelete(d *schema.ResourceData, meta interface{}) error { - // Create an empty rule set to hold all rules that where - // not deleted correctly - rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set) - - // Delete all rules - if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 { - err := deleteFirewallRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return nil -} - -func deleteFirewallRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, ors *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(ors.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range ors.List() { - // Put a sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - 
defer wg.Done() - sem <- struct{}{} - - // Delete a single rule - err := deleteFirewallRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func deleteFirewallRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - for k, id := range uuids { - // We don't care about the count here, so just continue - if k == "%" { - continue - } - - // Create the parameter struct - p := cs.Firewall.NewDeleteFirewallRuleParams(id.(string)) - - // Delete the rule - if _, err := cs.Firewall.DeleteFirewallRule(p); err != nil { - - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", id.(string))) { - delete(uuids, k) - continue - } - - return err - } - - // Delete the UUID of this rule - delete(uuids, k) - rule["uuids"] = uuids - } - - return nil -} - -func verifyFirewallParams(d *schema.ResourceData) error { - managed := d.Get("managed").(bool) - _, rules := d.GetOk("rule") - - if !rules && !managed { - return fmt.Errorf( - "You must supply at least one 'rule' when not using the 'managed' firewall feature") - } - - return nil -} - -func verifyFirewallRuleParams(d *schema.ResourceData, rule map[string]interface{}) error { - protocol := rule["protocol"].(string) - if protocol != "tcp" && protocol != "udp" && protocol != "icmp" { - return fmt.Errorf( - "%q is not a valid protocol. 
Valid options are 'tcp', 'udp' and 'icmp'", protocol) - } - - if protocol == "icmp" { - if _, ok := rule["icmp_type"]; !ok { - return fmt.Errorf( - "Parameter icmp_type is a required parameter when using protocol 'icmp'") - } - if _, ok := rule["icmp_code"]; !ok { - return fmt.Errorf( - "Parameter icmp_code is a required parameter when using protocol 'icmp'") - } - } else { - if ports, ok := rule["ports"].(*schema.Set); ok { - for _, port := range ports.List() { - m := splitPorts.FindStringSubmatch(port.(string)) - if m == nil { - return fmt.Errorf( - "%q is not a valid port value. Valid options are '80' or '80-90'", port.(string)) - } - } - } else { - return fmt.Errorf( - "Parameter ports is a required parameter when *not* using protocol 'icmp'") - } - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go b/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go deleted file mode 100644 index aa2c99bd5..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackFirewall_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackFirewall_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackFirewallRulesExist("cloudstack_firewall.foo"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", 
"rule.2263505090.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.2263505090.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.2263505090.ports.32925333", "8080"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1209010669", "1000-2000"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1889509032", "80"), - ), - }, - }, - }) -} - -func TestAccCloudStackFirewall_update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackFirewall_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackFirewallRulesExist("cloudstack_firewall.foo"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.2263505090.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.2263505090.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.2263505090.ports.32925333", "8080"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1209010669", "1000-2000"), - 
resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1889509032", "80"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackFirewall_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackFirewallRulesExist("cloudstack_firewall.foo"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.#", "3"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3529885171.cidr_list.80081744", "10.0.1.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3529885171.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3529885171.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3529885171.ports.32925333", "8080"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1209010669", "1000-2000"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.3782201428.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.4160426500.cidr_list.2835005819", "172.16.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.4160426500.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.4160426500.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_firewall.foo", "rule.4160426500.ports.3638101695", "443"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackFirewallRulesExist(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - 
return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No firewall ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - _, count, err := cs.Firewall.GetFirewallRuleByID(id) - - if err != nil { - return err - } - - if count == 0 { - return fmt.Errorf("Firewall rule for %s not found", k) - } - } - - return nil - } -} - -func testAccCheckCloudStackFirewallDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_firewall" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - _, _, err := cs.Firewall.GetFirewallRuleByID(id) - if err == nil { - return fmt.Errorf("Firewall rule %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackFirewall_basic = fmt.Sprintf(` -resource "cloudstack_firewall" "foo" { - ip_address_id = "%s" - - rule { - cidr_list = ["10.0.0.0/24"] - protocol = "tcp" - ports = ["8080"] - } - - rule { - cidr_list = ["10.0.0.0/24"] - protocol = "tcp" - ports = ["80", "1000-2000"] - } -}`, CLOUDSTACK_PUBLIC_IPADDRESS) - -var testAccCloudStackFirewall_update = fmt.Sprintf(` -resource "cloudstack_firewall" "foo" { - ip_address_id = "%s" - - rule { - cidr_list = ["10.0.0.0/24", "10.0.1.0/24"] - protocol = "tcp" - ports = ["8080"] - } - - rule { - cidr_list = ["10.0.0.0/24"] - protocol = "tcp" - ports = ["80", "1000-2000"] - } - - rule { - cidr_list = ["172.16.100.0/24"] - protocol = "tcp" - ports = ["80", "443"] - } -}`, CLOUDSTACK_PUBLIC_IPADDRESS) diff --git a/builtin/providers/cloudstack/resource_cloudstack_instance.go 
b/builtin/providers/cloudstack/resource_cloudstack_instance.go deleted file mode 100644 index f0fc90647..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_instance.go +++ /dev/null @@ -1,584 +0,0 @@ -package cloudstack - -import ( - "crypto/sha1" - "encoding/base64" - "encoding/hex" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackInstanceCreate, - Read: resourceCloudStackInstanceRead, - Update: resourceCloudStackInstanceUpdate, - Delete: resourceCloudStackInstanceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "display_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "service_offering": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "template": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "root_disk_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "affinity_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"affinity_group_names"}, - }, - - "affinity_group_names": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"affinity_group_ids"}, - }, - - 
"security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"security_group_names"}, - }, - - "security_group_names": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"security_group_ids"}, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "keypair": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - - "expunge": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - } -} - -func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Retrieve the service_offering ID - serviceofferingid, e := retrieveID(cs, "service_offering", d.Get("service_offering").(string)) - if e != nil { - return e.Error() - } - - // Retrieve the zone ID - zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string)) - if e != nil { - return e.Error() - } - - // Retrieve the zone object - zone, _, err := cs.Zone.GetZoneByID(zoneid) - if err != nil { - return err - } - - // Retrieve the template ID - templateid, e := retrieveTemplateID(cs, zone.Id, d.Get("template").(string)) - if e != nil { - return e.Error() - } - - // Create a new parameter struct - p := cs.VirtualMachine.NewDeployVirtualMachineParams(serviceofferingid, templateid, zone.Id) - - // Set the name - name, 
hasName := d.GetOk("name") - if hasName { - p.SetName(name.(string)) - } - - // Set the display name - if displayname, ok := d.GetOk("display_name"); ok { - p.SetDisplayname(displayname.(string)) - } else if hasName { - p.SetDisplayname(name.(string)) - } - - // If there is a root_disk_size supplied, add it to the parameter struct - if rootdisksize, ok := d.GetOk("root_disk_size"); ok { - p.SetRootdisksize(int64(rootdisksize.(int))) - } - - if zone.Networktype == "Advanced" { - // Set the default network ID - p.SetNetworkids([]string{d.Get("network_id").(string)}) - } - - // If there is a ipaddres supplied, add it to the parameter struct - if ipaddress, ok := d.GetOk("ip_address"); ok { - p.SetIpaddress(ipaddress.(string)) - } - - // If there is a group supplied, add it to the parameter struct - if group, ok := d.GetOk("group"); ok { - p.SetGroup(group.(string)) - } - - // If there are affinity group IDs supplied, add them to the parameter struct - if agIDs := d.Get("affinity_group_ids").(*schema.Set); agIDs.Len() > 0 { - var groups []string - for _, group := range agIDs.List() { - groups = append(groups, group.(string)) - } - p.SetAffinitygroupids(groups) - } - - // If there are affinity group names supplied, add them to the parameter struct - if agNames := d.Get("affinity_group_names").(*schema.Set); agNames.Len() > 0 { - var groups []string - for _, group := range agNames.List() { - groups = append(groups, group.(string)) - } - p.SetAffinitygroupnames(groups) - } - - // If there are security group IDs supplied, add them to the parameter struct - if sgIDs := d.Get("security_group_ids").(*schema.Set); sgIDs.Len() > 0 { - var groups []string - for _, group := range sgIDs.List() { - groups = append(groups, group.(string)) - } - p.SetSecuritygroupids(groups) - } - - // If there are security group names supplied, add them to the parameter struct - if sgNames := d.Get("security_group_names").(*schema.Set); sgNames.Len() > 0 { - var groups []string - for _, group := 
range sgNames.List() { - groups = append(groups, group.(string)) - } - p.SetSecuritygroupnames(groups) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // If a keypair is supplied, add it to the parameter struct - if keypair, ok := d.GetOk("keypair"); ok { - p.SetKeypair(keypair.(string)) - } - - if userData, ok := d.GetOk("user_data"); ok { - ud, err := getUserData(userData.(string), cs.HTTPGETOnly) - if err != nil { - return err - } - - p.SetUserdata(ud) - } - - // Create the new instance - r, err := cs.VirtualMachine.DeployVirtualMachine(p) - if err != nil { - return fmt.Errorf("Error creating the new instance %s: %s", name, err) - } - - d.SetId(r.Id) - - // Set the connection info for any configured provisioners - d.SetConnInfo(map[string]string{ - "host": r.Nic[0].Ipaddress, - "password": r.Password, - }) - - return resourceCloudStackInstanceRead(d, meta) -} - -func resourceCloudStackInstanceRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Instance %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - // Update the config - d.Set("name", vm.Name) - d.Set("display_name", vm.Displayname) - d.Set("network_id", vm.Nic[0].Networkid) - d.Set("ip_address", vm.Nic[0].Ipaddress) - d.Set("group", vm.Group) - - if _, ok := d.GetOk("affinity_group_ids"); ok { - groups := &schema.Set{F: schema.HashString} - for _, group := range vm.Affinitygroup { - groups.Add(group.Id) - } - d.Set("affinity_group_ids", groups) - } - - if _, ok := d.GetOk("affinity_group_names"); ok { - groups := &schema.Set{F: schema.HashString} - for _, group := range vm.Affinitygroup { - 
groups.Add(group.Name) - } - d.Set("affinity_group_names", groups) - } - - if _, ok := d.GetOk("security_group_ids"); ok { - groups := &schema.Set{F: schema.HashString} - for _, group := range vm.Securitygroup { - groups.Add(group.Id) - } - d.Set("security_group_ids", groups) - } - - if _, ok := d.GetOk("security_group_names"); ok { - groups := &schema.Set{F: schema.HashString} - for _, group := range vm.Securitygroup { - groups.Add(group.Name) - } - d.Set("security_group_names", groups) - } - - setValueOrID(d, "service_offering", vm.Serviceofferingname, vm.Serviceofferingid) - setValueOrID(d, "template", vm.Templatename, vm.Templateid) - setValueOrID(d, "project", vm.Project, vm.Projectid) - setValueOrID(d, "zone", vm.Zonename, vm.Zoneid) - - return nil -} - -func resourceCloudStackInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - d.Partial(true) - - name := d.Get("name").(string) - - // Check if the display name is changed and if so, update the virtual machine - if d.HasChange("display_name") { - log.Printf("[DEBUG] Display name changed for %s, starting update", name) - - // Create a new parameter struct - p := cs.VirtualMachine.NewUpdateVirtualMachineParams(d.Id()) - - // Set the new display name - p.SetDisplayname(d.Get("display_name").(string)) - - // Update the display name - _, err := cs.VirtualMachine.UpdateVirtualMachine(p) - if err != nil { - return fmt.Errorf( - "Error updating the display name for instance %s: %s", name, err) - } - - d.SetPartial("display_name") - } - - // Check if the group is changed and if so, update the virtual machine - if d.HasChange("group") { - log.Printf("[DEBUG] Group changed for %s, starting update", name) - - // Create a new parameter struct - p := cs.VirtualMachine.NewUpdateVirtualMachineParams(d.Id()) - - // Set the new group - p.SetGroup(d.Get("group").(string)) - - // Update the display name - _, err := cs.VirtualMachine.UpdateVirtualMachine(p) - if err != nil 
{ - return fmt.Errorf( - "Error updating the group for instance %s: %s", name, err) - } - - d.SetPartial("group") - } - - // Attributes that require reboot to update - if d.HasChange("name") || d.HasChange("service_offering") || d.HasChange("affinity_group_ids") || - d.HasChange("affinity_group_names") || d.HasChange("keypair") || d.HasChange("user_data") { - // Before we can actually make these changes, the virtual machine must be stopped - _, err := cs.VirtualMachine.StopVirtualMachine( - cs.VirtualMachine.NewStopVirtualMachineParams(d.Id())) - if err != nil { - return fmt.Errorf( - "Error stopping instance %s before making changes: %s", name, err) - } - - // Check if the name has changed and if so, update the name - if d.HasChange("name") { - log.Printf("[DEBUG] Name for %s changed to %s, starting update", d.Id(), name) - - // Create a new parameter struct - p := cs.VirtualMachine.NewUpdateVirtualMachineParams(d.Id()) - - // Set the new name - p.SetName(name) - - // Update the display name - _, err := cs.VirtualMachine.UpdateVirtualMachine(p) - if err != nil { - return fmt.Errorf( - "Error updating the name for instance %s: %s", name, err) - } - - d.SetPartial("name") - } - - // Check if the service offering is changed and if so, update the offering - if d.HasChange("service_offering") { - log.Printf("[DEBUG] Service offering changed for %s, starting update", name) - - // Retrieve the service_offering ID - serviceofferingid, e := retrieveID(cs, "service_offering", d.Get("service_offering").(string)) - if e != nil { - return e.Error() - } - - // Create a new parameter struct - p := cs.VirtualMachine.NewChangeServiceForVirtualMachineParams(d.Id(), serviceofferingid) - - // Change the service offering - _, err = cs.VirtualMachine.ChangeServiceForVirtualMachine(p) - if err != nil { - return fmt.Errorf( - "Error changing the service offering for instance %s: %s", name, err) - } - d.SetPartial("service_offering") - } - - // Check if the affinity group IDs have changed 
and if so, update the IDs - if d.HasChange("affinity_group_ids") { - p := cs.AffinityGroup.NewUpdateVMAffinityGroupParams(d.Id()) - groups := []string{} - - if agIDs := d.Get("affinity_group_ids").(*schema.Set); agIDs.Len() > 0 { - for _, group := range agIDs.List() { - groups = append(groups, group.(string)) - } - } - - // Set the new groups - p.SetAffinitygroupids(groups) - - // Update the affinity groups - _, err = cs.AffinityGroup.UpdateVMAffinityGroup(p) - if err != nil { - return fmt.Errorf( - "Error updating the affinity groups for instance %s: %s", name, err) - } - d.SetPartial("affinity_group_ids") - } - - // Check if the affinity group names have changed and if so, update the names - if d.HasChange("affinity_group_names") { - p := cs.AffinityGroup.NewUpdateVMAffinityGroupParams(d.Id()) - groups := []string{} - - if agNames := d.Get("affinity_group_names").(*schema.Set); agNames.Len() > 0 { - for _, group := range agNames.List() { - groups = append(groups, group.(string)) - } - } - - // Set the new groups - p.SetAffinitygroupnames(groups) - - // Update the affinity groups - _, err = cs.AffinityGroup.UpdateVMAffinityGroup(p) - if err != nil { - return fmt.Errorf( - "Error updating the affinity groups for instance %s: %s", name, err) - } - d.SetPartial("affinity_group_names") - } - - // Check if the keypair has changed and if so, update the keypair - if d.HasChange("keypair") { - log.Printf("[DEBUG] SSH keypair changed for %s, starting update", name) - - p := cs.SSH.NewResetSSHKeyForVirtualMachineParams(d.Id(), d.Get("keypair").(string)) - - // Change the ssh keypair - _, err = cs.SSH.ResetSSHKeyForVirtualMachine(p) - if err != nil { - return fmt.Errorf( - "Error changing the SSH keypair for instance %s: %s", name, err) - } - d.SetPartial("keypair") - } - - // Check if the user data has changed and if so, update the user data - if d.HasChange("user_data") { - log.Printf("[DEBUG] user_data changed for %s, starting update", name) - - ud, err := 
getUserData(d.Get("user_data").(string), cs.HTTPGETOnly) - if err != nil { - return err - } - - p := cs.VirtualMachine.NewUpdateVirtualMachineParams(d.Id()) - p.SetUserdata(ud) - _, err = cs.VirtualMachine.UpdateVirtualMachine(p) - if err != nil { - return fmt.Errorf( - "Error updating user_data for instance %s: %s", name, err) - } - d.SetPartial("user_data") - } - - // Start the virtual machine again - _, err = cs.VirtualMachine.StartVirtualMachine( - cs.VirtualMachine.NewStartVirtualMachineParams(d.Id())) - if err != nil { - return fmt.Errorf( - "Error starting instance %s after making changes", name) - } - } - - d.Partial(false) - - return resourceCloudStackInstanceRead(d, meta) -} - -func resourceCloudStackInstanceDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VirtualMachine.NewDestroyVirtualMachineParams(d.Id()) - - if d.Get("expunge").(bool) { - p.SetExpunge(true) - } - - log.Printf("[INFO] Destroying instance: %s", d.Get("name").(string)) - if _, err := cs.VirtualMachine.DestroyVirtualMachine(p); err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error destroying instance: %s", err) - } - - return nil -} - -// getUserData returns the user data as a base64 encoded string -func getUserData(userData string, httpGetOnly bool) (string, error) { - ud := base64.StdEncoding.EncodeToString([]byte(userData)) - - // deployVirtualMachine uses POST by default, so max userdata is 32K - maxUD := 32768 - - if httpGetOnly { - // deployVirtualMachine using GET instead, so max userdata is 2K - maxUD = 2048 - } - - if len(ud) > maxUD { - return "", fmt.Errorf( - "The supplied user_data contains %d bytes after encoding, "+ - "this exeeds the limit of 
%d bytes", len(ud), maxUD) - } - - return ud, nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_instance_test.go b/builtin/providers/cloudstack/resource_cloudstack_instance_test.go deleted file mode 100644 index 2d9743d30..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_instance_test.go +++ /dev/null @@ -1,318 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackInstance_basic(t *testing.T) { - var instance cloudstack.VirtualMachine - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackInstance_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - testAccCheckCloudStackInstanceAttributes(&instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "user_data", "0cf3dcdc356ec8369494cb3991985ecd5296cdd5"), - ), - }, - }, - }) -} - -func TestAccCloudStackInstance_update(t *testing.T) { - var instance cloudstack.VirtualMachine - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackInstance_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - testAccCheckCloudStackInstanceAttributes(&instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "user_data", "0cf3dcdc356ec8369494cb3991985ecd5296cdd5"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackInstance_renameAndResize, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - testAccCheckCloudStackInstanceRenamedAndResized(&instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "name", "terraform-updated"), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "display_name", "terraform-updated"), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "service_offering", CLOUDSTACK_SERVICE_OFFERING_2), - ), - }, - }, - }) -} - -func TestAccCloudStackInstance_fixedIP(t *testing.T) { - var instance cloudstack.VirtualMachine - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackInstance_fixedIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "ip_address", CLOUDSTACK_NETWORK_1_IPADDRESS1), - ), - }, - }, - }) -} - -func TestAccCloudStackInstance_keyPair(t *testing.T) { - var instance cloudstack.VirtualMachine - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackInstance_keyPair, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "keypair", "terraform-test-keypair"), - ), - }, - }, - }) -} - -func TestAccCloudStackInstance_project(t *testing.T) { - var instance cloudstack.VirtualMachine - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckCloudStackInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackInstance_project, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackInstanceExists( - "cloudstack_instance.foobar", &instance), - resource.TestCheckResourceAttr( - "cloudstack_instance.foobar", "project", CLOUDSTACK_PROJECT_NAME), - ), - }, - }, - }) -} - -func testAccCheckCloudStackInstanceExists( - n string, instance *cloudstack.VirtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - vm, _, err := cs.VirtualMachine.GetVirtualMachineByID(rs.Primary.ID) - - if err != nil { - return err - } - - if vm.Id != rs.Primary.ID { - return fmt.Errorf("Instance not found") - } - - *instance = *vm - - return nil - } -} - -func testAccCheckCloudStackInstanceAttributes( - instance *cloudstack.VirtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if instance.Name != "terraform-test" { - return fmt.Errorf("Bad name: %s", instance.Name) - } - - if instance.Displayname != "terraform-test" { - return fmt.Errorf("Bad display name: %s", instance.Displayname) - } - - if instance.Serviceofferingname != CLOUDSTACK_SERVICE_OFFERING_1 { - return fmt.Errorf("Bad service offering: %s", instance.Serviceofferingname) - } - - if instance.Templatename != CLOUDSTACK_TEMPLATE { - return fmt.Errorf("Bad template: %s", instance.Templatename) - } - - if instance.Nic[0].Networkid != CLOUDSTACK_NETWORK_1 { - return fmt.Errorf("Bad network ID: %s", instance.Nic[0].Networkid) - } - - return nil - } -} - -func testAccCheckCloudStackInstanceRenamedAndResized( - instance *cloudstack.VirtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if instance.Name 
!= "terraform-updated" { - return fmt.Errorf("Bad name: %s", instance.Name) - } - - if instance.Displayname != "terraform-updated" { - return fmt.Errorf("Bad display name: %s", instance.Displayname) - } - - if instance.Serviceofferingname != CLOUDSTACK_SERVICE_OFFERING_2 { - return fmt.Errorf("Bad service offering: %s", instance.Serviceofferingname) - } - - return nil - } -} - -func testAccCheckCloudStackInstanceDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_instance" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - _, _, err := cs.VirtualMachine.GetVirtualMachineByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Virtual Machine %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackInstance_basic = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - user_data = "foobar\nfoo\nbar" - expunge = true -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE) - -var testAccCloudStackInstance_renameAndResize = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-updated" - display_name = "terraform-updated" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - user_data = "foobar\nfoo\nbar" - expunge = true -}`, - CLOUDSTACK_SERVICE_OFFERING_2, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE) - -var testAccCloudStackInstance_fixedIP = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform-test" - service_offering= "%s" - network_id = "%s" - ip_address = "%s" - template = "%s" - zone = "%s" - expunge = true -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - 
CLOUDSTACK_NETWORK_1, - CLOUDSTACK_NETWORK_1_IPADDRESS1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE) - -var testAccCloudStackInstance_keyPair = fmt.Sprintf(` -resource "cloudstack_ssh_keypair" "foo" { - name = "terraform-test-keypair" -} - -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform-test" - service_offering= "%s" - network_id = "%s" - ip_address = "%s" - template = "%s" - zone = "%s" - keypair = "${cloudstack_ssh_keypair.foo.name}" - expunge = true -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_NETWORK_1_IPADDRESS1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE) - -var testAccCloudStackInstance_project = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - project = "%s" - zone = "%s" - expunge = true -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_PROJECT_NETWORK, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_PROJECT_NAME, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_ipaddress.go b/builtin/providers/cloudstack/resource_cloudstack_ipaddress.go deleted file mode 100644 index 9bdd4ab4a..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_ipaddress.go +++ /dev/null @@ -1,163 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackIPAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackIPAddressCreate, - Read: resourceCloudStackIPAddressRead, - Delete: resourceCloudStackIPAddressDelete, - - Schema: map[string]*schema.Schema{ - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "zone_id": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCloudStackIPAddressCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - if err := verifyIPAddressParams(d); err != nil { - return err - } - - // Create a new parameter struct - p := cs.Address.NewAssociateIpAddressParams() - - if networkid, ok := d.GetOk("network_id"); ok { - // Set the networkid - p.SetNetworkid(networkid.(string)) - } - - if vpcid, ok := d.GetOk("vpc_id"); ok { - // Set the vpcid - p.SetVpcid(vpcid.(string)) - } - - if zoneid, ok := d.GetOk("zone_id"); ok { - // Set the vpcid - p.SetZoneid(zoneid.(string)) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Associate a new IP address - r, err := cs.Address.AssociateIpAddress(p) - if err != nil { - return fmt.Errorf("Error associating a new IP address: %s", err) - } - - d.SetId(r.Id) - - return resourceCloudStackIPAddressRead(d, meta) -} - -func resourceCloudStackIPAddressRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the IP address details - ip, count, err := cs.Address.GetPublicIpAddressByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] IP address with ID %s is no longer associated", d.Id()) - d.SetId("") - return nil - } - - return err - } - - // Updated the IP address - d.Set("ip_address", ip.Ipaddress) - - if _, ok := d.GetOk("network_id"); ok { - d.Set("network_id", ip.Associatednetworkid) - } - - if _, ok := d.GetOk("vpc_id"); ok { - d.Set("vpc_id", ip.Vpcid) - } - - if _, ok := d.GetOk("zone_id"); ok { - 
d.Set("zone_id", ip.Zoneid) - } - - setValueOrID(d, "project", ip.Project, ip.Projectid) - - return nil -} - -func resourceCloudStackIPAddressDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.Address.NewDisassociateIpAddressParams(d.Id()) - - // Disassociate the IP address - if _, err := cs.Address.DisassociateIpAddress(p); err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error disassociating IP address %s: %s", d.Get("name").(string), err) - } - - return nil -} - -func verifyIPAddressParams(d *schema.ResourceData) error { - _, network := d.GetOk("network_id") - _, vpc := d.GetOk("vpc_id") - - if (network && vpc) || (!network && !vpc) { - return fmt.Errorf( - "You must supply a value for either (so not both) the 'network_id' or 'vpc_id' parameter") - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_ipaddress_test.go b/builtin/providers/cloudstack/resource_cloudstack_ipaddress_test.go deleted file mode 100644 index 6b74e9692..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_ipaddress_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackIPAddress_basic(t *testing.T) { - var ipaddr cloudstack.PublicIpAddress - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackIPAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackIPAddress_basic, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudStackIPAddressExists( - "cloudstack_ipaddress.foo", &ipaddr), - testAccCheckCloudStackIPAddressAttributes(&ipaddr), - ), - }, - }, - }) -} - -func TestAccCloudStackIPAddress_vpc(t *testing.T) { - var ipaddr cloudstack.PublicIpAddress - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackIPAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackIPAddress_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackIPAddressExists( - "cloudstack_ipaddress.foo", &ipaddr), - ), - }, - }, - }) -} - -func testAccCheckCloudStackIPAddressExists( - n string, ipaddr *cloudstack.PublicIpAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP address ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - pip, _, err := cs.Address.GetPublicIpAddressByID(rs.Primary.ID) - - if err != nil { - return err - } - - if pip.Id != rs.Primary.ID { - return fmt.Errorf("IP address not found") - } - - *ipaddr = *pip - - return nil - } -} - -func testAccCheckCloudStackIPAddressAttributes( - ipaddr *cloudstack.PublicIpAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if ipaddr.Associatednetworkid != CLOUDSTACK_NETWORK_1 { - return fmt.Errorf("Bad network ID: %s", ipaddr.Associatednetworkid) - } - - return nil - } -} - -func testAccCheckCloudStackIPAddressDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_ipaddress" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP address ID is set") - } - - ip, _, err := 
cs.Address.GetPublicIpAddressByID(rs.Primary.ID) - if err == nil && ip.Associatednetworkid != "" { - return fmt.Errorf("Public IP %s still associated", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackIPAddress_basic = fmt.Sprintf(` -resource "cloudstack_ipaddress" "foo" { - network_id = "%s" -}`, CLOUDSTACK_NETWORK_1) - -var testAccCloudStackIPAddress_vpc = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_ipaddress" "foo" { - vpc_id = "${cloudstack_vpc.foobar.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go b/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go deleted file mode 100644 index d5a5ffe92..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go +++ /dev/null @@ -1,286 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackLoadBalancerRule() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackLoadBalancerRuleCreate, - Read: resourceCloudStackLoadBalancerRuleRead, - Update: resourceCloudStackLoadBalancerRuleUpdate, - Delete: resourceCloudStackLoadBalancerRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ip_address_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "algorithm": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "private_port": &schema.Schema{ - Type: 
schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "public_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "member_ids": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - d.Partial(true) - - // Create a new parameter struct - p := cs.LoadBalancer.NewCreateLoadBalancerRuleParams( - d.Get("algorithm").(string), - d.Get("name").(string), - d.Get("private_port").(int), - d.Get("public_port").(int), - ) - - // Don't autocreate a firewall rule, use a resource if needed - p.SetOpenfirewall(false) - - // Set the description - if description, ok := d.GetOk("description"); ok { - p.SetDescription(description.(string)) - } else { - p.SetDescription(d.Get("name").(string)) - } - - if networkid, ok := d.GetOk("network_id"); ok { - // Set the network id - p.SetNetworkid(networkid.(string)) - } - - // Set the ipaddress id - p.SetPublicipid(d.Get("ip_address_id").(string)) - - // Create the load balancer rule - r, err := cs.LoadBalancer.CreateLoadBalancerRule(p) - if err != nil { - return err - } - - // Set the load balancer rule ID and set partials - d.SetId(r.Id) - d.SetPartial("name") - d.SetPartial("description") - d.SetPartial("ip_address_id") - d.SetPartial("network_id") - d.SetPartial("algorithm") - d.SetPartial("private_port") - d.SetPartial("public_port") - - // Create a new parameter struct - ap := cs.LoadBalancer.NewAssignToLoadBalancerRuleParams(r.Id) - - var mbs []string - for _, id := range d.Get("member_ids").(*schema.Set).List() { - mbs = append(mbs, id.(string)) - } - - ap.SetVirtualmachineids(mbs) - - _, err = 
cs.LoadBalancer.AssignToLoadBalancerRule(ap) - if err != nil { - return err - } - - d.SetPartial("member_ids") - d.Partial(false) - - return resourceCloudStackLoadBalancerRuleRead(d, meta) -} - -func resourceCloudStackLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the load balancer details - lb, count, err := cs.LoadBalancer.GetLoadBalancerRuleByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Load balancer rule %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("algorithm", lb.Algorithm) - d.Set("public_port", lb.Publicport) - d.Set("private_port", lb.Privateport) - d.Set("ip_address_id", lb.Publicipid) - - // Only set network if user specified it to avoid spurious diffs - if _, ok := d.GetOk("network_id"); ok { - d.Set("network_id", lb.Networkid) - } - - setValueOrID(d, "project", lb.Project, lb.Projectid) - - p := cs.LoadBalancer.NewListLoadBalancerRuleInstancesParams(d.Id()) - l, err := cs.LoadBalancer.ListLoadBalancerRuleInstances(p) - if err != nil { - return err - } - - var mbs []string - for _, i := range l.LoadBalancerRuleInstances { - mbs = append(mbs, i.Id) - } - d.Set("member_ids", mbs) - - return nil -} - -func resourceCloudStackLoadBalancerRuleUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - if d.HasChange("name") || d.HasChange("description") || d.HasChange("algorithm") { - name := d.Get("name").(string) - - // Create new parameter struct - p := cs.LoadBalancer.NewUpdateLoadBalancerRuleParams(d.Id()) - - if d.HasChange("name") { - log.Printf("[DEBUG] Name has changed for load balancer rule %s, starting update", name) - - p.SetName(name) - } - - if d.HasChange("description") { - log.Printf( - "[DEBUG] Description has changed for load balancer rule %s, starting update", name) - - 
p.SetDescription(d.Get("description").(string)) - } - - if d.HasChange("algorithm") { - algorithm := d.Get("algorithm").(string) - - log.Printf( - "[DEBUG] Algorithm has changed to %s for load balancer rule %s, starting update", - algorithm, - name, - ) - - // Set the new Algorithm - p.SetAlgorithm(algorithm) - } - - _, err := cs.LoadBalancer.UpdateLoadBalancerRule(p) - if err != nil { - return fmt.Errorf( - "Error updating load balancer rule %s", name) - } - } - - if d.HasChange("member_ids") { - o, n := d.GetChange("member_ids") - ombs, nmbs := o.(*schema.Set), n.(*schema.Set) - - setToStringList := func(s *schema.Set) []string { - l := make([]string, s.Len()) - for i, v := range s.List() { - l[i] = v.(string) - } - return l - } - - membersToAdd := setToStringList(nmbs.Difference(ombs)) - membersToRemove := setToStringList(ombs.Difference(nmbs)) - - log.Printf("[DEBUG] Members to add: %v, remove: %v", membersToAdd, membersToRemove) - - if len(membersToAdd) > 0 { - p := cs.LoadBalancer.NewAssignToLoadBalancerRuleParams(d.Id()) - p.SetVirtualmachineids(membersToAdd) - if _, err := cs.LoadBalancer.AssignToLoadBalancerRule(p); err != nil { - return err - } - } - - if len(membersToRemove) > 0 { - p := cs.LoadBalancer.NewRemoveFromLoadBalancerRuleParams(d.Id()) - p.SetVirtualmachineids(membersToRemove) - if _, err := cs.LoadBalancer.RemoveFromLoadBalancerRule(p); err != nil { - return err - } - } - } - - return resourceCloudStackLoadBalancerRuleRead(d, meta) -} - -func resourceCloudStackLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.LoadBalancer.NewDeleteLoadBalancerRuleParams(d.Id()) - - log.Printf("[INFO] Deleting load balancer rule: %s", d.Get("name").(string)) - if _, err := cs.LoadBalancer.DeleteLoadBalancerRule(p); err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if !strings.Contains(err.Error(), fmt.Sprintf( - 
"Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return err - } - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule_test.go b/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule_test.go deleted file mode 100644 index 9d3f6ec1e..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackLoadBalancerRule_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackLoadBalancerRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "roundrobin"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - }, - }) -} - -func TestAccCloudStackLoadBalancerRule_update(t *testing.T) { - var id string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackLoadBalancerRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_basic, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", &id), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "roundrobin"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", &id), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb-update"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "leastconn"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - }, - }) -} - -func TestAccCloudStackLoadBalancerRule_forceNew(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackLoadBalancerRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "roundrobin"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - - resource.TestStep{ - 
Config: testAccCloudStackLoadBalancerRule_forcenew, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb-update"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "leastconn"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "443"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "443"), - ), - }, - }, - }) -} - -func TestAccCloudStackLoadBalancerRule_vpc(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackLoadBalancerRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "roundrobin"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - }, - }) -} - -func TestAccCloudStackLoadBalancerRule_vpcUpdate(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackLoadBalancerRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - 
"cloudstack_loadbalancer_rule.foo", "name", "terraform-lb"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "roundrobin"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "80"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "80"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackLoadBalancerRule_vpc_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackLoadBalancerRuleExist("cloudstack_loadbalancer_rule.foo", nil), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "name", "terraform-lb-update"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "algorithm", "leastconn"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "public_port", "443"), - resource.TestCheckResourceAttr( - "cloudstack_loadbalancer_rule.foo", "private_port", "443"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackLoadBalancerRuleExist(n string, id *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No loadbalancer rule ID is set") - } - - if id != nil { - if *id != "" && *id != rs.Primary.ID { - return fmt.Errorf("Resource ID has changed!") - } - - *id = rs.Primary.ID - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - _, count, err := cs.LoadBalancer.GetLoadBalancerRuleByID(rs.Primary.ID) - - if err != nil { - return err - } - - if count == 0 { - return fmt.Errorf("Loadbalancer rule %s not found", n) - } - - return nil - } -} - -func testAccCheckCloudStackLoadBalancerRuleDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_loadbalancer_rule" { - continue - } - - if 
rs.Primary.ID == "" { - return fmt.Errorf("No Loadbalancer rule ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, "uuid") { - continue - } - - _, _, err := cs.LoadBalancer.GetLoadBalancerRuleByID(id) - if err == nil { - return fmt.Errorf("Loadbalancer rule %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackLoadBalancerRule_basic = fmt.Sprintf(` -resource "cloudstack_instance" "foobar1" { - name = "terraform-server1" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_loadbalancer_rule" "foo" { - name = "terraform-lb" - ip_address_id = "%s" - algorithm = "roundrobin" - public_port = 80 - private_port = 80 - member_ids = ["${cloudstack_instance.foobar1.id}"] -} -`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_PUBLIC_IPADDRESS) - -var testAccCloudStackLoadBalancerRule_update = fmt.Sprintf(` -resource "cloudstack_instance" "foobar1" { - name = "terraform-server1" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_loadbalancer_rule" "foo" { - name = "terraform-lb-update" - ip_address_id = "%s" - algorithm = "leastconn" - public_port = 80 - private_port = 80 - member_ids = ["${cloudstack_instance.foobar1.id}"] -} -`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_PUBLIC_IPADDRESS) - -var testAccCloudStackLoadBalancerRule_forcenew = fmt.Sprintf(` -resource "cloudstack_instance" "foobar1" { - name = "terraform-server1" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_loadbalancer_rule" "foo" { - name = "terraform-lb-update" - ip_address_id = "%s" - algorithm = "leastconn" - 
public_port = 443 - private_port = 443 - member_ids = ["${cloudstack_instance.foobar1.id}"] -} -`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_PUBLIC_IPADDRESS) - -var testAccCloudStackLoadBalancerRule_vpc = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" - zone = "${cloudstack_vpc.foobar.zone}" -} - -resource "cloudstack_ipaddress" "foo" { - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_instance" "foobar1" { - name = "terraform-server1" - display_name = "terraform" - service_offering= "%s" - network_id = "${cloudstack_network.foo.id}" - template = "%s" - zone = "${cloudstack_network.foo.zone}" - expunge = true -} - -resource "cloudstack_loadbalancer_rule" "foo" { - name = "terraform-lb" - ip_address_id = "${cloudstack_ipaddress.foo.id}" - algorithm = "roundrobin" - network_id = "${cloudstack_network.foo.id}" - public_port = 80 - private_port = 80 - member_ids = ["${cloudstack_instance.foobar1.id}"] -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_NETWORK_CIDR, - CLOUDSTACK_VPC_NETWORK_OFFERING, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_TEMPLATE) - -var testAccCloudStackLoadBalancerRule_vpc_update = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" - zone = "${cloudstack_vpc.foobar.zone}" -} - -resource "cloudstack_ipaddress" "foo" { - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_instance" "foobar1" { - name = "terraform-server1" - display_name = 
"terraform" - service_offering= "%s" - network_id = "${cloudstack_network.foo.id}" - template = "%s" - zone = "${cloudstack_network.foo.zone}" - expunge = true -} - -resource "cloudstack_instance" "foobar2" { - name = "terraform-server2" - display_name = "terraform" - service_offering= "%s" - network_id = "${cloudstack_network.foo.id}" - template = "%s" - zone = "${cloudstack_network.foo.zone}" - expunge = true -} - -resource "cloudstack_loadbalancer_rule" "foo" { - name = "terraform-lb-update" - ip_address_id = "${cloudstack_ipaddress.foo.id}" - algorithm = "leastconn" - network_id = "${cloudstack_network.foo.id}" - public_port = 443 - private_port = 443 - member_ids = ["${cloudstack_instance.foobar1.id}", "${cloudstack_instance.foobar2.id}"] -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_NETWORK_CIDR, - CLOUDSTACK_VPC_NETWORK_OFFERING, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_TEMPLATE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_network.go b/builtin/providers/cloudstack/resource_cloudstack_network.go deleted file mode 100644 index e5e110b04..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network.go +++ /dev/null @@ -1,385 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "net" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -const none = "none" - -func resourceCloudStackNetwork() *schema.Resource { - aclidSchema := &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: none, - } - - aclidSchema.StateFunc = func(v interface{}) string { - value := v.(string) - - if value == none { - aclidSchema.ForceNew = true - } else { - aclidSchema.ForceNew = false - } - - return value - } - - return &schema.Resource{ - Create: resourceCloudStackNetworkCreate, - Read: resourceCloudStackNetworkRead, - Update: 
resourceCloudStackNetworkUpdate, - Delete: resourceCloudStackNetworkDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "display_text": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "startip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "endip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "network_domain": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "network_offering": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "vlan": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "acl_id": aclidSchema, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceCloudStackNetworkCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - - // Retrieve the network_offering ID - networkofferingid, e := retrieveID(cs, "network_offering", d.Get("network_offering").(string)) - if e != nil { - return e.Error() - } - - // Retrieve the zone ID - zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string)) - if e != nil { - return e.Error() - } - - // Compute/set the display text - displaytext, ok := d.GetOk("display_text") - if !ok { - displaytext = name - } - - 
// Create a new parameter struct - p := cs.Network.NewCreateNetworkParams(displaytext.(string), name, networkofferingid, zoneid) - - // Get the network offering to check if it supports specifying IP ranges - no, _, err := cs.NetworkOffering.GetNetworkOfferingByID(networkofferingid) - if err != nil { - return err - } - - m, err := parseCIDR(d, no.Specifyipranges) - if err != nil { - return err - } - - // Set the needed IP config - p.SetGateway(m["gateway"]) - p.SetNetmask(m["netmask"]) - - // Only set the start IP if we have one - if startip, ok := m["startip"]; ok { - p.SetStartip(startip) - } - - // Only set the end IP if we have one - if endip, ok := m["endip"]; ok { - p.SetEndip(endip) - } - - // Set the network domain if we have one - if networkDomain, ok := d.GetOk("network_domain"); ok { - p.SetNetworkdomain(networkDomain.(string)) - } - - if vlan, ok := d.GetOk("vlan"); ok { - p.SetVlan(strconv.Itoa(vlan.(int))) - } - - // Check is this network needs to be created in a VPC - if vpcid, ok := d.GetOk("vpc_id"); ok { - // Set the vpc id - p.SetVpcid(vpcid.(string)) - - // Since we're in a VPC, check if we want to assiciate an ACL list - if aclid, ok := d.GetOk("acl_id"); ok && aclid.(string) != none { - // Set the acl ID - p.SetAclid(aclid.(string)) - } - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Create the new network - r, err := cs.Network.CreateNetwork(p) - if err != nil { - return fmt.Errorf("Error creating network %s: %s", name, err) - } - - d.SetId(r.Id) - - err = setTags(cs, d, "network") - if err != nil { - return fmt.Errorf("Error setting tags: %s", err) - } - - return resourceCloudStackNetworkRead(d, meta) -} - -func resourceCloudStackNetworkRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the virtual machine details - n, count, err := cs.Network.GetNetworkByID( - d.Id(), - 
cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] Network %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", n.Name) - d.Set("display_text", n.Displaytext) - d.Set("cidr", n.Cidr) - d.Set("gateway", n.Gateway) - d.Set("network_domain", n.Networkdomain) - d.Set("vpc_id", n.Vpcid) - - if n.Aclid == "" { - n.Aclid = none - } - d.Set("acl_id", n.Aclid) - - // Read the tags and store them in a map - tags := make(map[string]interface{}) - for item := range n.Tags { - tags[n.Tags[item].Key] = n.Tags[item].Value - } - d.Set("tags", tags) - - setValueOrID(d, "network_offering", n.Networkofferingname, n.Networkofferingid) - setValueOrID(d, "project", n.Project, n.Projectid) - setValueOrID(d, "zone", n.Zonename, n.Zoneid) - - return nil -} - -func resourceCloudStackNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - name := d.Get("name").(string) - - // Create a new parameter struct - p := cs.Network.NewUpdateNetworkParams(d.Id()) - - // Check if the name or display text is changed - if d.HasChange("name") || d.HasChange("display_text") { - p.SetName(name) - - // Compute/set the display text - displaytext := d.Get("display_text").(string) - if displaytext == "" { - displaytext = name - } - p.SetDisplaytext(displaytext) - } - - // Check if the cidr is changed - if d.HasChange("cidr") { - p.SetGuestvmcidr(d.Get("cidr").(string)) - } - - // Check if the network domain is changed - if d.HasChange("network_domain") { - p.SetNetworkdomain(d.Get("network_domain").(string)) - } - - // Check if the network offering is changed - if d.HasChange("network_offering") { - // Retrieve the network_offering ID - networkofferingid, e := retrieveID(cs, "network_offering", d.Get("network_offering").(string)) - if e != nil { - return e.Error() - } - // Set the new network offering - 
p.SetNetworkofferingid(networkofferingid) - } - - // Update the network - _, err := cs.Network.UpdateNetwork(p) - if err != nil { - return fmt.Errorf( - "Error updating network %s: %s", name, err) - } - - // Replace the ACL if the ID has changed - if d.HasChange("acl_id") { - p := cs.NetworkACL.NewReplaceNetworkACLListParams(d.Get("acl_id").(string)) - p.SetNetworkid(d.Id()) - - _, err := cs.NetworkACL.ReplaceNetworkACLList(p) - if err != nil { - return fmt.Errorf("Error replacing ACL: %s", err) - } - } - - // Update tags if they have changed - if d.HasChange("tags") { - err = setTags(cs, d, "network") - if err != nil { - return fmt.Errorf("Error updating tags: %s", err) - } - } - - return resourceCloudStackNetworkRead(d, meta) -} - -func resourceCloudStackNetworkDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.Network.NewDeleteNetworkParams(d.Id()) - - // Delete the network - _, err := cs.Network.DeleteNetwork(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting network %s: %s", d.Get("name").(string), err) - } - return nil -} - -func parseCIDR(d *schema.ResourceData, specifyiprange bool) (map[string]string, error) { - m := make(map[string]string, 4) - - cidr := d.Get("cidr").(string) - ip, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return nil, fmt.Errorf("Unable to parse cidr %s: %s", cidr, err) - } - - msk := ipnet.Mask - sub := ip.Mask(msk) - - m["netmask"] = fmt.Sprintf("%d.%d.%d.%d", msk[0], msk[1], msk[2], msk[3]) - - if gateway, ok := d.GetOk("gateway"); ok { - m["gateway"] = gateway.(string) - } else { - m["gateway"] = fmt.Sprintf("%d.%d.%d.%d", sub[0], sub[1], sub[2], sub[3]+1) - } - - if startip, 
ok := d.GetOk("startip"); ok { - m["startip"] = startip.(string) - } else if specifyiprange { - m["startip"] = fmt.Sprintf("%d.%d.%d.%d", sub[0], sub[1], sub[2], sub[3]+2) - } - - if endip, ok := d.GetOk("endip"); ok { - m["endip"] = endip.(string) - } else if specifyiprange { - m["endip"] = fmt.Sprintf("%d.%d.%d.%d", - sub[0]+(0xff-msk[0]), sub[1]+(0xff-msk[1]), sub[2]+(0xff-msk[2]), sub[3]+(0xff-msk[3]-1)) - } - - return m, nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl.go deleted file mode 100644 index cf19a511f..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network_acl.go +++ /dev/null @@ -1,121 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackNetworkACL() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackNetworkACLCreate, - Read: resourceCloudStackNetworkACLRead, - Delete: resourceCloudStackNetworkACLDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackNetworkACLCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - - // Create a new parameter struct - p := cs.NetworkACL.NewCreateNetworkACLListParams(name, d.Get("vpc_id").(string)) - - // Set the description - if description, ok := d.GetOk("description"); ok { - p.SetDescription(description.(string)) - } else { 
- p.SetDescription(name) - } - - // Create the new network ACL list - r, err := cs.NetworkACL.CreateNetworkACLList(p) - if err != nil { - return fmt.Errorf("Error creating network ACL list %s: %s", name, err) - } - - d.SetId(r.Id) - - return resourceCloudStackNetworkACLRead(d, meta) -} - -func resourceCloudStackNetworkACLRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the network ACL list details - f, count, err := cs.NetworkACL.GetNetworkACLListByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] Network ACL list %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", f.Name) - d.Set("description", f.Description) - d.Set("vpc_id", f.Vpcid) - - return nil -} - -func resourceCloudStackNetworkACLDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.NetworkACL.NewDeleteNetworkACLListParams(d.Id()) - - // Delete the network ACL list - _, err := Retry(3, func() (interface{}, error) { - return cs.NetworkACL.DeleteNetworkACLList(p) - }) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting network ACL list %s: %s", d.Get("name").(string), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go deleted file mode 100644 index 98a3ba5e7..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go +++ /dev/null @@ -1,666 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strconv" - 
"strings" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackNetworkACLRule() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackNetworkACLRuleCreate, - Read: resourceCloudStackNetworkACLRuleRead, - Update: resourceCloudStackNetworkACLRuleUpdate, - Delete: resourceCloudStackNetworkACLRuleDelete, - - Schema: map[string]*schema.Schema{ - "acl_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "managed": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "action": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "allow", - }, - - "cidr_list": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "icmp_code": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "traffic_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "ingress", - }, - - "uuids": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "parallelism": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceCloudStackNetworkACLRuleCreate(d *schema.ResourceData, meta interface{}) error 
{ - // Make sure all required parameters are there - if err := verifyNetworkACLParams(d); err != nil { - return err - } - - // We need to set this upfront in order to be able to save a partial state - d.SetId(d.Get("acl_id").(string)) - - // Create all rules that are configured - if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 { - // Create an empty rule set to hold all newly created rules - rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set) - - err := createNetworkACLRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return resourceCloudStackNetworkACLRuleRead(d, meta) -} - -func createNetworkACLRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, nrs *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(nrs.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range nrs.List() { - // Put in a tiny sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Create a single rule - err := createNetworkACLRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func createNetworkACLRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - // Make sure all required parameters are there - if err := verifyNetworkACLRuleParams(d, rule); err != nil { - return err - } - - // Create a new parameter struct - p := cs.NetworkACL.NewCreateNetworkACLParams(rule["protocol"].(string)) 
- - // Set the acl ID - p.SetAclid(d.Id()) - - // Set the action - p.SetAction(rule["action"].(string)) - - // Set the CIDR list - var cidrList []string - for _, cidr := range rule["cidr_list"].(*schema.Set).List() { - cidrList = append(cidrList, cidr.(string)) - } - p.SetCidrlist(cidrList) - - // Set the traffic type - p.SetTraffictype(rule["traffic_type"].(string)) - - // If the protocol is ICMP set the needed ICMP parameters - if rule["protocol"].(string) == "icmp" { - p.SetIcmptype(rule["icmp_type"].(int)) - p.SetIcmpcode(rule["icmp_code"].(int)) - - r, err := Retry(4, retryableACLCreationFunc(cs, p)) - if err != nil { - return err - } - - uuids["icmp"] = r.(*cloudstack.CreateNetworkACLResponse).Id - rule["uuids"] = uuids - } - - // If the protocol is ALL set the needed parameters - if rule["protocol"].(string) == "all" { - r, err := Retry(4, retryableACLCreationFunc(cs, p)) - if err != nil { - return err - } - - uuids["all"] = r.(*cloudstack.CreateNetworkACLResponse).Id - rule["uuids"] = uuids - } - - // If protocol is TCP or UDP, loop through all ports - if rule["protocol"].(string) == "tcp" || rule["protocol"].(string) == "udp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all processed ports - ports := &schema.Set{F: schema.HashString} - - for _, port := range ps.List() { - if _, ok := uuids[port.(string)]; ok { - ports.Add(port) - rule["ports"] = ports - continue - } - - m := splitPorts.FindStringSubmatch(port.(string)) - - startPort, err := strconv.Atoi(m[1]) - if err != nil { - return err - } - - endPort := startPort - if m[2] != "" { - endPort, err = strconv.Atoi(m[2]) - if err != nil { - return err - } - } - - p.SetStartport(startPort) - p.SetEndport(endPort) - - r, err := Retry(4, retryableACLCreationFunc(cs, p)) - if err != nil { - return err - } - - ports.Add(port) - rule["ports"] = ports - - uuids[port.(string)] = r.(*cloudstack.CreateNetworkACLResponse).Id - rule["uuids"] = uuids - } - } - } - - 
return nil -} - -func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // First check if the ACL itself still exists - _, count, err := cs.NetworkACL.GetNetworkACLListByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] Network ACL list %s does no longer exist", d.Id()) - d.SetId("") - return nil - } - - return err - } - - // Get all the rules from the running environment - p := cs.NetworkACL.NewListNetworkACLsParams() - p.SetAclid(d.Id()) - p.SetListall(true) - - l, err := cs.NetworkACL.ListNetworkACLs(p) - if err != nil { - return err - } - - // Make a map of all the rules so we can easily find a rule - ruleMap := make(map[string]*cloudstack.NetworkACL, l.Count) - for _, r := range l.NetworkACLs { - ruleMap[r.Id] = r - } - - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set) - - // Read all rules that are configured - if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 { - for _, rule := range rs.List() { - rule := rule.(map[string]interface{}) - uuids := rule["uuids"].(map[string]interface{}) - - if rule["protocol"].(string) == "icmp" { - id, ok := uuids["icmp"] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, "icmp") - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["action"] = strings.ToLower(r.Action) - rule["protocol"] = r.Protocol - rule["icmp_type"] = r.Icmptype - rule["icmp_code"] = r.Icmpcode - rule["traffic_type"] = strings.ToLower(r.Traffictype) - rule["cidr_list"] = cidrs - rules.Add(rule) - } - 
- if rule["protocol"].(string) == "all" { - id, ok := uuids["all"] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, "all") - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["action"] = strings.ToLower(r.Action) - rule["protocol"] = r.Protocol - rule["traffic_type"] = strings.ToLower(r.Traffictype) - rule["cidr_list"] = cidrs - rules.Add(rule) - } - - // If protocol is tcp or udp, loop through all ports - if rule["protocol"].(string) == "tcp" || rule["protocol"].(string) == "udp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all ports - ports := &schema.Set{F: schema.HashString} - - // Loop through all ports and retrieve their info - for _, port := range ps.List() { - id, ok := uuids[port.(string)] - if !ok { - continue - } - - // Get the rule - r, ok := ruleMap[id.(string)] - if !ok { - delete(uuids, port.(string)) - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(ruleMap, id.(string)) - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidrlist, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["action"] = strings.ToLower(r.Action) - rule["protocol"] = r.Protocol - rule["traffic_type"] = strings.ToLower(r.Traffictype) - rule["cidr_list"] = cidrs - ports.Add(port) - } - - // If there is at least one port found, add this rule to the rules set - if ports.Len() > 0 { - rule["ports"] = ports - rules.Add(rule) - } - } - } - } - } - - // If this is a managed firewall, add all unknown rules into dummy rules - managed := d.Get("managed").(bool) - if managed && len(ruleMap) > 0 { - for 
uuid := range ruleMap { - // We need to create and add a dummy value to a schema.Set as the - // cidr_list is a required field and thus needs a value - cidrs := &schema.Set{F: schema.HashString} - cidrs.Add(uuid) - - // Make a dummy rule to hold the unknown UUID - rule := map[string]interface{}{ - "cidr_list": cidrs, - "protocol": uuid, - "uuids": map[string]interface{}{uuid: uuid}, - } - - // Add the dummy rule to the rules set - rules.Add(rule) - } - } - - if rules.Len() > 0 { - d.Set("rule", rules) - } else if !managed { - d.SetId("") - } - - return nil -} - -func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interface{}) error { - // Make sure all required parameters are there - if err := verifyNetworkACLParams(d); err != nil { - return err - } - - // Check if the rule set as a whole has changed - if d.HasChange("rule") { - o, n := d.GetChange("rule") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // We need to start with a rule set containing all the rules we - // already have and want to keep. 
Any rules that are not deleted - // correctly and any newly created rules, will be added to this - // set to make sure we end up in a consistent state - rules := o.(*schema.Set).Intersection(n.(*schema.Set)) - - // First loop through all the new rules and create (before destroy) them - if nrs.Len() > 0 { - err := createNetworkACLRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - // Then loop through all the old rules and delete them - if ors.Len() > 0 { - err := deleteNetworkACLRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - } - - return resourceCloudStackNetworkACLRuleRead(d, meta) -} - -func resourceCloudStackNetworkACLRuleDelete(d *schema.ResourceData, meta interface{}) error { - // Create an empty rule set to hold all rules that where - // not deleted correctly - rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set) - - // Delete all rules - if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 { - err := deleteNetworkACLRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return nil -} - -func deleteNetworkACLRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, ors *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(ors.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range ors.List() { - // Put a sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Delete a single rule - err := deleteNetworkACLRule(d, meta, rule) - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) 
> 0 { - rules.Add(rule) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func deleteNetworkACLRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - for k, id := range uuids { - // We don't care about the count here, so just continue - if k == "%" { - continue - } - - // Create the parameter struct - p := cs.NetworkACL.NewDeleteNetworkACLParams(id.(string)) - - // Delete the rule - if _, err := cs.NetworkACL.DeleteNetworkACL(p); err != nil { - - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", id.(string))) { - delete(uuids, k) - rule["uuids"] = uuids - continue - } - - return err - } - - // Delete the UUID of this rule - delete(uuids, k) - rule["uuids"] = uuids - } - - return nil -} - -func verifyNetworkACLParams(d *schema.ResourceData) error { - managed := d.Get("managed").(bool) - _, rules := d.GetOk("rule") - - if !rules && !managed { - return fmt.Errorf( - "You must supply at least one 'rule' when not using the 'managed' firewall feature") - } - - return nil -} - -func verifyNetworkACLRuleParams(d *schema.ResourceData, rule map[string]interface{}) error { - action := rule["action"].(string) - if action != "allow" && action != "deny" { - return fmt.Errorf("Parameter action only accepts 'allow' or 'deny' as values") - } - - protocol := rule["protocol"].(string) - switch protocol { - case "icmp": - if _, ok := rule["icmp_type"]; !ok { - return fmt.Errorf( - "Parameter icmp_type is a required parameter when using protocol 'icmp'") - } - if _, ok := rule["icmp_code"]; !ok { - return fmt.Errorf( - "Parameter icmp_code is a required parameter when using protocol 
'icmp'") - } - case "all": - // No additional test are needed, so just leave this empty... - case "tcp", "udp": - if ports, ok := rule["ports"].(*schema.Set); ok { - for _, port := range ports.List() { - m := splitPorts.FindStringSubmatch(port.(string)) - if m == nil { - return fmt.Errorf( - "%q is not a valid port value. Valid options are '80' or '80-90'", port.(string)) - } - } - } else { - return fmt.Errorf( - "Parameter ports is a required parameter when *not* using protocol 'icmp'") - } - default: - _, err := strconv.ParseInt(protocol, 0, 0) - if err != nil { - return fmt.Errorf( - "%q is not a valid protocol. Valid options are 'tcp', 'udp', "+ - "'icmp', 'all' or a valid protocol number", protocol) - } - } - - traffic := rule["traffic_type"].(string) - if traffic != "ingress" && traffic != "egress" { - return fmt.Errorf( - "Parameter traffic_type only accepts 'ingress' or 'egress' as values") - } - - return nil -} - -func retryableACLCreationFunc( - cs *cloudstack.CloudStackClient, - p *cloudstack.CreateNetworkACLParams) func() (interface{}, error) { - return func() (interface{}, error) { - r, err := cs.NetworkACL.CreateNetworkACL(p) - if err != nil { - return nil, err - } - return r, nil - } -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go deleted file mode 100644 index de0a4c75c..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackNetworkACLRule_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNetworkACLRuleDestroy, - 
Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetworkACLRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkACLRulesExist("cloudstack_network_acl.foo"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.#", "3"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.action", "allow"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.cidr_list.2835005819", "172.16.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.3638101695", "443"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.traffic_type", "ingress"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.action", "allow"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.cidr_list.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.icmp_code", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.icmp_type", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.traffic_type", "ingress"), - ), - }, - }, - }) -} - -func TestAccCloudStackNetworkACLRule_update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckCloudStackNetworkACLRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetworkACLRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkACLRulesExist("cloudstack_network_acl.foo"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.#", "3"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.action", "allow"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.cidr_list.2835005819", "172.16.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.ports.3638101695", "443"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2898748868.traffic_type", "ingress"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.action", "allow"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.cidr_list.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.icmp_code", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.icmp_type", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1480917538.traffic_type", "ingress"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackNetworkACLRule_update, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckCloudStackNetworkACLRulesExist("cloudstack_network_acl.foo"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.#", "4"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.action", "deny"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.cidr_list.3482919157", "10.0.0.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.ports.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.ports.1209010669", "1000-2000"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.1724235854.traffic_type", "egress"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.action", "deny"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.cidr_list.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.cidr_list.2104435309", "172.18.101.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.icmp_code", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.icmp_type", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2090315355.traffic_type", "ingress"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.action", "allow"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.cidr_list.3056857544", "172.18.100.0/24"), - 
resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.ports.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.ports.3638101695", "443"), - resource.TestCheckResourceAttr( - "cloudstack_network_acl_rule.foo", "rule.2576683033.traffic_type", "ingress"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackNetworkACLRulesExist(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ACL rule ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - _, count, err := cs.NetworkACL.GetNetworkACLByID(id) - - if err != nil { - return err - } - - if count == 0 { - return fmt.Errorf("Network ACL rule %s not found", k) - } - } - - return nil - } -} - -func testAccCheckCloudStackNetworkACLRuleDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_network_acl_rule" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ACL rule ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - _, _, err := cs.NetworkACL.GetNetworkACLByID(id) - if err == nil { - return fmt.Errorf("Network ACL rule %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackNetworkACLRule_basic = fmt.Sprintf(` 
-resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network_acl" "foo" { - name = "terraform-acl" - description = "terraform-acl-text" - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_network_acl_rule" "foo" { - acl_id = "${cloudstack_network_acl.foo.id}" - - rule { - action = "allow" - cidr_list = ["172.18.100.0/24"] - protocol = "all" - traffic_type = "ingress" - } - - rule { - action = "allow" - cidr_list = ["172.18.100.0/24"] - protocol = "icmp" - icmp_type = "-1" - icmp_code = "-1" - traffic_type = "ingress" - } - - rule { - cidr_list = ["172.16.100.0/24"] - protocol = "tcp" - ports = ["80", "443"] - traffic_type = "ingress" - } -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) - -var testAccCloudStackNetworkACLRule_update = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network_acl" "foo" { - name = "terraform-acl" - description = "terraform-acl-text" - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_network_acl_rule" "foo" { - acl_id = "${cloudstack_network_acl.foo.id}" - - rule { - action = "deny" - cidr_list = ["172.18.100.0/24"] - protocol = "all" - traffic_type = "ingress" - } - - rule { - action = "deny" - cidr_list = ["172.18.100.0/24", "172.18.101.0/24"] - protocol = "icmp" - icmp_type = "-1" - icmp_code = "-1" - traffic_type = "ingress" - } - - rule { - action = "allow" - cidr_list = ["172.18.100.0/24"] - protocol = "tcp" - ports = ["80", "443"] - traffic_type = "ingress" - } - - rule { - action = "deny" - cidr_list = ["10.0.0.0/24"] - protocol = "tcp" - ports = ["80", "1000-2000"] - traffic_type = "egress" - } -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_test.go 
b/builtin/providers/cloudstack/resource_cloudstack_network_acl_test.go deleted file mode 100644 index d6431c399..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackNetworkACL_basic(t *testing.T) { - var acl cloudstack.NetworkACLList - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNetworkACLDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetworkACL_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkACLExists( - "cloudstack_network_acl.foo", &acl), - testAccCheckCloudStackNetworkACLBasicAttributes(&acl), - ), - }, - }, - }) -} - -func testAccCheckCloudStackNetworkACLExists( - n string, acl *cloudstack.NetworkACLList) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ACL ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - acllist, _, err := cs.NetworkACL.GetNetworkACLListByID(rs.Primary.ID) - if err != nil { - return err - } - - if acllist.Id != rs.Primary.ID { - return fmt.Errorf("Network ACL not found") - } - - *acl = *acllist - - return nil - } -} - -func testAccCheckCloudStackNetworkACLBasicAttributes( - acl *cloudstack.NetworkACLList) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if acl.Name != "terraform-acl" { - return fmt.Errorf("Bad name: %s", acl.Name) - } - - if acl.Description != "terraform-acl-text" { - return fmt.Errorf("Bad description: %s", acl.Description) - } - - return nil - } 
-} - -func testAccCheckCloudStackNetworkACLDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_network_acl" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ACL ID is set") - } - - _, _, err := cs.NetworkACL.GetNetworkACLListByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Network ACl list %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackNetworkACL_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network_acl" "foo" { - name = "terraform-acl" - description = "terraform-acl-text" - vpc_id = "${cloudstack_vpc.foobar.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_test.go b/builtin/providers/cloudstack/resource_cloudstack_network_test.go deleted file mode 100644 index 0333f6c25..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_network_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackNetwork_basic(t *testing.T) { - var network cloudstack.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetwork_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkExists( - "cloudstack_network.foo", &network), - testAccCheckCloudStackNetworkBasicAttributes(&network), - testAccCheckNetworkTags(&network, "terraform-tag", "true"), - 
), - }, - }, - }) -} - -func TestAccCloudStackNetwork_vpc(t *testing.T) { - var network cloudstack.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetwork_vpc, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkExists( - "cloudstack_network.foo", &network), - testAccCheckCloudStackNetworkVPCAttributes(&network), - ), - }, - }, - }) -} - -func TestAccCloudStackNetwork_updateACL(t *testing.T) { - var network cloudstack.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNetwork_acl, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkExists( - "cloudstack_network.foo", &network), - testAccCheckCloudStackNetworkVPCAttributes(&network), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackNetwork_updateACL, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNetworkExists( - "cloudstack_network.foo", &network), - testAccCheckCloudStackNetworkVPCAttributes(&network), - ), - }, - }, - }) -} - -func testAccCheckCloudStackNetworkExists( - n string, network *cloudstack.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - ntwrk, _, err := cs.Network.GetNetworkByID(rs.Primary.ID) - - if err != nil { - return err - } - - if ntwrk.Id != rs.Primary.ID { - return fmt.Errorf("Network not found") - } - - *network = *ntwrk - - return nil - } -} - -func 
testAccCheckCloudStackNetworkBasicAttributes( - network *cloudstack.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if network.Name != "terraform-network" { - return fmt.Errorf("Bad name: %s", network.Name) - } - - if network.Displaytext != "terraform-network" { - return fmt.Errorf("Bad display name: %s", network.Displaytext) - } - - if network.Cidr != CLOUDSTACK_NETWORK_2_CIDR { - return fmt.Errorf("Bad CIDR: %s", network.Cidr) - } - - if network.Networkofferingname != CLOUDSTACK_NETWORK_2_OFFERING { - return fmt.Errorf("Bad network offering: %s", network.Networkofferingname) - } - - return nil - } -} - -func testAccCheckNetworkTags( - n *cloudstack.Network, key string, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - tags := make(map[string]string) - for item := range n.Tags { - tags[n.Tags[item].Key] = n.Tags[item].Value - } - return testAccCheckTags(tags, key, value) - } -} - -func testAccCheckCloudStackNetworkVPCAttributes( - network *cloudstack.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if network.Name != "terraform-network" { - return fmt.Errorf("Bad name: %s", network.Name) - } - - if network.Displaytext != "terraform-network" { - return fmt.Errorf("Bad display name: %s", network.Displaytext) - } - - if network.Cidr != CLOUDSTACK_VPC_NETWORK_CIDR { - return fmt.Errorf("Bad CIDR: %s", network.Cidr) - } - - if network.Networkofferingname != CLOUDSTACK_VPC_NETWORK_OFFERING { - return fmt.Errorf("Bad network offering: %s", network.Networkofferingname) - } - - return nil - } -} - -func testAccCheckCloudStackNetworkDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_network" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No network ID is set") - } - - _, _, err := cs.Network.GetNetworkByID(rs.Primary.ID) - if err == nil 
{ - return fmt.Errorf("Network %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackNetwork_basic = fmt.Sprintf(` -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - zone = "%s" - tags = { - terraform-tag = "true" - } -}`, - CLOUDSTACK_NETWORK_2_CIDR, - CLOUDSTACK_NETWORK_2_OFFERING, - CLOUDSTACK_ZONE) - -var testAccCloudStackNetwork_vpc = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" - zone = "${cloudstack_vpc.foobar.zone}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_NETWORK_CIDR, - CLOUDSTACK_VPC_NETWORK_OFFERING) - -var testAccCloudStackNetwork_acl = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network_acl" "foo" { - name = "foo" - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" - acl_id = "${cloudstack_network_acl.foo.id}" - zone = "${cloudstack_vpc.foobar.zone}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_NETWORK_CIDR, - CLOUDSTACK_VPC_NETWORK_OFFERING) - -var testAccCloudStackNetwork_updateACL = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_network_acl" "bar" { - name = "bar" - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_network" "foo" { - name = "terraform-network" - cidr = "%s" - network_offering = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" - acl_id = 
"${cloudstack_network_acl.bar.id}" - zone = "${cloudstack_vpc.foobar.zone}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_NETWORK_CIDR, - CLOUDSTACK_VPC_NETWORK_OFFERING) diff --git a/builtin/providers/cloudstack/resource_cloudstack_nic.go b/builtin/providers/cloudstack/resource_cloudstack_nic.go deleted file mode 100644 index 063497aee..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_nic.go +++ /dev/null @@ -1,145 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackNIC() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackNICCreate, - Read: resourceCloudStackNICRead, - Delete: resourceCloudStackNICDelete, - - Schema: map[string]*schema.Schema{ - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "virtual_machine_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackNICCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VirtualMachine.NewAddNicToVirtualMachineParams( - d.Get("network_id").(string), - d.Get("virtual_machine_id").(string), - ) - - // If there is a ipaddres supplied, add it to the parameter struct - if ipaddress, ok := d.GetOk("ip_address"); ok { - p.SetIpaddress(ipaddress.(string)) - } - - // Create and attach the new NIC - r, err := Retry(10, retryableAddNicFunc(cs, p)) - if err != nil { - return fmt.Errorf("Error creating the new NIC: %s", err) - } - - found := false - for _, n := range r.(*cloudstack.AddNicToVirtualMachineResponse).Nic { - if n.Networkid == d.Get("network_id").(string) { - 
d.SetId(n.Id) - found = true - break - } - } - - if !found { - return fmt.Errorf("Could not find NIC ID for network ID: %s", d.Get("network_id").(string)) - } - - return resourceCloudStackNICRead(d, meta) -} - -func resourceCloudStackNICRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID(d.Get("virtual_machine_id").(string)) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Instance %s does no longer exist", d.Get("virtual_machine_id").(string)) - d.SetId("") - return nil - } - - return err - } - - // Read NIC info - found := false - for _, n := range vm.Nic { - if n.Id == d.Id() { - d.Set("ip_address", n.Ipaddress) - d.Set("network_id", n.Networkid) - d.Set("virtual_machine_id", vm.Id) - found = true - break - } - } - - if !found { - log.Printf("[DEBUG] NIC for network ID %s does no longer exist", d.Get("network_id").(string)) - d.SetId("") - } - - return nil -} - -func resourceCloudStackNICDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VirtualMachine.NewRemoveNicFromVirtualMachineParams( - d.Id(), - d.Get("virtual_machine_id").(string), - ) - - // Remove the NIC - _, err := cs.VirtualMachine.RemoveNicFromVirtualMachine(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting NIC: %s", err) - } - - return nil -} - -func retryableAddNicFunc(cs *cloudstack.CloudStackClient, p *cloudstack.AddNicToVirtualMachineParams) func() (interface{}, error) { - return func() (interface{}, error) { - r, err := cs.VirtualMachine.AddNicToVirtualMachine(p) - if err != nil { - return 
nil, err - } - return r, nil - } -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_nic_test.go b/builtin/providers/cloudstack/resource_cloudstack_nic_test.go deleted file mode 100644 index a7e6fcff6..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_nic_test.go +++ /dev/null @@ -1,194 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackNIC_basic(t *testing.T) { - var nic cloudstack.Nic - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNICDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNIC_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNICExists( - "cloudstack_instance.foobar", "cloudstack_nic.foo", &nic), - testAccCheckCloudStackNICAttributes(&nic), - ), - }, - }, - }) -} - -func TestAccCloudStackNIC_update(t *testing.T) { - var nic cloudstack.Nic - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackNICDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackNIC_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNICExists( - "cloudstack_instance.foobar", "cloudstack_nic.foo", &nic), - testAccCheckCloudStackNICAttributes(&nic), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackNIC_ipaddress, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackNICExists( - "cloudstack_instance.foobar", "cloudstack_nic.foo", &nic), - testAccCheckCloudStackNICIPAddress(&nic), - resource.TestCheckResourceAttr( - "cloudstack_nic.foo", "ip_address", CLOUDSTACK_2ND_NIC_IPADDRESS), - ), - }, - }, - }) -} - -func testAccCheckCloudStackNICExists( - v, n 
string, nic *cloudstack.Nic) resource.TestCheckFunc { - return func(s *terraform.State) error { - rsv, ok := s.RootModule().Resources[v] - if !ok { - return fmt.Errorf("Not found: %s", v) - } - - if rsv.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - rsn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rsn.Primary.ID == "" { - return fmt.Errorf("No NIC ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - vm, _, err := cs.VirtualMachine.GetVirtualMachineByID(rsv.Primary.ID) - - if err != nil { - return err - } - - for _, n := range vm.Nic { - if n.Id == rsn.Primary.ID { - *nic = n - return nil - } - } - - return fmt.Errorf("NIC not found") - } -} - -func testAccCheckCloudStackNICAttributes( - nic *cloudstack.Nic) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if nic.Networkid != CLOUDSTACK_2ND_NIC_NETWORK { - return fmt.Errorf("Bad network ID: %s", nic.Networkid) - } - - return nil - } -} - -func testAccCheckCloudStackNICIPAddress( - nic *cloudstack.Nic) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if nic.Networkid != CLOUDSTACK_2ND_NIC_NETWORK { - return fmt.Errorf("Bad network ID: %s", nic.Networkname) - } - - if nic.Ipaddress != CLOUDSTACK_2ND_NIC_IPADDRESS { - return fmt.Errorf("Bad IP address: %s", nic.Ipaddress) - } - - return nil - } -} - -func testAccCheckCloudStackNICDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - // Deleting the instance automatically deletes any additional NICs - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_instance" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No instance ID is set") - } - - _, _, err := cs.VirtualMachine.GetVirtualMachineByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Virtual Machine %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var 
testAccCloudStackNIC_basic = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_nic" "foo" { - network_id = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_2ND_NIC_NETWORK) - -var testAccCloudStackNIC_ipaddress = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_nic" "foo" { - network_id = "%s" - ip_address = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_2ND_NIC_NETWORK, - CLOUDSTACK_2ND_NIC_IPADDRESS) diff --git a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go b/builtin/providers/cloudstack/resource_cloudstack_port_forward.go deleted file mode 100644 index 181e41c05..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go +++ /dev/null @@ -1,439 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackPortForward() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackPortForwardCreate, - Read: resourceCloudStackPortForwardRead, - Update: resourceCloudStackPortForwardUpdate, - Delete: resourceCloudStackPortForwardDelete, - - Schema: map[string]*schema.Schema{ - "ip_address_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "managed": 
&schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "forward": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "private_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "public_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "virtual_machine_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "vm_guest_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "uuid": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func resourceCloudStackPortForwardCreate(d *schema.ResourceData, meta interface{}) error { - // We need to set this upfront in order to be able to save a partial state - d.SetId(d.Get("ip_address_id").(string)) - - // Create all forwards that are configured - if nrs := d.Get("forward").(*schema.Set); nrs.Len() > 0 { - // Create an empty schema.Set to hold all forwards - forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set) - - err := createPortForwards(d, meta, forwards, nrs) - - // We need to update this first to preserve the correct state - d.Set("forward", forwards) - - if err != nil { - return err - } - } - - return resourceCloudStackPortForwardRead(d, meta) -} - -func createPortForwards(d *schema.ResourceData, meta interface{}, forwards *schema.Set, nrs *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(nrs.Len()) - - sem := make(chan struct{}, 10) - for _, forward := range nrs.List() { - // Put in a tiny sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(forward map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - 
// Create a single forward - err := createPortForward(d, meta, forward) - - // If we have a UUID, we need to save the forward - if forward["uuid"].(string) != "" { - forwards.Add(forward) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(forward.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func createPortForward(d *schema.ResourceData, meta interface{}, forward map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Make sure all required parameters are there - if err := verifyPortForwardParams(d, forward); err != nil { - return err - } - - vm, _, err := cs.VirtualMachine.GetVirtualMachineByID( - forward["virtual_machine_id"].(string), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - return err - } - - // Create a new parameter struct - p := cs.Firewall.NewCreatePortForwardingRuleParams(d.Id(), forward["private_port"].(int), - forward["protocol"].(string), forward["public_port"].(int), vm.Id) - - if vmGuestIP, ok := forward["vm_guest_ip"]; ok && vmGuestIP.(string) != "" { - p.SetVmguestip(vmGuestIP.(string)) - - // Set the network ID based on the guest IP, needed when the public IP address - // is not associated with any network yet - NICS: - for _, nic := range vm.Nic { - if vmGuestIP.(string) == nic.Ipaddress { - p.SetNetworkid(nic.Networkid) - break NICS - } - for _, ip := range nic.Secondaryip { - if vmGuestIP.(string) == ip.Ipaddress { - p.SetNetworkid(nic.Networkid) - break NICS - } - } - } - } else { - // If no guest IP is configured, use the primary NIC - p.SetNetworkid(vm.Nic[0].Networkid) - } - - // Do not open the firewall automatically in any case - p.SetOpenfirewall(false) - - r, err := cs.Firewall.CreatePortForwardingRule(p) - if err != nil { - return err - } - - forward["uuid"] = r.Id - - return nil -} - -func resourceCloudStackPortForwardRead(d *schema.ResourceData, meta interface{}) error { - cs := 
meta.(*cloudstack.CloudStackClient) - - // First check if the IP address is still associated - _, count, err := cs.Address.GetPublicIpAddressByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] IP address with ID %s is no longer associated", d.Id()) - d.SetId("") - return nil - } - - return err - } - - // Get all the forwards from the running environment - p := cs.Firewall.NewListPortForwardingRulesParams() - p.SetIpaddressid(d.Id()) - p.SetListall(true) - - if err := setProjectid(p, cs, d); err != nil { - return err - } - - l, err := cs.Firewall.ListPortForwardingRules(p) - if err != nil { - return err - } - - // Make a map of all the forwards so we can easily find a forward - forwardMap := make(map[string]*cloudstack.PortForwardingRule, l.Count) - for _, f := range l.PortForwardingRules { - forwardMap[f.Id] = f - } - - // Create an empty schema.Set to hold all forwards - forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set) - - // Read all forwards that are configured - if rs := d.Get("forward").(*schema.Set); rs.Len() > 0 { - for _, forward := range rs.List() { - forward := forward.(map[string]interface{}) - - id, ok := forward["uuid"] - if !ok || id.(string) == "" { - continue - } - - // Get the forward - f, ok := forwardMap[id.(string)] - if !ok { - forward["uuid"] = "" - continue - } - - // Delete the known rule so only unknown rules remain in the ruleMap - delete(forwardMap, id.(string)) - - privPort, err := strconv.Atoi(f.Privateport) - if err != nil { - return err - } - - pubPort, err := strconv.Atoi(f.Publicport) - if err != nil { - return err - } - - // Update the values - forward["protocol"] = f.Protocol - forward["private_port"] = privPort - forward["public_port"] = pubPort - forward["virtual_machine_id"] = f.Virtualmachineid - - // This one is a bit tricky. We only want to update this optional value - // if we've set one ourselves. 
If not this would become a computed value - // and that would mess up the calculated hash of the set item. - if forward["vm_guest_ip"].(string) != "" { - forward["vm_guest_ip"] = f.Vmguestip - } - - forwards.Add(forward) - } - } - - // If this is a managed resource, add all unknown forwards to dummy forwards - managed := d.Get("managed").(bool) - if managed && len(forwardMap) > 0 { - for uuid := range forwardMap { - // Make a dummy forward to hold the unknown UUID - forward := map[string]interface{}{ - "protocol": uuid, - "private_port": 0, - "public_port": 0, - "virtual_machine_id": uuid, - "uuid": uuid, - } - - // Add the dummy forward to the forwards set - forwards.Add(forward) - } - } - - if forwards.Len() > 0 { - d.Set("forward", forwards) - } else if !managed { - d.SetId("") - } - - return nil -} - -func resourceCloudStackPortForwardUpdate(d *schema.ResourceData, meta interface{}) error { - // Check if the forward set as a whole has changed - if d.HasChange("forward") { - o, n := d.GetChange("forward") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // We need to start with a rule set containing all the rules we - // already have and want to keep. 
Any rules that are not deleted - // correctly and any newly created rules, will be added to this - // set to make sure we end up in a consistent state - forwards := o.(*schema.Set).Intersection(n.(*schema.Set)) - - // First loop through all the old forwards and delete them - if ors.Len() > 0 { - err := deletePortForwards(d, meta, forwards, ors) - - // We need to update this first to preserve the correct state - d.Set("forward", forwards) - - if err != nil { - return err - } - } - - // Then loop through all the new forwards and create them - if nrs.Len() > 0 { - err := createPortForwards(d, meta, forwards, nrs) - - // We need to update this first to preserve the correct state - d.Set("forward", forwards) - - if err != nil { - return err - } - } - } - - return resourceCloudStackPortForwardRead(d, meta) -} - -func resourceCloudStackPortForwardDelete(d *schema.ResourceData, meta interface{}) error { - // Create an empty rule set to hold all rules that where - // not deleted correctly - forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set) - - // Delete all forwards - if ors := d.Get("forward").(*schema.Set); ors.Len() > 0 { - err := deletePortForwards(d, meta, forwards, ors) - - // We need to update this first to preserve the correct state - d.Set("forward", forwards) - - if err != nil { - return err - } - } - - return nil -} - -func deletePortForwards(d *schema.ResourceData, meta interface{}, forwards *schema.Set, ors *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(ors.Len()) - - sem := make(chan struct{}, 10) - for _, forward := range ors.List() { - // Put a sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(forward map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Delete a single forward - err := deletePortForward(d, meta, forward) - - // If we have a UUID, we need to save the forward - if forward["uuid"].(string) != "" { - 
forwards.Add(forward) - } - - if err != nil { - errs = multierror.Append(errs, err) - } - - <-sem - }(forward.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func deletePortForward(d *schema.ResourceData, meta interface{}, forward map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create the parameter struct - p := cs.Firewall.NewDeletePortForwardingRuleParams(forward["uuid"].(string)) - - // Delete the forward - if _, err := cs.Firewall.DeletePortForwardingRule(p); err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if !strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", forward["uuid"].(string))) { - return err - } - } - - // Empty the UUID of this rule - forward["uuid"] = "" - - return nil -} - -func verifyPortForwardParams(d *schema.ResourceData, forward map[string]interface{}) error { - protocol := forward["protocol"].(string) - if protocol != "tcp" && protocol != "udp" { - return fmt.Errorf( - "%s is not a valid protocol. 
Valid options are 'tcp' and 'udp'", protocol) - } - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go b/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go deleted file mode 100644 index 458abc67f..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackPortForward_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackPortForwardDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackPortForward_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackPortForwardsExist("cloudstack_port_forward.foo"), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "forward.#", "1"), - ), - }, - }, - }) -} - -func TestAccCloudStackPortForward_update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackPortForwardDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackPortForward_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackPortForwardsExist("cloudstack_port_forward.foo"), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "forward.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackPortForward_update, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudStackPortForwardsExist("cloudstack_port_forward.foo"), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "ip_address_id", CLOUDSTACK_PUBLIC_IPADDRESS), - resource.TestCheckResourceAttr( - "cloudstack_port_forward.foo", "forward.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackPortForwardsExist(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No port forward ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, "uuid") { - continue - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - _, count, err := cs.Firewall.GetPortForwardingRuleByID(id) - - if err != nil { - return err - } - - if count == 0 { - return fmt.Errorf("Port forward for %s not found", k) - } - } - - return nil - } -} - -func testAccCheckCloudStackPortForwardDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_port_forward" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No port forward ID is set") - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, "uuid") { - continue - } - - _, _, err := cs.Firewall.GetPortForwardingRuleByID(id) - if err == nil { - return fmt.Errorf("Port forward %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackPortForward_basic = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_port_forward" "foo" { - ip_address_id = "%s" - - forward { - protocol = "tcp" - private_port = 443 - public_port = 8443 - virtual_machine_id = 
"${cloudstack_instance.foobar.id}" - } -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_PUBLIC_IPADDRESS) - -var testAccCloudStackPortForward_update = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_port_forward" "foo" { - ip_address_id = "%s" - - forward { - protocol = "tcp" - private_port = 443 - public_port = 8443 - virtual_machine_id = "${cloudstack_instance.foobar.id}" - } - - forward { - protocol = "tcp" - private_port = 80 - public_port = 8080 - virtual_machine_id = "${cloudstack_instance.foobar.id}" - } -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_PUBLIC_IPADDRESS) diff --git a/builtin/providers/cloudstack/resource_cloudstack_private_gateway.go b/builtin/providers/cloudstack/resource_cloudstack_private_gateway.go deleted file mode 100644 index b4194c0bd..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_private_gateway.go +++ /dev/null @@ -1,173 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackPrivateGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackPrivateGatewayCreate, - Read: resourceCloudStackPrivateGatewayRead, - Update: resourceCloudStackPrivateGatewayUpdate, - Delete: resourceCloudStackPrivateGatewayDelete, - - Schema: map[string]*schema.Schema{ - "gateway": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "netmask": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vlan": &schema.Schema{ - 
Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "physical_network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "network_offering": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "acl_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackPrivateGatewayCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - ipaddress := d.Get("ip_address").(string) - networkofferingid := d.Get("network_offering").(string) - - // Create a new parameter struct - p := cs.VPC.NewCreatePrivateGatewayParams( - d.Get("gateway").(string), - ipaddress, - d.Get("netmask").(string), - d.Get("vlan").(string), - d.Get("vpc_id").(string), - ) - - // Retrieve the network_offering ID - if networkofferingid != "" { - networkofferingid, e := retrieveID(cs, "network_offering", networkofferingid) - if e != nil { - return e.Error() - } - p.SetNetworkofferingid(networkofferingid) - } - - // Check if we want to associate an ACL - if aclid, ok := d.GetOk("acl_id"); ok { - // Set the acl ID - p.SetAclid(aclid.(string)) - } - - // Create the new private gateway - r, err := cs.VPC.CreatePrivateGateway(p) - if err != nil { - return fmt.Errorf("Error creating private gateway for %s: %s", ipaddress, err) - } - - d.SetId(r.Id) - - return resourceCloudStackPrivateGatewayRead(d, meta) -} - -func resourceCloudStackPrivateGatewayRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the private gateway details - gw, count, err := cs.VPC.GetPrivateGatewayByID(d.Id()) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Private gateway %s does no longer exist", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("gateway", gw.Gateway) - 
d.Set("ip_address", gw.Ipaddress) - d.Set("netmask", gw.Netmask) - d.Set("vlan", gw.Vlan) - d.Set("acl_id", gw.Aclid) - d.Set("vpc_id", gw.Vpcid) - - return nil -} - -func resourceCloudStackPrivateGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Replace the ACL if the ID has changed - if d.HasChange("acl_id") { - p := cs.NetworkACL.NewReplaceNetworkACLListParams(d.Get("acl_id").(string)) - p.SetNetworkid(d.Id()) - - _, err := cs.NetworkACL.ReplaceNetworkACLList(p) - if err != nil { - return fmt.Errorf("Error replacing ACL: %s", err) - } - } - - return resourceCloudStackNetworkRead(d, meta) -} - -func resourceCloudStackPrivateGatewayDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPC.NewDeletePrivateGatewayParams(d.Id()) - - // Delete the private gateway - _, err := cs.VPC.DeletePrivateGateway(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting private gateway %s: %s", d.Id(), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_private_gateway_test.go b/builtin/providers/cloudstack/resource_cloudstack_private_gateway_test.go deleted file mode 100644 index 77ccad870..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_private_gateway_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackPrivateGateway_basic(t *testing.T) { - var gateway cloudstack.PrivateGateway - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackPrivateGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackPrivateGateway_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackPrivateGatewayExists( - "cloudstack_private_gateway.foo", &gateway), - testAccCheckCloudStackPrivateGatewayAttributes(&gateway), - ), - }, - }, - }) -} - -func testAccCheckCloudStackPrivateGatewayExists( - n string, gateway *cloudstack.PrivateGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Private Gateway ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - pgw, _, err := cs.VPC.GetPrivateGatewayByID(rs.Primary.ID) - - if err != nil { - return err - } - - if pgw.Id != rs.Primary.ID { - return fmt.Errorf("Private Gateway not found") - } - - *gateway = *pgw - - return nil - } -} - -func testAccCheckCloudStackPrivateGatewayAttributes( - gateway *cloudstack.PrivateGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if gateway.Gateway != CLOUDSTACK_PRIVGW_GATEWAY { - return fmt.Errorf("Bad Gateway: %s", gateway.Gateway) - } - - if gateway.Ipaddress != CLOUDSTACK_PRIVGW_IPADDRESS { - return fmt.Errorf("Bad Gateway: %s", gateway.Ipaddress) - } - - if gateway.Netmask != CLOUDSTACK_PRIVGW_NETMASK { - return fmt.Errorf("Bad Gateway: %s", gateway.Netmask) - } - - return nil - } -} - -func testAccCheckCloudStackPrivateGatewayDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_private_gateway" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No private gateway ID is set") - } - - gateway, _, err := 
cs.VPC.GetPrivateGatewayByID(rs.Primary.ID) - if err == nil && gateway.Id != "" { - return fmt.Errorf("Private gateway %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackPrivateGateway_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_private_gateway" "foo" { - gateway = "%s" - ip_address = "%s" - netmask = "%s" - vlan = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_PRIVGW_GATEWAY, - CLOUDSTACK_PRIVGW_IPADDRESS, - CLOUDSTACK_PRIVGW_NETMASK, - CLOUDSTACK_PRIVGW_VLAN) diff --git a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress.go b/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress.go deleted file mode 100644 index c44e34299..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress.go +++ /dev/null @@ -1,154 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackSecondaryIPAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackSecondaryIPAddressCreate, - Read: resourceCloudStackSecondaryIPAddressRead, - Delete: resourceCloudStackSecondaryIPAddressDelete, - - Schema: map[string]*schema.Schema{ - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "nic_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "virtual_machine_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackSecondaryIPAddressCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - nicid, ok := 
d.GetOk("nic_id") - if !ok { - virtualmachineid := d.Get("virtual_machine_id").(string) - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID(virtualmachineid) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Virtual Machine %s does no longer exist", virtualmachineid) - d.SetId("") - return nil - } - return err - } - - nicid = vm.Nic[0].Id - } - - // Create a new parameter struct - p := cs.Nic.NewAddIpToNicParams(nicid.(string)) - - // If there is a ipaddres supplied, add it to the parameter struct - if ipaddress, ok := d.GetOk("ip_address"); ok { - p.SetIpaddress(ipaddress.(string)) - } - - ip, err := cs.Nic.AddIpToNic(p) - if err != nil { - return err - } - - d.SetId(ip.Id) - - return nil -} - -func resourceCloudStackSecondaryIPAddressRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - virtualmachineid := d.Get("virtual_machine_id").(string) - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID(virtualmachineid) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Virtual Machine %s does no longer exist", virtualmachineid) - d.SetId("") - return nil - } - return err - } - - nicid, ok := d.GetOk("nic_id") - if !ok { - nicid = vm.Nic[0].Id - } - - p := cs.Nic.NewListNicsParams(virtualmachineid) - p.SetNicid(nicid.(string)) - - l, err := cs.Nic.ListNics(p) - if err != nil { - return err - } - - if l.Count == 0 { - log.Printf("[DEBUG] NIC %s does no longer exist", d.Get("nic_id").(string)) - d.SetId("") - return nil - } - - if l.Count > 1 { - return fmt.Errorf("Found more then one possible result: %v", l.Nics) - } - - for _, ip := range l.Nics[0].Secondaryip { - if ip.Id == d.Id() { - d.Set("ip_address", ip.Ipaddress) - d.Set("nic_id", l.Nics[0].Id) - d.Set("virtual_machine_id", l.Nics[0].Virtualmachineid) - return nil - } - } - - log.Printf("[DEBUG] IP %s no longer exist", d.Get("ip_address").(string)) - 
d.SetId("") - - return nil -} - -func resourceCloudStackSecondaryIPAddressDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.Nic.NewRemoveIpFromNicParams(d.Id()) - - log.Printf("[INFO] Removing secondary IP address: %s", d.Get("ip_address").(string)) - if _, err := cs.Nic.RemoveIpFromNic(p); err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error removing secondary IP address: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go b/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go deleted file mode 100644 index 879ebd4a1..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackSecondaryIPAddress_basic(t *testing.T) { - var ip cloudstack.AddIpToNicResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSecondaryIPAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSecondaryIPAddress_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecondaryIPAddressExists( - "cloudstack_secondary_ipaddress.foo", &ip), - ), - }, - }, - }) -} - -func TestAccCloudStackSecondaryIPAddress_fixedIP(t *testing.T) { - var ip cloudstack.AddIpToNicResponse - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSecondaryIPAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSecondaryIPAddress_fixedIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecondaryIPAddressExists( - "cloudstack_secondary_ipaddress.foo", &ip), - testAccCheckCloudStackSecondaryIPAddressAttributes(&ip), - resource.TestCheckResourceAttr( - "cloudstack_secondary_ipaddress.foo", "ip_address", CLOUDSTACK_NETWORK_1_IPADDRESS1), - ), - }, - }, - }) -} - -func testAccCheckCloudStackSecondaryIPAddressExists( - n string, ip *cloudstack.AddIpToNicResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP address ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - virtualmachine, ok := rs.Primary.Attributes["virtual_machine_id"] - if !ok { - virtualmachine, ok = rs.Primary.Attributes["virtual_machine"] - } - - // Retrieve the virtual_machine ID - virtualmachineid, e := retrieveID(cs, "virtual_machine", virtualmachine) - if e != nil { - return e.Error() - } - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID(virtualmachineid) - if err != nil { - if count == 0 { - return fmt.Errorf("Instance not found") - } - return err - } - - nicid, ok := rs.Primary.Attributes["nic_id"] - if !ok { - nicid, ok = rs.Primary.Attributes["nicid"] - } - if !ok { - nicid = vm.Nic[0].Id - } - - p := cs.Nic.NewListNicsParams(virtualmachineid) - p.SetNicid(nicid) - - l, err := cs.Nic.ListNics(p) - if err != nil { - return err - } - - if l.Count == 0 { - return fmt.Errorf("NIC not found") - } - - if l.Count > 1 { - return fmt.Errorf("Found more then one possible result: %v", l.Nics) - } - - for _, sip := range l.Nics[0].Secondaryip { - if sip.Id == 
rs.Primary.ID { - ip.Ipaddress = sip.Ipaddress - ip.Nicid = l.Nics[0].Id - return nil - } - } - - return fmt.Errorf("IP address not found") - } -} - -func testAccCheckCloudStackSecondaryIPAddressAttributes( - ip *cloudstack.AddIpToNicResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if ip.Ipaddress != CLOUDSTACK_NETWORK_1_IPADDRESS1 { - return fmt.Errorf("Bad IP address: %s", ip.Ipaddress) - } - return nil - } -} - -func testAccCheckCloudStackSecondaryIPAddressDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_secondary_ipaddress" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP address ID is set") - } - - virtualmachine, ok := rs.Primary.Attributes["virtual_machine_id"] - if !ok { - virtualmachine, ok = rs.Primary.Attributes["virtual_machine"] - } - - // Retrieve the virtual_machine ID - virtualmachineid, e := retrieveID(cs, "virtual_machine", virtualmachine) - if e != nil { - return e.Error() - } - - // Get the virtual machine details - vm, count, err := cs.VirtualMachine.GetVirtualMachineByID(virtualmachineid) - if err != nil { - if count == 0 { - return nil - } - return err - } - - nicid, ok := rs.Primary.Attributes["nic_id"] - if !ok { - nicid, ok = rs.Primary.Attributes["nicid"] - } - if !ok { - nicid = vm.Nic[0].Id - } - - p := cs.Nic.NewListNicsParams(virtualmachineid) - p.SetNicid(nicid) - - l, err := cs.Nic.ListNics(p) - if err != nil { - return err - } - - if l.Count == 0 { - return fmt.Errorf("NIC not found") - } - - if l.Count > 1 { - return fmt.Errorf("Found more then one possible result: %v", l.Nics) - } - - for _, sip := range l.Nics[0].Secondaryip { - if sip.Id == rs.Primary.ID { - return fmt.Errorf("IP address %s still exists", rs.Primary.ID) - } - } - - return nil - } - - return nil -} - -var testAccCloudStackSecondaryIPAddress_basic = fmt.Sprintf(` -resource 
"cloudstack_instance" "foobar" { - name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_secondary_ipaddress" "foo" { - virtual_machine_id = "${cloudstack_instance.foobar.id}" -} -`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE) - -var testAccCloudStackSecondaryIPAddress_fixedIP = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - expunge = true -} - -resource "cloudstack_secondary_ipaddress" "foo" { - ip_address = "%s" - virtual_machine_id = "${cloudstack_instance.foobar.id}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, - CLOUDSTACK_NETWORK_1_IPADDRESS1) diff --git a/builtin/providers/cloudstack/resource_cloudstack_security_group.go b/builtin/providers/cloudstack/resource_cloudstack_security_group.go deleted file mode 100644 index 2b198bd88..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_security_group.go +++ /dev/null @@ -1,125 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackSecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackSecurityGroupCreate, - Read: resourceCloudStackSecurityGroupRead, - Delete: resourceCloudStackSecurityGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func 
resourceCloudStackSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - - // Create a new parameter struct - p := cs.SecurityGroup.NewCreateSecurityGroupParams(name) - - // Set the description - if description, ok := d.GetOk("description"); ok { - p.SetDescription(description.(string)) - } else { - p.SetDescription(name) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - r, err := cs.SecurityGroup.CreateSecurityGroup(p) - if err != nil { - return fmt.Errorf("Error creating security group %s: %s", name, err) - } - - d.SetId(r.Id) - - return resourceCloudStackSecurityGroupRead(d, meta) -} - -func resourceCloudStackSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the security group details - sg, count, err := cs.SecurityGroup.GetSecurityGroupByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Security group %s does not longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - // Update the config - d.Set("name", sg.Name) - d.Set("description", sg.Description) - - setValueOrID(d, "project", sg.Project, sg.Projectid) - - return nil -} - -func resourceCloudStackSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.SecurityGroup.NewDeleteSecurityGroupParams() - p.SetId(d.Id()) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Delete the security group - _, err := cs.SecurityGroup.DeleteSecurityGroup(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if 
strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting security group: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_security_group_rule.go b/builtin/providers/cloudstack/resource_cloudstack_security_group_rule.go deleted file mode 100644 index 4a538201e..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_security_group_rule.go +++ /dev/null @@ -1,631 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strconv" - "strings" - "sync" - "time" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -type authorizeSecurityGroupParams interface { - SetCidrlist([]string) - SetIcmptype(int) - SetIcmpcode(int) - SetStartport(int) - SetEndport(int) - SetProtocol(string) - SetSecuritygroupid(string) - SetUsersecuritygrouplist(map[string]string) -} - -func resourceCloudStackSecurityGroupRule() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackSecurityGroupRuleCreate, - Read: resourceCloudStackSecurityGroupRuleRead, - Update: resourceCloudStackSecurityGroupRuleUpdate, - Delete: resourceCloudStackSecurityGroupRuleDelete, - - Schema: map[string]*schema.Schema{ - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "rule": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_list": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "icmp_type": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "icmp_code": 
&schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "traffic_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "ingress", - }, - - "user_security_group_list": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "uuids": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "parallelism": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceCloudStackSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { - // We need to set this upfront in order to be able to save a partial state - d.SetId(d.Get("security_group_id").(string)) - - // Create all rules that are configured - if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 { - // Create an empty rule set to hold all newly created rules - rules := resourceCloudStackSecurityGroupRule().Schema["rule"].ZeroValue().(*schema.Set) - - err := createSecurityGroupRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return resourceCloudStackSecurityGroupRuleRead(d, meta) -} - -func createSecurityGroupRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, nrs *schema.Set) error { - cs := meta.(*cloudstack.CloudStackClient) - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(nrs.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range nrs.List() { - // Put in a tiny sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule 
map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Make sure all required parameters are there - if err := verifySecurityGroupRuleParams(d, rule); err != nil { - errs = multierror.Append(errs, err) - return - } - - var p authorizeSecurityGroupParams - - if cidrList, ok := rule["cidr_list"].(*schema.Set); ok && cidrList.Len() > 0 { - for _, cidr := range cidrList.List() { - // Create a new parameter struct - switch rule["traffic_type"].(string) { - case "ingress": - p = cs.SecurityGroup.NewAuthorizeSecurityGroupIngressParams() - case "egress": - p = cs.SecurityGroup.NewAuthorizeSecurityGroupEgressParams() - } - - p.SetSecuritygroupid(d.Id()) - p.SetCidrlist([]string{cidr.(string)}) - - // Create a single rule - err := createSecurityGroupRule(d, meta, rule, p, cidr.(string)) - if err != nil { - errs = multierror.Append(errs, err) - } - } - } - - if usgList, ok := rule["user_security_group_list"].(*schema.Set); ok && usgList.Len() > 0 { - for _, usg := range usgList.List() { - sg, _, err := cs.SecurityGroup.GetSecurityGroupByName( - usg.(string), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - // Create a new parameter struct - switch rule["traffic_type"].(string) { - case "ingress": - p = cs.SecurityGroup.NewAuthorizeSecurityGroupIngressParams() - case "egress": - p = cs.SecurityGroup.NewAuthorizeSecurityGroupEgressParams() - } - - p.SetSecuritygroupid(d.Id()) - p.SetUsersecuritygrouplist(map[string]string{sg.Account: usg.(string)}) - - // Create a single rule - err = createSecurityGroupRule(d, meta, rule, p, usg.(string)) - if err != nil { - errs = multierror.Append(errs, err) - } - } - } - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func createSecurityGroupRule(d 
*schema.ResourceData, meta interface{}, rule map[string]interface{}, p authorizeSecurityGroupParams, uuid string) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - // Set the protocol - p.SetProtocol(rule["protocol"].(string)) - - // If the protocol is ICMP set the needed ICMP parameters - if rule["protocol"].(string) == "icmp" { - p.SetIcmptype(rule["icmp_type"].(int)) - p.SetIcmpcode(rule["icmp_code"].(int)) - - ruleID, err := createIngressOrEgressRule(cs, p) - if err != nil { - return err - } - - uuids[uuid+"icmp"] = ruleID - rule["uuids"] = uuids - } - - // If protocol is TCP or UDP, loop through all ports - if rule["protocol"].(string) == "tcp" || rule["protocol"].(string) == "udp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all processed ports - ports := &schema.Set{F: schema.HashString} - - for _, port := range ps.List() { - if _, ok := uuids[uuid+port.(string)]; ok { - ports.Add(port) - rule["ports"] = ports - continue - } - - m := splitPorts.FindStringSubmatch(port.(string)) - - startPort, err := strconv.Atoi(m[1]) - if err != nil { - return err - } - - endPort := startPort - if m[2] != "" { - endPort, err = strconv.Atoi(m[2]) - if err != nil { - return err - } - } - - p.SetStartport(startPort) - p.SetEndport(endPort) - - ruleID, err := createIngressOrEgressRule(cs, p) - if err != nil { - return err - } - - ports.Add(port) - rule["ports"] = ports - - uuids[uuid+port.(string)] = ruleID - rule["uuids"] = uuids - } - } - } - - return nil -} - -func createIngressOrEgressRule(cs *cloudstack.CloudStackClient, p authorizeSecurityGroupParams) (string, error) { - switch p := p.(type) { - case *cloudstack.AuthorizeSecurityGroupIngressParams: - r, err := cs.SecurityGroup.AuthorizeSecurityGroupIngress(p) - if err != nil { - return "", err - } - return r.Ruleid, nil - case *cloudstack.AuthorizeSecurityGroupEgressParams: - r, err := 
cs.SecurityGroup.AuthorizeSecurityGroupEgress(p) - if err != nil { - return "", err - } - return r.Ruleid, nil - default: - return "", fmt.Errorf("Unknown authorize security group rule type: %v", p) - } -} - -func resourceCloudStackSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the security group details - sg, count, err := cs.SecurityGroup.GetSecurityGroupByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Security group %s does not longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - // Make a map of all the rule indexes so we can easily find a rule - sgRules := append(sg.Ingressrule, sg.Egressrule...) - ruleIndex := make(map[string]int, len(sgRules)) - for idx, r := range sgRules { - ruleIndex[r.Ruleid] = idx - } - - // Create an empty schema.Set to hold all rules - rules := resourceCloudStackSecurityGroupRule().Schema["rule"].ZeroValue().(*schema.Set) - - // Read all rules that are configured - if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 { - for _, rule := range rs.List() { - rule := rule.(map[string]interface{}) - - // First get any existing values - cidrList, cidrListOK := rule["cidr_list"].(*schema.Set) - usgList, usgListOk := rule["user_security_group_list"].(*schema.Set) - - // Then reset the values to a new empty set - rule["cidr_list"] = &schema.Set{F: schema.HashString} - rule["user_security_group_list"] = &schema.Set{F: schema.HashString} - - if cidrListOK && cidrList.Len() > 0 { - for _, cidr := range cidrList.List() { - readSecurityGroupRule(sg, ruleIndex, rule, cidr.(string)) - } - } - - if usgListOk && usgList.Len() > 0 { - for _, usg := range usgList.List() { - readSecurityGroupRule(sg, ruleIndex, rule, usg.(string)) - } - } - - rules.Add(rule) - } - } - - return nil -} - -func readSecurityGroupRule(sg *cloudstack.SecurityGroup, ruleIndex 
map[string]int, rule map[string]interface{}, uuid string) { - uuids := rule["uuids"].(map[string]interface{}) - sgRules := append(sg.Ingressrule, sg.Egressrule...) - - if rule["protocol"].(string) == "icmp" { - id, ok := uuids[uuid+"icmp"] - if !ok { - return - } - - // Get the rule - idx, ok := ruleIndex[id.(string)] - if !ok { - delete(uuids, uuid+"icmp") - return - } - - r := sgRules[idx] - - // Update the values - if r.Cidr != "" { - rule["cidr_list"].(*schema.Set).Add(r.Cidr) - } - - if r.Securitygroupname != "" { - rule["user_security_group_list"].(*schema.Set).Add(r.Securitygroupname) - } - - rule["protocol"] = r.Protocol - rule["icmp_type"] = r.Icmptype - rule["icmp_code"] = r.Icmpcode - } - - // If protocol is tcp or udp, loop through all ports - if rule["protocol"].(string) == "tcp" || rule["protocol"].(string) == "udp" { - if ps := rule["ports"].(*schema.Set); ps.Len() > 0 { - - // Create an empty schema.Set to hold all ports - ports := &schema.Set{F: schema.HashString} - - // Loop through all ports and retrieve their info - for _, port := range ps.List() { - id, ok := uuids[uuid+port.(string)] - if !ok { - continue - } - - // Get the rule - idx, ok := ruleIndex[id.(string)] - if !ok { - delete(uuids, uuid+port.(string)) - continue - } - - r := sgRules[idx] - - // Create a set with all CIDR's - cidrs := &schema.Set{F: schema.HashString} - for _, cidr := range strings.Split(r.Cidr, ",") { - cidrs.Add(cidr) - } - - // Update the values - rule["protocol"] = r.Protocol - ports.Add(port) - } - - // If there is at least one port found, add this rule to the rules set - if ports.Len() > 0 { - rule["ports"] = ports - } - } - } -} - -func resourceCloudStackSecurityGroupRuleUpdate(d *schema.ResourceData, meta interface{}) error { - // Check if the rule set as a whole has changed - if d.HasChange("rule") { - o, n := d.GetChange("rule") - ors := o.(*schema.Set).Difference(n.(*schema.Set)) - nrs := n.(*schema.Set).Difference(o.(*schema.Set)) - - // We need to start 
with a rule set containing all the rules we - // already have and want to keep. Any rules that are not deleted - // correctly and any newly created rules, will be added to this - // set to make sure we end up in a consistent state - rules := o.(*schema.Set).Intersection(n.(*schema.Set)) - - // First loop through all the old rules destroy them - if ors.Len() > 0 { - err := deleteSecurityGroupRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - // Then loop through all the new rules and delete them - if nrs.Len() > 0 { - err := createSecurityGroupRules(d, meta, rules, nrs) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - } - - return resourceCloudStackSecurityGroupRuleRead(d, meta) -} - -func resourceCloudStackSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { - // Create an empty rule set to hold all rules that where - // not deleted correctly - rules := resourceCloudStackSecurityGroupRule().Schema["rule"].ZeroValue().(*schema.Set) - - // Delete all rules - if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 { - err := deleteSecurityGroupRules(d, meta, rules, ors) - - // We need to update this first to preserve the correct state - d.Set("rule", rules) - - if err != nil { - return err - } - } - - return nil -} - -func deleteSecurityGroupRules(d *schema.ResourceData, meta interface{}, rules *schema.Set, ors *schema.Set) error { - var errs *multierror.Error - - var wg sync.WaitGroup - wg.Add(ors.Len()) - - sem := make(chan struct{}, d.Get("parallelism").(int)) - for _, rule := range ors.List() { - // Put a sleep here to avoid DoS'ing the API - time.Sleep(500 * time.Millisecond) - - go func(rule map[string]interface{}) { - defer wg.Done() - sem <- struct{}{} - - // Create a single rule - err := deleteSecurityGroupRule(d, meta, rule) - if err != nil { - errs = 
multierror.Append(errs, err) - } - - // If we have at least one UUID, we need to save the rule - if len(rule["uuids"].(map[string]interface{})) > 0 { - rules.Add(rule) - } - - <-sem - }(rule.(map[string]interface{})) - } - - wg.Wait() - - return errs.ErrorOrNil() -} - -func deleteSecurityGroupRule(d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - uuids := rule["uuids"].(map[string]interface{}) - - for k, id := range uuids { - // We don't care about the count here, so just continue - if k == "%" { - continue - } - - var err error - switch rule["traffic_type"].(string) { - case "ingress": - p := cs.SecurityGroup.NewRevokeSecurityGroupIngressParams(id.(string)) - _, err = cs.SecurityGroup.RevokeSecurityGroupIngress(p) - case "egress": - p := cs.SecurityGroup.NewRevokeSecurityGroupEgressParams(id.(string)) - _, err = cs.SecurityGroup.RevokeSecurityGroupEgress(p) - } - - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", id.(string))) { - delete(uuids, k) - continue - } - - return err - } - - // Delete the UUID of this rule - delete(uuids, k) - } - - return nil -} - -func verifySecurityGroupRuleParams(d *schema.ResourceData, rule map[string]interface{}) error { - cidrList, cidrListOK := rule["cidr_list"].(*schema.Set) - usgList, usgListOK := rule["user_security_group_list"].(*schema.Set) - - if (!cidrListOK || cidrList.Len() == 0) && (!usgListOK || usgList.Len() == 0) { - return fmt.Errorf( - "You must supply at least one 'cidr_list' or `user_security_group_ids` entry") - } - - protocol := rule["protocol"].(string) - switch protocol { - case "icmp": - if _, ok := rule["icmp_type"]; !ok { - return fmt.Errorf( - "Parameter icmp_type is a required parameter when using protocol 'icmp'") - } - if _, ok := 
rule["icmp_code"]; !ok { - return fmt.Errorf( - "Parameter icmp_code is a required parameter when using protocol 'icmp'") - } - case "tcp", "udp": - if ports, ok := rule["ports"].(*schema.Set); ok { - for _, port := range ports.List() { - m := splitPorts.FindStringSubmatch(port.(string)) - if m == nil { - return fmt.Errorf( - "%q is not a valid port value. Valid options are '80' or '80-90'", port.(string)) - } - } - } else { - return fmt.Errorf( - "Parameter ports is a required parameter when *not* using protocol 'icmp'") - } - default: - _, err := strconv.ParseInt(protocol, 0, 0) - if err != nil { - return fmt.Errorf( - "%q is not a valid protocol. Valid options are 'tcp', 'udp' and 'icmp'", protocol) - } - } - - traffic := rule["traffic_type"].(string) - if traffic != "ingress" && traffic != "egress" { - return fmt.Errorf( - "Parameter traffic_type only accepts 'ingress' or 'egress' as values") - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_security_group_rule_test.go b/builtin/providers/cloudstack/resource_cloudstack_security_group_rule_test.go deleted file mode 100644 index cef5a39a2..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_security_group_rule_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackSecurityGroupRule_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSecurityGroupRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecurityGroupRulesExist("cloudstack_security_group.foo"), - resource.TestCheckResourceAttr( - 
"cloudstack_security_group_rule.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.ports.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.traffic_type", "ingress"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.ports.3638101695", "443"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.traffic_type", "egress"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.user_security_group_list.1089118859", "terraform-security-group-bar"), - ), - }, - }, - }) -} - -func TestAccCloudStackSecurityGroupRule_update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSecurityGroupRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecurityGroupRulesExist("cloudstack_security_group.foo"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.cidr_list.3056857544", "172.18.100.0/24"), - 
resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.ports.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1322309156.traffic_type", "ingress"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.ports.3638101695", "443"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.traffic_type", "egress"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3666289950.user_security_group_list.1089118859", "terraform-security-group-bar"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackSecurityGroupRule_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecurityGroupRulesExist("cloudstack_security_group.foo"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.#", "3"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3156342770.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3156342770.cidr_list.951907883", "172.18.200.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3156342770.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3156342770.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3156342770.ports.3638101695", "443"), - 
resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3839437815.cidr_list.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3839437815.cidr_list.3056857544", "172.18.100.0/24"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3839437815.icmp_code", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.3839437815.icmp_type", "-1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1804489748.protocol", "tcp"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1804489748.ports.#", "1"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1804489748.ports.1889509032", "80"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1804489748.traffic_type", "egress"), - resource.TestCheckResourceAttr( - "cloudstack_security_group_rule.foo", "rule.1804489748.user_security_group_list.1089118859", "terraform-security-group-bar"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackSecurityGroupRulesExist(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No security group rule ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - sg, count, err := cs.SecurityGroup.GetSecurityGroupByID(rs.Primary.ID) - if err != nil { - if count == 0 { - return fmt.Errorf("Security group %s not found", rs.Primary.ID) - } - return err - } - - // Make a map of all the rule indexes so we can easily find a rule - sgRules := append(sg.Ingressrule, sg.Egressrule...) 
- ruleIndex := make(map[string]int, len(sgRules)) - for idx, r := range sgRules { - ruleIndex[r.Ruleid] = idx - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - if _, ok := ruleIndex[id]; !ok { - return fmt.Errorf("Security group rule %s not found", id) - } - } - - return nil - } -} - -func testAccCheckCloudStackSecurityGroupRuleDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_security_group_rule" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No security group rule ID is set") - } - - sg, count, err := cs.SecurityGroup.GetSecurityGroupByID(rs.Primary.ID) - if err != nil { - if count == 0 { - continue - } - return err - } - - // Make a map of all the rule indexes so we can easily find a rule - sgRules := append(sg.Ingressrule, sg.Egressrule...) - ruleIndex := make(map[string]int, len(sgRules)) - for idx, r := range sgRules { - ruleIndex[r.Ruleid] = idx - } - - for k, id := range rs.Primary.Attributes { - if !strings.Contains(k, ".uuids.") || strings.HasSuffix(k, ".uuids.%") { - continue - } - - if _, ok := ruleIndex[id]; ok { - return fmt.Errorf("Security group rule %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackSecurityGroupRule_basic = fmt.Sprintf(` -resource "cloudstack_security_group" "foo" { - name = "terraform-security-group-foo" - description = "terraform-security-group-text" -} - -resource "cloudstack_security_group" "bar" { - name = "terraform-security-group-bar" - description = "terraform-security-group-text" -} - -resource "cloudstack_security_group_rule" "foo" { - security_group_id = "${cloudstack_security_group.foo.id}" - - rule { - cidr_list = ["172.18.100.0/24"] - protocol = "tcp" - ports = ["80"] - } - - rule { - protocol = "tcp" - ports = ["80", "443"] - traffic_type = 
"egress" - user_security_group_list = ["terraform-security-group-bar"] - } - - depends_on = ["cloudstack_security_group.bar"] -}`) - -var testAccCloudStackSecurityGroupRule_update = fmt.Sprintf(` -resource "cloudstack_security_group" "foo" { - name = "terraform-security-group-foo" - description = "terraform-security-group-text" -} - -resource "cloudstack_security_group" "bar" { - name = "terraform-security-group-bar" - description = "terraform-security-group-text" -} - -resource "cloudstack_security_group_rule" "foo" { - security_group_id = "${cloudstack_security_group.foo.id}" - - rule { - cidr_list = ["172.18.100.0/24", "172.18.200.0/24"] - protocol = "tcp" - ports = ["80", "443"] - } - - rule { - cidr_list = ["172.18.100.0/24"] - protocol = "icmp" - icmp_type = "-1" - icmp_code = "-1" - traffic_type = "ingress" - } - - rule { - protocol = "tcp" - ports = ["80"] - traffic_type = "egress" - user_security_group_list = ["terraform-security-group-bar"] - } - - depends_on = ["cloudstack_security_group.bar"] -}`) diff --git a/builtin/providers/cloudstack/resource_cloudstack_security_group_test.go b/builtin/providers/cloudstack/resource_cloudstack_security_group_test.go deleted file mode 100644 index 50a1ebcb4..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_security_group_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackSecurityGroup_basic(t *testing.T) { - var sg cloudstack.SecurityGroup - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSecurityGroup_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSecurityGroupExists( - 
"cloudstack_security_group.foo", &sg), - testAccCheckCloudStackSecurityGroupBasicAttributes(&sg), - ), - }, - }, - }) -} - -func testAccCheckCloudStackSecurityGroupExists( - n string, sg *cloudstack.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No security group ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - resp, _, err := cs.SecurityGroup.GetSecurityGroupByID(rs.Primary.ID) - if err != nil { - return err - } - - if resp.Id != rs.Primary.ID { - return fmt.Errorf("Network ACL not found") - } - - *sg = *resp - - return nil - } -} - -func testAccCheckCloudStackSecurityGroupBasicAttributes( - sg *cloudstack.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if sg.Name != "terraform-security-group" { - return fmt.Errorf("Bad name: %s", sg.Name) - } - - if sg.Description != "terraform-security-group-text" { - return fmt.Errorf("Bad description: %s", sg.Description) - } - - return nil - } -} - -func testAccCheckCloudStackSecurityGroupDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_security_group" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No security group ID is set") - } - - _, _, err := cs.SecurityGroup.GetSecurityGroupByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("Security group list %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackSecurityGroup_basic = fmt.Sprintf(` -resource "cloudstack_security_group" "foo" { - name = "terraform-security-group" - description = "terraform-security-group-text" -}`) diff --git a/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair.go 
b/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair.go deleted file mode 100644 index f4fc09988..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair.go +++ /dev/null @@ -1,145 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackSSHKeyPair() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackSSHKeyPairCreate, - Read: resourceCloudStackSSHKeyPairRead, - Delete: resourceCloudStackSSHKeyPairDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "public_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "private_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCloudStackSSHKeyPairCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - publicKey := d.Get("public_key").(string) - - if publicKey != "" { - // Register supplied key - p := cs.SSH.NewRegisterSSHKeyPairParams(name, publicKey) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - _, err := cs.SSH.RegisterSSHKeyPair(p) - if err != nil { - return err - } - } else { - // No key supplied, must create one and return the private key - p := cs.SSH.NewCreateSSHKeyPairParams(name) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - r, err := cs.SSH.CreateSSHKeyPair(p) - if err != nil { - return err - } - 
d.Set("private_key", r.Privatekey) - } - - log.Printf("[DEBUG] Key pair successfully generated at Cloudstack") - d.SetId(name) - - return resourceCloudStackSSHKeyPairRead(d, meta) -} - -func resourceCloudStackSSHKeyPairRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - log.Printf("[DEBUG] looking for key pair with name %s", d.Id()) - - p := cs.SSH.NewListSSHKeyPairsParams() - p.SetName(d.Id()) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - r, err := cs.SSH.ListSSHKeyPairs(p) - if err != nil { - return err - } - if r.Count == 0 { - log.Printf("[DEBUG] Key pair %s does not exist", d.Id()) - d.SetId("") - return nil - } - - //SSHKeyPair name is unique in a cloudstack account so dont need to check for multiple - d.Set("name", r.SSHKeyPairs[0].Name) - d.Set("fingerprint", r.SSHKeyPairs[0].Fingerprint) - - return nil -} - -func resourceCloudStackSSHKeyPairDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.SSH.NewDeleteSSHKeyPairParams(d.Id()) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Remove the SSH Keypair - _, err := cs.SSH.DeleteSSHKeyPair(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "A key pair with name '%s' does not exist for account", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting key pair: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair_test.go b/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair_test.go deleted file mode 100644 index e367d1a73..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_ssh_keypair_test.go +++ 
/dev/null @@ -1,169 +0,0 @@ -package cloudstack - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackSSHKeyPair_basic(t *testing.T) { - var sshkey cloudstack.SSHKeyPair - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSSHKeyPairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSSHKeyPair_create, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSSHKeyPairExists("cloudstack_ssh_keypair.foo", &sshkey), - testAccCheckCloudStackSSHKeyPairAttributes(&sshkey), - testAccCheckCloudStackSSHKeyPairCreateAttributes("terraform-test-keypair"), - ), - }, - }, - }) -} - -func TestAccCloudStackSSHKeyPair_register(t *testing.T) { - var sshkey cloudstack.SSHKeyPair - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackSSHKeyPairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackSSHKeyPair_register, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackSSHKeyPairExists("cloudstack_ssh_keypair.foo", &sshkey), - testAccCheckCloudStackSSHKeyPairAttributes(&sshkey), - resource.TestCheckResourceAttr( - "cloudstack_ssh_keypair.foo", - "public_key", - CLOUDSTACK_SSH_PUBLIC_KEY), - ), - }, - }, - }) -} - -func testAccCheckCloudStackSSHKeyPairExists(n string, sshkey *cloudstack.SSHKeyPair) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No key pair ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - p := cs.SSH.NewListSSHKeyPairsParams() - 
p.SetName(rs.Primary.ID) - - list, err := cs.SSH.ListSSHKeyPairs(p) - if err != nil { - return err - } - - if list.Count != 1 || list.SSHKeyPairs[0].Name != rs.Primary.ID { - return fmt.Errorf("Key pair not found") - } - - *sshkey = *list.SSHKeyPairs[0] - - return nil - } -} - -func testAccCheckCloudStackSSHKeyPairAttributes( - keypair *cloudstack.SSHKeyPair) resource.TestCheckFunc { - return func(s *terraform.State) error { - - fpLen := len(keypair.Fingerprint) - if fpLen != 47 { - return fmt.Errorf("SSH key: Attribute private_key expected length 47, got %d", fpLen) - } - - return nil - } -} - -func testAccCheckCloudStackSSHKeyPairCreateAttributes(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - found := false - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_ssh_keypair" { - continue - } - - if rs.Primary.ID != name { - continue - } - - if !strings.Contains(rs.Primary.Attributes["private_key"], "PRIVATE KEY") { - return fmt.Errorf( - "SSH key: Attribute private_key expected 'PRIVATE KEY' to be present, got %s", - rs.Primary.Attributes["private_key"]) - } - - found = true - break - } - - if !found { - return fmt.Errorf("Could not find key pair %s", name) - } - - return nil - } -} - -func testAccCheckCloudStackSSHKeyPairDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_ssh_keypair" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No key pair ID is set") - } - - p := cs.SSH.NewListSSHKeyPairsParams() - p.SetName(rs.Primary.ID) - - list, err := cs.SSH.ListSSHKeyPairs(p) - if err != nil { - return err - } - - for _, keyPair := range list.SSHKeyPairs { - if keyPair.Name == rs.Primary.ID { - return fmt.Errorf("Key pair %s still exists", rs.Primary.ID) - } - } - } - - return nil -} - -var testAccCloudStackSSHKeyPair_create = fmt.Sprintf(` -resource 
"cloudstack_ssh_keypair" "foo" { - name = "terraform-test-keypair" -}`) - -var testAccCloudStackSSHKeyPair_register = fmt.Sprintf(` -resource "cloudstack_ssh_keypair" "foo" { - name = "terraform-test-keypair" - public_key = "%s" -}`, CLOUDSTACK_SSH_PUBLIC_KEY) diff --git a/builtin/providers/cloudstack/resource_cloudstack_static_nat.go b/builtin/providers/cloudstack/resource_cloudstack_static_nat.go deleted file mode 100644 index bf416c885..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_static_nat.go +++ /dev/null @@ -1,170 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackStaticNAT() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackStaticNATCreate, - Exists: resourceCloudStackStaticNATExists, - Read: resourceCloudStackStaticNATRead, - Delete: resourceCloudStackStaticNATDelete, - - Schema: map[string]*schema.Schema{ - "ip_address_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "virtual_machine_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vm_guest_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackStaticNATCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - ipaddressid := d.Get("ip_address_id").(string) - - vm, _, err := cs.VirtualMachine.GetVirtualMachineByID( - d.Get("virtual_machine_id").(string), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - return err - } - - // Create a new parameter struct - p := cs.NAT.NewEnableStaticNatParams(ipaddressid, vm.Id) - - if vmGuestIP, ok := d.GetOk("vm_guest_ip"); 
ok { - p.SetVmguestip(vmGuestIP.(string)) - - // Set the network ID based on the guest IP, needed when the public IP address - // is not associated with any network yet - NICS: - for _, nic := range vm.Nic { - if vmGuestIP.(string) == nic.Ipaddress { - p.SetNetworkid(nic.Networkid) - break NICS - } - for _, ip := range nic.Secondaryip { - if vmGuestIP.(string) == ip.Ipaddress { - p.SetNetworkid(nic.Networkid) - break NICS - } - } - } - } else { - // If no guest IP is configured, use the primary NIC - p.SetNetworkid(vm.Nic[0].Networkid) - } - - _, err = cs.NAT.EnableStaticNat(p) - if err != nil { - return fmt.Errorf("Error enabling static NAT: %s", err) - } - - d.SetId(ipaddressid) - - return resourceCloudStackStaticNATRead(d, meta) -} - -func resourceCloudStackStaticNATExists(d *schema.ResourceData, meta interface{}) (bool, error) { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the IP address details - ip, count, err := cs.Address.GetPublicIpAddressByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] IP address with ID %s no longer exists", d.Id()) - return false, nil - } - - return false, err - } - - return ip.Isstaticnat, nil -} - -func resourceCloudStackStaticNATRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the IP address details - ip, count, err := cs.Address.GetPublicIpAddressByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] IP address with ID %s no longer exists", d.Id()) - d.SetId("") - return nil - } - - return err - } - - if !ip.Isstaticnat { - log.Printf("[DEBUG] Static NAT is no longer enabled for IP address with ID %s", d.Id()) - d.SetId("") - return nil - } - - d.Set("virtual_machine_id", ip.Virtualmachineid) - d.Set("vm_guest_ip", ip.Vmipaddress) - - setValueOrID(d, "project", ip.Project, ip.Projectid) - - return nil -} - 
-func resourceCloudStackStaticNATDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.NAT.NewDisableStaticNatParams(d.Id()) - - // Disable static NAT - _, err := cs.NAT.DisableStaticNat(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error disabling static NAT: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_static_nat_test.go b/builtin/providers/cloudstack/resource_cloudstack_static_nat_test.go deleted file mode 100644 index b31620248..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_static_nat_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackStaticNAT_basic(t *testing.T) { - var ipaddr cloudstack.PublicIpAddress - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackStaticNATDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackStaticNAT_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackStaticNATExists( - "cloudstack_static_nat.foo", &ipaddr), - testAccCheckCloudStackStaticNATAttributes(&ipaddr), - ), - }, - }, - }) -} - -func testAccCheckCloudStackStaticNATExists( - n string, ipaddr *cloudstack.PublicIpAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return 
fmt.Errorf("No static NAT ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - ip, _, err := cs.Address.GetPublicIpAddressByID(rs.Primary.ID) - - if err != nil { - return err - } - - if ip.Id != rs.Primary.ID { - return fmt.Errorf("Static NAT not found") - } - - if !ip.Isstaticnat { - return fmt.Errorf("Static NAT not enabled") - } - - *ipaddr = *ip - - return nil - } -} - -func testAccCheckCloudStackStaticNATAttributes( - ipaddr *cloudstack.PublicIpAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if ipaddr.Associatednetworkid != CLOUDSTACK_NETWORK_1 { - return fmt.Errorf("Bad network ID: %s", ipaddr.Associatednetworkid) - } - - return nil - } -} - -func testAccCheckCloudStackStaticNATDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_static_nat" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No static NAT ID is set") - } - - ip, _, err := cs.Address.GetPublicIpAddressByID(rs.Primary.ID) - if err == nil && ip.Isstaticnat { - return fmt.Errorf("Static NAT %s still enabled", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackStaticNAT_basic = fmt.Sprintf(` -resource "cloudstack_instance" "foobar" { - name = "terraform-test" - display_name = "terraform-test" - service_offering= "%s" - network_id = "%s" - template = "%s" - zone = "%s" - user_data = "foobar\nfoo\nbar" - expunge = true -} - -resource "cloudstack_ipaddress" "foo" { - network_id = "${cloudstack_instance.foobar.network_id}" -} - -resource "cloudstack_static_nat" "foo" { - ip_address_id = "${cloudstack_ipaddress.foo.id}" - virtual_machine_id = "${cloudstack_instance.foobar.id}" -}`, - CLOUDSTACK_SERVICE_OFFERING_1, - CLOUDSTACK_NETWORK_1, - CLOUDSTACK_TEMPLATE, - CLOUDSTACK_ZONE, -) diff --git a/builtin/providers/cloudstack/resource_cloudstack_static_route.go 
b/builtin/providers/cloudstack/resource_cloudstack_static_route.go deleted file mode 100644 index 3eed2878d..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_static_route.go +++ /dev/null @@ -1,94 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackStaticRoute() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackStaticRouteCreate, - Read: resourceCloudStackStaticRouteRead, - Delete: resourceCloudStackStaticRouteDelete, - - Schema: map[string]*schema.Schema{ - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "gateway_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackStaticRouteCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPC.NewCreateStaticRouteParams( - d.Get("cidr").(string), - d.Get("gateway_id").(string), - ) - - // Create the new private gateway - r, err := cs.VPC.CreateStaticRoute(p) - if err != nil { - return fmt.Errorf("Error creating static route for %s: %s", d.Get("cidr").(string), err) - } - - d.SetId(r.Id) - - return resourceCloudStackStaticRouteRead(d, meta) -} - -func resourceCloudStackStaticRouteRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the virtual machine details - staticroute, count, err := cs.VPC.GetStaticRouteByID(d.Id()) - if err != nil { - if count == 0 { - log.Printf("[DEBUG] Static route %s does no longer exist", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("cidr", staticroute.Cidr) - - return nil -} - -func resourceCloudStackStaticRouteDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a 
new parameter struct - p := cs.VPC.NewDeleteStaticRouteParams(d.Id()) - - // Delete the private gateway - _, err := cs.VPC.DeleteStaticRoute(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting static route for %s: %s", d.Get("cidr").(string), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_static_route_test.go b/builtin/providers/cloudstack/resource_cloudstack_static_route_test.go deleted file mode 100644 index 0aae2ce89..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_static_route_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackStaticRoute_basic(t *testing.T) { - var staticroute cloudstack.StaticRoute - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackStaticRouteDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackStaticRoute_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackStaticRouteExists( - "cloudstack_static_route.bar", &staticroute), - testAccCheckCloudStackStaticRouteAttributes(&staticroute), - ), - }, - }, - }) -} - -func testAccCheckCloudStackStaticRouteExists( - n string, staticroute *cloudstack.StaticRoute) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Static Route ID is set") - } - - cs := 
testAccProvider.Meta().(*cloudstack.CloudStackClient) - route, _, err := cs.VPC.GetStaticRouteByID(rs.Primary.ID) - - if err != nil { - return err - } - - if route.Id != rs.Primary.ID { - return fmt.Errorf("Static Route not found") - } - - *staticroute = *route - - return nil - } -} - -func testAccCheckCloudStackStaticRouteAttributes( - staticroute *cloudstack.StaticRoute) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if staticroute.Cidr != CLOUDSTACK_STATIC_ROUTE_CIDR { - return fmt.Errorf("Bad Cidr: %s", staticroute.Cidr) - } - - return nil - } -} - -func testAccCheckCloudStackStaticRouteDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_static_route" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No static route ID is set") - } - - staticroute, _, err := cs.VPC.GetStaticRouteByID(rs.Primary.ID) - if err == nil && staticroute.Id != "" { - return fmt.Errorf("Static route %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackStaticRoute_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foobar" { - name = "terraform-vpc" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_private_gateway" "foo" { - gateway = "%s" - ip_address = "%s" - netmask = "%s" - vlan = "%s" - vpc_id = "${cloudstack_vpc.foobar.id}" -} - -resource "cloudstack_static_route" "bar" { - cidr = "%s" - gateway_id = "${cloudstack_private_gateway.foo.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_PRIVGW_GATEWAY, - CLOUDSTACK_PRIVGW_IPADDRESS, - CLOUDSTACK_PRIVGW_NETMASK, - CLOUDSTACK_PRIVGW_VLAN, - CLOUDSTACK_STATIC_ROUTE_CIDR) diff --git a/builtin/providers/cloudstack/resource_cloudstack_template.go b/builtin/providers/cloudstack/resource_cloudstack_template.go deleted file mode 100644 index a7591d558..000000000 --- 
a/builtin/providers/cloudstack/resource_cloudstack_template.go +++ /dev/null @@ -1,318 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackTemplateCreate, - Read: resourceCloudStackTemplateRead, - Update: resourceCloudStackTemplateUpdate, - Delete: resourceCloudStackTemplateDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "display_text": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "hypervisor": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "os_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "is_dynamically_scalable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "is_extractable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "is_featured": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "is_public": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "password_enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "is_ready": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - - "is_ready_timeout": &schema.Schema{ - Type: 
schema.TypeInt, - Optional: true, - Default: 300, - }, - }, - } -} - -func resourceCloudStackTemplateCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - if err := verifyTemplateParams(d); err != nil { - return err - } - - name := d.Get("name").(string) - - // Compute/set the display text - displaytext := d.Get("display_text").(string) - if displaytext == "" { - displaytext = name - } - - // Retrieve the os_type ID - ostypeid, e := retrieveID(cs, "os_type", d.Get("os_type").(string)) - if e != nil { - return e.Error() - } - - // Retrieve the zone ID - zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string)) - if e != nil { - return e.Error() - } - - // Create a new parameter struct - p := cs.Template.NewRegisterTemplateParams( - displaytext, - d.Get("format").(string), - d.Get("hypervisor").(string), - name, - ostypeid, - d.Get("url").(string), - zoneid) - - // Set optional parameters - if v, ok := d.GetOk("is_dynamically_scalable"); ok { - p.SetIsdynamicallyscalable(v.(bool)) - } - - if v, ok := d.GetOk("is_extractable"); ok { - p.SetIsextractable(v.(bool)) - } - - if v, ok := d.GetOk("is_featured"); ok { - p.SetIsfeatured(v.(bool)) - } - - if v, ok := d.GetOk("is_public"); ok { - p.SetIspublic(v.(bool)) - } - - if v, ok := d.GetOk("password_enabled"); ok { - p.SetPasswordenabled(v.(bool)) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Create the new template - r, err := cs.Template.RegisterTemplate(p) - if err != nil { - return fmt.Errorf("Error creating template %s: %s", name, err) - } - - d.SetId(r.RegisterTemplate[0].Id) - - // Wait until the template is ready to use, or timeout with an error... - currentTime := time.Now().Unix() - timeout := int64(d.Get("is_ready_timeout").(int)) - for { - // Start with the sleep so the register action has a few seconds - // to process the registration correctly. 
Without this wait - time.Sleep(10 * time.Second) - - err := resourceCloudStackTemplateRead(d, meta) - if err != nil { - return err - } - - if d.Get("is_ready").(bool) { - return nil - } - - if time.Now().Unix()-currentTime > timeout { - return fmt.Errorf("Timeout while waiting for template to become ready") - } - } -} - -func resourceCloudStackTemplateRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the template details - t, count, err := cs.Template.GetTemplateByID( - d.Id(), - "executable", - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] Template %s no longer exists", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", t.Name) - d.Set("display_text", t.Displaytext) - d.Set("format", t.Format) - d.Set("hypervisor", t.Hypervisor) - d.Set("is_dynamically_scalable", t.Isdynamicallyscalable) - d.Set("is_extractable", t.Isextractable) - d.Set("is_featured", t.Isfeatured) - d.Set("is_public", t.Ispublic) - d.Set("password_enabled", t.Passwordenabled) - d.Set("is_ready", t.Isready) - - setValueOrID(d, "os_type", t.Ostypename, t.Ostypeid) - setValueOrID(d, "project", t.Project, t.Projectid) - setValueOrID(d, "zone", t.Zonename, t.Zoneid) - - return nil -} - -func resourceCloudStackTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - name := d.Get("name").(string) - - // Create a new parameter struct - p := cs.Template.NewUpdateTemplateParams(d.Id()) - - if d.HasChange("name") { - p.SetName(name) - } - - if d.HasChange("display_text") { - p.SetDisplaytext(d.Get("display_text").(string)) - } - - if d.HasChange("format") { - p.SetFormat(d.Get("format").(string)) - } - - if d.HasChange("is_dynamically_scalable") { - p.SetIsdynamicallyscalable(d.Get("is_dynamically_scalable").(bool)) - } - - if d.HasChange("os_type") { - ostypeid, e := retrieveID(cs, 
"os_type", d.Get("os_type").(string)) - if e != nil { - return e.Error() - } - p.SetOstypeid(ostypeid) - } - - if d.HasChange("password_enabled") { - p.SetPasswordenabled(d.Get("password_enabled").(bool)) - } - - _, err := cs.Template.UpdateTemplate(p) - if err != nil { - return fmt.Errorf("Error updating template %s: %s", name, err) - } - - return resourceCloudStackTemplateRead(d, meta) -} - -func resourceCloudStackTemplateDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.Template.NewDeleteTemplateParams(d.Id()) - - // Delete the template - log.Printf("[INFO] Deleting template: %s", d.Get("name").(string)) - _, err := cs.Template.DeleteTemplate(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting template %s: %s", d.Get("name").(string), err) - } - return nil -} - -func verifyTemplateParams(d *schema.ResourceData) error { - format := d.Get("format").(string) - if format != "OVA" && format != "QCOW2" && format != "RAW" && format != "VHD" && format != "VMDK" { - return fmt.Errorf( - "%s is not a valid format. 
Valid options are 'OVA','QCOW2', 'RAW', 'VHD' and 'VMDK'", format) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_template_test.go b/builtin/providers/cloudstack/resource_cloudstack_template_test.go deleted file mode 100644 index f98130661..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_template_test.go +++ /dev/null @@ -1,194 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackTemplate_basic(t *testing.T) { - var template cloudstack.Template - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackTemplate_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackTemplateExists("cloudstack_template.foo", &template), - testAccCheckCloudStackTemplateBasicAttributes(&template), - resource.TestCheckResourceAttr( - "cloudstack_template.foo", "display_text", "terraform-test"), - ), - }, - }, - }) -} - -func TestAccCloudStackTemplate_update(t *testing.T) { - var template cloudstack.Template - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackTemplate_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackTemplateExists("cloudstack_template.foo", &template), - testAccCheckCloudStackTemplateBasicAttributes(&template), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackTemplate_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackTemplateExists( - "cloudstack_template.foo", &template), - 
testAccCheckCloudStackTemplateUpdatedAttributes(&template), - resource.TestCheckResourceAttr( - "cloudstack_template.foo", "display_text", "terraform-updated"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackTemplateExists( - n string, template *cloudstack.Template) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No template ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - tmpl, _, err := cs.Template.GetTemplateByID(rs.Primary.ID, "executable") - - if err != nil { - return err - } - - if tmpl.Id != rs.Primary.ID { - return fmt.Errorf("Template not found") - } - - *template = *tmpl - - return nil - } -} - -func testAccCheckCloudStackTemplateBasicAttributes( - template *cloudstack.Template) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if template.Name != "terraform-test" { - return fmt.Errorf("Bad name: %s", template.Name) - } - - if template.Format != CLOUDSTACK_TEMPLATE_FORMAT { - return fmt.Errorf("Bad format: %s", template.Format) - } - - if template.Hypervisor != CLOUDSTACK_HYPERVISOR { - return fmt.Errorf("Bad hypervisor: %s", template.Hypervisor) - } - - if template.Ostypename != CLOUDSTACK_TEMPLATE_OS_TYPE { - return fmt.Errorf("Bad os type: %s", template.Ostypename) - } - - if template.Zonename != CLOUDSTACK_ZONE { - return fmt.Errorf("Bad zone: %s", template.Zonename) - } - - return nil - } -} - -func testAccCheckCloudStackTemplateUpdatedAttributes( - template *cloudstack.Template) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if template.Displaytext != "terraform-updated" { - return fmt.Errorf("Bad name: %s", template.Displaytext) - } - - if !template.Isdynamicallyscalable { - return fmt.Errorf("Bad is_dynamically_scalable: %t", template.Isdynamicallyscalable) - } - - if !template.Passwordenabled { - return 
fmt.Errorf("Bad password_enabled: %t", template.Passwordenabled) - } - - return nil - } -} - -func testAccCheckCloudStackTemplateDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_template" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No template ID is set") - } - - _, _, err := cs.Template.GetTemplateByID(rs.Primary.ID, "executable") - if err == nil { - return fmt.Errorf("Template %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackTemplate_basic = fmt.Sprintf(` -resource "cloudstack_template" "foo" { - name = "terraform-test" - format = "%s" - hypervisor = "%s" - os_type = "%s" - url = "%s" - zone = "%s" -} -`, - CLOUDSTACK_TEMPLATE_FORMAT, - CLOUDSTACK_HYPERVISOR, - CLOUDSTACK_TEMPLATE_OS_TYPE, - CLOUDSTACK_TEMPLATE_URL, - CLOUDSTACK_ZONE) - -var testAccCloudStackTemplate_update = fmt.Sprintf(` -resource "cloudstack_template" "foo" { - name = "terraform-test" - display_text = "terraform-updated" - format = "%s" - hypervisor = "%s" - os_type = "%s" - url = "%s" - zone = "%s" - is_dynamically_scalable = true - password_enabled = true -} -`, - CLOUDSTACK_TEMPLATE_FORMAT, - CLOUDSTACK_HYPERVISOR, - CLOUDSTACK_TEMPLATE_OS_TYPE, - CLOUDSTACK_TEMPLATE_URL, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpc.go b/builtin/providers/cloudstack/resource_cloudstack_vpc.go deleted file mode 100644 index 3456d1c3d..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpc.go +++ /dev/null @@ -1,248 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackVPC() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackVPCCreate, - Read: resourceCloudStackVPCRead, - Update: resourceCloudStackVPCUpdate, - 
Delete: resourceCloudStackVPCDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "display_text": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vpc_offering": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network_domain": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "source_nat_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackVPCCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - - // Retrieve the vpc_offering ID - vpcofferingid, e := retrieveID(cs, "vpc_offering", d.Get("vpc_offering").(string)) - if e != nil { - return e.Error() - } - - // Retrieve the zone ID - zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string)) - if e != nil { - return e.Error() - } - - // Set the display text - displaytext, ok := d.GetOk("display_text") - if !ok { - displaytext = name - } - - // Create a new parameter struct - p := cs.VPC.NewCreateVPCParams( - d.Get("cidr").(string), - displaytext.(string), - name, - vpcofferingid, - zoneid, - ) - - // If there is a network domain supplied, make sure to add it to the request - if networkDomain, ok := d.GetOk("network_domain"); ok { - // Set the network domain - p.SetNetworkdomain(networkDomain.(string)) - } - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Create the new VPC - 
r, err := cs.VPC.CreateVPC(p) - if err != nil { - return fmt.Errorf("Error creating VPC %s: %s", name, err) - } - - d.SetId(r.Id) - - return resourceCloudStackVPCRead(d, meta) -} - -func resourceCloudStackVPCRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the VPC details - v, count, err := cs.VPC.GetVPCByID( - d.Id(), - cloudstack.WithProject(d.Get("project").(string)), - ) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] VPC %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", v.Name) - d.Set("display_text", v.Displaytext) - d.Set("cidr", v.Cidr) - d.Set("network_domain", v.Networkdomain) - - // Get the VPC offering details - o, _, err := cs.VPC.GetVPCOfferingByID(v.Vpcofferingid) - if err != nil { - return err - } - - setValueOrID(d, "vpc_offering", o.Name, v.Vpcofferingid) - setValueOrID(d, "project", v.Project, v.Projectid) - setValueOrID(d, "zone", v.Zonename, v.Zoneid) - - // Create a new parameter struct - p := cs.Address.NewListPublicIpAddressesParams() - p.SetVpcid(d.Id()) - p.SetIssourcenat(true) - - // If there is a project supplied, we retrieve and set the project id - if err := setProjectid(p, cs, d); err != nil { - return err - } - - // Get the source NAT IP assigned to the VPC - l, err := cs.Address.ListPublicIpAddresses(p) - if err != nil { - return err - } - - if l.Count == 1 { - d.Set("source_nat_ip", l.PublicIpAddresses[0].Ipaddress) - } - - return nil -} - -func resourceCloudStackVPCUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - name := d.Get("name").(string) - - // Check if the name is changed - if d.HasChange("name") { - // Create a new parameter struct - p := cs.VPC.NewUpdateVPCParams(d.Id()) - - // Set the new name - p.SetName(name) - - // Update the VPC - _, err := cs.VPC.UpdateVPC(p) - if err != nil { - return fmt.Errorf( - "Error updating name 
of VPC %s: %s", name, err) - } - } - - // Check if the display text is changed - if d.HasChange("display_text") { - // Create a new parameter struct - p := cs.VPC.NewUpdateVPCParams(d.Id()) - - // Set the display text - displaytext, ok := d.GetOk("display_text") - if !ok { - displaytext = d.Get("name") - } - - // Set the new display text - p.SetDisplaytext(displaytext.(string)) - - // Update the VPC - _, err := cs.VPC.UpdateVPC(p) - if err != nil { - return fmt.Errorf( - "Error updating display test of VPC %s: %s", name, err) - } - } - - return resourceCloudStackVPCRead(d, meta) -} - -func resourceCloudStackVPCDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPC.NewDeleteVPCParams(d.Id()) - - // Delete the VPC - _, err := cs.VPC.DeleteVPC(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting VPC %s: %s", d.Get("name").(string), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpc_test.go b/builtin/providers/cloudstack/resource_cloudstack_vpc_test.go deleted file mode 100644 index 7c1d1492e..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpc_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackVPC_basic(t *testing.T) { - var vpc cloudstack.VPC - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackVPCDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: testAccCloudStackVPC_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPCExists( - "cloudstack_vpc.foo", &vpc), - testAccCheckCloudStackVPCAttributes(&vpc), - resource.TestCheckResourceAttr( - "cloudstack_vpc.foo", "vpc_offering", CLOUDSTACK_VPC_OFFERING), - ), - }, - }, - }) -} - -func testAccCheckCloudStackVPCExists( - n string, vpc *cloudstack.VPC) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - v, _, err := cs.VPC.GetVPCByID(rs.Primary.ID) - - if err != nil { - return err - } - - if v.Id != rs.Primary.ID { - return fmt.Errorf("VPC not found") - } - - *vpc = *v - - return nil - } -} - -func testAccCheckCloudStackVPCAttributes( - vpc *cloudstack.VPC) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if vpc.Name != "terraform-vpc" { - return fmt.Errorf("Bad name: %s", vpc.Name) - } - - if vpc.Displaytext != "terraform-vpc-text" { - return fmt.Errorf("Bad display text: %s", vpc.Displaytext) - } - - if vpc.Cidr != CLOUDSTACK_VPC_CIDR_1 { - return fmt.Errorf("Bad VPC CIDR: %s", vpc.Cidr) - } - - if vpc.Networkdomain != "terraform-domain" { - return fmt.Errorf("Bad network domain: %s", vpc.Networkdomain) - } - - return nil - } -} - -func testAccCheckCloudStackVPCDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_vpc" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPC ID is set") - } - - _, _, err := cs.VPC.GetVPCByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("VPC %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackVPC_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foo" { - name = 
"terraform-vpc" - display_text = "terraform-vpc-text" - cidr = "%s" - vpc_offering = "%s" - network_domain = "terraform-domain" - zone = "%s" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpn_connection.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_connection.go deleted file mode 100644 index f84715f4c..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpn_connection.go +++ /dev/null @@ -1,95 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackVPNConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackVPNConnectionCreate, - Read: resourceCloudStackVPNConnectionRead, - Delete: resourceCloudStackVPNConnectionDelete, - - Schema: map[string]*schema.Schema{ - "customer_gateway_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vpn_gateway_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceCloudStackVPNConnectionCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPN.NewCreateVpnConnectionParams( - d.Get("customer_gateway_id").(string), - d.Get("vpn_gateway_id").(string), - ) - - // Create the new VPN Connection - v, err := cs.VPN.CreateVpnConnection(p) - if err != nil { - return fmt.Errorf("Error creating VPN Connection: %s", err) - } - - d.SetId(v.Id) - - return resourceCloudStackVPNConnectionRead(d, meta) -} - -func resourceCloudStackVPNConnectionRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the VPN Connection details - v, count, err := cs.VPN.GetVpnConnectionByID(d.Id()) - if err != nil { - if count == 0 { - 
log.Printf("[DEBUG] VPN Connection does no longer exist") - d.SetId("") - return nil - } - - return err - } - - d.Set("customer_gateway_id", v.S2scustomergatewayid) - d.Set("vpn_gateway_id", v.S2svpngatewayid) - - return nil -} - -func resourceCloudStackVPNConnectionDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPN.NewDeleteVpnConnectionParams(d.Id()) - - // Delete the VPN Connection - _, err := cs.VPN.DeleteVpnConnection(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting VPN Connection: %s", err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpn_connection_test.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_connection_test.go deleted file mode 100644 index 930866853..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpn_connection_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackVPNConnection_basic(t *testing.T) { - var vpnConnection cloudstack.VpnConnection - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackVPNConnectionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackVPNConnection_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPNConnectionExists( - "cloudstack_vpn_connection.foo-bar", &vpnConnection), - testAccCheckCloudStackVPNConnectionExists( - 
"cloudstack_vpn_connection.bar-foo", &vpnConnection), - ), - }, - }, - }) -} - -func testAccCheckCloudStackVPNConnectionExists( - n string, vpnConnection *cloudstack.VpnConnection) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN Connection ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - v, _, err := cs.VPN.GetVpnConnectionByID(rs.Primary.ID) - - if err != nil { - return err - } - - if v.Id != rs.Primary.ID { - return fmt.Errorf("VPN Connection not found") - } - - *vpnConnection = *v - - return nil - } -} - -func testAccCheckCloudStackVPNConnectionDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_vpn_connection" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN Connection ID is set") - } - - _, _, err := cs.VPN.GetVpnConnectionByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("VPN Connection %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackVPNConnection_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foo" { - name = "terraform-vpc-foo" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpc" "bar" { - name = "terraform-vpc-bar" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpn_gateway" "foo" { - vpc_id = "${cloudstack_vpc.foo.id}" -} - -resource "cloudstack_vpn_gateway" "bar" { - vpc_id = "${cloudstack_vpc.bar.id}" -} - -resource "cloudstack_vpn_customer_gateway" "foo" { - name = "terraform-foo" - cidr = "${cloudstack_vpc.foo.cidr}" - esp_policy = "aes256-sha1" - gateway = "${cloudstack_vpn_gateway.foo.public_ip}" - ike_policy = "aes256-sha1" - ipsec_psk = "terraform" -} - -resource "cloudstack_vpn_customer_gateway" 
"bar" { - name = "terraform-bar" - cidr = "${cloudstack_vpc.bar.cidr}" - esp_policy = "aes256-sha1" - gateway = "${cloudstack_vpn_gateway.bar.public_ip}" - ike_policy = "aes256-sha1" - ipsec_psk = "terraform" -} - -resource "cloudstack_vpn_connection" "foo-bar" { - customer_gateway_id = "${cloudstack_vpn_customer_gateway.foo.id}" - vpn_gateway_id = "${cloudstack_vpn_gateway.bar.id}" -} - -resource "cloudstack_vpn_connection" "bar-foo" { - customer_gateway_id = "${cloudstack_vpn_customer_gateway.bar.id}" - vpn_gateway_id = "${cloudstack_vpn_gateway.foo.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_CIDR_2, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway.go deleted file mode 100644 index b049c0319..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway.go +++ /dev/null @@ -1,193 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackVPNCustomerGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackVPNCustomerGatewayCreate, - Read: resourceCloudStackVPNCustomerGatewayRead, - Update: resourceCloudStackVPNCustomerGatewayUpdate, - Delete: resourceCloudStackVPNCustomerGatewayDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "esp_policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "gateway": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ike_policy": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ipsec_psk": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - }, - - "dpd": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "esp_lifetime": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ike_lifetime": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceCloudStackVPNCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPN.NewCreateVpnCustomerGatewayParams( - d.Get("cidr").(string), - d.Get("esp_policy").(string), - d.Get("gateway").(string), - d.Get("ike_policy").(string), - d.Get("ipsec_psk").(string), - ) - - p.SetName(d.Get("name").(string)) - - if dpd, ok := d.GetOk("dpd"); ok { - p.SetDpd(dpd.(bool)) - } - - if esplifetime, ok := d.GetOk("esp_lifetime"); ok { - p.SetEsplifetime(int64(esplifetime.(int))) - } - - if ikelifetime, ok := d.GetOk("ike_lifetime"); ok { - p.SetIkelifetime(int64(ikelifetime.(int))) - } - - // Create the new VPN Customer Gateway - v, err := cs.VPN.CreateVpnCustomerGateway(p) - if err != nil { - return fmt.Errorf("Error creating VPN Customer Gateway %s: %s", d.Get("name").(string), err) - } - - d.SetId(v.Id) - - return resourceCloudStackVPNCustomerGatewayRead(d, meta) -} - -func resourceCloudStackVPNCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the VPN Customer Gateway details - v, count, err := cs.VPN.GetVpnCustomerGatewayByID(d.Id()) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] VPN Customer Gateway %s does no longer exist", d.Get("name").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", v.Name) - d.Set("cidr", v.Cidrlist) - d.Set("esp_policy", v.Esppolicy) - d.Set("gateway", v.Gateway) - d.Set("ike_policy", v.Ikepolicy) - d.Set("ipsec_psk", v.Ipsecpsk) - d.Set("dpd", v.Dpd) - d.Set("esp_lifetime", 
int(v.Esplifetime)) - d.Set("ike_lifetime", int(v.Ikelifetime)) - - return nil -} - -func resourceCloudStackVPNCustomerGatewayUpdate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPN.NewUpdateVpnCustomerGatewayParams( - d.Get("cidr").(string), - d.Get("esp_policy").(string), - d.Get("gateway").(string), - d.Id(), - d.Get("ike_policy").(string), - d.Get("ipsec_psk").(string), - ) - - p.SetName(d.Get("name").(string)) - - if dpd, ok := d.GetOk("dpd"); ok { - p.SetDpd(dpd.(bool)) - } - - if esplifetime, ok := d.GetOk("esp_lifetime"); ok { - p.SetEsplifetime(int64(esplifetime.(int))) - } - - if ikelifetime, ok := d.GetOk("ike_lifetime"); ok { - p.SetIkelifetime(int64(ikelifetime.(int))) - } - - // Update the VPN Customer Gateway - _, err := cs.VPN.UpdateVpnCustomerGateway(p) - if err != nil { - return fmt.Errorf("Error updating VPN Customer Gateway %s: %s", d.Get("name").(string), err) - } - - return resourceCloudStackVPNCustomerGatewayRead(d, meta) -} - -func resourceCloudStackVPNCustomerGatewayDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Create a new parameter struct - p := cs.VPN.NewDeleteVpnCustomerGatewayParams(d.Id()) - - // Delete the VPN Customer Gateway - _, err := cs.VPN.DeleteVpnCustomerGateway(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting VPN Customer Gateway %s: %s", d.Get("name").(string), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway_test.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway_test.go deleted file mode 100644 index acf181ace..000000000 --- 
a/builtin/providers/cloudstack/resource_cloudstack_vpn_customer_gateway_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackVPNCustomerGateway_basic(t *testing.T) { - var vpnCustomerGateway cloudstack.VpnCustomerGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackVPNCustomerGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackVPNCustomerGateway_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPNCustomerGatewayExists( - "cloudstack_vpn_customer_gateway.foo", &vpnCustomerGateway), - testAccCheckCloudStackVPNCustomerGatewayAttributes(&vpnCustomerGateway), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "name", "terraform-foo"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "name", "terraform-bar"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "ike_policy", "aes256-sha1"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "esp_policy", "aes256-sha1"), - ), - }, - }, - }) -} - -func TestAccCloudStackVPNCustomerGateway_update(t *testing.T) { - var vpnCustomerGateway cloudstack.VpnCustomerGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackVPNCustomerGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackVPNCustomerGateway_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPNCustomerGatewayExists( - "cloudstack_vpn_customer_gateway.foo", &vpnCustomerGateway), - 
testAccCheckCloudStackVPNCustomerGatewayAttributes(&vpnCustomerGateway), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "name", "terraform-foo"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "name", "terraform-bar"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "ike_policy", "aes256-sha1"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "esp_policy", "aes256-sha1"), - ), - }, - - resource.TestStep{ - Config: testAccCloudStackVPNCustomerGateway_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPNCustomerGatewayExists( - "cloudstack_vpn_customer_gateway.foo", &vpnCustomerGateway), - testAccCheckCloudStackVPNCustomerGatewayUpdatedAttributes(&vpnCustomerGateway), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "name", "terraform-foo-bar"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "name", "terraform-bar-foo"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.foo", "ike_policy", "3des-md5"), - resource.TestCheckResourceAttr( - "cloudstack_vpn_customer_gateway.bar", "esp_policy", "3des-md5"), - ), - }, - }, - }) -} - -func testAccCheckCloudStackVPNCustomerGatewayExists( - n string, vpnCustomerGateway *cloudstack.VpnCustomerGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN CustomerGateway ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - v, _, err := cs.VPN.GetVpnCustomerGatewayByID(rs.Primary.ID) - - if err != nil { - return err - } - - if v.Id != rs.Primary.ID { - return fmt.Errorf("VPN CustomerGateway not found") - } - - *vpnCustomerGateway = *v - - return nil - } -} - -func testAccCheckCloudStackVPNCustomerGatewayAttributes( - vpnCustomerGateway 
*cloudstack.VpnCustomerGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if vpnCustomerGateway.Esppolicy != "aes256-sha1" { - return fmt.Errorf("Bad ESP policy: %s", vpnCustomerGateway.Esppolicy) - } - - if vpnCustomerGateway.Ikepolicy != "aes256-sha1" { - return fmt.Errorf("Bad IKE policy: %s", vpnCustomerGateway.Ikepolicy) - } - - if vpnCustomerGateway.Ipsecpsk != "terraform" { - return fmt.Errorf("Bad IPSEC pre-shared key: %s", vpnCustomerGateway.Ipsecpsk) - } - - return nil - } -} - -func testAccCheckCloudStackVPNCustomerGatewayUpdatedAttributes( - vpnCustomerGateway *cloudstack.VpnCustomerGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if vpnCustomerGateway.Esppolicy != "3des-md5" { - return fmt.Errorf("Bad ESP policy: %s", vpnCustomerGateway.Esppolicy) - } - - if vpnCustomerGateway.Ikepolicy != "3des-md5" { - return fmt.Errorf("Bad IKE policy: %s", vpnCustomerGateway.Ikepolicy) - } - - if vpnCustomerGateway.Ipsecpsk != "terraform" { - return fmt.Errorf("Bad IPSEC pre-shared key: %s", vpnCustomerGateway.Ipsecpsk) - } - - return nil - } -} - -func testAccCheckCloudStackVPNCustomerGatewayDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_vpn_customer_gateway" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN Customer Gateway ID is set") - } - - _, _, err := cs.VPN.GetVpnCustomerGatewayByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("VPN Customer Gateway %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackVPNCustomerGateway_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foo" { - name = "terraform-vpc-foo" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpc" "bar" { - name = "terraform-vpc-bar" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpn_gateway" 
"foo" { - vpc_id = "${cloudstack_vpc.foo.id}" -} - -resource "cloudstack_vpn_gateway" "bar" { - vpc_id = "${cloudstack_vpc.bar.id}" -} - -resource "cloudstack_vpn_customer_gateway" "foo" { - name = "terraform-foo" - cidr = "${cloudstack_vpc.foo.cidr}" - esp_policy = "aes256-sha1" - gateway = "${cloudstack_vpn_gateway.foo.public_ip}" - ike_policy = "aes256-sha1" - ipsec_psk = "terraform" -} - -resource "cloudstack_vpn_customer_gateway" "bar" { - name = "terraform-bar" - cidr = "${cloudstack_vpc.bar.cidr}" - esp_policy = "aes256-sha1" - gateway = "${cloudstack_vpn_gateway.bar.public_ip}" - ike_policy = "aes256-sha1" - ipsec_psk = "terraform" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_CIDR_2, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) - -var testAccCloudStackVPNCustomerGateway_update = fmt.Sprintf(` -resource "cloudstack_vpc" "foo" { - name = "terraform-vpc-foo" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpc" "bar" { - name = "terraform-vpc-bar" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpn_gateway" "foo" { - vpc_id = "${cloudstack_vpc.foo.id}" -} - -resource "cloudstack_vpn_gateway" "bar" { - vpc_id = "${cloudstack_vpc.bar.id}" -} - -resource "cloudstack_vpn_customer_gateway" "foo" { - name = "terraform-foo-bar" - cidr = "${cloudstack_vpc.foo.cidr}" - esp_policy = "3des-md5" - gateway = "${cloudstack_vpn_gateway.foo.public_ip}" - ike_policy = "3des-md5" - ipsec_psk = "terraform" -} - -resource "cloudstack_vpn_customer_gateway" "bar" { - name = "terraform-bar-foo" - cidr = "${cloudstack_vpc.bar.cidr}" - esp_policy = "3des-md5" - gateway = "${cloudstack_vpn_gateway.bar.public_ip}" - ike_policy = "3des-md5" - ipsec_psk = "terraform" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE, - CLOUDSTACK_VPC_CIDR_2, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git 
a/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway.go deleted file mode 100644 index c1d28f70d..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway.go +++ /dev/null @@ -1,92 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func resourceCloudStackVPNGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudStackVPNGatewayCreate, - Read: resourceCloudStackVPNGatewayRead, - Delete: resourceCloudStackVPNGatewayDelete, - - Schema: map[string]*schema.Schema{ - "vpc_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "public_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceCloudStackVPNGatewayCreate(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - vpcid := d.Get("vpc_id").(string) - p := cs.VPN.NewCreateVpnGatewayParams(vpcid) - - // Create the new VPN Gateway - v, err := cs.VPN.CreateVpnGateway(p) - if err != nil { - return fmt.Errorf("Error creating VPN Gateway for VPC ID %s: %s", vpcid, err) - } - - d.SetId(v.Id) - - return resourceCloudStackVPNGatewayRead(d, meta) -} - -func resourceCloudStackVPNGatewayRead(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // Get the VPN Gateway details - v, count, err := cs.VPN.GetVpnGatewayByID(d.Id()) - if err != nil { - if count == 0 { - log.Printf( - "[DEBUG] VPN Gateway for VPC ID %s does no longer exist", d.Get("vpc_id").(string)) - d.SetId("") - return nil - } - - return err - } - - d.Set("vpc_id", v.Vpcid) - d.Set("public_ip", v.Publicip) - - return nil -} - -func resourceCloudStackVPNGatewayDelete(d *schema.ResourceData, meta interface{}) error { - cs := meta.(*cloudstack.CloudStackClient) - - // 
Create a new parameter struct - p := cs.VPN.NewDeleteVpnGatewayParams(d.Id()) - - // Delete the VPN Gateway - _, err := cs.VPN.DeleteVpnGateway(p) - if err != nil { - // This is a very poor way to be told the ID does no longer exist :( - if strings.Contains(err.Error(), fmt.Sprintf( - "Invalid parameter id value=%s due to incorrect long value format, "+ - "or entity does not exist", d.Id())) { - return nil - } - - return fmt.Errorf("Error deleting VPN Gateway for VPC %s: %s", d.Get("vpc_id").(string), err) - } - - return nil -} diff --git a/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway_test.go b/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway_test.go deleted file mode 100644 index 862daefe9..000000000 --- a/builtin/providers/cloudstack/resource_cloudstack_vpn_gateway_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package cloudstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -func TestAccCloudStackVPNGateway_basic(t *testing.T) { - var vpnGateway cloudstack.VpnGateway - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudStackVPNGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCloudStackVPNGateway_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStackVPNGatewayExists( - "cloudstack_vpn_gateway.foo", &vpnGateway), - ), - }, - }, - }) -} - -func testAccCheckCloudStackVPNGatewayExists( - n string, vpnGateway *cloudstack.VpnGateway) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN Gateway ID is set") - } - - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - v, _, err := 
cs.VPN.GetVpnGatewayByID(rs.Primary.ID) - - if err != nil { - return err - } - - if v.Id != rs.Primary.ID { - return fmt.Errorf("VPN Gateway not found") - } - - *vpnGateway = *v - - return nil - } -} - -func testAccCheckCloudStackVPNGatewayDestroy(s *terraform.State) error { - cs := testAccProvider.Meta().(*cloudstack.CloudStackClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "cloudstack_vpn_gateway" { - continue - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No VPN Gateway ID is set") - } - - _, _, err := cs.VPN.GetVpnGatewayByID(rs.Primary.ID) - if err == nil { - return fmt.Errorf("VPN Gateway %s still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccCloudStackVPNGateway_basic = fmt.Sprintf(` -resource "cloudstack_vpc" "foo" { - name = "terraform-vpc" - display_text = "terraform-vpc-text" - cidr = "%s" - vpc_offering = "%s" - zone = "%s" -} - -resource "cloudstack_vpn_gateway" "foo" { - vpc_id = "${cloudstack_vpc.foo.id}" -}`, - CLOUDSTACK_VPC_CIDR_1, - CLOUDSTACK_VPC_OFFERING, - CLOUDSTACK_ZONE) diff --git a/builtin/providers/cloudstack/resources.go b/builtin/providers/cloudstack/resources.go deleted file mode 100644 index 56174de39..000000000 --- a/builtin/providers/cloudstack/resources.go +++ /dev/null @@ -1,137 +0,0 @@ -package cloudstack - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -// Define a regexp for parsing the port -var splitPorts = regexp.MustCompile(`^(\d+)(?:-(\d+))?$`) - -type retrieveError struct { - name string - value string - err error -} - -func (e *retrieveError) Error() error { - return fmt.Errorf("Error retrieving ID of %s %s: %s", e.name, e.value, e.err) -} - -func setValueOrID(d *schema.ResourceData, key string, value string, id string) { - if cloudstack.IsID(d.Get(key).(string)) { - // If the given id is an empty string, check if the configured value matches - // the UnlimitedResourceID 
in which case we set id to UnlimitedResourceID - if id == "" && d.Get(key).(string) == cloudstack.UnlimitedResourceID { - id = cloudstack.UnlimitedResourceID - } - - d.Set(key, id) - } else { - d.Set(key, value) - } -} - -func retrieveID(cs *cloudstack.CloudStackClient, name string, value string, opts ...cloudstack.OptionFunc) (id string, e *retrieveError) { - // If the supplied value isn't a ID, try to retrieve the ID ourselves - if cloudstack.IsID(value) { - return value, nil - } - - log.Printf("[DEBUG] Retrieving ID of %s: %s", name, value) - - // Ignore counts, since an error is returned if there is no exact match - var err error - switch name { - case "disk_offering": - id, _, err = cs.DiskOffering.GetDiskOfferingID(value) - case "service_offering": - id, _, err = cs.ServiceOffering.GetServiceOfferingID(value) - case "network_offering": - id, _, err = cs.NetworkOffering.GetNetworkOfferingID(value) - case "project": - id, _, err = cs.Project.GetProjectID(value) - case "vpc_offering": - id, _, err = cs.VPC.GetVPCOfferingID(value) - case "zone": - id, _, err = cs.Zone.GetZoneID(value) - case "os_type": - p := cs.GuestOS.NewListOsTypesParams() - p.SetDescription(value) - l, e := cs.GuestOS.ListOsTypes(p) - if e != nil { - err = e - break - } - if l.Count == 1 { - id = l.OsTypes[0].Id - break - } - err = fmt.Errorf("Could not find ID of OS Type: %s", value) - default: - return id, &retrieveError{name: name, value: value, - err: fmt.Errorf("Unknown request: %s", name)} - } - - if err != nil { - return id, &retrieveError{name: name, value: value, err: err} - } - - return id, nil -} - -func retrieveTemplateID(cs *cloudstack.CloudStackClient, zoneid, value string) (id string, e *retrieveError) { - // If the supplied value isn't a ID, try to retrieve the ID ourselves - if cloudstack.IsID(value) { - return value, nil - } - - log.Printf("[DEBUG] Retrieving ID of template: %s", value) - - // Ignore count, since an error is returned if there is no exact match - id, _, err 
:= cs.Template.GetTemplateID(value, "executable", zoneid) - if err != nil { - return id, &retrieveError{name: "template", value: value, err: err} - } - - return id, nil -} - -// RetryFunc is the function retried n times -type RetryFunc func() (interface{}, error) - -// Retry is a wrapper around a RetryFunc that will retry a function -// n times or until it succeeds. -func Retry(n int, f RetryFunc) (interface{}, error) { - var lastErr error - - for i := 0; i < n; i++ { - r, err := f() - if err == nil || err == cloudstack.AsyncTimeoutErr { - return r, err - } - - lastErr = err - time.Sleep(30 * time.Second) - } - - return nil, lastErr -} - -// If there is a project supplied, we retrieve and set the project id -func setProjectid(p cloudstack.ProjectIDSetter, cs *cloudstack.CloudStackClient, d *schema.ResourceData) error { - if project, ok := d.GetOk("project"); ok { - projectid, e := retrieveID(cs, "project", project.(string)) - if e != nil { - return e.Error() - } - p.SetProjectid(projectid) - } - - return nil -} diff --git a/builtin/providers/cloudstack/tags.go b/builtin/providers/cloudstack/tags.go deleted file mode 100644 index 389cdb47f..000000000 --- a/builtin/providers/cloudstack/tags.go +++ /dev/null @@ -1,77 +0,0 @@ -package cloudstack - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/xanzy/go-cloudstack/cloudstack" -) - -// tagsSchema returns the schema to use for tags -func tagsSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Computed: true, - } -} - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTags(cs *cloudstack.CloudStackClient, d *schema.ResourceData, resourcetype string) error { - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - - remove, create := diffTags(tagsFromSchema(o), tagsFromSchema(n)) - log.Printf("[DEBUG] tags to remove: %v", remove) - log.Printf("[DEBUG] tags to create: %v", create) - - // First remove any obsolete tags - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %v from %s", remove, d.Id()) - p := cs.Resourcetags.NewDeleteTagsParams([]string{d.Id()}, resourcetype) - p.SetTags(remove) - _, err := cs.Resourcetags.DeleteTags(p) - if err != nil { - return err - } - } - - // Then add any new tags - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %v for %s", create, d.Id()) - p := cs.Resourcetags.NewCreateTagsParams([]string{d.Id()}, resourcetype, create) - _, err := cs.Resourcetags.CreateTags(p) - if err != nil { - return err - } - } - - return nil -} - -// diffTags takes the old and the new tag sets and returns the difference of -// both. 
The remaining tags are those that need to be removed and created -func diffTags(oldTags, newTags map[string]string) (map[string]string, map[string]string) { - for k, old := range oldTags { - new, ok := newTags[k] - if ok && old == new { - // We should avoid removing or creating tags we already have - delete(oldTags, k) - delete(newTags, k) - } - } - - return oldTags, newTags -} - -// tagsFromSchema takes the raw schema tags and returns them as a -// properly asserted map[string]string -func tagsFromSchema(m map[string]interface{}) map[string]string { - result := make(map[string]string, len(m)) - for k, v := range m { - result[k] = v.(string) - } - return result -} diff --git a/builtin/providers/cloudstack/tags_test.go b/builtin/providers/cloudstack/tags_test.go deleted file mode 100644 index fba9cadd7..000000000 --- a/builtin/providers/cloudstack/tags_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package cloudstack - -import ( - "fmt" - "reflect" - "testing" -) - -func TestDiffTags(t *testing.T) { - cases := []struct { - Old, New map[string]interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "bar": "baz", - }, - Create: map[string]string{ - "bar": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - - // Modify - { - Old: map[string]interface{}{ - "foo": "bar", - }, - New: map[string]interface{}{ - "foo": "baz", - }, - Create: map[string]string{ - "foo": "baz", - }, - Remove: map[string]string{ - "foo": "bar", - }, - }, - } - - for i, tc := range cases { - r, c := diffTags(tagsFromSchema(tc.Old), tagsFromSchema(tc.New)) - if !reflect.DeepEqual(r, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, r) - } - if !reflect.DeepEqual(c, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, c) - } - } -} - -// testAccCheckTags can be used to check the tags on a resource. 
-func testAccCheckTags(tags map[string]string, key string, value string) error { - v, ok := tags[key] - if !ok { - return fmt.Errorf("Missing tag: %s", key) - } - - if v != value { - return fmt.Errorf("%s: bad value: %s", key, v) - } - - return nil -} diff --git a/builtin/providers/cobbler/acceptance_env/deploy.sh b/builtin/providers/cobbler/acceptance_env/deploy.sh deleted file mode 100644 index 59563d110..000000000 --- a/builtin/providers/cobbler/acceptance_env/deploy.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -set -e - -# This script assumes Ubuntu 14.04 is being used. -# It will create a standard Cobbler environment that can be used for acceptance testing. - -# With this enviornment spun up, the config should be: -# COBBLER_URL=http://127.0.0.1:25151 -# COBBLER_USERNAME=cobbler -# COBBLER_PASSWORD=cobbler - -sudo apt-get update -sudo apt-get install -y build-essential git mercurial - -cd -echo 'export PATH=$PATH:$HOME/terraform:$HOME/go/bin' >> ~/.bashrc -export PATH=$PATH:$HOME/terraform:$HOME/go/bin - -sudo wget -O /usr/local/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme -sudo chmod +x /usr/local/bin/gimme -/usr/local/bin/gimme 1.6 >> ~/.bashrc -eval "$(/usr/local/bin/gimme 1.6)" - -mkdir ~/go -echo 'export GOPATH=$HOME/go' >> ~/.bashrc -echo 'export GO15VENDOREXPERIMENT=1' >> ~/.bashrc -export GOPATH=$HOME/go -source ~/.bashrc - -go get github.com/tools/godep -go get github.com/hashicorp/terraform -cd $GOPATH/src/github.com/hashicorp/terraform -godep restore - -# Cobbler -sudo apt-get install -y cobbler cobbler-web debmirror dnsmasq - -sudo tee /etc/cobbler/modules.conf <&1 env \ - make -C ../../.. 
testacc TEST=./builtin/providers/consul | tee test.log diff --git a/builtin/providers/consul/config.go b/builtin/providers/consul/config.go deleted file mode 100644 index 99897505d..000000000 --- a/builtin/providers/consul/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package consul - -import ( - "log" - "net/http" - "strings" - - consulapi "github.com/hashicorp/consul/api" -) - -type Config struct { - Datacenter string `mapstructure:"datacenter"` - Address string `mapstructure:"address"` - Scheme string `mapstructure:"scheme"` - HttpAuth string `mapstructure:"http_auth"` - Token string `mapstructure:"token"` - CAFile string `mapstructure:"ca_file"` - CertFile string `mapstructure:"cert_file"` - KeyFile string `mapstructure:"key_file"` -} - -// Client() returns a new client for accessing consul. -// -func (c *Config) Client() (*consulapi.Client, error) { - config := consulapi.DefaultConfig() - if c.Datacenter != "" { - config.Datacenter = c.Datacenter - } - if c.Address != "" { - config.Address = c.Address - } - if c.Scheme != "" { - config.Scheme = c.Scheme - } - - tlsConfig := &consulapi.TLSConfig{} - tlsConfig.CAFile = c.CAFile - tlsConfig.CertFile = c.CertFile - tlsConfig.KeyFile = c.KeyFile - cc, err := consulapi.SetupTLSConfig(tlsConfig) - if err != nil { - return nil, err - } - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cc - - if c.HttpAuth != "" { - var username, password string - if strings.Contains(c.HttpAuth, ":") { - split := strings.SplitN(c.HttpAuth, ":", 2) - username = split[0] - password = split[1] - } else { - username = c.HttpAuth - } - config.HttpAuth = &consulapi.HttpBasicAuth{Username: username, Password: password} - } - - if c.Token != "" { - config.Token = c.Token - } - - client, err := consulapi.NewClient(config) - - log.Printf("[INFO] Consul Client configured with address: '%s', scheme: '%s', datacenter: '%s'", - config.Address, config.Scheme, config.Datacenter) - if err != nil { - return nil, err - } - return client, nil 
-} diff --git a/builtin/providers/consul/data_source_consul_agent_self.go b/builtin/providers/consul/data_source_consul_agent_self.go deleted file mode 100644 index 17beaa626..000000000 --- a/builtin/providers/consul/data_source_consul_agent_self.go +++ /dev/null @@ -1,1350 +0,0 @@ -package consul - -import ( - "fmt" - "strconv" - "time" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - agentSelfACLDatacenter = "acl_datacenter" - agentSelfACLDefaultPolicy = "acl_default_policy" - agentSelfACLDisabledTTL = "acl_disabled_ttl" - agentSelfACLDownPolicy = "acl_down_policy" - agentSelfACLEnforceVersion8 = "acl_enforce_0_8_semantics" - agentSelfACLTTL = "acl_ttl" - agentSelfAddresses = "addresses" - agentSelfAdvertiseAddr = "advertise_addr" - agentSelfAdvertiseAddrWAN = "advertise_addr_wan" - agentSelfAdvertiseAddrs = "advertise_addrs" - agentSelfAtlasJoin = "atlas_join" - agentSelfBindAddr = "bind_addr" - agentSelfBootstrapExpect = "bootstrap_expect" - agentSelfBootstrapMode = "bootstrap_mode" - agentSelfCheckDeregisterIntervalMin = "check_deregister_interval_min" - agentSelfCheckReapInterval = "check_reap_interval" - agentSelfCheckUpdateInterval = "check_update_interval" - agentSelfClientAddr = "client_addr" - agentSelfDNSConfig = "dns" - agentSelfDNSRecursors = "dns_recursors" - agentSelfDataDir = "data_dir" - agentSelfDatacenter = "datacenter" - agentSelfDevMode = "dev_mode" - agentSelfDomain = "domain" - agentSelfEnableAnonymousSignature = "enable_anonymous_signature" - agentSelfEnableCoordinates = "enable_coordinates" - agentSelfEnableDebug = "enable_debug" - agentSelfEnableRemoteExec = "enable_remote_exec" - agentSelfEnableSyslog = "enable_syslog" - agentSelfEnableUI = "enable_ui" - agentSelfEnableUpdateCheck = "enable_update_check" - agentSelfID = "id" - agentSelfLeaveOnInt = "leave_on_int" - agentSelfLeaveOnTerm = "leave_on_term" - agentSelfLogLevel = "log_level" - 
agentSelfName = "name" - agentSelfPerformance = "performance" - agentSelfPidFile = "pid_file" - agentSelfPorts = "ports" - agentSelfProtocol = "protocol_version" - agentSelfReconnectTimeoutLAN = "reconnect_timeout_lan" - agentSelfReconnectTimeoutWAN = "reconnect_timeout_wan" - agentSelfRejoinAfterLeave = "rejoin_after_leave" - agentSelfRetryJoin = "retry_join" - agentSelfRetryJoinEC2 = "retry_join_ec2" - agentSelfRetryJoinGCE = "retry_join_gce" - agentSelfRetryJoinWAN = "retry_join_wan" - agentSelfRetryMaxAttempts = "retry_max_attempts" - agentSelfRetryMaxAttemptsWAN = "retry_max_attempts_wan" - agentSelfSerfLANBindAddr = "serf_lan_bind_addr" - agentSelfSerfWANBindAddr = "serf_wan_bind_addr" - agentSelfServerMode = "server_mode" - agentSelfServerName = "server_name" - agentSelfSessionTTLMin = "session_ttl_min" - agentSelfStartJoin = "start_join" - agentSelfStartJoinWAN = "start_join_wan" - agentSelfSyslogFacility = "syslog_facility" - agentSelfTLSCAFile = "tls_ca_file" - agentSelfTLSCertFile = "tls_cert_file" - agentSelfTLSKeyFile = "tls_key_file" - agentSelfTLSMinVersion = "tls_min_version" - agentSelfTLSVerifyIncoming = "tls_verify_incoming" - agentSelfTLSVerifyOutgoing = "tls_verify_outgoing" - agentSelfTLSVerifyServerHostname = "tls_verify_server_hostname" - agentSelfTaggedAddresses = "tagged_addresses" - agentSelfTelemetry = "telemetry" - agentSelfTranslateWANAddrs = "translate_wan_addrs" - agentSelfUIDir = "ui_dir" - agentSelfUnixSockets = "unix_sockets" - agentSelfVersion = "version" - agentSelfVersionPrerelease = "version_prerelease" - agentSelfVersionRevision = "version_revision" -) - -const ( - agentSelfRetryJoinAWSRegion = "region" - agentSelfRetryJoinAWSTagKey = "tag_key" - agentSelfRetryJoinAWSTagValue = "tag_value" -) - -const ( - agentSelfRetryJoinGCECredentialsFile = "credentials_file" - agentSelfRetryJoinGCEProjectName = "project_name" - agentSelfRetryJoinGCETagValue = "tag_value" - agentSelfRetryJoinGCEZonePattern = "zone_pattern" -) - -const ( - 
agentSelfDNSAllowStale = "allow_stale" - agentSelfDNSEnableCompression = "enable_compression" - agentSelfDNSEnableTruncate = "enable_truncate" - agentSelfDNSMaxStale = "max_stale" - agentSelfDNSNodeTTL = "node_ttl" - agentSelfDNSOnlyPassing = "only_passing" - agentSelfDNSRecursorTimeout = "recursor_timeout" - agentSelfDNSServiceTTL = "service_ttl" - agentSelfDNSUDPAnswerLimit = "udp_answer_limit" -) - -const ( - agentSelfPerformanceRaftMultiplier = "raft_multiplier" -) - -const ( - agentSelfAPIPortsDNS = "dns" - agentSelfAPIPortsHTTP = "http" - agentSelfAPIPortsHTTPS = "https" - agentSelfAPIPortsRPC = "rpc" - agentSelfAPIPortsSerfLAN = "serf_lan" - agentSelfAPIPortsSerfWAN = "serf_wan" - agentSelfAPIPortsServer = "server" - - agentSelfSchemaPortsDNS = "dns" - agentSelfSchemaPortsHTTP = "http" - agentSelfSchemaPortsHTTPS = "https" - agentSelfSchemaPortsRPC = "rpc" - agentSelfSchemaPortsSerfLAN = "serf_lan" - agentSelfSchemaPortsSerfWAN = "serf_wan" - agentSelfSchemaPortsServer = "server" -) - -const ( - agentSelfTaggedAddressesLAN = "lan" - agentSelfTaggedAddressesWAN = "wan" -) - -const ( - agentSelfTelemetryCirconusAPIApp = "circonus_api_app" - agentSelfTelemetryCirconusAPIToken = "circonus_api_token" - agentSelfTelemetryCirconusAPIURL = "circonus_api_url" - agentSelfTelemetryCirconusBrokerID = "circonus_broker_id" - agentSelfTelemetryCirconusBrokerSelectTag = "circonus_select_tag" - agentSelfTelemetryCirconusCheckDisplayName = "circonus_display_name" - agentSelfTelemetryCirconusCheckForceMetricActiation = "circonus_force_metric_activation" - agentSelfTelemetryCirconusCheckID = "circonus_check_id" - agentSelfTelemetryCirconusCheckInstanceID = "circonus_instance_id" - agentSelfTelemetryCirconusCheckSearchTag = "circonus_search_tag" - agentSelfTelemetryCirconusCheckSubmissionURL = "circonus_submission_url" - agentSelfTelemetryCirconusCheckTags = "circonus_check_tags" - agentSelfTelemetryCirconusSubmissionInterval = "circonus_submission_interval" - - 
agentSelfTelemetryDogStatsdAddr = "dogstatsd_addr" - agentSelfTelemetryDogStatsdTags = "dogstatsd_tags" - agentSelfTelemetryEnableHostname = "enable_hostname" - agentSelfTelemetryStatsdAddr = "statsd_addr" - agentSelfTelemetryStatsiteAddr = "statsite_addr" - agentSelfTelemetryStatsitePrefix = "statsite_prefix" -) - -const ( - agentSelfUnixSocketGroup = "group" - agentSelfUnixSocketMode = "mode" - agentSelfUnixSocketUser = "user" -) - -func dataSourceConsulAgentSelf() *schema.Resource { - return &schema.Resource{ - Read: dataSourceConsulAgentSelfRead, - Schema: map[string]*schema.Schema{ - agentSelfACLDatacenter: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfACLDefaultPolicy: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfACLDisabledTTL: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfACLDownPolicy: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfACLEnforceVersion8: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfACLTTL: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfAddresses: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfSchemaPortsDNS: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfSchemaPortsHTTP: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfSchemaPortsHTTPS: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfSchemaPortsRPC: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfAdvertiseAddr: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfAdvertiseAddrs: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfSchemaPortsSerfLAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfSchemaPortsSerfWAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - 
agentSelfSchemaPortsRPC: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfAdvertiseAddrWAN: { - Computed: true, - Type: schema.TypeString, - }, - // Omitting the following since they've been depreciated: - // - // "AtlasInfrastructure": "", - // "AtlasEndpoint": "", - agentSelfAtlasJoin: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfBindAddr: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfBootstrapMode: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfBootstrapExpect: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfCheckDeregisterIntervalMin: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfCheckReapInterval: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfCheckUpdateInterval: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfClientAddr: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDNSConfig: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfDNSAllowStale: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfDNSEnableCompression: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfDNSEnableTruncate: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfDNSMaxStale: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDNSNodeTTL: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDNSOnlyPassing: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfDNSRecursorTimeout: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDNSServiceTTL: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDNSUDPAnswerLimit: { - Computed: true, - Type: schema.TypeInt, - }, - }, - }, - }, - agentSelfDataDir: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDatacenter: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfDevMode: { - Computed: true, - Type: 
schema.TypeBool, - }, - agentSelfEnableAnonymousSignature: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfEnableCoordinates: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfEnableRemoteExec: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfEnableUpdateCheck: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfDNSRecursors: { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfDomain: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfEnableDebug: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfEnableSyslog: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfEnableUI: { - Computed: true, - Type: schema.TypeBool, - }, - // "HTTPAPIResponseHeaders": nil, // TODO(sean@) - agentSelfID: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfLeaveOnInt: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfLeaveOnTerm: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfLogLevel: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfName: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfPerformance: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfPerformanceRaftMultiplier: { - Computed: true, - Type: schema.TypeString, // FIXME(sean@): should be schema.TypeInt - }, - }, - }, - }, - agentSelfPidFile: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfPorts: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfSchemaPortsDNS: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsHTTP: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsHTTPS: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsRPC: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsSerfLAN: { - Computed: 
true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsSerfWAN: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfSchemaPortsServer: { - Computed: true, - Type: schema.TypeInt, - }, - }, - }, - }, - agentSelfProtocol: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfReconnectTimeoutLAN: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfReconnectTimeoutWAN: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfRejoinAfterLeave: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfRetryJoin: { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfRetryJoinWAN: { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfRetryMaxAttempts: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfRetryMaxAttemptsWAN: { - Computed: true, - Type: schema.TypeInt, - }, - agentSelfRetryJoinEC2: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfRetryJoinAWSRegion: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfRetryJoinAWSTagKey: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfRetryJoinAWSTagValue: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfRetryJoinGCE: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfRetryJoinGCEProjectName: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfRetryJoinGCEZonePattern: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfRetryJoinGCETagValue: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfRetryJoinGCECredentialsFile: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfSerfLANBindAddr: { - Computed: true, - Type: 
schema.TypeString, - }, - agentSelfSerfWANBindAddr: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfServerMode: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfServerName: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfSessionTTLMin: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfStartJoin: { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfStartJoinWAN: { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfSyslogFacility: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfTaggedAddresses: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfTaggedAddressesLAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTaggedAddressesWAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfTelemetry: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - agentSelfTelemetryCirconusAPIApp: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusAPIToken: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusAPIURL: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusBrokerID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusBrokerSelectTag: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckDisplayName: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckInstanceID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - 
agentSelfTelemetryCirconusCheckSearchTag: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckSubmissionURL: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckTags: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryCirconusCheckForceMetricActiation: &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - agentSelfTelemetryCirconusSubmissionInterval: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryEnableHostname: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryDogStatsdAddr: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryDogStatsdTags: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - agentSelfTelemetryStatsdAddr: { - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryStatsiteAddr: { - Type: schema.TypeString, - Computed: true, - }, - agentSelfTelemetryStatsitePrefix: { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfTLSCAFile: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfTLSCertFile: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfTLSKeyFile: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfTLSMinVersion: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfTLSVerifyIncoming: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfTLSVerifyServerHostname: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfTLSVerifyOutgoing: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfTranslateWANAddrs: { - Computed: true, - Type: schema.TypeBool, - }, - agentSelfUIDir: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfUnixSockets: { - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - agentSelfUnixSocketUser: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfUnixSocketGroup: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - agentSelfUnixSocketMode: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - agentSelfVersion: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfVersionPrerelease: { - Computed: true, - Type: schema.TypeString, - }, - agentSelfVersionRevision: { - Computed: true, - Type: schema.TypeString, - }, - // "Watches": nil, - }, - } -} - -func dataSourceConsulAgentSelfRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - info, err := client.Agent().Self() - if err != nil { - return err - } - - const apiAgentConfig = "Config" - cfg, ok := info[apiAgentConfig] - if !ok { - return fmt.Errorf("No %s info available within provider's agent/self endpoint", apiAgentConfig) - } - - // Pull the datacenter first because we use it when setting the ID - var dc string - if v, found := cfg["Datacenter"]; found { - dc = v.(string) - } - - const idKeyFmt = "agent-self-%s" - d.SetId(fmt.Sprintf(idKeyFmt, dc)) - - if v, found := cfg["ACLDatacenter"]; found { - d.Set(agentSelfACLDatacenter, v.(string)) - } - - if v, found := cfg["ACLDefaultPolicy"]; found { - d.Set(agentSelfACLDefaultPolicy, v.(string)) - } - - if v, found := cfg["ACLDisabledTTL"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfACLDisabledTTL, dur.String()) - } - - if v, found := cfg["ACLDownPolicy"]; found { - d.Set(agentSelfACLDownPolicy, v.(string)) - } - - if v, found := cfg["ACLEnforceVersion8"]; found { - d.Set(agentSelfACLEnforceVersion8, v.(bool)) - } - - if v, found := cfg["ACLTTL"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfACLTTL, dur.String()) - } - - if v, found := cfg["Addresses"]; found { - addrs := v.(map[string]interface{}) - - m := make(map[string]interface{}, 
len(addrs)) - - if v, found := addrs["DNS"]; found { - m[agentSelfSchemaPortsDNS] = v.(string) - } - - if v, found := addrs["HTTP"]; found { - m[agentSelfSchemaPortsHTTP] = v.(string) - } - - if v, found := addrs["HTTPS"]; found { - m[agentSelfSchemaPortsHTTPS] = v.(string) - } - - if v, found := addrs["RPC"]; found { - m[agentSelfSchemaPortsRPC] = v.(string) - } - - if err := d.Set(agentSelfAddresses, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfAddresses), err) - } - } - - if v, found := cfg["AdvertiseAddr"]; found { - d.Set(agentSelfAdvertiseAddr, v.(string)) - } - - if v, found := cfg["AdvertiseAddrs"]; found { - addrs := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(addrs)) - - if v, found := addrs["SerfLan"]; found && v != nil { - m[agentSelfSchemaPortsSerfLAN] = v.(string) - } - - if v, found := addrs["SerfWan"]; found && v != nil { - m[agentSelfSchemaPortsSerfWAN] = v.(string) - } - - if v, found := addrs["RPC"]; found && v != nil { - m[agentSelfSchemaPortsRPC] = v.(string) - } - - if err := d.Set(agentSelfAdvertiseAddrs, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfAdvertiseAddrs), err) - } - } - - if v, found := cfg["AtlasJoin"]; found { - d.Set(agentSelfAtlasJoin, v.(bool)) - } - - if v, found := cfg["BindAddr"]; found { - d.Set(agentSelfBindAddr, v.(string)) - } - - if v, found := cfg["Bootstrap"]; found { - d.Set(agentSelfBootstrapMode, v.(bool)) - } - - if v, found := cfg["BootstrapExpect"]; found { - d.Set(agentSelfBootstrapExpect, int(v.(float64))) - } - - if v, found := cfg["CheckDeregisterIntervalMin"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfCheckDeregisterIntervalMin, dur.String()) - } - - if v, found := cfg["CheckReapInterval"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfCheckReapInterval, dur.String()) - } - - if v, found := cfg["CheckUpdateInterval"]; found { - dur := 
time.Duration(int64(v.(float64))) - d.Set(agentSelfCheckUpdateInterval, dur.String()) - } - - if v, found := cfg["ClientAddr"]; found { - d.Set(agentSelfClientAddr, v.(string)) - } - - if v, found := cfg["DNS"]; found { - dnsOpts := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(dnsOpts)) - - if v, found := dnsOpts["AllowStale"]; found { - m[agentSelfDNSAllowStale] = v.(bool) - } - - if v, found := dnsOpts["DisableCompression"]; found { - m[agentSelfDNSEnableCompression] = !v.(bool) - } - - if v, found := dnsOpts["EnableTruncate"]; found { - m[agentSelfDNSEnableTruncate] = v.(bool) - } - - if v, found := dnsOpts["MaxStale"]; found { - dur := time.Duration(int64(v.(float64))) - m[agentSelfDNSMaxStale] = dur.String() - } - - if v, found := dnsOpts["NodeTTL"]; found { - dur := time.Duration(int64(v.(float64))) - m[agentSelfDNSNodeTTL] = dur.String() - } - - if v, found := dnsOpts["OnlyPassing"]; found { - m[agentSelfDNSOnlyPassing] = v.(bool) - } - - if v, found := dnsOpts["RecursorTimeout"]; found { - dur := time.Duration(int64(v.(float64))) - m[agentSelfDNSRecursorTimeout] = dur.String() - } - - if v, found := dnsOpts["ServiceTTL"]; found { - dur := time.Duration(int64(v.(float64))) - m[agentSelfDNSServiceTTL] = dur.String() - } - - if v, found := dnsOpts["UDPAnswerLimit"]; found { - m[agentSelfDNSServiceTTL] = v.(int) - } - - if err := d.Set(agentSelfDNSConfig, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfDNSConfig), err) - } - } - - { - var l []interface{} - - if v, found := cfg["DNSRecursors"]; found { - l = make([]interface{}, 0, len(v.([]interface{}))+1) - l = append(l, v.([]interface{})...) - } - - if v, found := cfg["DNSRecursor"]; found { - l = append([]interface{}{v.(string)}, l...) 
- } - - if len(l) > 0 { - if err := d.Set(agentSelfDNSRecursors, l); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfDNSRecursors), err) - } - } - } - - if v, found := cfg["DataDir"]; found { - d.Set(agentSelfDataDir, v.(string)) - } - - if len(dc) > 0 { - d.Set(agentSelfDatacenter, dc) - } - - if v, found := cfg["DevMode"]; found { - d.Set(agentSelfDevMode, v.(bool)) - } - - if v, found := cfg["DisableAnonymousSignature"]; found { - d.Set(agentSelfEnableAnonymousSignature, !v.(bool)) - } - - if v, found := cfg["DisableCoordinates"]; found { - d.Set(agentSelfEnableCoordinates, !v.(bool)) - } - - if v, found := cfg["DisableRemoteExec"]; found { - d.Set(agentSelfEnableRemoteExec, !v.(bool)) - } - - if v, found := cfg["DisableUpdateCheck"]; found { - d.Set(agentSelfEnableUpdateCheck, !v.(bool)) - } - - if v, found := cfg["Domain"]; found { - d.Set(agentSelfDomain, v.(string)) - } - - if v, found := cfg["EnableDebug"]; found { - d.Set(agentSelfEnableDebug, v.(bool)) - } - - if v, found := cfg["EnableSyslog"]; found { - d.Set(agentSelfEnableSyslog, v.(bool)) - } - - if v, found := cfg["EnableUi"]; found { - d.Set(agentSelfEnableUI, v.(bool)) - } - - if v, found := cfg["id"]; found { - d.Set(agentSelfID, v.(string)) - } - - if v, found := cfg["SkipLeaveOnInt"]; found { - d.Set(agentSelfLeaveOnInt, !v.(bool)) - } - - if v, found := cfg["LeaveOnTerm"]; found { - d.Set(agentSelfLeaveOnTerm, v.(bool)) - } - - if v, found := cfg["LogLevel"]; found { - d.Set(agentSelfLogLevel, v.(string)) - } - - if v, found := cfg["NodeName"]; found { - d.Set(agentSelfName, v.(string)) - } - - if v, found := cfg["Performance"]; found { - cfgs := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(cfgs)) - - if v, found := cfgs["RaftMultiplier"]; found { - m[agentSelfPerformanceRaftMultiplier] = strconv.FormatFloat(v.(float64), 'g', -1, 64) - } - - if err := d.Set(agentSelfPerformance, m); err != nil { - return 
errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfPerformance), err) - } - } - - if v, found := cfg["PidFile"]; found { - d.Set(agentSelfPidFile, v.(string)) - } - - if v, found := cfg["Ports"]; found { - cfgs := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(cfgs)) - - if v, found := cfgs[agentSelfAPIPortsDNS]; found { - m[agentSelfSchemaPortsDNS] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsHTTP]; found { - m[agentSelfSchemaPortsHTTP] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsHTTPS]; found { - m[agentSelfSchemaPortsHTTPS] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsRPC]; found { - m[agentSelfSchemaPortsRPC] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsSerfLAN]; found { - m[agentSelfSchemaPortsSerfLAN] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsSerfWAN]; found { - m[agentSelfSchemaPortsSerfWAN] = int(v.(float64)) - } - - if v, found := cfgs[agentSelfAPIPortsServer]; found { - m[agentSelfSchemaPortsServer] = int(v.(float64)) - } - - if err := d.Set(agentSelfPorts, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfPorts), err) - } - } - - if v, found := cfg["Protocol"]; found { - d.Set(agentSelfProtocol, int(v.(float64))) - } - - if v, found := cfg["ReconnectTimeoutLan"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfReconnectTimeoutLAN, dur.String()) - } - - if v, found := cfg["ReconnectTimeoutWan"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfReconnectTimeoutWAN, dur.String()) - } - - if v, found := cfg["RejoinAfterLeave"]; found { - d.Set(agentSelfRejoinAfterLeave, v.(bool)) - } - - if v, found := cfg["RetryJoin"]; found { - l := make([]string, 0, len(v.([]interface{}))) - for _, e := range v.([]interface{}) { - l = append(l, e.(string)) - } - - if err := d.Set(agentSelfRetryJoin, l); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to 
set %s: {{err}}", agentSelfRetryJoin), err) - } - } - - if v, found := cfg["RetryJoinEC2"]; found { - ec2Config := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(ec2Config)) - - if v, found := ec2Config["Region"]; found { - m[agentSelfRetryJoinAWSRegion] = v.(string) - } - - if v, found := ec2Config["TagKey"]; found { - m[agentSelfRetryJoinAWSTagKey] = v.(string) - } - - if v, found := ec2Config["TagValue"]; found { - m[agentSelfRetryJoinAWSTagValue] = v.(string) - } - - if err := d.Set(agentSelfRetryJoinEC2, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfRetryJoinEC2), err) - } - } - - if v, found := cfg["RetryJoinWan"]; found { - l := make([]string, 0, len(v.([]interface{}))) - for _, e := range v.([]interface{}) { - l = append(l, e.(string)) - } - - if err := d.Set(agentSelfRetryJoinWAN, l); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfRetryJoinWAN), err) - } - } - - if v, found := cfg["RetryMaxAttempts"]; found { - d.Set(agentSelfRetryMaxAttempts, int(v.(float64))) - } - - if v, found := cfg["RetryMaxAttemptsWan"]; found { - d.Set(agentSelfRetryMaxAttemptsWAN, int(v.(float64))) - } - - if v, found := cfg["SerfLanBindAddr"]; found { - d.Set(agentSelfSerfLANBindAddr, v.(string)) - } - - if v, found := cfg["SerfWanBindAddr"]; found { - d.Set(agentSelfSerfWANBindAddr, v.(string)) - } - - if v, found := cfg["Server"]; found { - d.Set(agentSelfServerMode, v.(bool)) - } - - if v, found := cfg["ServerName"]; found { - d.Set(agentSelfServerName, v.(string)) - } - - if v, found := cfg["SessionTTLMin"]; found { - dur := time.Duration(int64(v.(float64))) - d.Set(agentSelfSessionTTLMin, dur.String()) - } - - if v, found := cfg["StartJoin"]; found { - serverList := v.([]interface{}) - l := make([]interface{}, 0, len(serverList)) - l = append(l, serverList...) 
- if err := d.Set(agentSelfStartJoin, l); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfStartJoin), err) - } - } - - if v, found := cfg["StartJoinWan"]; found { - serverList := v.([]interface{}) - l := make([]interface{}, 0, len(serverList)) - l = append(l, serverList...) - if err := d.Set(agentSelfStartJoinWAN, l); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfStartJoinWAN), err) - } - } - - if v, found := cfg["SyslogFacility"]; found { - d.Set(agentSelfSyslogFacility, v.(string)) - } - - if v, found := cfg["CAFile"]; found { - d.Set(agentSelfTLSCAFile, v.(string)) - } - - if v, found := cfg["CertFile"]; found { - d.Set(agentSelfTLSCertFile, v.(string)) - } - - if v, found := cfg["KeyFile"]; found { - d.Set(agentSelfTLSKeyFile, v.(string)) - } - - if v, found := cfg["TLSMinVersion"]; found { - d.Set(agentSelfTLSMinVersion, v.(string)) - } - - if v, found := cfg["VerifyIncoming"]; found { - d.Set(agentSelfTLSVerifyIncoming, v.(bool)) - } - - if v, found := cfg["VerifyOutgoing"]; found { - d.Set(agentSelfTLSVerifyOutgoing, v.(bool)) - } - - if v, found := cfg["VerifyServerHostname"]; found { - d.Set(agentSelfTLSVerifyServerHostname, v.(bool)) - } - - if v, found := cfg["TaggedAddresses"]; found { - addrs := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(addrs)) - - // NOTE(sean@): agentSelfTaggedAddressesLAN and agentSelfTaggedAddressesWAN - // are the only two known values that should be in this map at present, but - // in the future this value could/will expand and the schema should be - // releaxed to include both the known *{L,W}AN values as well as whatever - // else the user specifies. 
- for s, t := range addrs { - m[s] = t - } - - if err := d.Set(agentSelfTaggedAddresses, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfTaggedAddresses), err) - } - } - - if v, found := cfg["Telemetry"]; found { - telemetryCfg := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(telemetryCfg)) - - if v, found := telemetryCfg["CirconusAPIApp"]; found { - m[agentSelfTelemetryCirconusAPIApp] = v.(string) - } - - if v, found := telemetryCfg["CirconusAPIURL"]; found { - m[agentSelfTelemetryCirconusAPIURL] = v.(string) - } - - if v, found := telemetryCfg["CirconusBrokerID"]; found { - m[agentSelfTelemetryCirconusBrokerID] = v.(string) - } - - if v, found := telemetryCfg["CirconusBrokerSelectTag"]; found { - m[agentSelfTelemetryCirconusBrokerSelectTag] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckDisplayName"]; found { - m[agentSelfTelemetryCirconusCheckDisplayName] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckID"]; found { - m[agentSelfTelemetryCirconusCheckID] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckInstanceID"]; found { - m[agentSelfTelemetryCirconusCheckInstanceID] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckSearchTag"]; found { - m[agentSelfTelemetryCirconusCheckSearchTag] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckSubmissionURL"]; found { - m[agentSelfTelemetryCirconusCheckSubmissionURL] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckTags"]; found { - m[agentSelfTelemetryCirconusCheckTags] = v.(string) - } - - if v, found := telemetryCfg["CirconusCheckForceMetricActivation"]; found { - m[agentSelfTelemetryCirconusCheckForceMetricActiation] = v.(string) - } - - if v, found := telemetryCfg["CirconusSubmissionInterval"]; found { - m[agentSelfTelemetryCirconusSubmissionInterval] = v.(string) - } - - if v, found := telemetryCfg["DisableHostname"]; found { - m[agentSelfTelemetryEnableHostname] = 
fmt.Sprintf("%t", !v.(bool)) - } - - if v, found := telemetryCfg["DogStatsdAddr"]; found { - m[agentSelfTelemetryDogStatsdAddr] = v.(string) - } - - if v, found := telemetryCfg["DogStatsdTags"]; found && v != nil { - m[agentSelfTelemetryDogStatsdTags] = append([]interface{}(nil), v.([]interface{})...) - } - - if v, found := telemetryCfg["StatsdAddr"]; found { - m[agentSelfTelemetryStatsdAddr] = v.(string) - } - - if v, found := telemetryCfg["StatsiteAddr"]; found { - m[agentSelfTelemetryStatsiteAddr] = v.(string) - } - - if v, found := telemetryCfg["StatsitePrefix"]; found { - m[agentSelfTelemetryStatsitePrefix] = v.(string) - } - - if err := d.Set(agentSelfTelemetry, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfTelemetry), err) - } - } - - if v, found := cfg["TranslateWanTelemetryCfg"]; found { - d.Set(agentSelfTranslateWANAddrs, v.(bool)) - } - - if v, found := cfg["UiDir"]; found { - d.Set(agentSelfUIDir, v.(string)) - } - - if v, found := cfg["UnixSockets"]; found { - socketConfig := v.(map[string]interface{}) - - m := make(map[string]interface{}, len(socketConfig)) - - if v, found := socketConfig["Grp"]; found { - m[agentSelfUnixSocketGroup] = v.(string) - } - - if v, found := socketConfig["Mode"]; found { - m[agentSelfUnixSocketMode] = v.(string) - } - - if v, found := socketConfig["Usr"]; found { - m[agentSelfUnixSocketUser] = v.(string) - } - - if err := d.Set(agentSelfUnixSockets, m); err != nil { - return errwrap.Wrapf(fmt.Sprintf("Unable to set %s: {{err}}", agentSelfUnixSockets), err) - } - } - - if v, found := cfg["Version"]; found { - d.Set(agentSelfVersion, v.(string)) - } - - if v, found := cfg["VersionPrerelease"]; found { - d.Set(agentSelfVersionPrerelease, v.(string)) - } - - if v, found := cfg["VersionPrerelease"]; found { - d.Set(agentSelfVersionPrerelease, v.(string)) - } - - if v, found := cfg["Revision"]; found { - d.Set(agentSelfVersionRevision, v.(string)) - } - - return nil -} diff --git 
a/builtin/providers/consul/data_source_consul_agent_self_test.go b/builtin/providers/consul/data_source_consul_agent_self_test.go deleted file mode 100644 index 341a89c9c..000000000 --- a/builtin/providers/consul/data_source_consul_agent_self_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataConsulAgentSelf_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataConsulAgentSelfConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_datacenter", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_default_policy", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_disabled_ttl", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_down_policy", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_enforce_0_8_semantics", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "acl_ttl", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "advertise_addr", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "bind_addr", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "bootstrap_expect", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "bootstrap_mode", "false"), - testAccCheckDataSourceValue("data.consul_agent_self.read", "client_addr", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "datacenter", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "dev_mode", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "domain", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_anonymous_signature", ""), 
- testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_coordinates", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_debug", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_remote_exec", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_syslog", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_ui", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "enable_update_check", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "id", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "leave_on_int", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "leave_on_term", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "log_level", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "name", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "pid_file", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "rejoin_after_leave", ""), - // testAccCheckDataSourceValue("data.consul_agent_self.read", "retry_join", ""), - // testAccCheckDataSourceValue("data.consul_agent_self.read", "retry_join_wan", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "retry_max_attempts", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "retry_max_attempts_wan", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "serf_lan_bind_addr", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "serf_wan_bind_addr", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "server_mode", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "server_name", ""), - // testAccCheckDataSourceValue("data.consul_agent_self.read", "start_join", ""), - // testAccCheckDataSourceValue("data.consul_agent_self.read", "start_join_wan", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", 
"syslog_facility", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "telemetry.enable_hostname", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_ca_file", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_cert_file", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_key_file", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_verify_incoming", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_verify_outgoing", ""), - testAccCheckDataSourceValue("data.consul_agent_self.read", "tls_verify_server_hostname", ""), - ), - }, - }, - }) -} - -func testAccCheckDataSourceValue(n, attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, found := rn.Primary.Attributes[attr] - switch { - case !found: - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - case val == "": - // Value found, don't care what the payload is (including the zero value) - case val != "" && out != val: - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - case val == "" && out == "": - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -const testAccDataConsulAgentSelfConfig = ` -data "consul_agent_self" "read" { -} -` diff --git a/builtin/providers/consul/data_source_consul_catalog_nodes.go b/builtin/providers/consul/data_source_consul_catalog_nodes.go deleted file mode 100644 index b93da423a..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_nodes.go +++ /dev/null @@ -1,153 +0,0 @@ -package consul - -import ( - "fmt" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - catalogNodesElem = "nodes" - catalogNodesDatacenter = "datacenter" - catalogNodesQueryOpts = 
"query_options" - - catalogNodesNodeID = "id" - catalogNodesNodeAddress = "address" - catalogNodesNodeMeta = "meta" - catalogNodesNodeName = "name" - catalogNodesNodeTaggedAddresses = "tagged_addresses" - - catalogNodesNodeIDs = "node_ids" - catalogNodesNodeNames = "node_names" - - catalogNodesAPITaggedLAN = "lan" - catalogNodesAPITaggedWAN = "wan" - catalogNodesSchemaTaggedLAN = "lan" - catalogNodesSchemaTaggedWAN = "wan" -) - -func dataSourceConsulCatalogNodes() *schema.Resource { - return &schema.Resource{ - Read: dataSourceConsulCatalogNodesRead, - Schema: map[string]*schema.Schema{ - // Filters - catalogNodesQueryOpts: schemaQueryOpts, - - // Out parameters - catalogNodesDatacenter: &schema.Schema{ - Computed: true, - Type: schema.TypeString, - }, - catalogNodesNodeIDs: &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - catalogNodesNodeNames: &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - catalogNodesElem: &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - catalogNodesNodeID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogNodesNodeName: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogNodesNodeAddress: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogNodesNodeMeta: &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - catalogNodesNodeTaggedAddresses: &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - catalogNodesSchemaTaggedLAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogNodesSchemaTaggedWAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func dataSourceConsulCatalogNodesRead(d *schema.ResourceData, meta 
interface{}) error { - client := meta.(*consulapi.Client) - - // Parse out data source filters to populate Consul's query options - queryOpts, err := getQueryOpts(d, client) - if err != nil { - return errwrap.Wrapf("unable to get query options for fetching catalog nodes: {{err}}", err) - } - - nodes, meta, err := client.Catalog().Nodes(queryOpts) - if err != nil { - return err - } - - l := make([]interface{}, 0, len(nodes)) - - nodeNames := make([]interface{}, 0, len(nodes)) - nodeIDs := make([]interface{}, 0, len(nodes)) - - for _, node := range nodes { - const defaultNodeAttrs = 4 - m := make(map[string]interface{}, defaultNodeAttrs) - id := node.ID - if id == "" { - id = node.Node - } - - nodeIDs = append(nodeIDs, id) - nodeNames = append(nodeNames, node.Node) - - m[catalogNodesNodeAddress] = node.Address - m[catalogNodesNodeID] = id - m[catalogNodesNodeName] = node.Node - m[catalogNodesNodeMeta] = node.Meta - m[catalogNodesNodeTaggedAddresses] = node.TaggedAddresses - - l = append(l, m) - } - - const idKeyFmt = "catalog-nodes-%s" - d.SetId(fmt.Sprintf(idKeyFmt, queryOpts.Datacenter)) - - d.Set(catalogNodesDatacenter, queryOpts.Datacenter) - if err := d.Set(catalogNodesNodeIDs, nodeIDs); err != nil { - return errwrap.Wrapf("Unable to store node IDs: {{err}}", err) - } - - if err := d.Set(catalogNodesNodeNames, nodeNames); err != nil { - return errwrap.Wrapf("Unable to store node names: {{err}}", err) - } - - if err := d.Set(catalogNodesElem, l); err != nil { - return errwrap.Wrapf("Unable to store nodes: {{err}}", err) - } - - return nil -} diff --git a/builtin/providers/consul/data_source_consul_catalog_nodes_test.go b/builtin/providers/consul/data_source_consul_catalog_nodes_test.go deleted file mode 100644 index 94cea160d..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_nodes_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package consul - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccDataConsulCatalogNodes_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataConsulCatalogNodesConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceValue("data.consul_catalog_nodes.read", "nodes.#", "1"), - testAccCheckDataSourceValue("data.consul_catalog_nodes.read", "nodes.0.id", ""), - testAccCheckDataSourceValue("data.consul_catalog_nodes.read", "nodes.0.name", ""), - testAccCheckDataSourceValue("data.consul_catalog_nodes.read", "nodes.0.address", ""), - ), - }, - }, - }) -} - -const testAccDataConsulCatalogNodesConfig = ` -data "consul_catalog_nodes" "read" { - query_options { - allow_stale = true - require_consistent = false - token = "" - wait_index = 0 - wait_time = "1m" - } -} -` diff --git a/builtin/providers/consul/data_source_consul_catalog_service.go b/builtin/providers/consul/data_source_consul_catalog_service.go deleted file mode 100644 index 1affc781e..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_service.go +++ /dev/null @@ -1,202 +0,0 @@ -package consul - -import ( - "fmt" - "sort" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - catalogServiceElem = "service" - - catalogServiceCreateIndex = "create_index" - catalogServiceDatacenter = "datacenter" - catalogServiceModifyIndex = "modify_index" - catalogServiceNodeAddress = "node_address" - catalogServiceNodeID = "node_id" - catalogServiceNodeMeta = "node_meta" - catalogServiceNodeName = "node_name" - catalogServiceServiceAddress = "address" - catalogServiceServiceEnableTagOverride = "enable_tag_override" - catalogServiceServiceID = "id" - catalogServiceServiceName = "name" - catalogServiceServicePort = "port" - catalogServiceServiceTags = "tags" - catalogServiceTaggedAddresses = "tagged_addresses" - - // 
Filters - catalogServiceName = "name" - catalogServiceTag = "tag" -) - -func dataSourceConsulCatalogService() *schema.Resource { - return &schema.Resource{ - Read: dataSourceConsulCatalogServiceRead, - Schema: map[string]*schema.Schema{ - // Data Source Predicate(s) - catalogServiceDatacenter: &schema.Schema{ - // Used in the query, must be stored and force a refresh if the value - // changes. - Computed: true, - Type: schema.TypeString, - ForceNew: true, - }, - catalogServiceTag: &schema.Schema{ - // Used in the query, must be stored and force a refresh if the value - // changes. - Computed: true, - Type: schema.TypeString, - ForceNew: true, - }, - catalogServiceName: &schema.Schema{ - Required: true, - Type: schema.TypeString, - }, - catalogNodesQueryOpts: schemaQueryOpts, - - // Out parameters - catalogServiceElem: &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - catalogServiceCreateIndex: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceNodeAddress: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceNodeID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceModifyIndex: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceNodeName: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceNodeMeta: &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - catalogServiceServiceAddress: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceServiceEnableTagOverride: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceServiceID: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceServiceName: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogServiceServicePort: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - 
catalogServiceServiceTags: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - catalogServiceTaggedAddresses: &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - catalogNodesSchemaTaggedLAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - catalogNodesSchemaTaggedWAN: &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func dataSourceConsulCatalogServiceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - - // Parse out data source filters to populate Consul's query options - queryOpts, err := getQueryOpts(d, client) - if err != nil { - return errwrap.Wrapf("unable to get query options for fetching catalog services: {{err}}", err) - } - - var serviceName string - if v, ok := d.GetOk(catalogServiceName); ok { - serviceName = v.(string) - } - - var serviceTag string - if v, ok := d.GetOk(catalogServiceTag); ok { - serviceTag = v.(string) - } - - // services, meta, err := client.Catalog().Services(queryOpts) - services, meta, err := client.Catalog().Service(serviceName, serviceTag, queryOpts) - if err != nil { - return err - } - - l := make([]interface{}, 0, len(services)) - for _, service := range services { - const defaultServiceAttrs = 13 - m := make(map[string]interface{}, defaultServiceAttrs) - - m[catalogServiceCreateIndex] = fmt.Sprintf("%d", service.CreateIndex) - m[catalogServiceModifyIndex] = fmt.Sprintf("%d", service.ModifyIndex) - m[catalogServiceNodeAddress] = service.Address - m[catalogServiceNodeID] = service.ID - m[catalogServiceNodeMeta] = service.NodeMeta - m[catalogServiceNodeName] = service.Node - switch service.ServiceAddress { - case "": - m[catalogServiceServiceAddress] = service.Address - default: - m[catalogServiceServiceAddress] = service.ServiceAddress - } - 
m[catalogServiceServiceEnableTagOverride] = fmt.Sprintf("%t", service.ServiceEnableTagOverride) - m[catalogServiceServiceID] = service.ServiceID - m[catalogServiceServiceName] = service.ServiceName - m[catalogServiceServicePort] = fmt.Sprintf("%d", service.ServicePort) - sort.Strings(service.ServiceTags) - m[catalogServiceServiceTags] = service.ServiceTags - m[catalogServiceTaggedAddresses] = service.TaggedAddresses - - l = append(l, m) - } - - const idKeyFmt = "catalog-service-%s-%q-%q" - d.SetId(fmt.Sprintf(idKeyFmt, queryOpts.Datacenter, serviceName, serviceTag)) - - d.Set(catalogServiceDatacenter, queryOpts.Datacenter) - d.Set(catalogServiceName, serviceName) - d.Set(catalogServiceTag, serviceTag) - if err := d.Set(catalogServiceElem, l); err != nil { - return errwrap.Wrapf("Unable to store service: {{err}}", err) - } - - return nil -} diff --git a/builtin/providers/consul/data_source_consul_catalog_service_test.go b/builtin/providers/consul/data_source_consul_catalog_service_test.go deleted file mode 100644 index 0ef7d1ab0..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_service_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package consul - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataConsulCatalogService_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataConsulCatalogServiceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceValue("data.consul_catalog_service.read", "datacenter", "dc1"), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.#", "1"), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.address", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.create_index", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", 
"service.0.enable_tag_override", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.id", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.modify_index", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.name", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.node_address", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.node_id", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.node_meta.%", "0"), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.node_name", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.port", ""), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.tagged_addresses.%", "2"), - testAccCheckDataSourceValue("data.consul_catalog_service.read", "service.0.tags.#", "0"), - ), - }, - }, - }) -} - -const testAccDataConsulCatalogServiceConfig = ` -data "consul_catalog_service" "read" { - query_options { - allow_stale = true - require_consistent = false - token = "" - wait_index = 0 - wait_time = "1m" - } - - name = "consul" -} -` diff --git a/builtin/providers/consul/data_source_consul_catalog_services.go b/builtin/providers/consul/data_source_consul_catalog_services.go deleted file mode 100644 index 85770d611..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_services.go +++ /dev/null @@ -1,104 +0,0 @@ -package consul - -import ( - "fmt" - "sort" - "strings" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // Datasource predicates - catalogServicesServiceName = "name" - - // Out parameters - catalogServicesDatacenter = "datacenter" - catalogServicesNames = "names" - catalogServicesServices = "services" - catalogServicesServiceTags = "tags" -) - -func 
dataSourceConsulCatalogServices() *schema.Resource { - return &schema.Resource{ - Read: dataSourceConsulCatalogServicesRead, - Schema: map[string]*schema.Schema{ - // Data Source Predicate(s) - catalogServicesDatacenter: &schema.Schema{ - // Used in the query, must be stored and force a refresh if the value - // changes. - Computed: true, - Type: schema.TypeString, - ForceNew: true, - }, - catalogNodesQueryOpts: schemaQueryOpts, - - // Out parameters - catalogServicesNames: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - catalogServicesServices: &schema.Schema{ - Computed: true, - Type: schema.TypeMap, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - catalogServiceServiceTags: &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func dataSourceConsulCatalogServicesRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - - // Parse out data source filters to populate Consul's query options - queryOpts, err := getQueryOpts(d, client) - if err != nil { - return errwrap.Wrapf("unable to get query options for fetching catalog services: {{err}}", err) - } - - services, meta, err := client.Catalog().Services(queryOpts) - if err != nil { - return err - } - - catalogServices := make(map[string]interface{}, len(services)) - for name, tags := range services { - tagList := make([]string, 0, len(tags)) - for _, tag := range tags { - tagList = append(tagList, tag) - } - - sort.Strings(tagList) - catalogServices[name] = strings.Join(tagList, " ") - } - - serviceNames := make([]interface{}, 0, len(services)) - for k := range catalogServices { - serviceNames = append(serviceNames, k) - } - - const idKeyFmt = "catalog-services-%s" - d.SetId(fmt.Sprintf(idKeyFmt, queryOpts.Datacenter)) - - d.Set(catalogServicesDatacenter, queryOpts.Datacenter) - if err := 
d.Set(catalogServicesServices, catalogServices); err != nil { - return errwrap.Wrapf("Unable to store services: {{err}}", err) - } - - if err := d.Set(catalogServicesNames, serviceNames); err != nil { - return errwrap.Wrapf("Unable to store service names: {{err}}", err) - } - - return nil -} diff --git a/builtin/providers/consul/data_source_consul_catalog_services_test.go b/builtin/providers/consul/data_source_consul_catalog_services_test.go deleted file mode 100644 index 1087073f7..000000000 --- a/builtin/providers/consul/data_source_consul_catalog_services_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package consul - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataConsulCatalogServices_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataConsulCatalogServicesConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceValue("data.consul_catalog_services.read", "datacenter", "dc1"), - testAccCheckDataSourceValue("data.consul_catalog_services.read", "services.%", "1"), - testAccCheckDataSourceValue("data.consul_catalog_services.read", "services.consul", ""), - ), - }, - }, - }) -} - -const testAccDataConsulCatalogServicesConfig = ` -data "consul_catalog_services" "read" { - query_options { - allow_stale = true - require_consistent = false - token = "" - wait_index = 0 - wait_time = "1m" - } -} -` diff --git a/builtin/providers/consul/data_source_consul_keys.go b/builtin/providers/consul/data_source_consul_keys.go deleted file mode 100644 index af3e62771..000000000 --- a/builtin/providers/consul/data_source_consul_keys.go +++ /dev/null @@ -1,96 +0,0 @@ -package consul - -import ( - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceConsulKeys() *schema.Resource { - return &schema.Resource{ - Read: 
dataSourceConsulKeysRead, - - Schema: map[string]*schema.Schema{ - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "key": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "default": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "var": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func dataSourceConsulKeysRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - vars := make(map[string]string) - - keys := d.Get("key").(*schema.Set).List() - for _, raw := range keys { - key, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - value, err := keyClient.Get(path) - if err != nil { - return err - } - - value = attributeValue(sub, value) - vars[key] = value - } - - if err := d.Set("var", vars); err != nil { - return err - } - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - d.SetId("-") - - return nil -} diff --git a/builtin/providers/consul/data_source_consul_keys_test.go b/builtin/providers/consul/data_source_consul_keys_test.go deleted file mode 100644 index 09f62a927..000000000 --- a/builtin/providers/consul/data_source_consul_keys_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package consul - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataConsulKeys_basic(t 
*testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataConsulKeysConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulKeysValue("data.consul_keys.read", "read", "written"), - ), - }, - }, - }) -} - -const testAccDataConsulKeysConfig = ` -resource "consul_keys" "write" { - datacenter = "dc1" - - key { - path = "test/data_source" - value = "written" - } -} - -data "consul_keys" "read" { - # Create a dependency on the resource so we're sure to - # have the value in place before we try to read it. - datacenter = "${consul_keys.write.datacenter}" - - key { - path = "test/data_source" - name = "read" - } -} -` diff --git a/builtin/providers/consul/key_client.go b/builtin/providers/consul/key_client.go deleted file mode 100644 index 0b909dd2e..000000000 --- a/builtin/providers/consul/key_client.go +++ /dev/null @@ -1,96 +0,0 @@ -package consul - -import ( - "fmt" - "log" - - consulapi "github.com/hashicorp/consul/api" -) - -// keyClient is a wrapper around the upstream Consul client that is -// specialized for Terraform's manipulations of the key/value store. 
-type keyClient struct { - client *consulapi.KV - qOpts *consulapi.QueryOptions - wOpts *consulapi.WriteOptions -} - -func newKeyClient(realClient *consulapi.KV, dc, token string) *keyClient { - qOpts := &consulapi.QueryOptions{Datacenter: dc, Token: token} - wOpts := &consulapi.WriteOptions{Datacenter: dc, Token: token} - - return &keyClient{ - client: realClient, - qOpts: qOpts, - wOpts: wOpts, - } -} - -func (c *keyClient) Get(path string) (string, error) { - log.Printf( - "[DEBUG] Reading key '%s' in %s", - path, c.qOpts.Datacenter, - ) - pair, _, err := c.client.Get(path, c.qOpts) - if err != nil { - return "", fmt.Errorf("Failed to read Consul key '%s': %s", path, err) - } - value := "" - if pair != nil { - value = string(pair.Value) - } - return value, nil -} - -func (c *keyClient) GetUnderPrefix(pathPrefix string) (map[string]string, error) { - log.Printf( - "[DEBUG] Listing keys under '%s' in %s", - pathPrefix, c.qOpts.Datacenter, - ) - pairs, _, err := c.client.List(pathPrefix, c.qOpts) - if err != nil { - return nil, fmt.Errorf( - "Failed to list Consul keys under prefix '%s': %s", pathPrefix, err, - ) - } - value := map[string]string{} - for _, pair := range pairs { - subKey := pair.Key[len(pathPrefix):] - value[subKey] = string(pair.Value) - } - return value, nil -} - -func (c *keyClient) Put(path, value string) error { - log.Printf( - "[DEBUG] Setting key '%s' to '%v' in %s", - path, value, c.wOpts.Datacenter, - ) - pair := consulapi.KVPair{Key: path, Value: []byte(value)} - if _, err := c.client.Put(&pair, c.wOpts); err != nil { - return fmt.Errorf("Failed to write Consul key '%s': %s", path, err) - } - return nil -} - -func (c *keyClient) Delete(path string) error { - log.Printf( - "[DEBUG] Deleting key '%s' in %s", - path, c.wOpts.Datacenter, - ) - if _, err := c.client.Delete(path, c.wOpts); err != nil { - return fmt.Errorf("Failed to delete Consul key '%s': %s", path, err) - } - return nil -} - -func (c *keyClient) DeleteUnderPrefix(pathPrefix 
string) error { - log.Printf( - "[DEBUG] Deleting all keys under prefix '%s' in %s", - pathPrefix, c.wOpts.Datacenter, - ) - if _, err := c.client.DeleteTree(pathPrefix, c.wOpts); err != nil { - return fmt.Errorf("Failed to delete Consul keys under '%s': %s", pathPrefix, err) - } - return nil -} diff --git a/builtin/providers/consul/query_options.go b/builtin/providers/consul/query_options.go deleted file mode 100644 index 1cce0fabe..000000000 --- a/builtin/providers/consul/query_options.go +++ /dev/null @@ -1,122 +0,0 @@ -package consul - -import ( - "time" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - queryOptAllowStale = "allow_stale" - queryOptDatacenter = "datacenter" - queryOptNear = "near" - queryOptNodeMeta = "node_meta" - queryOptRequireConsistent = "require_consistent" - queryOptToken = "token" - queryOptWaitIndex = "wait_index" - queryOptWaitTime = "wait_time" -) - -var schemaQueryOpts = &schema.Schema{ - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - queryOptAllowStale: &schema.Schema{ - Optional: true, - Default: true, - Type: schema.TypeBool, - }, - queryOptDatacenter: &schema.Schema{ - // Optional because we'll pull the default from the local agent if it's - // not specified, but we can query remote data centers as a result. 
- Optional: true, - Type: schema.TypeString, - }, - queryOptNear: &schema.Schema{ - Optional: true, - Type: schema.TypeString, - }, - queryOptNodeMeta: &schema.Schema{ - Optional: true, - Type: schema.TypeMap, - }, - queryOptRequireConsistent: &schema.Schema{ - Optional: true, - Default: false, - Type: schema.TypeBool, - }, - queryOptToken: &schema.Schema{ - Optional: true, - Type: schema.TypeString, - }, - queryOptWaitIndex: &schema.Schema{ - Optional: true, - Type: schema.TypeInt, - ValidateFunc: makeValidationFunc(queryOptWaitIndex, []interface{}{ - validateIntMin(0), - }), - }, - queryOptWaitTime: &schema.Schema{ - Optional: true, - Type: schema.TypeString, - ValidateFunc: makeValidationFunc(queryOptWaitTime, []interface{}{ - validateDurationMin("0ns"), - }), - }, - }, - }, -} - -func getQueryOpts(d *schema.ResourceData, client *consulapi.Client) (*consulapi.QueryOptions, error) { - queryOpts := &consulapi.QueryOptions{} - - if v, ok := d.GetOk(queryOptAllowStale); ok { - queryOpts.AllowStale = v.(bool) - } - - if v, ok := d.GetOk(queryOptDatacenter); ok { - queryOpts.Datacenter = v.(string) - } - - if queryOpts.Datacenter == "" { - dc, err := getDC(d, client) - if err != nil { - return nil, err - } - queryOpts.Datacenter = dc - } - - if v, ok := d.GetOk(queryOptNear); ok { - queryOpts.Near = v.(string) - } - - if v, ok := d.GetOk(queryOptRequireConsistent); ok { - queryOpts.RequireConsistent = v.(bool) - } - - if v, ok := d.GetOk(queryOptNodeMeta); ok { - m := v.(map[string]interface{}) - nodeMetaMap := make(map[string]string, len(queryOptNodeMeta)) - for s, t := range m { - nodeMetaMap[s] = t.(string) - } - queryOpts.NodeMeta = nodeMetaMap - } - - if v, ok := d.GetOk(queryOptToken); ok { - queryOpts.Token = v.(string) - } - - if v, ok := d.GetOk(queryOptWaitIndex); ok { - queryOpts.WaitIndex = uint64(v.(int)) - } - - if v, ok := d.GetOk(queryOptWaitTime); ok { - d, _ := time.ParseDuration(v.(string)) - queryOpts.WaitTime = d - } - - return queryOpts, nil -} 
diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go deleted file mode 100644 index 6636060a8..000000000 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ /dev/null @@ -1,139 +0,0 @@ -package consul - -import ( - "fmt" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulAgentService() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulAgentServiceCreate, - Update: resourceConsulAgentServiceCreate, - Read: resourceConsulAgentServiceRead, - Delete: resourceConsulAgentServiceDelete, - - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, - }, - }, - } -} - -func resourceConsulAgentServiceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - agent := client.Agent() - - name := d.Get("name").(string) - registration := consulapi.AgentServiceRegistration{Name: name} - - if address, ok := d.GetOk("address"); ok { - registration.Address = address.(string) - } - - if port, ok := d.GetOk("port"); ok { - registration.Port = port.(int) - } - - if v, ok := d.GetOk("tags"); ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - registration.Tags = s - } - - if err := agent.ServiceRegister(®istration); err != nil { - return fmt.Errorf("Failed to register service '%s' with Consul agent: %v", name, err) - } - - // Update the 
resource - if serviceMap, err := agent.Services(); err != nil { - return fmt.Errorf("Failed to read services from Consul agent: %v", err) - } else if service, ok := serviceMap[name]; !ok { - return fmt.Errorf("Failed to read service '%s' from Consul agent: %v", name, err) - } else { - d.Set("address", service.Address) - d.Set("id", service.ID) - d.SetId(service.ID) - d.Set("name", service.Service) - d.Set("port", service.Port) - tags := make([]string, 0, len(service.Tags)) - for _, tag := range service.Tags { - tags = append(tags, tag) - } - d.Set("tags", tags) - } - - return nil -} - -func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - agent := client.Agent() - - name := d.Get("name").(string) - - if services, err := agent.Services(); err != nil { - return fmt.Errorf("Failed to get services from Consul agent: %v", err) - } else if service, ok := services[name]; !ok { - d.Set("id", "") - } else { - d.Set("address", service.Address) - d.Set("id", service.ID) - d.SetId(service.ID) - d.Set("name", service.Service) - d.Set("port", service.Port) - tags := make([]string, 0, len(service.Tags)) - for _, tag := range service.Tags { - tags = append(tags, tag) - } - d.Set("tags", tags) - } - - return nil -} - -func resourceConsulAgentServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Agent() - - id := d.Get("id").(string) - - if err := catalog.ServiceDeregister(id); err != nil { - return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", id, err) - } - - // Clear the ID - d.SetId("") - return nil -} diff --git a/builtin/providers/consul/resource_consul_agent_service_test.go b/builtin/providers/consul/resource_consul_agent_service_test.go deleted file mode 100644 index 5150c4e85..000000000 --- a/builtin/providers/consul/resource_consul_agent_service_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package consul - -import ( - 
"fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccConsulAgentService_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() {}, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulAgentServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulAgentServiceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulAgentServiceExists(), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "address", "www.google.com"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "id", "google"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "name", "google"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "port", "80"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.#", "2"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.0", "tag0"), - testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.1", "tag1"), - ), - }, - }, - }) -} - -func testAccCheckConsulAgentServiceDestroy(s *terraform.State) error { - agent := testAccProvider.Meta().(*consulapi.Client).Agent() - services, err := agent.Services() - if err != nil { - return fmt.Errorf("Could not retrieve services: %#v", err) - } - _, ok := services["google"] - if ok { - return fmt.Errorf("Service still exists: %#v", "google") - } - return nil -} - -func testAccCheckConsulAgentServiceExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - agent := testAccProvider.Meta().(*consulapi.Client).Agent() - services, err := agent.Services() - if err != nil { - return err - } - _, ok := services["google"] - if !ok { - return fmt.Errorf("Service does not exist: %#v", "google") - } - return nil - } -} - -func testAccCheckConsulAgentServiceValue(n, attr, val string) resource.TestCheckFunc 
{ - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - } - if val != "" && out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - if val == "" && out == "" { - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -const testAccConsulAgentServiceConfig = ` -resource "consul_agent_service" "app" { - address = "www.google.com" - name = "google" - port = 80 - tags = ["tag0", "tag1"] -} -` diff --git a/builtin/providers/consul/resource_consul_catalog_entry.go b/builtin/providers/consul/resource_consul_catalog_entry.go deleted file mode 100644 index cfdc830db..000000000 --- a/builtin/providers/consul/resource_consul_catalog_entry.go +++ /dev/null @@ -1,273 +0,0 @@ -package consul - -import ( - "bytes" - "fmt" - "sort" - "strings" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulCatalogEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulCatalogEntryCreate, - Update: resourceConsulCatalogEntryCreate, - Read: resourceConsulCatalogEntryRead, - Delete: resourceConsulCatalogEntryDelete, - - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "node": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "service": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - ForceNew: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: resourceConsulCatalogEntryServiceTagsHash, - }, - }, - }, - Set: resourceConsulCatalogEntryServicesHash, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceConsulCatalogEntryServiceTagsHash(v interface{}) int { - return hashcode.String(v.(string)) -} - -func resourceConsulCatalogEntryServicesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["address"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) - if v, ok := m["tags"]; ok { - vs := v.(*schema.Set).List() - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - return hashcode.String(buf.String()) -} - -func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } else { - var err error - if dc, err = getDC(d, client); err != nil { - return err - } - } - - var token string - if v, ok := d.GetOk("token"); ok { - token = v.(string) - } - - // Setup the operations using the datacenter - wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} - - address := d.Get("address").(string) - 
node := d.Get("node").(string) - - var serviceIDs []string - if service, ok := d.GetOk("service"); ok { - serviceList := service.(*schema.Set).List() - serviceIDs = make([]string, len(serviceList)) - for i, rawService := range serviceList { - serviceData := rawService.(map[string]interface{}) - - if len(serviceData["id"].(string)) == 0 { - serviceData["id"] = serviceData["name"].(string) - } - serviceID := serviceData["id"].(string) - serviceIDs[i] = serviceID - - var tags []string - if v := serviceData["tags"].(*schema.Set).List(); len(v) > 0 { - tags = make([]string, len(v)) - for i, raw := range v { - tags[i] = raw.(string) - } - } - - registration := &consulapi.CatalogRegistration{ - Address: address, - Datacenter: dc, - Node: node, - Service: &consulapi.AgentService{ - Address: serviceData["address"].(string), - ID: serviceID, - Service: serviceData["name"].(string), - Port: serviceData["port"].(int), - Tags: tags, - }, - } - - if _, err := catalog.Register(registration, &wOpts); err != nil { - return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", - node, address, dc, err) - } - } - } else { - registration := &consulapi.CatalogRegistration{ - Address: address, - Datacenter: dc, - Node: node, - } - - if _, err := catalog.Register(registration, &wOpts); err != nil { - return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", - node, address, dc, err) - } - } - - // Update the resource - qOpts := consulapi.QueryOptions{Datacenter: dc} - if _, _, err := catalog.Node(node, &qOpts); err != nil { - return fmt.Errorf("Failed to read Consul catalog entry for node '%s' at address '%s' in %s: %v", - node, address, dc, err) - } else { - d.Set("datacenter", dc) - } - - sort.Strings(serviceIDs) - serviceIDsJoined := strings.Join(serviceIDs, ",") - - d.SetId(fmt.Sprintf("%s-%s-[%s]", node, address, serviceIDsJoined)) - - return nil -} - -func resourceConsulCatalogEntryRead(d 
*schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - // Get the DC, error if not available. - var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } - - node := d.Get("node").(string) - - // Setup the operations using the datacenter - qOpts := consulapi.QueryOptions{Datacenter: dc} - - if _, _, err := catalog.Node(node, &qOpts); err != nil { - return fmt.Errorf("Failed to get node '%s' from Consul catalog: %v", node, err) - } - - return nil -} - -func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } else { - var err error - if dc, err = getDC(d, client); err != nil { - return err - } - } - - var token string - if v, ok := d.GetOk("token"); ok { - token = v.(string) - } - - // Setup the operations using the datacenter - wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} - - address := d.Get("address").(string) - node := d.Get("node").(string) - - deregistration := consulapi.CatalogDeregistration{ - Address: address, - Datacenter: dc, - Node: node, - } - - if _, err := catalog.Deregister(&deregistration, &wOpts); err != nil { - return fmt.Errorf("Failed to deregister Consul catalog entry with node '%s' at address '%s' in %s: %v", - node, address, dc, err) - } - - // Clear the ID - d.SetId("") - return nil -} diff --git a/builtin/providers/consul/resource_consul_catalog_entry_test.go b/builtin/providers/consul/resource_consul_catalog_entry_test.go deleted file mode 100644 index 0a28b675c..000000000 --- a/builtin/providers/consul/resource_consul_catalog_entry_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - 
-func TestAccConsulCatalogEntry_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() {}, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulCatalogEntryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulCatalogEntryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulCatalogEntryExists(), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "address", "127.0.0.1"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "node", "bastion"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.#", "1"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.address", "www.google.com"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.id", "google1"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.name", "google"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.port", "80"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.#", "2"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.2154398732", "tag0"), - testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.4151227546", "tag1"), - ), - }, - }, - }) -} - -func testAccCheckConsulCatalogEntryDestroy(s *terraform.State) error { - catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() - qOpts := consulapi.QueryOptions{} - services, _, err := catalog.Services(&qOpts) - if err != nil { - return fmt.Errorf("Could not retrieve services: %#v", err) - } - _, ok := services["google"] - if ok { - return fmt.Errorf("Service still exists: %#v", "google") - } - return nil -} - -func testAccCheckConsulCatalogEntryExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - catalog := 
testAccProvider.Meta().(*consulapi.Client).Catalog() - qOpts := consulapi.QueryOptions{} - services, _, err := catalog.Services(&qOpts) - if err != nil { - return err - } - _, ok := services["google"] - if !ok { - return fmt.Errorf("Service does not exist: %#v", "google") - } - return nil - } -} - -func testAccCheckConsulCatalogEntryValue(n, attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - } - if val != "" && out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - if val == "" && out == "" { - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -const testAccConsulCatalogEntryConfig = ` -resource "consul_catalog_entry" "app" { - address = "127.0.0.1" - node = "bastion" - service = { - address = "www.google.com" - id = "google1" - name = "google" - port = 80 - tags = ["tag0", "tag1"] - } -} -` diff --git a/builtin/providers/consul/resource_consul_key_prefix.go b/builtin/providers/consul/resource_consul_key_prefix.go deleted file mode 100644 index f16460309..000000000 --- a/builtin/providers/consul/resource_consul_key_prefix.go +++ /dev/null @@ -1,221 +0,0 @@ -package consul - -import ( - "fmt" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulKeyPrefix() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulKeyPrefixCreate, - Update: resourceConsulKeyPrefixUpdate, - Read: resourceConsulKeyPrefixRead, - Delete: resourceConsulKeyPrefixDelete, - - Schema: map[string]*schema.Schema{ - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, 
- Optional: true, - }, - - "path_prefix": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "subkeys": &schema.Schema{ - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func resourceConsulKeyPrefixCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - pathPrefix := d.Get("path_prefix").(string) - subKeys := map[string]string{} - for k, vI := range d.Get("subkeys").(map[string]interface{}) { - subKeys[k] = vI.(string) - } - - // To reduce the impact of mistakes, we will only "create" a prefix that - // is currently empty. This way we are less likely to accidentally - // conflict with other mechanisms managing the same prefix. - currentSubKeys, err := keyClient.GetUnderPrefix(pathPrefix) - if err != nil { - return err - } - if len(currentSubKeys) > 0 { - return fmt.Errorf( - "%d keys already exist under %s; delete them before managing this prefix with Terraform", - len(currentSubKeys), pathPrefix, - ) - } - - // Ideally we'd use d.Partial(true) here so we can correctly record - // a partial write, but that mechanism doesn't work for individual map - // members, so we record that the resource was created before we - // do anything and that way we can recover from errors by doing an - // Update on subsequent runs, rather than re-attempting Create with - // some keys possibly already present. - d.SetId(pathPrefix) - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - // Now we can just write in all the initial values, since we can expect - // that nothing should need deleting yet, as long as there isn't some - // other program racing us to write values... 
which we'll catch on a - // subsequent Read. - for k, v := range subKeys { - fullPath := pathPrefix + k - err := keyClient.Put(fullPath, v) - if err != nil { - return fmt.Errorf("error while writing %s: %s", fullPath, err) - } - } - - return nil -} - -func resourceConsulKeyPrefixUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - pathPrefix := d.Id() - - if d.HasChange("subkeys") { - o, n := d.GetChange("subkeys") - if o == nil { - o = map[string]interface{}{} - } - if n == nil { - n = map[string]interface{}{} - } - - om := o.(map[string]interface{}) - nm := n.(map[string]interface{}) - - // First we'll write all of the stuff in the "new map" nm, - // and then we'll delete any keys that appear in the "old map" om - // and do not also appear in nm. This ordering means that if a subkey - // name is changed we will briefly have both the old and new names in - // Consul, as opposed to briefly having neither. - - // Again, we'd ideally use d.Partial(true) here but it doesn't work - // for maps and so we'll just rely on a subsequent Read to tidy up - // after a partial write. 
- - // Write new and changed keys - for k, vI := range nm { - v := vI.(string) - fullPath := pathPrefix + k - err := keyClient.Put(fullPath, v) - if err != nil { - return fmt.Errorf("error while writing %s: %s", fullPath, err) - } - } - - // Remove deleted keys - for k, _ := range om { - if _, exists := nm[k]; exists { - continue - } - fullPath := pathPrefix + k - err := keyClient.Delete(fullPath) - if err != nil { - return fmt.Errorf("error while deleting %s: %s", fullPath, err) - } - } - - } - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - return nil -} - -func resourceConsulKeyPrefixRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - pathPrefix := d.Id() - - subKeys, err := keyClient.GetUnderPrefix(pathPrefix) - if err != nil { - return err - } - - d.Set("subkeys", subKeys) - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - return nil -} - -func resourceConsulKeyPrefixDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - pathPrefix := d.Id() - - // Delete everything under our prefix, since the entire set of keys under - // the given prefix is considered to be managed exclusively by Terraform. 
- err = keyClient.DeleteUnderPrefix(pathPrefix) - if err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/consul/resource_consul_key_prefix_test.go b/builtin/providers/consul/resource_consul_key_prefix_test.go deleted file mode 100644 index e33fb13da..000000000 --- a/builtin/providers/consul/resource_consul_key_prefix_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccConsulKeyPrefix_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testAccCheckConsulKeyPrefixKeyAbsent("species"), - testAccCheckConsulKeyPrefixKeyAbsent("meat"), - testAccCheckConsulKeyPrefixKeyAbsent("cheese"), - testAccCheckConsulKeyPrefixKeyAbsent("bread"), - ), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulKeyPrefixConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulKeyPrefixKeyValue("cheese", "chevre"), - testAccCheckConsulKeyPrefixKeyValue("bread", "baguette"), - testAccCheckConsulKeyPrefixKeyAbsent("species"), - testAccCheckConsulKeyPrefixKeyAbsent("meat"), - ), - }, - resource.TestStep{ - Config: testAccConsulKeyPrefixConfig, - ExpectNonEmptyPlan: true, - Check: resource.ComposeTestCheckFunc( - // This will add a rogue key that Terraform isn't - // expecting, causing a non-empty plan that wants - // to remove it. 
- testAccAddConsulKeyPrefixRogue("species", "gorilla"), - ), - }, - resource.TestStep{ - Config: testAccConsulKeyPrefixConfig_Update, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulKeyPrefixKeyValue("meat", "ham"), - testAccCheckConsulKeyPrefixKeyValue("bread", "batard"), - testAccCheckConsulKeyPrefixKeyAbsent("cheese"), - testAccCheckConsulKeyPrefixKeyAbsent("species"), - ), - }, - resource.TestStep{ - Config: testAccConsulKeyPrefixConfig_Update, - ExpectNonEmptyPlan: true, - Check: resource.ComposeTestCheckFunc( - testAccAddConsulKeyPrefixRogue("species", "gorilla"), - ), - }, - }, - }) -} - -func testAccCheckConsulKeyPrefixDestroy(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pair, _, err := kv.Get("test/set", opts) - if err != nil { - return err - } - if pair != nil { - return fmt.Errorf("Key still exists: %#v", pair) - } - return nil -} - -func testAccCheckConsulKeyPrefixKeyAbsent(name string) resource.TestCheckFunc { - fullName := "prefix_test/" + name - return func(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pair, _, err := kv.Get(fullName, opts) - if err != nil { - return err - } - if pair != nil { - return fmt.Errorf("key '%s' exists, but shouldn't", fullName) - } - return nil - } -} - -// This one is actually not a check, but rather a mutation step. It writes -// a value directly into Consul, bypassing our Terraform resource. 
-func testAccAddConsulKeyPrefixRogue(name, value string) resource.TestCheckFunc { - fullName := "prefix_test/" + name - return func(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.WriteOptions{Datacenter: "dc1"} - pair := &consulapi.KVPair{ - Key: fullName, - Value: []byte(value), - } - _, err := kv.Put(pair, opts) - return err - } -} - -func testAccCheckConsulKeyPrefixKeyValue(name, value string) resource.TestCheckFunc { - fullName := "prefix_test/" + name - return func(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pair, _, err := kv.Get(fullName, opts) - if err != nil { - return err - } - if pair == nil { - return fmt.Errorf("key %v doesn't exist, but should", fullName) - } - if string(pair.Value) != value { - return fmt.Errorf("key %v has value %v; want %v", fullName, pair.Value, value) - } - return nil - } -} - -const testAccConsulKeyPrefixConfig = ` -resource "consul_key_prefix" "app" { - datacenter = "dc1" - - path_prefix = "prefix_test/" - - subkeys = { - cheese = "chevre" - bread = "baguette" - } -} -` - -const testAccConsulKeyPrefixConfig_Update = ` -resource "consul_key_prefix" "app" { - datacenter = "dc1" - - path_prefix = "prefix_test/" - - subkeys = { - bread = "batard" - meat = "ham" - } -} -` diff --git a/builtin/providers/consul/resource_consul_keys.go b/builtin/providers/consul/resource_consul_keys.go deleted file mode 100644 index bc91e6e7f..000000000 --- a/builtin/providers/consul/resource_consul_keys.go +++ /dev/null @@ -1,340 +0,0 @@ -package consul - -import ( - "fmt" - "strconv" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulKeys() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulKeysCreate, - Update: resourceConsulKeysUpdate, - Read: resourceConsulKeysRead, - Delete: resourceConsulKeysDelete, - - 
SchemaVersion: 1, - MigrateState: resourceConsulKeysMigrateState, - - Schema: map[string]*schema.Schema{ - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "key": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Deprecated: "Using consul_keys resource to *read* is deprecated; please use consul_keys data source instead", - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "default": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - - "var": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func resourceConsulKeysCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - keys := d.Get("key").(*schema.Set).List() - for _, raw := range keys { - _, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - value := sub["value"].(string) - if value == "" { - continue - } - - if err := keyClient.Put(path, value); err != nil { - return err - } - } - - // The ID doesn't matter, since we use provider config, datacenter, - // and key paths to address consul properly. So we just need to fill it in - // with some value to indicate the resource has been created. 
- d.SetId("consul") - - return resourceConsulKeysRead(d, meta) -} - -func resourceConsulKeysUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - if d.HasChange("key") { - o, n := d.GetChange("key") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - // We'll keep track of what keys we add so that if a key is - // in both the "remove" and "add" sets -- which will happen if - // its value is changed in-place -- we will avoid writing the - // value and then immediately removing it. - addedPaths := make(map[string]bool) - - // We add before we remove because then it's possible to change - // a key name (which will result in both an add and a remove) - // without very temporarily having *neither* value in the store. - // Instead, both will briefly be present, which should be less - // disruptive in most cases. - for _, raw := range add { - _, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - value := sub["value"].(string) - if value == "" { - continue - } - - if err := keyClient.Put(path, value); err != nil { - return err - } - addedPaths[path] = true - } - - for _, raw := range remove { - _, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - // Don't delete something we've just added. - // (See explanation at the declaration of this variable above.) 
- if addedPaths[path] { - continue - } - - shouldDelete, ok := sub["delete"].(bool) - if !ok || !shouldDelete { - continue - } - - if err := keyClient.Delete(path); err != nil { - return err - } - } - } - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - return resourceConsulKeysRead(d, meta) -} - -func resourceConsulKeysRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - vars := make(map[string]string) - - keys := d.Get("key").(*schema.Set).List() - for _, raw := range keys { - key, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - value, err := keyClient.Get(path) - if err != nil { - return err - } - - value = attributeValue(sub, value) - if key != "" { - // If key is set then we'll update vars, for backward-compatibilty - // with the pre-0.7 capability to read from Consul with this - // resource. - vars[key] = value - } - - // If there is already a "value" attribute present for this key - // then it was created as a "write" block. We need to update the - // given value within the block itself so that Terraform can detect - // when the Consul-stored value has drifted from what was most - // recently written by Terraform. - // We don't do this for "read" blocks; that causes confusing diffs - // because "value" should not be set for read-only key blocks. 
- if oldValue := sub["value"]; oldValue != "" { - sub["value"] = value - } - } - - if err := d.Set("var", vars); err != nil { - return err - } - if err := d.Set("key", keys); err != nil { - return err - } - - // Store the datacenter on this resource, which can be helpful for reference - // in case it was read from the provider - d.Set("datacenter", dc) - - return nil -} - -func resourceConsulKeysDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - kv := client.KV() - token := d.Get("token").(string) - dc, err := getDC(d, client) - if err != nil { - return err - } - - keyClient := newKeyClient(kv, dc, token) - - // Clean up any keys that we're explicitly managing - keys := d.Get("key").(*schema.Set).List() - for _, raw := range keys { - _, path, sub, err := parseKey(raw) - if err != nil { - return err - } - - // Skip if the key is non-managed - shouldDelete, ok := sub["delete"].(bool) - if !ok || !shouldDelete { - continue - } - - if err := keyClient.Delete(path); err != nil { - return err - } - } - - // Clear the ID - d.SetId("") - return nil -} - -// parseKey is used to parse a key into a name, path, config or error -func parseKey(raw interface{}) (string, string, map[string]interface{}, error) { - sub, ok := raw.(map[string]interface{}) - if !ok { - return "", "", nil, fmt.Errorf("Failed to unroll: %#v", raw) - } - - key := sub["name"].(string) - - path, ok := sub["path"].(string) - if !ok { - return "", "", nil, fmt.Errorf("Failed to get path for key '%s'", key) - } - return key, path, sub, nil -} - -// attributeValue determines the value for a key, potentially -// using a default value if provided. 
-func attributeValue(sub map[string]interface{}, readValue string) string { - // Use the value if given - if readValue != "" { - return readValue - } - - // Use a default if given - if raw, ok := sub["default"]; ok { - switch def := raw.(type) { - case string: - return def - case bool: - return strconv.FormatBool(def) - } - } - - // No value - return "" -} - -// getDC is used to get the datacenter of the local agent -func getDC(d *schema.ResourceData, client *consulapi.Client) (string, error) { - if v, ok := d.GetOk("datacenter"); ok { - return v.(string), nil - } - info, err := client.Agent().Self() - if err != nil { - return "", fmt.Errorf("Failed to get datacenter from Consul agent: %v", err) - } - return info["Config"]["Datacenter"].(string), nil -} diff --git a/builtin/providers/consul/resource_consul_keys_migrate.go b/builtin/providers/consul/resource_consul_keys_migrate.go deleted file mode 100644 index 2aa62f890..000000000 --- a/builtin/providers/consul/resource_consul_keys_migrate.go +++ /dev/null @@ -1,92 +0,0 @@ -package consul - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func resourceConsulKeysMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - log.Println("[INFO] Found consul_keys State v0; migrating to v1") - return resourceConsulKeysMigrateStateV0toV1(is) - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func resourceConsulKeysMigrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() || is.Attributes == nil { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - res := resourceConsulKeys() - keys, err := readV0Keys(is, res) - if err != nil { - return is, err - } - if err := clearV0Keys(is); err != 
nil { - return is, err - } - if err := writeV1Keys(is, res, keys); err != nil { - return is, err - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func readV0Keys( - is *terraform.InstanceState, - res *schema.Resource, -) (*schema.Set, error) { - reader := &schema.MapFieldReader{ - Schema: res.Schema, - Map: schema.BasicMapReader(is.Attributes), - } - result, err := reader.ReadField([]string{"key"}) - if err != nil { - return nil, err - } - - oldKeys, ok := result.Value.(*schema.Set) - if !ok { - return nil, fmt.Errorf("Got unexpected value from state: %#v", result.Value) - } - return oldKeys, nil -} - -func clearV0Keys(is *terraform.InstanceState) error { - for k := range is.Attributes { - if strings.HasPrefix(k, "key.") { - delete(is.Attributes, k) - } - } - return nil -} - -func writeV1Keys( - is *terraform.InstanceState, - res *schema.Resource, - keys *schema.Set, -) error { - writer := schema.MapFieldWriter{ - Schema: res.Schema, - } - if err := writer.WriteField([]string{"key"}, keys); err != nil { - return err - } - for k, v := range writer.Map() { - is.Attributes[k] = v - } - - return nil -} diff --git a/builtin/providers/consul/resource_consul_keys_migrate_test.go b/builtin/providers/consul/resource_consul_keys_migrate_test.go deleted file mode 100644 index e1935b524..000000000 --- a/builtin/providers/consul/resource_consul_keys_migrate_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package consul - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestConsulKeysMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0.6.9 and earlier, with old values hash function": { - StateVersion: 0, - Attributes: map[string]string{ - "key.#": "2", - "key.12345.name": "hello", - "key.12345.path": "foo/bar", - "key.12345.value": "world", - "key.12345.default": "", - "key.12345.delete": 
"false", - "key.6789.name": "temp", - "key.6789.path": "foo/foo", - "key.6789.value": "", - "key.6789.default": "", - "key.6789.delete": "true", - }, - Expected: map[string]string{ - "key.#": "2", - "key.2401383718.default": "", - "key.2401383718.delete": "true", - "key.2401383718.name": "temp", - "key.2401383718.path": "foo/foo", - "key.2401383718.value": "", - "key.3116955509.path": "foo/bar", - "key.3116955509.default": "", - "key.3116955509.delete": "false", - "key.3116955509.name": "hello", - "key.3116955509.value": "world", - }, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "consul", - Attributes: tc.Attributes, - } - is, err := resourceConsulKeys().MigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestConsulKeysMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceConsulKeys().MigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceConsulKeys().MigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/consul/resource_consul_keys_test.go b/builtin/providers/consul/resource_consul_keys_test.go deleted file mode 100644 index f6045658e..000000000 --- a/builtin/providers/consul/resource_consul_keys_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccConsulKeys_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulKeysDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulKeysConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulKeysExists(), - testAccCheckConsulKeysValue("consul_keys.app", "enabled", "true"), - testAccCheckConsulKeysValue("consul_keys.app", "set", "acceptance"), - testAccCheckConsulKeysValue("consul_keys.app", "remove_one", "hello"), - ), - }, - resource.TestStep{ - Config: testAccConsulKeysConfig_Update, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulKeysExists(), - testAccCheckConsulKeysValue("consul_keys.app", "enabled", "true"), - testAccCheckConsulKeysValue("consul_keys.app", "set", "acceptanceUpdated"), - testAccCheckConsulKeysRemoved("consul_keys.app", "remove_one"), - ), - }, - }, - }) -} - -func testAccCheckConsulKeysDestroy(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pair, _, err := kv.Get("test/set", opts) - if err != nil { - return err - } - if pair != nil { - return fmt.Errorf("Key still exists: %#v", pair) - } - return nil -} - -func testAccCheckConsulKeysExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - kv := testAccProvider.Meta().(*consulapi.Client).KV() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pair, _, err := kv.Get("test/set", opts) - if err != nil { - return err - } - if pair == nil { - return fmt.Errorf("Key 'test/set' does not exist") - } - return nil - } -} - -func testAccCheckConsulKeysValue(n, attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes["var."+attr] - if !ok { - return fmt.Errorf("Attribute 
'%s' not found: %#v", attr, rn.Primary.Attributes) - } - if val != "" && out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - if val == "" && out == "" { - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -func testAccCheckConsulKeysRemoved(n, attr string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - _, ok = rn.Primary.Attributes["var."+attr] - if ok { - return fmt.Errorf("Attribute '%s' still present: %#v", attr, rn.Primary.Attributes) - } - return nil - } -} - -const testAccConsulKeysConfig = ` -resource "consul_keys" "app" { - datacenter = "dc1" - key { - name = "enabled" - path = "test/enabled" - default = "true" - } - key { - name = "set" - path = "test/set" - value = "acceptance" - delete = true - } - key { - name = "remove_one" - path = "test/remove_one" - value = "hello" - delete = true - } -} -` - -const testAccConsulKeysConfig_Update = ` -resource "consul_keys" "app" { - datacenter = "dc1" - key { - name = "enabled" - path = "test/enabled" - default = "true" - } - key { - name = "set" - path = "test/set" - value = "acceptanceUpdated" - delete = true - } -} -` diff --git a/builtin/providers/consul/resource_consul_node.go b/builtin/providers/consul/resource_consul_node.go deleted file mode 100644 index c81544ccb..000000000 --- a/builtin/providers/consul/resource_consul_node.go +++ /dev/null @@ -1,156 +0,0 @@ -package consul - -import ( - "fmt" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulNode() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulNodeCreate, - Update: resourceConsulNodeCreate, - Read: resourceConsulNodeRead, - Delete: resourceConsulNodeDelete, - - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceConsulNodeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } else { - var err error - if dc, err = getDC(d, client); err != nil { - return err - } - } - - var token string - if v, ok := d.GetOk("token"); ok { - token = v.(string) - } - - // Setup the operations using the datacenter - wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} - - address := d.Get("address").(string) - name := d.Get("name").(string) - - registration := &consulapi.CatalogRegistration{ - Address: address, - Datacenter: dc, - Node: name, - } - - if _, err := catalog.Register(registration, &wOpts); err != nil { - return fmt.Errorf("Failed to register Consul catalog node with name '%s' at address '%s' in %s: %v", - name, address, dc, err) - } - - // Update the resource - qOpts := consulapi.QueryOptions{Datacenter: dc} - if _, _, err := catalog.Node(name, &qOpts); err != nil { - return fmt.Errorf("Failed to read Consul catalog node with name '%s' at address '%s' in %s: %v", - name, address, dc, err) - } else { - d.Set("datacenter", dc) - } - - d.SetId(fmt.Sprintf("%s-%s", name, address)) - - return nil -} - -func resourceConsulNodeRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - // Get the DC, error if not available. 
- var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } - - name := d.Get("name").(string) - - // Setup the operations using the datacenter - qOpts := consulapi.QueryOptions{Datacenter: dc} - - if _, _, err := catalog.Node(name, &qOpts); err != nil { - return fmt.Errorf("Failed to get name '%s' from Consul catalog: %v", name, err) - } - - return nil -} - -func resourceConsulNodeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Catalog() - - var dc string - if v, ok := d.GetOk("datacenter"); ok { - dc = v.(string) - } else { - var err error - if dc, err = getDC(d, client); err != nil { - return err - } - } - - var token string - if v, ok := d.GetOk("token"); ok { - token = v.(string) - } - - // Setup the operations using the datacenter - wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} - - address := d.Get("address").(string) - name := d.Get("name").(string) - - deregistration := consulapi.CatalogDeregistration{ - Address: address, - Datacenter: dc, - Node: name, - } - - if _, err := catalog.Deregister(&deregistration, &wOpts); err != nil { - return fmt.Errorf("Failed to deregister Consul catalog node with name '%s' at address '%s' in %s: %v", - name, address, dc, err) - } - - // Clear the ID - d.SetId("") - return nil -} diff --git a/builtin/providers/consul/resource_consul_node_test.go b/builtin/providers/consul/resource_consul_node_test.go deleted file mode 100644 index 9cb62a4f6..000000000 --- a/builtin/providers/consul/resource_consul_node_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccConsulNode_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() {}, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulNodeDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccConsulNodeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulNodeExists(), - testAccCheckConsulNodeValue("consul_catalog_entry.foo", "address", "127.0.0.1"), - testAccCheckConsulNodeValue("consul_catalog_entry.foo", "node", "foo"), - ), - }, - }, - }) -} - -func testAccCheckConsulNodeDestroy(s *terraform.State) error { - catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() - qOpts := consulapi.QueryOptions{} - nodes, _, err := catalog.Nodes(&qOpts) - if err != nil { - return fmt.Errorf("Could not retrieve services: %#v", err) - } - for i := range nodes { - if nodes[i].Node == "foo" { - return fmt.Errorf("Node still exists: %#v", "foo") - } - } - return nil -} - -func testAccCheckConsulNodeExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() - qOpts := consulapi.QueryOptions{} - nodes, _, err := catalog.Nodes(&qOpts) - if err != nil { - return err - } - for i := range nodes { - if nodes[i].Node == "foo" { - return nil - } - } - return fmt.Errorf("Service does not exist: %#v", "google") - } -} - -func testAccCheckConsulNodeValue(n, attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - } - if val != "" && out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - if val == "" && out == "" { - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -const testAccConsulNodeConfig = ` -resource "consul_catalog_entry" "foo" { - address = "127.0.0.1" - node = "foo" -} -` diff --git a/builtin/providers/consul/resource_consul_prepared_query.go 
b/builtin/providers/consul/resource_consul_prepared_query.go deleted file mode 100644 index 28ff2bf82..000000000 --- a/builtin/providers/consul/resource_consul_prepared_query.go +++ /dev/null @@ -1,271 +0,0 @@ -package consul - -import ( - "strings" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulPreparedQuery() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulPreparedQueryCreate, - Update: resourceConsulPreparedQueryUpdate, - Read: resourceConsulPreparedQueryRead, - Delete: resourceConsulPreparedQueryDelete, - - SchemaVersion: 0, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "session": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "stored_token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "near": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "only_passing": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "failover": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "nearest_n": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "datacenters": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "dns": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: 
&schema.Resource{ - Schema: map[string]*schema.Schema{ - "ttl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "template": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "regexp": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceConsulPreparedQueryCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - wo := &consulapi.WriteOptions{ - Datacenter: d.Get("datacenter").(string), - Token: d.Get("token").(string), - } - - pq := preparedQueryDefinitionFromResourceData(d) - - id, _, err := client.PreparedQuery().Create(pq, wo) - if err != nil { - return err - } - - d.SetId(id) - return resourceConsulPreparedQueryRead(d, meta) -} - -func resourceConsulPreparedQueryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - wo := &consulapi.WriteOptions{ - Datacenter: d.Get("datacenter").(string), - Token: d.Get("token").(string), - } - - pq := preparedQueryDefinitionFromResourceData(d) - - if _, err := client.PreparedQuery().Update(pq, wo); err != nil { - return err - } - - return resourceConsulPreparedQueryRead(d, meta) -} - -func resourceConsulPreparedQueryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - qo := &consulapi.QueryOptions{ - Datacenter: d.Get("datacenter").(string), - Token: d.Get("token").(string), - } - - queries, _, err := client.PreparedQuery().Get(d.Id(), qo) - if err != nil { - // Check for a 404/not found, these are returned as errors. 
- if strings.Contains(err.Error(), "not found") { - d.SetId("") - return nil - } - return err - } - - if len(queries) != 1 { - d.SetId("") - return nil - } - pq := queries[0] - - d.Set("name", pq.Name) - d.Set("session", pq.Session) - d.Set("stored_token", pq.Token) - d.Set("service", pq.Service.Service) - d.Set("near", pq.Service.Near) - d.Set("only_passing", pq.Service.OnlyPassing) - d.Set("tags", pq.Service.Tags) - - if pq.Service.Failover.NearestN > 0 { - d.Set("failover.0.nearest_n", pq.Service.Failover.NearestN) - } - if len(pq.Service.Failover.Datacenters) > 0 { - d.Set("failover.0.datacenters", pq.Service.Failover.Datacenters) - } - - if pq.DNS.TTL != "" { - d.Set("dns.0.ttl", pq.DNS.TTL) - } - - if pq.Template.Type != "" { - d.Set("template.0.type", pq.Template.Type) - d.Set("template.0.regexp", pq.Template.Regexp) - } - - return nil -} - -func resourceConsulPreparedQueryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - writeOpts := &consulapi.WriteOptions{ - Datacenter: d.Get("datacenter").(string), - Token: d.Get("token").(string), - } - - if _, err := client.PreparedQuery().Delete(d.Id(), writeOpts); err != nil { - return err - } - - d.SetId("") - return nil -} - -func preparedQueryDefinitionFromResourceData(d *schema.ResourceData) *consulapi.PreparedQueryDefinition { - pq := &consulapi.PreparedQueryDefinition{ - ID: d.Id(), - Name: d.Get("name").(string), - Session: d.Get("session").(string), - Token: d.Get("stored_token").(string), - Service: consulapi.ServiceQuery{ - Service: d.Get("service").(string), - Near: d.Get("near").(string), - OnlyPassing: d.Get("only_passing").(bool), - }, - } - - tags := d.Get("tags").(*schema.Set).List() - pq.Service.Tags = make([]string, len(tags)) - for i, v := range tags { - pq.Service.Tags[i] = v.(string) - } - - if _, ok := d.GetOk("failover.0"); ok { - failover := consulapi.QueryDatacenterOptions{ - NearestN: d.Get("failover.0.nearest_n").(int), - } - - dcs := 
d.Get("failover.0.datacenters").([]interface{}) - failover.Datacenters = make([]string, len(dcs)) - for i, v := range dcs { - failover.Datacenters[i] = v.(string) - } - - pq.Service.Failover = failover - } - - if _, ok := d.GetOk("template.0"); ok { - pq.Template = consulapi.QueryTemplate{ - Type: d.Get("template.0.type").(string), - Regexp: d.Get("template.0.regexp").(string), - } - } - - if _, ok := d.GetOk("dns.0"); ok { - pq.DNS = consulapi.QueryDNSOptions{ - TTL: d.Get("dns.0.ttl").(string), - } - } - - return pq -} diff --git a/builtin/providers/consul/resource_consul_prepared_query_test.go b/builtin/providers/consul/resource_consul_prepared_query_test.go deleted file mode 100644 index 6b08adaa8..000000000 --- a/builtin/providers/consul/resource_consul_prepared_query_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccConsulPreparedQuery_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulPreparedQueryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulPreparedQueryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulPreparedQueryExists(), - testAccCheckConsulPreparedQueryAttrValue("name", "foo"), - testAccCheckConsulPreparedQueryAttrValue("stored_token", "pq-token"), - testAccCheckConsulPreparedQueryAttrValue("service", "redis"), - testAccCheckConsulPreparedQueryAttrValue("near", "_agent"), - testAccCheckConsulPreparedQueryAttrValue("tags.#", "1"), - testAccCheckConsulPreparedQueryAttrValue("only_passing", "true"), - testAccCheckConsulPreparedQueryAttrValue("failover.0.nearest_n", "3"), - testAccCheckConsulPreparedQueryAttrValue("failover.0.datacenters.#", "2"), - 
testAccCheckConsulPreparedQueryAttrValue("template.0.type", "name_prefix_match"), - testAccCheckConsulPreparedQueryAttrValue("template.0.regexp", "hello"), - testAccCheckConsulPreparedQueryAttrValue("dns.0.ttl", "8m"), - ), - }, - resource.TestStep{ - Config: testAccConsulPreparedQueryConfigUpdate1, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulPreparedQueryExists(), - testAccCheckConsulPreparedQueryAttrValue("name", "baz"), - testAccCheckConsulPreparedQueryAttrValue("stored_token", "pq-token-updated"), - testAccCheckConsulPreparedQueryAttrValue("service", "memcached"), - testAccCheckConsulPreparedQueryAttrValue("near", "node1"), - testAccCheckConsulPreparedQueryAttrValue("tags.#", "2"), - testAccCheckConsulPreparedQueryAttrValue("only_passing", "false"), - testAccCheckConsulPreparedQueryAttrValue("failover.0.nearest_n", "2"), - testAccCheckConsulPreparedQueryAttrValue("failover.0.datacenters.#", "1"), - testAccCheckConsulPreparedQueryAttrValue("template.0.regexp", "goodbye"), - testAccCheckConsulPreparedQueryAttrValue("dns.0.ttl", "16m"), - ), - }, - resource.TestStep{ - Config: testAccConsulPreparedQueryConfigUpdate2, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulPreparedQueryExists(), - testAccCheckConsulPreparedQueryAttrValue("stored_token", ""), - testAccCheckConsulPreparedQueryAttrValue("near", ""), - testAccCheckConsulPreparedQueryAttrValue("tags.#", "0"), - testAccCheckConsulPreparedQueryAttrValue("failover.#", "0"), - testAccCheckConsulPreparedQueryAttrValue("template.#", "0"), - testAccCheckConsulPreparedQueryAttrValue("dns.#", "0"), - ), - }, - }, - }) -} - -func checkPreparedQueryExists(s *terraform.State) bool { - rn, ok := s.RootModule().Resources["consul_prepared_query.foo"] - if !ok { - return false - } - id := rn.Primary.ID - - client := testAccProvider.Meta().(*consulapi.Client).PreparedQuery() - opts := &consulapi.QueryOptions{Datacenter: "dc1"} - pq, _, err := client.Get(id, opts) - return err == nil && pq != nil -} - 
-func testAccCheckConsulPreparedQueryDestroy(s *terraform.State) error { - if checkPreparedQueryExists(s) { - return fmt.Errorf("Prepared query 'foo' still exists") - } - return nil -} - -func testAccCheckConsulPreparedQueryExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - if !checkPreparedQueryExists(s) { - return fmt.Errorf("Prepared query 'foo' does not exist") - } - return nil - } -} - -func testAccCheckConsulPreparedQueryAttrValue(attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources["consul_prepared_query.foo"] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - } - if out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - return nil - } -} - -const testAccConsulPreparedQueryConfig = ` -resource "consul_prepared_query" "foo" { - name = "foo" - token = "client-token" - stored_token = "pq-token" - service = "redis" - tags = ["prod"] - near = "_agent" - only_passing = true - - failover { - nearest_n = 3 - datacenters = ["dc1", "dc2"] - } - - template { - type = "name_prefix_match" - regexp = "hello" - } - - dns { - ttl = "8m" - } -} -` - -const testAccConsulPreparedQueryConfigUpdate1 = ` -resource "consul_prepared_query" "foo" { - name = "baz" - token = "client-token" - stored_token = "pq-token-updated" - service = "memcached" - tags = ["prod","sup"] - near = "node1" - only_passing = false - - failover { - nearest_n = 2 - datacenters = ["dc2"] - } - - template { - type = "name_prefix_match" - regexp = "goodbye" - } - - dns { - ttl = "16m" - } -} -` - -const testAccConsulPreparedQueryConfigUpdate2 = ` -resource "consul_prepared_query" "foo" { - name = "baz" - service = "memcached" - token = "client-token" -} -` diff --git a/builtin/providers/consul/resource_consul_service.go 
b/builtin/providers/consul/resource_consul_service.go deleted file mode 100644 index c66041413..000000000 --- a/builtin/providers/consul/resource_consul_service.go +++ /dev/null @@ -1,154 +0,0 @@ -package consul - -import ( - "fmt" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceConsulService() *schema.Resource { - return &schema.Resource{ - Create: resourceConsulServiceCreate, - Update: resourceConsulServiceCreate, - Read: resourceConsulServiceRead, - Delete: resourceConsulServiceDelete, - - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "service_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, - }, - }, - } -} - -func resourceConsulServiceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - agent := client.Agent() - - name := d.Get("name").(string) - identifier := name - - if serviceId, ok := d.GetOk("service_id"); ok { - identifier = serviceId.(string) - } - - registration := consulapi.AgentServiceRegistration{Name: name, ID: identifier} - - if address, ok := d.GetOk("address"); ok { - registration.Address = address.(string) - } - - if port, ok := d.GetOk("port"); ok { - registration.Port = port.(int) - } - - if v, ok := d.GetOk("tags"); ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - registration.Tags = s - } - - if err := agent.ServiceRegister(®istration); err != nil { - return fmt.Errorf("Failed to register service 
'%s' with Consul agent: %v", name, err) - } - - // Update the resource - if serviceMap, err := agent.Services(); err != nil { - return fmt.Errorf("Failed to read services from Consul agent: %v", err) - } else if service, ok := serviceMap[identifier]; !ok { - return fmt.Errorf("Failed to read service '%s' from Consul agent: %v", identifier, err) - } else { - d.SetId(service.ID) - - d.Set("address", service.Address) - d.Set("service_id", service.ID) - d.Set("name", service.Service) - d.Set("port", service.Port) - tags := make([]string, 0, len(service.Tags)) - for _, tag := range service.Tags { - tags = append(tags, tag) - } - d.Set("tags", tags) - } - - return nil -} - -func resourceConsulServiceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - agent := client.Agent() - - name := d.Get("name").(string) - identifier := name - - if serviceId, ok := d.GetOk("service_id"); ok { - identifier = serviceId.(string) - } - - if services, err := agent.Services(); err != nil { - return fmt.Errorf("Failed to get services from Consul agent: %v", err) - } else if service, ok := services[identifier]; !ok { - return fmt.Errorf("Failed to get service '%s' from Consul agent", identifier) - } else { - d.SetId(service.ID) - - d.Set("address", service.Address) - d.Set("service_id", service.ID) - d.Set("name", service.Service) - d.Set("port", service.Port) - tags := make([]string, 0, len(service.Tags)) - for _, tag := range service.Tags { - tags = append(tags, tag) - } - d.Set("tags", tags) - } - - return nil -} - -func resourceConsulServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*consulapi.Client) - catalog := client.Agent() - - id := d.Get("service_id").(string) - - if err := catalog.ServiceDeregister(id); err != nil { - return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", id, err) - } - - // Clear the ID - d.SetId("") - return nil -} diff --git 
a/builtin/providers/consul/resource_consul_service_test.go b/builtin/providers/consul/resource_consul_service_test.go deleted file mode 100644 index 2ed4405e8..000000000 --- a/builtin/providers/consul/resource_consul_service_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package consul - -import ( - "fmt" - "testing" - - consulapi "github.com/hashicorp/consul/api" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccConsulService_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() {}, - Providers: testAccProviders, - CheckDestroy: testAccCheckConsulServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccConsulServiceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckConsulServiceExists(), - testAccCheckConsulServiceValue("consul_service.app", "address", "www.google.com"), - testAccCheckConsulServiceValue("consul_service.app", "id", "google"), - testAccCheckConsulServiceValue("consul_service.app", "service_id", "google"), - testAccCheckConsulServiceValue("consul_service.app", "name", "google"), - testAccCheckConsulServiceValue("consul_service.app", "port", "80"), - testAccCheckConsulServiceValue("consul_service.app", "tags.#", "2"), - testAccCheckConsulServiceValue("consul_service.app", "tags.0", "tag0"), - testAccCheckConsulServiceValue("consul_service.app", "tags.1", "tag1"), - ), - }, - }, - }) -} - -func testAccCheckConsulServiceDestroy(s *terraform.State) error { - agent := testAccProvider.Meta().(*consulapi.Client).Agent() - services, err := agent.Services() - if err != nil { - return fmt.Errorf("Could not retrieve services: %#v", err) - } - _, ok := services["google"] - if ok { - return fmt.Errorf("Service still exists: %#v", "google") - } - return nil -} - -func testAccCheckConsulServiceExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - agent := testAccProvider.Meta().(*consulapi.Client).Agent() - services, err := 
agent.Services() - if err != nil { - return err - } - _, ok := services["google"] - if !ok { - return fmt.Errorf("Service does not exist: %#v", "google") - } - return nil - } -} - -func testAccCheckConsulServiceValue(n, attr, val string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rn, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found") - } - out, ok := rn.Primary.Attributes[attr] - if !ok { - return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) - } - if val != "" && out != val { - return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) - } - if val == "" && out == "" { - return fmt.Errorf("Attribute '%s' value '%s'", attr, out) - } - return nil - } -} - -const testAccConsulServiceConfig = ` -resource "consul_service" "app" { - address = "www.google.com" - service_id = "google" - name = "google" - port = 80 - tags = ["tag0", "tag1"] -} -` diff --git a/builtin/providers/consul/resource_provider.go b/builtin/providers/consul/resource_provider.go deleted file mode 100644 index dc800e366..000000000 --- a/builtin/providers/consul/resource_provider.go +++ /dev/null @@ -1,102 +0,0 @@ -package consul - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "CONSUL_ADDRESS", - "CONSUL_HTTP_ADDR", - }, "localhost:8500"), - }, - - "scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "CONSUL_SCHEME", - "CONSUL_HTTP_SCHEME", - }, "http"), - }, - - "http_auth": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CONSUL_HTTP_AUTH", ""), - }, - - "ca_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CA_FILE", ""), - }, - - "cert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CONSUL_CERT_FILE", ""), - }, - - "key_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("CONSUL_KEY_FILE", ""), - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "CONSUL_TOKEN", - "CONSUL_HTTP_TOKEN", - }, ""), - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "consul_agent_self": dataSourceConsulAgentSelf(), - "consul_catalog_nodes": dataSourceConsulCatalogNodes(), - "consul_catalog_service": dataSourceConsulCatalogService(), - "consul_catalog_services": dataSourceConsulCatalogServices(), - "consul_keys": dataSourceConsulKeys(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "consul_agent_service": resourceConsulAgentService(), - "consul_catalog_entry": resourceConsulCatalogEntry(), - "consul_keys": resourceConsulKeys(), - "consul_key_prefix": resourceConsulKeyPrefix(), - "consul_node": resourceConsulNode(), - "consul_prepared_query": 
resourceConsulPreparedQuery(), - "consul_service": resourceConsulService(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var config Config - configRaw := d.Get("").(map[string]interface{}) - if err := mapstructure.Decode(configRaw, &config); err != nil { - return nil, err - } - log.Printf("[INFO] Initializing Consul client") - return config.Client() -} diff --git a/builtin/providers/consul/resource_provider_test.go b/builtin/providers/consul/resource_provider_test.go deleted file mode 100644 index df8fe1b85..000000000 --- a/builtin/providers/consul/resource_provider_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package consul - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "consul": testAccProvider, - } -} - -func TestResourceProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func TestResourceProvider_Configure(t *testing.T) { - rp := Provider() - - raw := map[string]interface{}{ - "address": "demo.consul.io:80", - "datacenter": "nyc3", - "scheme": "https", - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestResourceProvider_ConfigureTLS(t *testing.T) { - rp := Provider() - - raw := map[string]interface{}{ - "address": "demo.consul.io:80", - "ca_file": "test-fixtures/cacert.pem", - 
"cert_file": "test-fixtures/usercert.pem", - "datacenter": "nyc3", - "key_file": "test-fixtures/userkey.pem", - "scheme": "https", - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CONSUL_HTTP_ADDR"); v != "" { - return - } - if v := os.Getenv("CONSUL_ADDRESS"); v != "" { - return - } - t.Fatal("Either CONSUL_ADDRESS or CONSUL_HTTP_ADDR must be set for acceptance tests") -} diff --git a/builtin/providers/consul/test-fixtures/README.md b/builtin/providers/consul/test-fixtures/README.md deleted file mode 100644 index 91cdc1248..000000000 --- a/builtin/providers/consul/test-fixtures/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Running Consul for Terraform Acceptance Tests - -## TLS - -Some of the acceptance tests for the `consul` provider use -TLS. To service these tests, a Consul server must be started -with HTTPS enabled with TLS certificates. - -### Test fixtures - -File | Description ---- | --- -`agent.json.example` | Configures the Consul agent to respond to HTTPS requests, and verifies the authenticity of HTTPS requests -`agentcert.pem` | A PEM-encoded certificate used by the Consul agent, valid only for 127.0.0.1 signed by `cacert.pem`, expires 2026 -`agentkey.pem` | A PEM-encoded private key used by the Consul agent -`cacert.pem` | A PEM-encoded Certificate Authority, expires 2036 -`usercert.pem` | A PEM-encoded certificate used by the Terraform acceptance tests, signed by `cacert.pem`, expires 2026 -`userkey.pem` | A PEM-encoded private key used by the Terraform acceptance tests - -### Start - -Start a Consul server configured to serve HTTP traffic, and validate incoming -HTTPS requests. 
- - ~/.go/src/github.com/hashicorp/terraform> consul agent \ - -bind 127.0.0.1 \ - -data-dir=/tmp \ - -dev \ - -config-file=builtin/providers/consul/text-fixtures/agent.json.example \ - -server - -### Test - -With TLS, `CONSUL_HTTP_ADDR` must match the Common Name of the agent certificate. - - ~/.go/src/github.com/hashicorp/terraform> CONSUL_CERT_FILE=test-fixtures/usercert.pem \ - CONSUL_KEY_FILE=test-fixtures/userkey.pem \ - CONSUL_CA_FILE=test-fixtures/cacert.pem \ - CONSUL_SCHEME=https \ - CONSUL_HTTP_ADDR=127.0.0.1:8943 \ - make testacc TEST=./builtin/providers/consul/ diff --git a/builtin/providers/consul/test-fixtures/agent.json.example b/builtin/providers/consul/test-fixtures/agent.json.example deleted file mode 100644 index aefe437a7..000000000 --- a/builtin/providers/consul/test-fixtures/agent.json.example +++ /dev/null @@ -1,11 +0,0 @@ -{ - "ca_file": "./cacert.pem", - "cert_file": "./agentcert.pem", - "datacenter": "dc1", - "domain": "hashicorp.test", - "key_file": "./agentkey.pem", - "ports": { - "https": 8943 - }, - "verify_incoming": true -} diff --git a/builtin/providers/consul/test-fixtures/agentcert.pem b/builtin/providers/consul/test-fixtures/agentcert.pem deleted file mode 100644 index b1b904e95..000000000 --- a/builtin/providers/consul/test-fixtures/agentcert.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEjjCCA3agAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE -CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 -MTEwNTEwMDRaGA8yMDY2MDczMDA1MTAwNFowXjELMAkGA1UEBhMCVVMxEzARBgNV -BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz -aGlDb3JwMRIwEAYDVQQDDAkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDQiR2zwLdfCd95LIrKekZlDKo3YF5nMZFzR3OR1Mc8jCAaYLz/ -ZFr8hVSAsygwZ+tHzoHP0U3FxeYemPtjLAPE077C6h+v6pHiTLxOkd22GtyalgMZ -E4ACGSogqDUvwssxxDUsG2ItzhVCB0GXTlfo/6XhApyRqvnEto+ZJ+zk6MiHnAmc 
-eN9sx0c5K097+Nq7PZgtk6HOxbKSvMWEkTtHrOBrhc9lTfwVSiWHdZ2X0wpOL1Ra -pFnBMDxnWyH2ivVMknarzKz2pBBDJwTGvsJcC1ymprqU+SRyjs75BfNv2BKJrhb4 -vBj3YEGMBEhHKtnObniGqV8W4o9jBIwocFpfAgMBAAGjggFKMIIBRjAPBgNVHREE -CDAGhwR/AAABMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG -+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYD -VR0OBBYEFNzoTM6XceaITc2lVIDrXaYBKJolMIGRBgNVHSMEgYkwgYaAFANEnP7/ -5Iil24eKuYJTt/IJPfamoWqkaDBmMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2Fs -aWZvcm5pYTESMBAGA1UECgwJSGFzaGlDb3JwMRIwEAYDVQQLDAlIYXNoaUNvcnAx -GjAYBgNVBAMMEUhhc2hpQ29ycCBUZXN0IENBggIQADAOBgNVHQ8BAf8EBAMCBaAw -HQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IB -AQB4p5sWE+p+sheXYgkg/PsATRMxYDPRTw0Zvd2AKOrodqF4IlibSb4mVVh0fPtx -2VX3z/WOZb8wgXNnEUhVcijf7LgGvw/SvQGgW5mXYSCcHeb4ETFJ1yuKZj5yn5tl -vZx1Sq/fGFkjHn3mgL+gzyQlNk1Wt0p3fLsIfpMOgpntSdRq3IUvf+W+oH5BUrTl -WgaXUD3lkdx3R9h3uLX4nxJWpMPViPCpr3ADW9oEwoHHQbe3LC7iJI2Us/qIH73n -Du7mUk+/HSkajjFsxnVoFCF1+RMqf1i9w7tXaAwWYT+vaP46fq3M/Bmsv/gDc5ur -8p48hpQ61Sfj0oU38Ftzzcs+ ------END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/agentkey.pem b/builtin/providers/consul/test-fixtures/agentkey.pem deleted file mode 100644 index 61e19a191..000000000 --- a/builtin/providers/consul/test-fixtures/agentkey.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA0Ikds8C3XwnfeSyKynpGZQyqN2BeZzGRc0dzkdTHPIwgGmC8 -/2Ra/IVUgLMoMGfrR86Bz9FNxcXmHpj7YywDxNO+wuofr+qR4ky8TpHdthrcmpYD -GROAAhkqIKg1L8LLMcQ1LBtiLc4VQgdBl05X6P+l4QKckar5xLaPmSfs5OjIh5wJ -nHjfbMdHOStPe/jauz2YLZOhzsWykrzFhJE7R6zga4XPZU38FUolh3Wdl9MKTi9U -WqRZwTA8Z1sh9or1TJJ2q8ys9qQQQycExr7CXAtcpqa6lPkkco7O+QXzb9gSia4W -+LwY92BBjARIRyrZzm54hqlfFuKPYwSMKHBaXwIDAQABAoIBAFQxyAg3GtIITm3C -ChdN3vYVcvQAuJy5apw8kPCkE/ziJmQAAs6qWgHyYvfDXcqNanUHb2nUe64KBKr9 -4SFdN/hT9YUEud5wuo2/pZejVPydQ8w2HPIW6WvvdQ7SWwb5gsiJC17Pf4g22GZc -P6MzQlMURIjgYQ5/FXDStI+FiyOwDhHDbLoMaIOfToWJupd+sGREccSKOVmJdGY/ -7/n1AGvfbgUToy2sMEz7HqTtgRJW/Knko2dD3ZWh7KqFS2GUYsJ3Ake1CG7xT6sA 
-4MWQvfR/+t7xSpDDU2WlNgFi9sQ8UjrIhaMYaiFhV6h2bTVeGl1tvBmbE77Z1Lne -jcobwKECgYEA6SuDF0wuu8WnDXnCpNrd83gONy1pEp9s7vbf/GrMXGaavQmqb6x1 -sLZTXho1srqRLXGSAvbDS/yO6wRUd/CdLB8WBda3lcH9y/II1BDynEygGpipGa83 -7Ti+D2hMSMLhX1vsUcCwz3fz61wzBqvdvrjdymmivPLu3rMINd8twl0CgYEA5PQi -jwi483icPPOc1S/5CsKnj6nJwKVQz9dDeT6QLVDCoVh5u0X9WZAAnMdrR9yZS6ax -ZAF453DPlK6Ct7vcBPEFC1W6QrRhjJrzvRb/PLnzaRMY+YoEg2qmGreb+30jrx+4 -jkTLkz4Qag+jOdR3t4104Pix1CkpQUX0chD5u+sCgYAiuI8Bxh9jaLBSimIYqFrK -qYL8Zm+yDTlscCi0brbVv5WlNq5BiN3RnaTWa3K5lZyOts22UUaNpyMlDfUCEztk -WZCu9+VIkKWZXAZChe+KpMJmk3sCzxu14HA03SQW5aYnzAlptxbdHhCdaJJUmP0h -LGgifw5zsn0tfl1noD8xJQKBgBKSSwtXJcl6CxJWoG4aihT5XSYmG5tozXlOeMao -8ID8gA0eZCFwt/A/4gzVkDowBq9AQjteczQyzmO9FBVbQ6mS81nMBmPKxe7l0seP -yfxfCQOI7QmwzFTsnbSlGB36NJ7L7+h6ZBj5e9NemVrjhSJ6cvSct7AB9rq4te9a -uScpAoGBAOIjcv2lQsJ3GWBTHWCh23jC/0XPE4bJg9DjliHQDAB/Yp49oV1giWs6 -xI0SBsovtJqJxOd6F8e6HuQTt1X1kQ8Q1Itb78Wx9Rs4bvN51pxj4L+DTxLBMl5g -xXsS+Zfm5O2HGxU5t60CsxRyF0EVNVEtgKkIiQE+ZaQ1d0WJC8RI ------END RSA PRIVATE KEY----- diff --git a/builtin/providers/consul/test-fixtures/cacert.pem b/builtin/providers/consul/test-fixtures/cacert.pem deleted file mode 100644 index 21aca8283..000000000 --- a/builtin/providers/consul/test-fixtures/cacert.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDrTCCApWgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE -CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 -MTEwNTA1MDZaGA8yMTE2MDcxODA1MDUwNlowZjELMAkGA1UEBhMCVVMxEzARBgNV -BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz -aGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAKglYmf9Tv1u6e1ulQZNpmvUHi+D+/PBHyg9Ft60 -HiZaeBGyNPZX9uVuM1jN3o/qpwBQxhq3ojQafU3WDSU6Y0GZ1e8AcLsObeNUjma4 -eLjZy+059Vt7DKcp6LA+7JqELToK83QzqNdYuocze5v9hPt5W6Q3Dp5rsmVjOFim -6LxcN/TAcmW+ZrykOGOT4QyYFkamp4uMJkpX1UwO3djdQF7CllnOboUUYqGyOt9e 
-BBudhCsSvWpJa/wNcAH2AxzaIVu85Dmg3G0Erekcget5ewebsnhGs3emfWO/XQht -uwKdz60mz1vAIK3UR5eYCbxnLrXM0WfcYKFqhuQpqqONWtUCAwEAAaNjMGEwHQYD -VR0OBBYEFANEnP7/5Iil24eKuYJTt/IJPfamMB8GA1UdIwQYMBaAFANEnP7/5Iil -24eKuYJTt/IJPfamMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0G -CSqGSIb3DQEBCwUAA4IBAQBzzvX4GiBalxuW5YxMiuFAljNB+tW20Frz0s7bq0+Z -1+ErQIW26NUHH14RUU4vbisX09QMm4p62oJOpo/5nW1VqsyoTCQJXaolGF6UidFy -l/2bgOy8QbCOqrS0jt0MFQFDr9Z/m8dBgbjFzv8gfsnpxDQvi+iKkVSuzlIfcvoo -xlwtNnrD9lSsinP4Zo8sNqjhaRbih8zhsUdd0mUDDGczw2mY2CdMmeH0wflJMEVe -3hwR8650sCJlJfVuFUDsqy1K9T5j5NVv7i6RloeMvYOH2nwpIejE88lmjpXR6Bzw -g8geEjKOLBN8Nmak3jSvH2IewczZKSaKNSiv/4Izut/8 ------END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/usercert.pem b/builtin/providers/consul/test-fixtures/usercert.pem deleted file mode 100644 index 0580e364f..000000000 --- a/builtin/providers/consul/test-fixtures/usercert.pem +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEJjCCAw6gAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE -CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 -MTEwNTA1MzFaGA8yMDY2MDczMDA1MDUzMVowcjELMAkGA1UEBhMCVVMxEzARBgNV -BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz -aGlDb3JwMSYwJAYDVQQDDB1IYXNoaUNvcnAgVGVzdCBUZXJyYWZvcm0gVXNlcjCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANrfzsj+gIM3pvuI+hdx2W5s -hTh2YXd4Q7cByLDzXPRgI0W1BFOIxOAdHy/zqxCKQPxiibxPqDCxzPnc7mSco8e0 -zvihAysthiUmWcNdF1pIh6631SU9rE+Mis6XcW2beuh/IVloXBwI4dmSuX3Urb0D -Aw3Rb5kCJzXUTBG/g8KriR6KyNFTu0Wb/1NcrrCnNAteQmpDuuMtx75stfoMUnlr -xZfsCZXHVpe8GmVlwqr8Mw7NKmyeKgl0rH1Mef6+ce9BPnVBxdJMEYWl+UQfTSV+ -pWoNtQTZxEbhbMFhYi410EJ5s0Nw6lyUnXrQ2/YglikIvnyfWj/CwLTZwaXlgAkC -AwEAAaOBzzCBzDAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIFoDAzBglghkgB -hvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmljYXRlMB0G -A1UdDgQWBBRIgDbi1wLtW+u3PgrbVrNDkGfwGjAfBgNVHSMEGDAWgBQDRJz+/+SI 
-pduHirmCU7fyCT32pjAOBgNVHQ8BAf8EBAMCBeAwJwYDVR0lBCAwHgYIKwYBBQUH -AwIGCCsGAQUFBwMEBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAi1pDjqy1 -bN9cLknPyblWdgaO0xSXJAvBpEaFnz88wmEPOmsg1889x7jJKhrGjTpjDJMeq3kh -ziDVCpOJesJOlUsa8ejOhMbcdiqHOWSk12bQC7dBbnAAXwO1Tr583IdLhC+ej64r -J4dBk7/wLBx2Deh8wxW+6TrIFNCyVptcHw76K2AkO11fscqS0sL/WVxqi1mjA9rV -KNGDIEIzqu13jZ3t0Cxc5AZ6dGHBALGNkfhjJ9SCpuPf8CmWNYHGWeIV0N4AB2SQ -gAjRYUKY4+zU3e+lUuudgTYZIM+zark6hLUAfXTeNRk6kGHod7x/Q9NvLB4SLwlI -DAzXJ9QHZyO1vQ== ------END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/userkey.pem b/builtin/providers/consul/test-fixtures/userkey.pem deleted file mode 100644 index e3171bc28..000000000 --- a/builtin/providers/consul/test-fixtures/userkey.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpgIBAAKCAQEA2t/OyP6Agzem+4j6F3HZbmyFOHZhd3hDtwHIsPNc9GAjRbUE -U4jE4B0fL/OrEIpA/GKJvE+oMLHM+dzuZJyjx7TO+KEDKy2GJSZZw10XWkiHrrfV -JT2sT4yKzpdxbZt66H8hWWhcHAjh2ZK5fdStvQMDDdFvmQInNdRMEb+DwquJHorI -0VO7RZv/U1yusKc0C15CakO64y3Hvmy1+gxSeWvFl+wJlcdWl7waZWXCqvwzDs0q -bJ4qCXSsfUx5/r5x70E+dUHF0kwRhaX5RB9NJX6lag21BNnERuFswWFiLjXQQnmz -Q3DqXJSdetDb9iCWKQi+fJ9aP8LAtNnBpeWACQIDAQABAoIBAQCRXS8zIoQrodyP -FkwzIfPseLqJ42WcOQ2QD+lATIEh9G+4rh5vdFh9GBpMeKLWW1wJw1AC90yW+p9O -G0NhIv9LdXQ4gIdgN93t8miPbdZCqgUjLwiqsSkttAPEbaRxzV915mk5vivemq+V -FvOG9Kdm7wcqODzL/DgaciMLboyNzrChltgybGKQIHAd9UFm+jE86IeeBsVcHuoL -0rEsYFKKzgdmIizjDOWPDSzVKL+dkiZ/8rYgoe1VtGV1DRWAWU5fawDFrdOxsGCh -Ob+rEmosTvONEQhB6GsdOQZ8++N6UTiJw32jqgieeP+Xj+K4XNG3nhP000DUIx/o -pRnj+KDhAoGBAPWXFbGHIMwEJCUV6OUXiY9Enb3A/Jf65f7cKbvo1i/nIbhzEv3v -LBtcUrsTmgTgNvuh3tF1RnwAUeYgedjdnALGJL16h3E0IWLAStaovoKgQyHHrtp9 -CEnOIj3PcPTFJVe+2FeV0+/kLjTHsZj9gMljzfxgswNdYfeGjXp4o1lVAoGBAOQm -06TW3smWW0FIRyyDNBwhQjD8tg3Yn0QJ+zFP6WMk5qbu/LeJnJSevBpQt1PvJ6xQ -kCj6Bi90jtLeuCW/8XLQjP46jQLU+3a74m3Nszgu9JVofiK6EPIsx62TGlwtIJfJ -U4+C5D/Piw/3qy6MjDbA1NJlSE4i2hAgGA79cDvlAoGBAN2o2sSbkOdyyWjLiKPV -BaxQowrUN2e45YONFQHsGf2sYEwJWNfm2elr/6OoAnhqIlYleGWWsuJSq5jIMRGi 
-myAJ1LlL8Rkkkwl9Q07RiPl/SngfsVq0RRnQOimNpIbXtWen8b3DlkFLssSihFHw -ZB/gu9cRNCFSVIzDXchvQAftAoGBAL0EzeOLgRhSUVhMoWrnaIzFoSkktU/TYF/m -RQ4dvqY9NDqpVQZaJDedKwpCRSBsytmgBU9tlSJL1ugtTTM5Srhsv+MAb0MhYRSF -pJqECS9K96ew4o+yx8dcAjJz5Sro2E/opCoJr0COmg+oiVIPbzsNl0SYVMcnaLJj -ZItGvW1hAoGBALeVUiqLgEDNQAIvprRlpJhU/ANpKm01ja9v66cgvmg62P9fcqb+ -yYPuJ2CwFDlb70KIDys6q9sTKUFykPDiKQgAFvEBQgAyOb3kdl4SXoUPbVDhMqwB -OfPznsXM6Y5LFNLzEi4n0QP4KsLc+wM52On5vnj7Mgvt5h2QlllPPTXy ------END RSA PRIVATE KEY----- diff --git a/builtin/providers/consul/validators.go b/builtin/providers/consul/validators.go deleted file mode 100644 index af4f44080..000000000 --- a/builtin/providers/consul/validators.go +++ /dev/null @@ -1,146 +0,0 @@ -package consul - -import ( - "fmt" - "regexp" - "strconv" - "time" - - "github.com/hashicorp/errwrap" -) - -// An array of inputs used as typed arguments and converted from their type into -// function objects that are dynamically constructed and executed. -type validatorInputs []interface{} - -// validateDurationMin is the minimum duration to accept as input -type validateDurationMin string - -// validateIntMax is the maximum integer value to accept as input -type validateIntMax int - -// validateIntMin is the minimum integer value to accept as input -type validateIntMin int - -// validateRegexp is a regexp pattern to use to validate schema input. -type validateRegexp string - -// makeValidateionFunc takes the name of the attribute and a list of typed -// validator inputs in order to create a validation closure that calls each -// validator in serial until either a warning or error is returned from the -// first validation function. 
-func makeValidationFunc(name string, validators []interface{}) func(v interface{}, key string) (warnings []string, errors []error) { - if len(validators) == 0 { - return nil - } - - fns := make([]func(v interface{}, key string) (warnings []string, errors []error), 0, len(validators)) - for _, v := range validators { - switch u := v.(type) { - case validateDurationMin: - fns = append(fns, validateDurationMinFactory(name, string(u))) - case validateIntMax: - fns = append(fns, validateIntMaxFactory(name, int(u))) - case validateIntMin: - fns = append(fns, validateIntMinFactory(name, int(u))) - case validateRegexp: - fns = append(fns, validateRegexpFactory(name, string(u))) - } - } - - return func(v interface{}, key string) (warnings []string, errors []error) { - for _, fn := range fns { - warnings, errors = fn(v, key) - if len(warnings) > 0 || len(errors) > 0 { - break - } - } - return warnings, errors - } -} - -func validateDurationMinFactory(name, minDuration string) func(v interface{}, key string) (warnings []string, errors []error) { - dMin, err := time.ParseDuration(minDuration) - if err != nil { - return func(interface{}, string) (warnings []string, errors []error) { - return nil, []error{ - errwrap.Wrapf(fmt.Sprintf("PROVIDER BUG: duration %q not valid: {{err}}", minDuration), err), - } - } - } - - return func(v interface{}, key string) (warnings []string, errors []error) { - d, err := time.ParseDuration(v.(string)) - if err != nil { - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("Invalid %s specified (%q): {{err}}", name, v.(string)), err)) - } - - if d < dMin { - errors = append(errors, fmt.Errorf("Invalid %s specified: duration %q less than the required minimum %s", name, v.(string), dMin)) - } - - return warnings, errors - } -} - -func validateIntMaxFactory(name string, max int) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - switch u := v.(type) { - 
case string: - i, err := strconv.ParseInt(u, 10, 64) - if err != nil { - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("unable to convert %q to an integer: {{err}}", u), err)) - break - } - - if i > int64(max) { - errors = append(errors, fmt.Errorf("Invalid %s specified: %d more than the required maximum %d", name, v.(int), max)) - } - case int: - if u > max { - errors = append(errors, fmt.Errorf("Invalid %s specified: %d more than the required maximum %d", name, v.(int), max)) - } - default: - errors = append(errors, fmt.Errorf("Unsupported type in int max validation: %T", v)) - } - - return warnings, errors - } -} - -func validateIntMinFactory(name string, min int) func(v interface{}, key string) (warnings []string, errors []error) { - return func(v interface{}, key string) (warnings []string, errors []error) { - switch u := v.(type) { - case string: - i, err := strconv.ParseInt(u, 10, 64) - if err != nil { - errors = append(errors, errwrap.Wrapf(fmt.Sprintf("unable to convert %q to an integer: {{err}}", u), err)) - break - } - - if i < int64(min) { - errors = append(errors, fmt.Errorf("Invalid %s specified: %d less than the required minimum %d", name, v.(int), min)) - } - case int: - if u < min { - errors = append(errors, fmt.Errorf("Invalid %s specified: %d less than the required minimum %d", name, v.(int), min)) - } - default: - errors = append(errors, fmt.Errorf("Unsupported type in int min validation: %T", v)) - } - - return warnings, errors - } -} - -func validateRegexpFactory(name string, reString string) func(v interface{}, key string) (warnings []string, errors []error) { - re := regexp.MustCompile(reString) - - return func(v interface{}, key string) (warnings []string, errors []error) { - if !re.MatchString(v.(string)) { - errors = append(errors, fmt.Errorf("Invalid %s specified (%q): regexp failed to match string", name, v.(string))) - } - - return warnings, errors - } -} diff --git a/builtin/providers/datadog/config.go 
b/builtin/providers/datadog/config.go deleted file mode 100644 index cc84bb690..000000000 --- a/builtin/providers/datadog/config.go +++ /dev/null @@ -1,22 +0,0 @@ -package datadog - -import ( - "log" - - "gopkg.in/zorkian/go-datadog-api.v2" -) - -// Config holds API and APP keys to authenticate to Datadog. -type Config struct { - APIKey string - APPKey string -} - -// Client returns a new Datadog client. -func (c *Config) Client() *datadog.Client { - - client := datadog.NewClient(c.APIKey, c.APPKey) - log.Printf("[INFO] Datadog Client configured ") - - return client -} diff --git a/builtin/providers/datadog/import_datadog_downtime_test.go b/builtin/providers/datadog/import_datadog_downtime_test.go deleted file mode 100644 index 4c5e3454c..000000000 --- a/builtin/providers/datadog/import_datadog_downtime_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package datadog - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestDatadogDowntime_import(t *testing.T) { - resourceName := "datadog_downtime.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigImported, - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -const testAccCheckDatadogDowntimeConfigImported = ` -resource "datadog_downtime" "foo" { - scope = ["host:X", "host:Y"] - start = 1735707600 - end = 1735765200 - - message = "Example Datadog downtime message." 
-} -` diff --git a/builtin/providers/datadog/import_datadog_monitor_test.go b/builtin/providers/datadog/import_datadog_monitor_test.go deleted file mode 100644 index 0fbc8085c..000000000 --- a/builtin/providers/datadog/import_datadog_monitor_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package datadog - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestDatadogMonitor_import(t *testing.T) { - resourceName := "datadog_monitor.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogMonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogMonitorConfigImported, - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -const testAccCheckDatadogMonitorConfigImported = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = "some message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2.5" - - thresholds { - ok = 1.5 - warning = 2.3 - critical = 2.5 - } - - notify_no_data = false - new_host_delay = 600 - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - include_tags = true - require_full_window = true - locked = false - tags = ["foo:bar", "bar:baz"] -} -` diff --git a/builtin/providers/datadog/import_datadog_user_test.go b/builtin/providers/datadog/import_datadog_user_test.go deleted file mode 100644 index 0adec86b4..000000000 --- a/builtin/providers/datadog/import_datadog_user_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package datadog - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestDatadogUser_import(t *testing.T) { - resourceName := "datadog_user.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() 
{ testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogUserConfigImported, - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -const testAccCheckDatadogUserConfigImported = ` -resource "datadog_user" "foo" { - email = "test@example.com" - handle = "test@example.com" - name = "Test User" -} -` diff --git a/builtin/providers/datadog/provider.go b/builtin/providers/datadog/provider.go deleted file mode 100644 index 0c97f8017..000000000 --- a/builtin/providers/datadog/provider.go +++ /dev/null @@ -1,59 +0,0 @@ -package datadog - -import ( - "log" - - "errors" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DATADOG_API_KEY", nil), - }, - "app_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DATADOG_APP_KEY", nil), - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "datadog_downtime": resourceDatadogDowntime(), - "datadog_monitor": resourceDatadogMonitor(), - "datadog_timeboard": resourceDatadogTimeboard(), - "datadog_user": resourceDatadogUser(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - - config := Config{ - APIKey: d.Get("api_key").(string), - APPKey: d.Get("app_key").(string), - } - - log.Println("[INFO] Initializing Datadog client") - client := config.Client() - - ok, err := client.Validate() - - if err != nil { - return client, err - } - - if ok == false { - return client, errors.New(`No valid credential sources found for Datadog Provider. 
Please see https://terraform.io/docs/providers/datadog/index.html for more information on providing credentials for the Datadog Provider`) - } - - return client, nil -} diff --git a/builtin/providers/datadog/provider_test.go b/builtin/providers/datadog/provider_test.go deleted file mode 100644 index b6c56102e..000000000 --- a/builtin/providers/datadog/provider_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package datadog - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "datadog": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DATADOG_API_KEY"); v == "" { - t.Fatal("DATADOG_API_KEY must be set for acceptance tests") - } - if v := os.Getenv("DATADOG_APP_KEY"); v == "" { - t.Fatal("DATADOG_APP_KEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/datadog/resource_datadog_downtime.go b/builtin/providers/datadog/resource_datadog_downtime.go deleted file mode 100644 index 29bd3240f..000000000 --- a/builtin/providers/datadog/resource_datadog_downtime.go +++ /dev/null @@ -1,339 +0,0 @@ -package datadog - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -func resourceDatadogDowntime() *schema.Resource { - return &schema.Resource{ - Create: resourceDatadogDowntimeCreate, - Read: resourceDatadogDowntimeRead, - Update: resourceDatadogDowntimeUpdate, - Delete: resourceDatadogDowntimeDelete, 
- Exists: resourceDatadogDowntimeExists, - Importer: &schema.ResourceImporter{ - State: resourceDatadogDowntimeImport, - }, - - Schema: map[string]*schema.Schema{ - "active": { - Type: schema.TypeBool, - Optional: true, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - }, - "end": { - Type: schema.TypeInt, - Optional: true, - }, - "message": { - Type: schema.TypeString, - Optional: true, - StateFunc: func(val interface{}) string { - return strings.TrimSpace(val.(string)) - }, - }, - "recurrence": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "period": { - Type: schema.TypeInt, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateDatadogDowntimeRecurrenceType, - }, - "until_date": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"recurrence.until_occurrences"}, - }, - "until_occurrences": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"recurrence.until_date"}, - }, - "week_days": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateDatadogDowntimeRecurrenceWeekDays, - }, - }, - }, - }, - }, - "scope": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "start": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } -} - -func buildDowntimeStruct(d *schema.ResourceData) *datadog.Downtime { - var dt datadog.Downtime - - if attr, ok := d.GetOk("active"); ok { - dt.SetActive(attr.(bool)) - } - if attr, ok := d.GetOk("disabled"); ok { - dt.SetDisabled(attr.(bool)) - } - if attr, ok := d.GetOk("end"); ok { - dt.SetEnd(attr.(int)) - } - if attr, ok := d.GetOk("message"); ok { - dt.SetMessage(strings.TrimSpace(attr.(string))) - } - if _, ok := d.GetOk("recurrence"); ok { - var recurrence datadog.Recurrence - - if attr, ok := d.GetOk("recurrence.0.period"); ok { - 
recurrence.SetPeriod(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.type"); ok { - recurrence.SetType(attr.(string)) - } - if attr, ok := d.GetOk("recurrence.0.until_date"); ok { - recurrence.SetUntilDate(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.until_occurrences"); ok { - recurrence.SetUntilOccurrences(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.week_days"); ok { - weekDays := make([]string, 0, len(attr.([]interface{}))) - for _, weekDay := range attr.([]interface{}) { - weekDays = append(weekDays, weekDay.(string)) - } - recurrence.WeekDays = weekDays - } - - dt.SetRecurrence(recurrence) - } - scope := []string{} - for _, s := range d.Get("scope").([]interface{}) { - scope = append(scope, s.(string)) - } - dt.Scope = scope - if attr, ok := d.GetOk("start"); ok { - dt.SetStart(attr.(int)) - } - - return &dt -} - -func resourceDatadogDowntimeExists(d *schema.ResourceData, meta interface{}) (b bool, e error) { - // Exists - This is called to verify a resource still exists. It is called prior to Read, - // and lowers the burden of Read to be able to assume the resource exists. 
- client := meta.(*datadog.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return false, err - } - - if _, err = client.GetDowntime(id); err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - return false, nil - } - return false, err - } - - return true, nil -} - -func resourceDatadogDowntimeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - dts := buildDowntimeStruct(d) - dt, err := client.CreateDowntime(dts) - if err != nil { - return fmt.Errorf("error updating downtime: %s", err.Error()) - } - - d.SetId(strconv.Itoa(dt.GetId())) - - return nil -} - -func resourceDatadogDowntimeRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - dt, err := client.GetDowntime(id) - if err != nil { - return err - } - - log.Printf("[DEBUG] downtime: %v", dt) - d.Set("active", dt.GetActive()) - d.Set("disabled", dt.GetDisabled()) - d.Set("end", dt.GetEnd()) - d.Set("message", dt.GetMessage()) - if r, ok := dt.GetRecurrenceOk(); ok { - recurrence := make(map[string]interface{}) - recurrenceList := make([]map[string]interface{}, 0, 1) - - if attr, ok := r.GetPeriodOk(); ok { - recurrence["period"] = strconv.Itoa(attr) - } - if attr, ok := r.GetTypeOk(); ok { - recurrence["type"] = attr - } - if attr, ok := r.GetUntilDateOk(); ok { - recurrence["until_date"] = strconv.Itoa(attr) - } - if attr, ok := r.GetUntilOccurrencesOk(); ok { - recurrence["until_occurrences"] = strconv.Itoa(attr) - } - if r.WeekDays != nil { - weekDays := make([]string, 0, len(r.WeekDays)) - for _, weekDay := range r.WeekDays { - weekDays = append(weekDays, weekDay) - } - recurrence["week_days"] = weekDays - } - recurrenceList = append(recurrenceList, recurrence) - d.Set("recurrence", recurrenceList) - } - d.Set("scope", dt.Scope) - d.Set("start", dt.GetStart()) - - return nil -} - -func resourceDatadogDowntimeUpdate(d 
*schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - var dt datadog.Downtime - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - dt.SetId(id) - if attr, ok := d.GetOk("active"); ok { - dt.SetActive(attr.(bool)) - } - if attr, ok := d.GetOk("disabled"); ok { - dt.SetDisabled(attr.(bool)) - } - if attr, ok := d.GetOk("end"); ok { - dt.SetEnd(attr.(int)) - } - if attr, ok := d.GetOk("message"); ok { - dt.SetMessage(attr.(string)) - } - - if _, ok := d.GetOk("recurrence"); ok { - var recurrence datadog.Recurrence - - if attr, ok := d.GetOk("recurrence.0.period"); ok { - recurrence.SetPeriod(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.type"); ok { - recurrence.SetType(attr.(string)) - } - if attr, ok := d.GetOk("recurrence.0.until_date"); ok { - recurrence.SetUntilDate(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.until_occurrences"); ok { - recurrence.SetUntilOccurrences(attr.(int)) - } - if attr, ok := d.GetOk("recurrence.0.week_days"); ok { - weekDays := make([]string, 0, len(attr.([]interface{}))) - for _, weekDay := range attr.([]interface{}) { - weekDays = append(weekDays, weekDay.(string)) - } - recurrence.WeekDays = weekDays - } - - dt.SetRecurrence(recurrence) - } - - scope := make([]string, 0) - for _, v := range d.Get("scope").([]interface{}) { - scope = append(scope, v.(string)) - } - dt.Scope = scope - if attr, ok := d.GetOk("start"); ok { - dt.SetStart(attr.(int)) - } - - if err = client.UpdateDowntime(&dt); err != nil { - return fmt.Errorf("error updating downtime: %s", err.Error()) - } - - return resourceDatadogDowntimeRead(d, meta) -} - -func resourceDatadogDowntimeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - if err = client.DeleteDowntime(id); err != nil { - return err - } - - return nil -} - -func resourceDatadogDowntimeImport(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { - if err := resourceDatadogDowntimeRead(d, meta); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} - -func validateDatadogDowntimeRecurrenceType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - switch value { - case "days", "months", "weeks", "years": - break - default: - errors = append(errors, fmt.Errorf( - "%q contains an invalid recurrence type parameter %q. Valid parameters are days, months, weeks, or years", k, value)) - } - return -} - -func validateDatadogDowntimeRecurrenceWeekDays(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - switch value { - case "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun": - break - default: - errors = append(errors, fmt.Errorf( - "%q contains an invalid recurrence week day parameter %q. Valid parameters are Mon, Tue, Wed, Thu, Fri, Sat, or Sun", k, value)) - } - return -} diff --git a/builtin/providers/datadog/resource_datadog_downtime_test.go b/builtin/providers/datadog/resource_datadog_downtime_test.go deleted file mode 100644 index e44c69b9b..000000000 --- a/builtin/providers/datadog/resource_datadog_downtime_test.go +++ /dev/null @@ -1,527 +0,0 @@ -package datadog - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -func TestAccDatadogDowntime_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "*"), - resource.TestCheckResourceAttr( - 
"datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_BasicMultiScope(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigMultiScope, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "host:A"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.1", "host:B"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_BasicNoRecurrence(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigNoRecurrence, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - 
"datadog_downtime.foo", "scope.0", "host:NoRecurrence"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_BasicUntilDateRecurrence(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigUntilDateRecurrence, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "host:UntilDateRecurrence"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.until_date", "1736226000"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_BasicUntilOccurrencesRecurrence(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigUntilOccurrencesRecurrence, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - 
"datadog_downtime.foo", "scope.0", "host:UntilOccurrencesRecurrence"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.until_occurrences", "5"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_WeekDayRecurring(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigWeekDaysRecurrence, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "WeekDaysRecurrence"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735646400"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735732799"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "weeks"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.week_days.0", "Sat"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.week_days.1", "Sun"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_Updated(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "*"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "Updated"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "3"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func TestAccDatadogDowntime_TrimWhitespace(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogDowntimeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogDowntimeConfigWhitespace, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogDowntimeExists("datadog_downtime.foo"), - 
resource.TestCheckResourceAttr( - "datadog_downtime.foo", "scope.0", "host:Whitespace"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "start", "1735707600"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "end", "1735765200"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.type", "days"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "recurrence.0.period", "1"), - resource.TestCheckResourceAttr( - "datadog_downtime.foo", "message", "Example Datadog downtime message."), - ), - }, - }, - }) -} - -func testAccCheckDatadogDowntimeDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - - if err := datadogDowntimeDestroyHelper(s, client); err != nil { - return err - } - return nil -} - -func testAccCheckDatadogDowntimeExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - if err := datadogDowntimeExistsHelper(s, client); err != nil { - return err - } - return nil - } -} - -const testAccCheckDatadogDowntimeConfig = ` -resource "datadog_downtime" "foo" { - scope = ["*"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 1 - } - - message = "Example Datadog downtime message." -} -` - -const testAccCheckDatadogDowntimeConfigMultiScope = ` -resource "datadog_downtime" "foo" { - scope = ["host:A", "host:B"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 1 - } - - message = "Example Datadog downtime message." -} -` - -const testAccCheckDatadogDowntimeConfigNoRecurrence = ` -resource "datadog_downtime" "foo" { - scope = ["host:NoRecurrence"] - start = 1735707600 - end = 1735765200 - message = "Example Datadog downtime message." 
-} -` - -const testAccCheckDatadogDowntimeConfigUntilDateRecurrence = ` -resource "datadog_downtime" "foo" { - scope = ["host:UntilDateRecurrence"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 1 - until_date = 1736226000 - } - - message = "Example Datadog downtime message." -} -` - -const testAccCheckDatadogDowntimeConfigUntilOccurrencesRecurrence = ` -resource "datadog_downtime" "foo" { - scope = ["host:UntilOccurrencesRecurrence"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 1 - until_occurrences = 5 - } - - message = "Example Datadog downtime message." -} -` - -const testAccCheckDatadogDowntimeConfigWeekDaysRecurrence = ` -resource "datadog_downtime" "foo" { - scope = ["WeekDaysRecurrence"] - start = 1735646400 - end = 1735732799 - - recurrence { - period = 1 - type = "weeks" - week_days = ["Sat", "Sun"] - } - - message = "Example Datadog downtime message." -} -` - -const testAccCheckDatadogDowntimeConfigUpdated = ` -resource "datadog_downtime" "foo" { - scope = ["Updated"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 3 - } - - message = "Example Datadog downtime message." 
-} -` - -const testAccCheckDatadogDowntimeConfigWhitespace = ` -resource "datadog_downtime" "foo" { - scope = ["host:Whitespace"] - start = 1735707600 - end = 1735765200 - - recurrence { - type = "days" - period = 1 - } - - message = < 2"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "notify_no_data", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "new_host_delay", "600"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "evaluation_delay", "700"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "renotify_interval", "60"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "2.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "require_full_window", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "locked", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.0", "foo:bar"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.1", "baz"), - ), - }, - }, - }) -} - -func TestAccDatadogMonitor_BasicNoTreshold(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogMonitorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDatadogMonitorConfigNoThresholds, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "name", "name for monitor foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "message", "some message Notify: @hipchat-channel"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "type", "metric alert"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "query", "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2"), - resource.TestCheckResourceAttr( - 
"datadog_monitor.foo", "notify_no_data", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "renotify_interval", "60"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "require_full_window", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "locked", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.0", "foo:bar"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.1", "bar:baz"), - ), - }, - }, - }) -} - -func TestAccDatadogMonitor_Updated(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogMonitorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDatadogMonitorConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "name", "name for monitor foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "message", "some message Notify: @hipchat-channel"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "escalation_message", "the situation has escalated @pagerduty"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "query", "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "type", "metric alert"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "notify_no_data", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "new_host_delay", "600"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "evaluation_delay", "700"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "renotify_interval", "60"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "2.0"), - resource.TestCheckResourceAttr( 
- "datadog_monitor.foo", "notify_audit", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "timeout_h", "60"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "include_tags", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "require_full_window", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "locked", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.0", "foo:bar"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.1", "baz"), - ), - }, - { - Config: testAccCheckDatadogMonitorConfigUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "name", "name for monitor bar"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "message", "a different message Notify: @hipchat-channel"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "query", "avg(last_1h):avg:aws.ec2.cpu{environment:bar,host:bar} by {host} > 3"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "escalation_message", "the situation has escalated! 
@pagerduty"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "type", "metric alert"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "notify_no_data", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "new_host_delay", "900"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "evaluation_delay", "800"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "no_data_timeframe", "20"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "renotify_interval", "40"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.ok", "0.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "3.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "notify_audit", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "timeout_h", "70"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "include_tags", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "silenced.*", "0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "require_full_window", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "locked", "true"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.0", "baz:qux"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "tags.1", "quux"), - ), - }, - }, - }) -} - -func TestAccDatadogMonitor_TrimWhitespace(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogMonitorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDatadogMonitorConfigWhitespace, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "name", "name for monitor foo"), - 
resource.TestCheckResourceAttr( - "datadog_monitor.foo", "message", "some message Notify: @hipchat-channel"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "type", "metric alert"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "query", "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "notify_no_data", "false"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "renotify_interval", "60"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.ok", "0.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "2.0"), - ), - }, - }, - }) -} - -func TestAccDatadogMonitor_Basic_float_int(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogMonitorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDatadogMonitorConfig_ints, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "2"), - ), - }, - - { - Config: testAccCheckDatadogMonitorConfig_ints_mixed, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogMonitorExists("datadog_monitor.foo"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.warning", "1.0"), - resource.TestCheckResourceAttr( - "datadog_monitor.foo", "thresholds.critical", "3.0"), - ), - }, - }, - }) -} - -func testAccCheckDatadogMonitorDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - - if err := destroyHelper(s, client); err != nil { - return err - } - return nil -} - -func 
testAccCheckDatadogMonitorExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - if err := existsHelper(s, client); err != nil { - return err - } - return nil - } -} - -const testAccCheckDatadogMonitorConfig = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = "some message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2" - - thresholds { - warning = "1.0" - critical = "2.0" - } - - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - new_host_delay = 600 - evaluation_delay = 700 - include_tags = true - require_full_window = true - locked = false - tags = ["foo:bar", "baz"] -} -` -const testAccCheckDatadogMonitorConfigNoThresholds = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = "some message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2" - - notify_no_data = false - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - include_tags = true - require_full_window = true - locked = false - tags = ["foo:bar", "bar:baz"] -} -` - -const testAccCheckDatadogMonitorConfig_ints = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = "some message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2" - - thresholds { - warning = 1 - critical = 2 - } - - notify_no_data = false - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - include_tags = true - require_full_window = true - locked = false - - tags = ["foo:bar", "baz"] -} 
-` - -const testAccCheckDatadogMonitorConfig_ints_mixed = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = "some message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 3" - - thresholds { - warning = 1 - critical = 3.0 - } - - notify_no_data = false - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - include_tags = true - require_full_window = true - locked = false - - tags = ["foo:bar", "baz"] -} -` - -const testAccCheckDatadogMonitorConfigUpdated = ` -resource "datadog_monitor" "foo" { - name = "name for monitor bar" - type = "metric alert" - message = "a different message Notify: @hipchat-channel" - escalation_message = "the situation has escalated @pagerduty" - - query = "avg(last_1h):avg:aws.ec2.cpu{environment:bar,host:bar} by {host} > 3" - - thresholds { - ok = "0.0" - warning = "1.0" - critical = "3.0" - } - - notify_no_data = true - new_host_delay = 900 - evaluation_delay = 800 - no_data_timeframe = 20 - renotify_interval = 40 - escalation_message = "the situation has escalated! 
@pagerduty" - notify_audit = true - timeout_h = 70 - include_tags = false - require_full_window = false - locked = true - silenced { - "*" = 0 - } - tags = ["baz:qux", "quux"] -} -` - -const testAccCheckDatadogMonitorConfigWhitespace = ` -resource "datadog_monitor" "foo" { - name = "name for monitor foo" - type = "metric alert" - message = < 2 -EOF - thresholds { - ok = "0.0" - warning = "1.0" - critical = "2.0" - } - - notify_no_data = false - renotify_interval = 60 - - notify_audit = false - timeout_h = 60 - include_tags = true -} -` - -func destroyHelper(s *terraform.State, client *datadog.Client) error { - for _, r := range s.RootModule().Resources { - i, _ := strconv.Atoi(r.Primary.ID) - if _, err := client.GetMonitor(i); err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - continue - } - return fmt.Errorf("Received an error retrieving monitor %s", err) - } - return fmt.Errorf("Monitor still exists") - } - return nil -} - -func existsHelper(s *terraform.State, client *datadog.Client) error { - for _, r := range s.RootModule().Resources { - i, _ := strconv.Atoi(r.Primary.ID) - if _, err := client.GetMonitor(i); err != nil { - return fmt.Errorf("Received an error retrieving monitor %s", err) - } - } - return nil -} diff --git a/builtin/providers/datadog/resource_datadog_timeboard.go b/builtin/providers/datadog/resource_datadog_timeboard.go deleted file mode 100644 index 1f9efc8a7..000000000 --- a/builtin/providers/datadog/resource_datadog_timeboard.go +++ /dev/null @@ -1,718 +0,0 @@ -package datadog - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -func resourceDatadogTimeboard() *schema.Resource { - request := &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "q": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "stacked": &schema.Schema{ - 
Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "line", - }, - "aggregator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateAggregatorMethod, - }, - "style": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "conditional_format": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "A list of conditional formatting rules.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "palette": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The palette to use if this condition is met.", - }, - "comparator": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Comparator (<, >, etc)", - }, - "custom_bg_color": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Custom background color (e.g., #205081)", - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Value that is threshold for conditional format", - }, - "custom_fg_color": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Custom foreground color (e.g., #59afe1)", - }, - }, - }, - }, - "change_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Type of change for change graphs.", - }, - "order_direction": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Sort change graph in ascending or descending order.", - }, - "compare_to": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The time period to compare change against in change graphs.", - }, - "increase_good": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Decides whether to represent increases as good or bad in change graphs.", - }, - "order_by": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The field a change graph 
will be ordered by.", - }, - "extra_col": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "If set to 'present', this will include the present values in change graphs.", - }, - }, - }, - } - - marker := &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - } - - graph := &schema.Schema{ - Type: schema.TypeList, - Required: true, - Description: "A list of graph definitions.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "title": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The name of the graph.", - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Description: "Filter for events to be overlayed on the graph.", - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "viz": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "request": request, - "marker": marker, - "yaxis": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "autoscale": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Automatically scale graphs", - }, - "text_align": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "How to align text", - }, - "precision": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "How many digits to show", - }, - "custom_unit": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Use a custom unit (like 'users')", - }, - "style": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "group": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Description: "A list of groupings for hostmap type graphs.", - Elem: 
&schema.Schema{ - Type: schema.TypeString, - }, - }, - "include_no_metric_hosts": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Include hosts without metrics in hostmap graphs", - }, - "scope": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Description: "A list of scope filters for hostmap type graphs.", - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "include_ungrouped_hosts": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "Include ungrouped hosts in hostmap graphs", - }, - }, - }, - } - - template_variable := &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "A list of template variables for using Dashboard templating.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The name of the variable.", - }, - "prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The tag prefix associated with the variable. 
Only tags with this prefix will appear in the variable dropdown.", - }, - "default": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The default value for the template variable on dashboard load.", - }, - }, - }, - } - - return &schema.Resource{ - Create: resourceDatadogTimeboardCreate, - Update: resourceDatadogTimeboardUpdate, - Read: resourceDatadogTimeboardRead, - Delete: resourceDatadogTimeboardDelete, - Exists: resourceDatadogTimeboardExists, - Importer: &schema.ResourceImporter{ - State: resourceDatadogTimeboardImport, - }, - - Schema: map[string]*schema.Schema{ - "title": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The name of the dashboard.", - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "A description of the dashboard's content.", - }, - "read_only": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "graph": graph, - "template_variable": template_variable, - }, - } -} - -func appendConditionalFormats(datadogRequest *datadog.GraphDefinitionRequest, terraformFormats *[]interface{}) { - for _, t_ := range *terraformFormats { - t := t_.(map[string]interface{}) - d := datadog.DashboardConditionalFormat{ - Comparator: datadog.String(t["comparator"].(string)), - } - - if v, ok := t["palette"]; ok { - d.SetPalette(v.(string)) - } - - if v, ok := t["custom_bg_color"]; ok { - d.SetCustomBgColor(v.(string)) - } - - if v, ok := t["custom_fg_color"]; ok { - d.SetCustomFgColor(v.(string)) - } - - if v, ok := t["value"]; ok { - d.SetValue(json.Number(v.(string))) - } - - datadogRequest.ConditionalFormats = append(datadogRequest.ConditionalFormats, d) - } -} - -func buildTemplateVariables(terraformTemplateVariables *[]interface{}) *[]datadog.TemplateVariable { - datadogTemplateVariables := make([]datadog.TemplateVariable, len(*terraformTemplateVariables)) - for i, t_ := range *terraformTemplateVariables { - t := 
t_.(map[string]interface{}) - datadogTemplateVariables[i] = datadog.TemplateVariable{ - Name: datadog.String(t["name"].(string)), - Prefix: datadog.String(t["prefix"].(string)), - Default: datadog.String(t["default"].(string)), - } - } - return &datadogTemplateVariables -} - -func appendRequests(datadogGraph *datadog.Graph, terraformRequests *[]interface{}) { - for _, t_ := range *terraformRequests { - t := t_.(map[string]interface{}) - d := datadog.GraphDefinitionRequest{ - Query: datadog.String(t["q"].(string)), - Type: datadog.String(t["type"].(string)), - Aggregator: datadog.String(t["aggregator"].(string)), - } - if stacked, ok := t["stacked"]; ok { - d.SetStacked(stacked.(bool)) - } - if style, ok := t["style"]; ok { - s, _ := style.(map[string]interface{}) - - style := datadog.GraphDefinitionRequestStyle{} - - if v, ok := s["palette"]; ok { - style.SetPalette(v.(string)) - } - - if v, ok := s["width"]; ok { - style.SetWidth(v.(string)) - } - - if v, ok := s["type"]; ok { - style.SetType(v.(string)) - } - - d.SetStyle(style) - } - - if v, ok := t["change_type"]; ok { - d.SetChangeType(v.(string)) - } - if v, ok := t["compare_to"]; ok { - d.SetCompareTo(v.(string)) - } - if v, ok := t["increase_good"]; ok { - d.SetIncreaseGood(v.(bool)) - } - if v, ok := t["order_by"]; ok { - d.SetOrderBy(v.(string)) - } - if v, ok := t["extra_col"]; ok { - d.SetExtraCol(v.(string)) - } - if v, ok := t["order_direction"]; ok { - d.SetOrderDirection(v.(string)) - } - - if v, ok := t["conditional_format"]; ok { - v_ := v.([]interface{}) - appendConditionalFormats(&d, &v_) - } - - datadogGraph.Definition.Requests = append(datadogGraph.Definition.Requests, d) - } -} - -func appendEvents(datadogGraph *datadog.Graph, terraformEvents *[]interface{}) { - for _, t_ := range *terraformEvents { - datadogGraph.Definition.Events = append(datadogGraph.Definition.Events, datadog.GraphEvent{ - Query: datadog.String(t_.(string)), - }) - } -} - -func appendMarkers(datadogGraph *datadog.Graph, 
terraformMarkers *[]interface{}) { - for _, t_ := range *terraformMarkers { - t := t_.(map[string]interface{}) - d := datadog.GraphDefinitionMarker{ - Type: datadog.String(t["type"].(string)), - Value: datadog.String(t["value"].(string)), - } - if v, ok := t["label"]; ok { - d.SetLabel(v.(string)) - } - datadogGraph.Definition.Markers = append(datadogGraph.Definition.Markers, d) - } -} - -func buildGraphs(terraformGraphs *[]interface{}) *[]datadog.Graph { - datadogGraphs := make([]datadog.Graph, len(*terraformGraphs)) - for i, t_ := range *terraformGraphs { - t := t_.(map[string]interface{}) - - datadogGraphs[i] = datadog.Graph{ - Title: datadog.String(t["title"].(string)), - } - - d := &datadogGraphs[i] - d.Definition = &datadog.GraphDefinition{} - d.Definition.SetViz(t["viz"].(string)) - - if v, ok := t["yaxis"]; ok { - yaxis := v.(map[string]interface{}) - if v, ok := yaxis["min"]; ok { - min, _ := strconv.ParseFloat(v.(string), 64) - d.Definition.Yaxis.SetMin(min) - } - if v, ok := yaxis["max"]; ok { - max, _ := strconv.ParseFloat(v.(string), 64) - d.Definition.Yaxis.SetMax(max) - } - if v, ok := yaxis["scale"]; ok { - d.Definition.Yaxis.SetScale(v.(string)) - } - } - - if v, ok := t["autoscale"]; ok { - d.Definition.SetAutoscale(v.(bool)) - } - - if v, ok := t["text_align"]; ok { - d.Definition.SetTextAlign(v.(string)) - } - - if precision, ok := t["precision"]; ok { - d.Definition.SetPrecision(precision.(string)) - } - - if v, ok := t["custom_unit"]; ok { - d.Definition.SetCustomUnit(v.(string)) - } - - if style, ok := t["style"]; ok { - s := style.(map[string]interface{}) - - gs := datadog.Style{} - - if v, ok := s["palette"]; ok { - gs.SetPalette(v.(string)) - } - - if v, ok := s["palette_flip"]; ok { - pf, _ := strconv.ParseBool(v.(string)) - gs.SetPaletteFlip(pf) - } - d.Definition.SetStyle(gs) - - } - - if v, ok := t["group"]; ok { - for _, g := range v.(*schema.Set).List() { - d.Definition.Groups = append(d.Definition.Groups, g.(string)) - } - } - - if 
includeNoMetricHosts, ok := t["include_no_metric_hosts"]; ok { - d.Definition.SetIncludeNoMetricHosts(includeNoMetricHosts.(bool)) - } - - if v, ok := t["scope"]; ok { - for _, s := range v.(*schema.Set).List() { - d.Definition.Scopes = append(d.Definition.Groups, s.(string)) - } - } - - if v, ok := t["include_ungrouped_hosts"]; ok { - d.Definition.SetIncludeUngroupedHosts(v.(bool)) - } - v := t["marker"].([]interface{}) - appendMarkers(d, &v) - - v = t["events"].(*schema.Set).List() - appendEvents(d, &v) - - v = t["request"].([]interface{}) - appendRequests(d, &v) - } - return &datadogGraphs -} - -func buildTimeboard(d *schema.ResourceData) (*datadog.Dashboard, error) { - var id int - if d.Id() != "" { - var err error - id, err = strconv.Atoi(d.Id()) - if err != nil { - return nil, err - } - } - terraformGraphs := d.Get("graph").([]interface{}) - terraformTemplateVariables := d.Get("template_variable").([]interface{}) - return &datadog.Dashboard{ - Id: datadog.Int(id), - Title: datadog.String(d.Get("title").(string)), - Description: datadog.String(d.Get("description").(string)), - ReadOnly: datadog.Bool(d.Get("read_only").(bool)), - Graphs: *buildGraphs(&terraformGraphs), - TemplateVariables: *buildTemplateVariables(&terraformTemplateVariables), - }, nil -} - -func resourceDatadogTimeboardCreate(d *schema.ResourceData, meta interface{}) error { - timeboard, err := buildTimeboard(d) - if err != nil { - return fmt.Errorf("Failed to parse resource configuration: %s", err.Error()) - } - timeboard, err = meta.(*datadog.Client).CreateDashboard(timeboard) - if err != nil { - return fmt.Errorf("Failed to create timeboard using Datadog API: %s", err.Error()) - } - d.SetId(strconv.Itoa(timeboard.GetId())) - return nil -} - -func resourceDatadogTimeboardUpdate(d *schema.ResourceData, meta interface{}) error { - timeboard, err := buildTimeboard(d) - if err != nil { - return fmt.Errorf("Failed to parse resource configuration: %s", err.Error()) - } - if err = 
meta.(*datadog.Client).UpdateDashboard(timeboard); err != nil { - return fmt.Errorf("Failed to update timeboard using Datadog API: %s", err.Error()) - } - return resourceDatadogTimeboardRead(d, meta) -} - -func appendTerraformGraphRequests(datadogRequests []datadog.GraphDefinitionRequest, requests *[]map[string]interface{}) { - for _, datadogRequest := range datadogRequests { - request := map[string]interface{}{} - request["q"] = datadogRequest.GetQuery() - request["stacked"] = datadogRequest.GetStacked() - request["type"] = datadogRequest.GetType() - if v, ok := datadogRequest.GetStyleOk(); ok { - style := map[string]string{} - if v, ok := v.GetPaletteOk(); ok { - style["palette"] = v - } - if v, ok := v.GetTypeOk(); ok { - style["type"] = v - } - if v, ok := v.GetWidthOk(); ok { - style["width"] = v - } - request["style"] = style - } - conditionalFormats := []map[string]interface{}{} - for _, cf := range datadogRequest.ConditionalFormats { - conditionalFormat := map[string]interface{}{ - "palette": cf.Palette, - "comparator": cf.Comparator, - "custom_bg_color": cf.CustomBgColor, - "value": cf.Value, - "custom_fg_color": cf.CustomFgColor, - } - conditionalFormats = append(conditionalFormats, conditionalFormat) - } - request["conditional_format"] = conditionalFormats - request["change_type"] = datadogRequest.GetChangeType() - request["order_direction"] = datadogRequest.GetOrderDirection() - request["compare_to"] = datadogRequest.GetCompareTo() - request["increase_good"] = datadogRequest.GetIncreaseGood() - request["order_by"] = datadogRequest.GetOrderBy() - request["extra_col"] = datadogRequest.GetExtraCol() - - *requests = append(*requests, request) - } -} - -func buildTerraformGraph(datadog_graph datadog.Graph) map[string]interface{} { - graph := map[string]interface{}{} - graph["title"] = datadog_graph.GetTitle() - - definition := datadog_graph.Definition - graph["viz"] = definition.GetViz() - - events := []*string{} - for _, datadog_event := range 
definition.Events { - events = append(events, datadog_event.Query) - } - graph["events"] = events - - markers := []map[string]interface{}{} - for _, datadog_marker := range definition.Markers { - marker := map[string]interface{}{ - "type": datadog_marker.Type, - "value": datadog_marker.Value, - "label": datadog_marker.Label, - } - markers = append(markers, marker) - } - graph["marker"] = markers - - yaxis := map[string]string{} - - if v, ok := definition.Yaxis.GetMinOk(); ok { - yaxis["min"] = strconv.FormatFloat(v, 'f', -1, 64) - } - - if v, ok := definition.Yaxis.GetMaxOk(); ok { - yaxis["max"] = strconv.FormatFloat(v, 'f', -1, 64) - } - - if v, ok := definition.Yaxis.GetScaleOk(); ok { - yaxis["scale"] = v - } - - graph["yaxis"] = yaxis - - graph["autoscale"] = definition.Autoscale - graph["text_align"] = definition.TextAlign - graph["precision"] = definition.Precision - graph["custom_unit"] = definition.CustomUnit - - if v, ok := definition.GetStyleOk(); ok { - style := map[string]string{} - if v, ok := v.GetPaletteOk(); ok { - style["palette"] = v - } - if v, ok := v.GetPaletteFlipOk(); ok { - style["palette_flip"] = strconv.FormatBool(v) - } - graph["style"] = style - } - graph["group"] = definition.Groups - graph["include_no_metric_hosts"] = definition.IncludeNoMetricHosts - graph["scope"] = definition.Scopes - graph["include_ungrouped_hosts"] = definition.IncludeUngroupedHosts - - requests := []map[string]interface{}{} - appendTerraformGraphRequests(definition.Requests, &requests) - graph["request"] = requests - - return graph -} - -func resourceDatadogTimeboardRead(d *schema.ResourceData, meta interface{}) error { - id, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - timeboard, err := meta.(*datadog.Client).GetDashboard(id) - if err != nil { - return err - } - log.Printf("[DEBUG] timeboard: %v", timeboard) - d.Set("title", timeboard.GetTitle()) - d.Set("description", timeboard.GetDescription()) - - graphs := []map[string]interface{}{} - 
for _, datadog_graph := range timeboard.Graphs { - graphs = append(graphs, buildTerraformGraph(datadog_graph)) - } - d.Set("graph", graphs) - - templateVariables := []map[string]*string{} - for _, templateVariable := range timeboard.TemplateVariables { - tv := map[string]*string{ - "name": templateVariable.Name, - "prefix": templateVariable.Prefix, - "default": templateVariable.Default, - } - templateVariables = append(templateVariables, tv) - } - d.Set("template_variable", templateVariables) - - return nil -} - -func resourceDatadogTimeboardDelete(d *schema.ResourceData, meta interface{}) error { - id, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - if err = meta.(*datadog.Client).DeleteDashboard(id); err != nil { - return err - } - return nil -} - -func resourceDatadogTimeboardImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - if err := resourceDatadogTimeboardRead(d, meta); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} - -func resourceDatadogTimeboardExists(d *schema.ResourceData, meta interface{}) (b bool, e error) { - id, err := strconv.Atoi(d.Id()) - if err != nil { - return false, err - } - if _, err = meta.(*datadog.Client).GetDashboard(id); err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - return false, nil - } - return false, err - } - return true, nil -} - -func validateAggregatorMethod(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validMethods := map[string]struct{}{ - "avg": {}, - "max": {}, - "min": {}, - "sum": {}, - "last": {}, - } - if _, ok := validMethods[value]; !ok { - errors = append(errors, fmt.Errorf( - `%q contains an invalid method %q. 
Valid methods are either "avg", "max", "min", "sum", or "last"`, k, value)) - } - return -} diff --git a/builtin/providers/datadog/resource_datadog_timeboard_test.go b/builtin/providers/datadog/resource_datadog_timeboard_test.go deleted file mode 100644 index 3673b9506..000000000 --- a/builtin/providers/datadog/resource_datadog_timeboard_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package datadog - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -const config1 = ` -resource "datadog_timeboard" "acceptance_test" { - title = "Acceptance Test Timeboard" - description = "Created using the Datadog provider in Terraform" - read_only = true - graph { - title = "Top System CPU by Docker container" - viz = "toplist" - request { - q = "top(avg:docker.cpu.system{*} by {container_name}, 10, 'mean', 'desc')" - } - } -} -` - -const config2 = ` -resource "datadog_timeboard" "acceptance_test" { - title = "Acceptance Test Timeboard" - description = "Created using the Datadog provider in Terraform" - graph { - title = "Redis latency (ms)" - viz = "timeseries" - request { - q = "avg:redis.info.latency_ms{$host}" - } - } - graph { - title = "Redis memory usage" - viz = "timeseries" - request { - q = "avg:redis.mem.used{$host} - avg:redis.mem.lua{$host}, avg:redis.mem.lua{$host}" - aggregator = "sum" - stacked = true - } - request { - q = "avg:redis.mem.rss{$host}" - } - request { - q = "avg:redis.mem.rss{$host}" - type = "bars" - style { - palette = "warm" - } - aggregator = "max" - } - } - template_variable { - name = "host" - prefix = "host" - } -} -` - -const config3 = ` -resource "datadog_timeboard" "acceptance_test" { - title = "Acceptance Test Timeboard" - description = "Created using the Datadog provider in Terraform" - graph { - title = "Redis latency (ms)" - viz = "timeseries" - request { - q = "avg:redis.info.latency_ms{$host}" - } 
- events = ["sources:capistrano"] - - marker { - label = "High Latency" - type = "error solid" - value = "y > 100" - } - yaxis { - max = "50" - scale = "sqrt" - } - } - graph { - title = "ELB Requests" - viz = "query_value" - request { - q = "sum:aws.elb.request_count{*}.as_count()" - type = "line" - aggregator = "min" - conditional_format { - comparator = ">" - value = "1000" - palette = "white_on_red" - } - conditional_format { - comparator = "<=" - value = "1000" - palette = "white_on_green" - } - } - custom_unit = "hits" - precision = "*" - text_align = "left" - } - template_variable { - name = "host" - prefix = "host" - } -} -` - -func TestAccDatadogTimeboard_update(t *testing.T) { - - step1 := resource.TestStep{ - Config: config1, - Check: resource.ComposeTestCheckFunc( - checkExists, - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "title", "Acceptance Test Timeboard"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "description", "Created using the Datadog provider in Terraform"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "read_only", "true"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.title", "Top System CPU by Docker container"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.viz", "toplist"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.request.0.q", "top(avg:docker.cpu.system{*} by {container_name}, 10, 'mean', 'desc')"), - ), - } - - step2 := resource.TestStep{ - Config: config2, - Check: resource.ComposeTestCheckFunc( - checkExists, - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "title", "Acceptance Test Timeboard"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "description", "Created using the Datadog provider in Terraform"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.title", "Redis latency 
(ms)"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.viz", "timeseries"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.request.0.q", "avg:redis.info.latency_ms{$host}"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.title", "Redis memory usage"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.viz", "timeseries"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.q", "avg:redis.mem.used{$host} - avg:redis.mem.lua{$host}, avg:redis.mem.lua{$host}"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.aggregator", "sum"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.stacked", "true"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.1.q", "avg:redis.mem.rss{$host}"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "template_variable.0.name", "host"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "template_variable.0.prefix", "host"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.2.type", "bars"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.2.q", "avg:redis.mem.rss{$host}"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.2.aggregator", "max"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.2.style.palette", "warm"), - ), - } - - step3 := resource.TestStep{ - Config: config3, - Check: resource.ComposeTestCheckFunc( - checkExists, - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "title", "Acceptance Test Timeboard"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "description", "Created using the Datadog provider in Terraform"), - 
resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.title", "Redis latency (ms)"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.viz", "timeseries"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.request.0.q", "avg:redis.info.latency_ms{$host}"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.events.#", "1"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.marker.0.label", "High Latency"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.marker.0.type", "error solid"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.marker.0.value", "y > 100"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.yaxis.max", "50"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.0.yaxis.scale", "sqrt"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.title", "ELB Requests"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.viz", "query_value"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.q", "sum:aws.elb.request_count{*}.as_count()"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.aggregator", "min"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.type", "line"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.conditional_format.0.comparator", ">"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.conditional_format.0.value", "1000"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.conditional_format.0.palette", "white_on_red"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", 
"graph.1.request.0.conditional_format.1.comparator", "<="), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.conditional_format.1.value", "1000"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.request.0.conditional_format.1.palette", "white_on_green"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.custom_unit", "hits"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.precision", "*"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "graph.1.text_align", "left"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "template_variable.0.name", "host"), - resource.TestCheckResourceAttr("datadog_timeboard.acceptance_test", "template_variable.0.prefix", "host"), - ), - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: checkDestroy, - Steps: []resource.TestStep{step1, step2, step3}, - }) -} - -func checkExists(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - for _, r := range s.RootModule().Resources { - i, _ := strconv.Atoi(r.Primary.ID) - if _, err := client.GetDashboard(i); err != nil { - return fmt.Errorf("Received an error retrieving monitor %s", err) - } - } - return nil -} - -func checkDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - for _, r := range s.RootModule().Resources { - i, _ := strconv.Atoi(r.Primary.ID) - if _, err := client.GetDashboard(i); err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - continue - } - return fmt.Errorf("Received an error retrieving timeboard %s", err) - } - return fmt.Errorf("Timeboard still exists") - } - return nil -} - -func TestValidateAggregatorMethod(t *testing.T) { - validMethods := []string{ - "avg", - "max", - "min", - "sum", - } - for _, v := range validMethods { - _, errors 
:= validateAggregatorMethod(v, "request") - if len(errors) != 0 { - t.Fatalf("%q should be a valid aggregator method: %q", v, errors) - } - } - - invalidMethods := []string{ - "average", - "suM", - "m", - "foo", - } - for _, v := range invalidMethods { - _, errors := validateAggregatorMethod(v, "request") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid aggregator method", v) - } - } - -} diff --git a/builtin/providers/datadog/resource_datadog_user.go b/builtin/providers/datadog/resource_datadog_user.go deleted file mode 100644 index e48be1a2f..000000000 --- a/builtin/providers/datadog/resource_datadog_user.go +++ /dev/null @@ -1,159 +0,0 @@ -package datadog - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -func resourceDatadogUser() *schema.Resource { - return &schema.Resource{ - Create: resourceDatadogUserCreate, - Read: resourceDatadogUserRead, - Update: resourceDatadogUserUpdate, - Delete: resourceDatadogUserDelete, - Exists: resourceDatadogUserExists, - Importer: &schema.ResourceImporter{ - State: resourceDatadogUserImport, - }, - - Schema: map[string]*schema.Schema{ - "disabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "email": { - Type: schema.TypeString, - Required: true, - }, - "handle": { - Type: schema.TypeString, - Required: true, - }, - "is_admin": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "role": { - Type: schema.TypeString, - Optional: true, - }, - "verified": { - Type: schema.TypeBool, - Computed: true, - }, - }, - } -} - -func resourceDatadogUserExists(d *schema.ResourceData, meta interface{}) (b bool, e error) { - // Exists - This is called to verify a resource still exists. It is called prior to Read, - // and lowers the burden of Read to be able to assume the resource exists. 
- client := meta.(*datadog.Client) - - if _, err := client.GetUser(d.Id()); err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - return false, nil - } - return false, err - } - - return true, nil -} - -func resourceDatadogUserCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - var u datadog.User - u.SetDisabled(d.Get("disabled").(bool)) - u.SetEmail(d.Get("email").(string)) - u.SetHandle(d.Get("handle").(string)) - u.SetIsAdmin(d.Get("is_admin").(bool)) - u.SetName(d.Get("name").(string)) - u.SetRole(d.Get("role").(string)) - - // Datadog does not actually delete users, so CreateUser might return a 409. - // We ignore that case and proceed, likely re-enabling the user. - if _, err := client.CreateUser(u.Handle, u.Name); err != nil { - if !strings.Contains(err.Error(), "API error 409 Conflict") { - return fmt.Errorf("error creating user: %s", err.Error()) - } - log.Printf("[INFO] Updating existing Datadog user %q", u.Handle) - } - - if err := client.UpdateUser(u); err != nil { - return fmt.Errorf("error creating user: %s", err.Error()) - } - - d.SetId(u.GetHandle()) - - return resourceDatadogUserRead(d, meta) -} - -func resourceDatadogUserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - u, err := client.GetUser(d.Id()) - if err != nil { - return err - } - - d.Set("disabled", u.GetDisabled()) - d.Set("email", u.GetEmail()) - d.Set("handle", u.GetHandle()) - d.Set("is_admin", u.GetIsAdmin()) - d.Set("name", u.GetName()) - d.Set("role", u.GetRole()) - d.Set("verified", u.GetVerified()) - - return nil -} - -func resourceDatadogUserUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - var u datadog.User - u.SetDisabled(d.Get("disabled").(bool)) - u.SetEmail(d.Get("email").(string)) - u.SetHandle(d.Id()) - u.SetIsAdmin(d.Get("is_admin").(bool)) - u.SetName(d.Get("name").(string)) - u.SetRole(d.Get("role").(string)) - - if err := 
client.UpdateUser(u); err != nil { - return fmt.Errorf("error updating user: %s", err.Error()) - } - - return resourceDatadogUserRead(d, meta) -} - -func resourceDatadogUserDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*datadog.Client) - - // Datadog does not actually delete users, but instead marks them as disabled. - // Bypass DeleteUser if GetUser returns User.Disabled == true, otherwise it will 400. - if u, err := client.GetUser(d.Id()); err == nil && u.GetDisabled() { - return nil - } - - if err := client.DeleteUser(d.Id()); err != nil { - return err - } - - return nil -} - -func resourceDatadogUserImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - if err := resourceDatadogUserRead(d, meta); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/datadog/resource_datadog_user_test.go b/builtin/providers/datadog/resource_datadog_user_test.go deleted file mode 100644 index cdda12721..000000000 --- a/builtin/providers/datadog/resource_datadog_user_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package datadog - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "gopkg.in/zorkian/go-datadog-api.v2" -) - -func TestAccDatadogUser_Updated(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDatadogUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckDatadogUserConfigRequired, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogUserExists("datadog_user.foo"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "email", "test@example.com"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "handle", "test@example.com"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "name", "Test User"), - 
resource.TestCheckResourceAttr( - "datadog_user.foo", "verified", "false"), - ), - }, - resource.TestStep{ - Config: testAccCheckDatadogUserConfigUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatadogUserExists("datadog_user.foo"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "disabled", "true"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "email", "updated@example.com"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "handle", "test@example.com"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "is_admin", "true"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "name", "Updated User"), - resource.TestCheckResourceAttr( - "datadog_user.foo", "verified", "false"), - ), - }, - }, - }) -} - -func testAccCheckDatadogUserDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - - if err := datadogUserDestroyHelper(s, client); err != nil { - return err - } - return nil -} - -func testAccCheckDatadogUserExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*datadog.Client) - if err := datadogUserExistsHelper(s, client); err != nil { - return err - } - return nil - } -} - -const testAccCheckDatadogUserConfigRequired = ` -resource "datadog_user" "foo" { - email = "test@example.com" - handle = "test@example.com" - name = "Test User" -} -` - -const testAccCheckDatadogUserConfigUpdated = ` -resource "datadog_user" "foo" { - disabled = true - email = "updated@example.com" - handle = "test@example.com" - is_admin = true - name = "Updated User" -} -` - -func datadogUserDestroyHelper(s *terraform.State, client *datadog.Client) error { - for _, r := range s.RootModule().Resources { - id := r.Primary.ID - u, err := client.GetUser(id) - - if err != nil { - if strings.Contains(err.Error(), "404 Not Found") { - continue - } - return fmt.Errorf("Received an error retrieving user %s", err) - } - - // Datadog only disables user 
on DELETE - if u.GetDisabled() { - continue - } - return fmt.Errorf("User still exists") - } - return nil -} - -func datadogUserExistsHelper(s *terraform.State, client *datadog.Client) error { - for _, r := range s.RootModule().Resources { - id := r.Primary.ID - if _, err := client.GetUser(id); err != nil { - return fmt.Errorf("Received an error retrieving user %s", err) - } - } - return nil -} diff --git a/builtin/providers/digitalocean/config.go b/builtin/providers/digitalocean/config.go deleted file mode 100644 index 9c9e4cd20..000000000 --- a/builtin/providers/digitalocean/config.go +++ /dev/null @@ -1,97 +0,0 @@ -package digitalocean - -import ( - "context" - "log" - "net/http" - "net/http/httputil" - "time" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/helper/resource" - "golang.org/x/oauth2" -) - -type Config struct { - Token string -} - -// Client() returns a new client for accessing digital ocean. -func (c *Config) Client() (*godo.Client, error) { - tokenSrc := oauth2.StaticTokenSource(&oauth2.Token{ - AccessToken: c.Token, - }) - - client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, tokenSrc)) - - if logging.IsDebugOrHigher() { - client.OnRequestCompleted(logRequestAndResponse) - } - - log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.BaseURL.String()) - - return client, nil -} - -func logRequestAndResponse(req *http.Request, resp *http.Response) { - reqData, err := httputil.DumpRequest(req, true) - if err == nil { - log.Printf("[DEBUG] "+logReqMsg, string(reqData)) - } else { - log.Printf("[ERROR] DigitalOcean API Request error: %#v", err) - } - - respData, err := httputil.DumpResponse(resp, true) - if err == nil { - log.Printf("[DEBUG] "+logRespMsg, string(respData)) - } else { - log.Printf("[ERROR] DigitalOcean API Response error: %#v", err) - } -} - -// waitForAction waits for the action to finish using the resource.StateChangeConf. 
-func waitForAction(client *godo.Client, action *godo.Action) error { - var ( - pending = "in-progress" - target = "completed" - refreshfn = func() (result interface{}, state string, err error) { - a, _, err := client.Actions.Get(context.Background(), action.ID) - if err != nil { - return nil, "", err - } - if a.Status == "errored" { - return a, "errored", nil - } - if a.CompletedAt != nil { - return a, target, nil - } - return a, pending, nil - } - ) - _, err := (&resource.StateChangeConf{ - Pending: []string{pending}, - Refresh: refreshfn, - Target: []string{target}, - - Delay: 10 * time.Second, - Timeout: 60 * time.Minute, - MinTimeout: 3 * time.Second, - - // This is a hack around DO API strangeness. - // https://github.com/hashicorp/terraform/issues/481 - // - NotFoundChecks: 60, - }).WaitForState() - return err -} - -const logReqMsg = `DigitalOcean API Request Details: ----[ REQUEST ]--------------------------------------- -%s ------------------------------------------------------` - -const logRespMsg = `DigitalOcean API Response Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` diff --git a/builtin/providers/digitalocean/datasource_digitalocean_image.go b/builtin/providers/digitalocean/datasource_digitalocean_image.go deleted file mode 100644 index e47e422bf..000000000 --- a/builtin/providers/digitalocean/datasource_digitalocean_image.go +++ /dev/null @@ -1,94 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strconv" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceDigitalOceanImage() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDigitalOceanImageRead, - Schema: map[string]*schema.Schema{ - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "name of the image", - }, - // computed attributes - "image": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - 
Description: "slug or id of the image", - }, - "min_disk_size": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Description: "minimum disk size required by the image", - }, - "private": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Is the image private or non-private", - }, - "regions": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: "list of the regions that the image is available in", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "type of the image", - }, - }, - } -} - -func dataSourceDigitalOceanImageRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - opts := &godo.ListOptions{} - - images, _, err := client.Images.ListUser(context.Background(), opts) - if err != nil { - d.SetId("") - return err - } - image, err := findImageByName(images, d.Get("name").(string)) - - if err != nil { - return err - } - - d.SetId(image.Name) - d.Set("name", image.Name) - d.Set("image", strconv.Itoa(image.ID)) - d.Set("min_disk_size", image.MinDiskSize) - d.Set("private", !image.Public) - d.Set("regions", image.Regions) - d.Set("type", image.Type) - - return nil -} - -func findImageByName(images []godo.Image, name string) (*godo.Image, error) { - results := make([]godo.Image, 0) - for _, v := range images { - if v.Name == name { - results = append(results, v) - } - } - if len(results) == 1 { - return &results[0], nil - } - if len(results) == 0 { - return nil, fmt.Errorf("no user image found with name %s", name) - } - return nil, fmt.Errorf("too many user images found with name %s (found %d, expected 1)", name, len(results)) -} diff --git a/builtin/providers/digitalocean/datasource_digitalocean_image_test.go b/builtin/providers/digitalocean/datasource_digitalocean_image_test.go deleted file mode 100644 index 18a1e0865..000000000 --- 
a/builtin/providers/digitalocean/datasource_digitalocean_image_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - "regexp" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanImage_Basic(t *testing.T) { - var droplet godo.Droplet - var snapshotsId []int - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - takeSnapshotsOfDroplet(rInt, &droplet, &snapshotsId), - ), - }, - { - Config: testAccCheckDigitalOceanImageConfig_basic(rInt, 1), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "data.digitalocean_image.foobar", "name", fmt.Sprintf("snap-%d-1", rInt)), - resource.TestCheckResourceAttr( - "data.digitalocean_image.foobar", "min_disk_size", "20"), - resource.TestCheckResourceAttr( - "data.digitalocean_image.foobar", "private", "true"), - resource.TestCheckResourceAttr( - "data.digitalocean_image.foobar", "type", "snapshot"), - ), - }, - { - Config: testAccCheckDigitalOceanImageConfig_basic(rInt, 0), - ExpectError: regexp.MustCompile(`.*too many user images found with name snap-.*\ .found 2, expected 1.`), - }, - { - Config: testAccCheckDigitalOceanImageConfig_nonexisting(rInt), - Destroy: false, - ExpectError: regexp.MustCompile(`.*no user image found with name snap-.*-nonexisting`), - }, - { - Config: " ", - Check: resource.ComposeTestCheckFunc( - deleteSnapshots(&snapshotsId), - ), - }, - }, - }) -} - -func takeSnapshotsOfDroplet(rInt int, droplet 
*godo.Droplet, snapshotsId *[]int) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - for i := 0; i < 3; i++ { - err := takeSnapshotOfDroplet(rInt, i%2, droplet) - if err != nil { - return err - } - } - retrieveDroplet, _, err := client.Droplets.Get(context.Background(), (*droplet).ID) - if err != nil { - return err - } - *snapshotsId = retrieveDroplet.SnapshotIDs - return nil - } -} - -func takeSnapshotOfDroplet(rInt, sInt int, droplet *godo.Droplet) error { - client := testAccProvider.Meta().(*godo.Client) - action, _, err := client.DropletActions.Snapshot(context.Background(), (*droplet).ID, fmt.Sprintf("snap-%d-%d", rInt, sInt)) - if err != nil { - return err - } - waitForAction(client, action) - return nil -} - -func deleteSnapshots(snapshotsId *[]int) resource.TestCheckFunc { - return func(s *terraform.State) error { - log.Printf("XXX Deleting snaps") - client := testAccProvider.Meta().(*godo.Client) - snapshots := *snapshotsId - for _, value := range snapshots { - log.Printf("XXX Deleting %d", value) - _, err := client.Images.Delete(context.Background(), value) - if err != nil { - return err - } - } - return nil - } -} - -func testAccCheckDigitalOceanImageConfig_basic(rInt, sInt int) string { - return fmt.Sprintf(` -data "digitalocean_image" "foobar" { - name = "snap-%d-%d" -} -`, rInt, sInt) -} - -func testAccCheckDigitalOceanImageConfig_nonexisting(rInt int) string { - return fmt.Sprintf(` -data "digitalocean_image" "foobar" { - name = "snap-%d-nonexisting" -} -`, rInt) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_domain_test.go b/builtin/providers/digitalocean/import_digitalocean_domain_test.go deleted file mode 100644 index 171893159..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_domain_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package digitalocean - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanDomain_importBasic(t *testing.T) { - resourceName := "digitalocean_domain.foobar" - domainName := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDomainDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDigitalOceanDomainConfig_basic, domainName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "ip_address"}, //we ignore the IP Address as we do not set to state - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_droplet_test.go b/builtin/providers/digitalocean/import_digitalocean_droplet_test.go deleted file mode 100644 index f9877419f..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_droplet_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package digitalocean - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanDroplet_importBasic(t *testing.T) { - resourceName := "digitalocean_droplet.foobar" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "ssh_keys", "user_data", "resize_disk"}, //we ignore the ssh_keys, resize_disk and user_data as we do not set to state - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_floating_ip_test.go 
b/builtin/providers/digitalocean/import_digitalocean_floating_ip_test.go deleted file mode 100644 index 4bf4c132a..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_floating_ip_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package digitalocean - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanFloatingIP_importBasicRegion(t *testing.T) { - resourceName := "digitalocean_floating_ip.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanFloatingIPConfig_region, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccDigitalOceanFloatingIP_importBasicDroplet(t *testing.T) { - resourceName := "digitalocean_floating_ip.foobar" - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanFloatingIPConfig_droplet(rInt), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_ssh_key_test.go b/builtin/providers/digitalocean/import_digitalocean_ssh_key_test.go deleted file mode 100644 index 7c93f0076..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_ssh_key_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package digitalocean - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanSSHKey_importBasic(t *testing.T) { - resourceName := "digitalocean_ssh_key.foobar" - rInt := 
acctest.RandInt() - publicKeyMaterial, _, err := acctest.RandSSHKeyPair("digitalocean@ssh-acceptance-test") - if err != nil { - t.Fatalf("Cannot generate test SSH key pair: %s", err) - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanSSHKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanSSHKeyConfig_basic(rInt, publicKeyMaterial), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_tag_test.go b/builtin/providers/digitalocean/import_digitalocean_tag_test.go deleted file mode 100644 index aa0423341..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_tag_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package digitalocean - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanTag_importBasic(t *testing.T) { - resourceName := "digitalocean_tag.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanTagDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanTagConfig_basic, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/import_digitalocean_volume_test.go b/builtin/providers/digitalocean/import_digitalocean_volume_test.go deleted file mode 100644 index e3fd1898f..000000000 --- a/builtin/providers/digitalocean/import_digitalocean_volume_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package digitalocean - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDigitalOceanVolume_importBasic(t *testing.T) { - resourceName := 
"digitalocean_volume.foobar" - volumeName := fmt.Sprintf("volume-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDigitalOceanVolumeConfig_basic, volumeName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/digitalocean/loadbalancer.go b/builtin/providers/digitalocean/loadbalancer.go deleted file mode 100644 index efacf7ae5..000000000 --- a/builtin/providers/digitalocean/loadbalancer.go +++ /dev/null @@ -1,146 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/resource" -) - -func loadbalancerStateRefreshFunc(client *godo.Client, loadbalancerId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - lb, _, err := client.LoadBalancers.Get(context.Background(), loadbalancerId) - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in LoadbalancerStateRefreshFunc to DigitalOcean for Load Balancer '%s': %s", loadbalancerId, err) - } - - return lb, lb.Status, nil - } -} - -func expandStickySessions(config []interface{}) *godo.StickySessions { - stickysessionConfig := config[0].(map[string]interface{}) - - stickySession := &godo.StickySessions{ - Type: stickysessionConfig["type"].(string), - } - - if v, ok := stickysessionConfig["cookie_name"]; ok { - stickySession.CookieName = v.(string) - } - - if v, ok := stickysessionConfig["cookie_ttl_seconds"]; ok { - stickySession.CookieTtlSeconds = v.(int) - } - - return stickySession -} - -func expandHealthCheck(config []interface{}) *godo.HealthCheck { - healthcheckConfig := config[0].(map[string]interface{}) - - healthcheck := &godo.HealthCheck{ - Protocol: 
healthcheckConfig["protocol"].(string), - Port: healthcheckConfig["port"].(int), - CheckIntervalSeconds: healthcheckConfig["check_interval_seconds"].(int), - ResponseTimeoutSeconds: healthcheckConfig["response_timeout_seconds"].(int), - UnhealthyThreshold: healthcheckConfig["unhealthy_threshold"].(int), - HealthyThreshold: healthcheckConfig["healthy_threshold"].(int), - } - - if v, ok := healthcheckConfig["path"]; ok { - healthcheck.Path = v.(string) - } - - return healthcheck -} - -func expandForwardingRules(config []interface{}) []godo.ForwardingRule { - forwardingRules := make([]godo.ForwardingRule, 0, len(config)) - - for _, rawRule := range config { - rule := rawRule.(map[string]interface{}) - - r := godo.ForwardingRule{ - EntryPort: rule["entry_port"].(int), - EntryProtocol: rule["entry_protocol"].(string), - TargetPort: rule["target_port"].(int), - TargetProtocol: rule["target_protocol"].(string), - TlsPassthrough: rule["tls_passthrough"].(bool), - } - - if v, ok := rule["certificate_id"]; ok { - r.CertificateID = v.(string) - } - - forwardingRules = append(forwardingRules, r) - - } - - return forwardingRules -} - -func flattenDropletIds(list []int) []interface{} { - vs := make([]interface{}, 0, len(list)) - for _, v := range list { - vs = append(vs, v) - } - return vs -} - -func flattenHealthChecks(health *godo.HealthCheck) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if health != nil { - - r := make(map[string]interface{}) - r["protocol"] = (*health).Protocol - r["port"] = (*health).Port - r["path"] = (*health).Path - r["check_interval_seconds"] = (*health).CheckIntervalSeconds - r["response_timeout_seconds"] = (*health).ResponseTimeoutSeconds - r["unhealthy_threshold"] = (*health).UnhealthyThreshold - r["healthy_threshold"] = (*health).HealthyThreshold - - result = append(result, r) - } - - return result -} - -func flattenStickySessions(session *godo.StickySessions) []map[string]interface{} { - result := 
make([]map[string]interface{}, 0, 1) - - if session != nil { - - r := make(map[string]interface{}) - r["type"] = (*session).Type - r["cookie_name"] = (*session).CookieName - r["cookie_ttl_seconds"] = (*session).CookieTtlSeconds - - result = append(result, r) - } - - return result -} - -func flattenForwardingRules(rules []godo.ForwardingRule) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if rules != nil { - for _, rule := range rules { - r := make(map[string]interface{}) - r["entry_protocol"] = rule.EntryProtocol - r["entry_port"] = rule.EntryPort - r["target_protocol"] = rule.TargetProtocol - r["target_port"] = rule.TargetPort - r["certificate_id"] = rule.CertificateID - r["tls_passthrough"] = rule.TlsPassthrough - - result = append(result, r) - } - } - - return result -} diff --git a/builtin/providers/digitalocean/provider.go b/builtin/providers/digitalocean/provider.go deleted file mode 100644 index 9d0b9af1b..000000000 --- a/builtin/providers/digitalocean/provider.go +++ /dev/null @@ -1,46 +0,0 @@ -package digitalocean - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a schema.Provider for DigitalOcean. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DIGITALOCEAN_TOKEN", nil), - Description: "The token key for API operations.", - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "digitalocean_image": dataSourceDigitalOceanImage(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "digitalocean_certificate": resourceDigitalOceanCertificate(), - "digitalocean_domain": resourceDigitalOceanDomain(), - "digitalocean_droplet": resourceDigitalOceanDroplet(), - "digitalocean_floating_ip": resourceDigitalOceanFloatingIp(), - "digitalocean_loadbalancer": resourceDigitalOceanLoadbalancer(), - "digitalocean_record": resourceDigitalOceanRecord(), - "digitalocean_ssh_key": resourceDigitalOceanSSHKey(), - "digitalocean_tag": resourceDigitalOceanTag(), - "digitalocean_volume": resourceDigitalOceanVolume(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Token: d.Get("token").(string), - } - - return config.Client() -} diff --git a/builtin/providers/digitalocean/provider_test.go b/builtin/providers/digitalocean/provider_test.go deleted file mode 100644 index fc5f78a2b..000000000 --- a/builtin/providers/digitalocean/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package digitalocean - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "digitalocean": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} 
- -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DIGITALOCEAN_TOKEN"); v == "" { - t.Fatal("DIGITALOCEAN_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_certificate.go b/builtin/providers/digitalocean/resource_digitalocean_certificate.go deleted file mode 100644 index 264ab5297..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_certificate.go +++ /dev/null @@ -1,116 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanCertificateCreate, - Read: resourceDigitalOceanCertificateRead, - Delete: resourceDigitalOceanCertificateDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "private_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "leaf_certificate": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "certificate_chain": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "not_after": { - Type: schema.TypeString, - Computed: true, - }, - - "sha1_fingerprint": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func buildCertificateRequest(d *schema.ResourceData) (*godo.CertificateRequest, error) { - req := &godo.CertificateRequest{ - Name: d.Get("name").(string), - PrivateKey: d.Get("private_key").(string), - LeafCertificate: d.Get("leaf_certificate").(string), - CertificateChain: d.Get("certificate_chain").(string), - } - - return req, nil -} - -func resourceDigitalOceanCertificateCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] 
Create a Certificate Request") - - certReq, err := buildCertificateRequest(d) - if err != nil { - return err - } - - log.Printf("[DEBUG] Certificate Create: %#v", certReq) - cert, _, err := client.Certificates.Create(context.Background(), certReq) - if err != nil { - return fmt.Errorf("Error creating Certificate: %s", err) - } - - d.SetId(cert.ID) - - return resourceDigitalOceanCertificateRead(d, meta) -} - -func resourceDigitalOceanCertificateRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Reading the details of the Certificate %s", d.Id()) - cert, _, err := client.Certificates.Get(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving Certificate: %s", err) - } - - d.Set("name", cert.Name) - d.Set("not_after", cert.NotAfter) - d.Set("sha1_fingerprint", cert.SHA1Fingerprint) - - return nil - -} - -func resourceDigitalOceanCertificateDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Deleting Certificate: %s", d.Id()) - _, err := client.Certificates.Delete(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting Certificate: %s", err) - } - - return nil - -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_certificate_test.go b/builtin/providers/digitalocean/resource_digitalocean_certificate_test.go deleted file mode 100644 index 270d01a24..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_certificate_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanCertificate_Basic(t *testing.T) { - var cert godo.Certificate - rInt := acctest.RandInt() - leafCertMaterial, privateKeyMaterial, 
err := acctest.RandTLSCert("Acme Co") - if err != nil { - t.Fatalf("Cannot generate test TLS certificate: %s", err) - } - rootCertMaterial, _, err := acctest.RandTLSCert("Acme Go") - if err != nil { - t.Fatalf("Cannot generate test TLS certificate: %s", err) - } - certChainMaterial := fmt.Sprintf("%s\n%s", strings.TrimSpace(rootCertMaterial), leafCertMaterial) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanCertificateConfig_basic(rInt, privateKeyMaterial, leafCertMaterial, certChainMaterial), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanCertificateExists("digitalocean_certificate.foobar", &cert), - resource.TestCheckResourceAttr( - "digitalocean_certificate.foobar", "name", fmt.Sprintf("certificate-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_certificate.foobar", "private_key", fmt.Sprintf("%s\n", privateKeyMaterial)), - resource.TestCheckResourceAttr( - "digitalocean_certificate.foobar", "leaf_certificate", fmt.Sprintf("%s\n", leafCertMaterial)), - resource.TestCheckResourceAttr( - "digitalocean_certificate.foobar", "certificate_chain", fmt.Sprintf("%s\n", certChainMaterial)), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanCertificateDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_certificate" { - continue - } - - _, _, err := client.Certificates.Get(context.Background(), rs.Primary.ID) - - if err != nil && !strings.Contains(err.Error(), "404") { - return fmt.Errorf( - "Error waiting for certificate (%s) to be destroyed: %s", - rs.Primary.ID, err) - } - } - - return nil -} - -func testAccCheckDigitalOceanCertificateExists(n string, cert *godo.Certificate) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Certificate ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - c, _, err := client.Certificates.Get(context.Background(), rs.Primary.ID) - - if err != nil { - return err - } - - if c.ID != rs.Primary.ID { - return fmt.Errorf("Certificate not found") - } - - *cert = *c - - return nil - } -} - -func testAccCheckDigitalOceanCertificateConfig_basic(rInt int, privateKeyMaterial, leafCert, certChain string) string { - return fmt.Sprintf(` -resource "digitalocean_certificate" "foobar" { - name = "certificate-%d" - private_key = < 0 { - opts.SSHKeys = make([]godo.DropletCreateSSHKey, 0, sshKeys) - for i := 0; i < sshKeys; i++ { - key := fmt.Sprintf("ssh_keys.%d", i) - sshKeyRef := d.Get(key).(string) - - var sshKey godo.DropletCreateSSHKey - // sshKeyRef can be either an ID or a fingerprint - if id, err := strconv.Atoi(sshKeyRef); err == nil { - sshKey.ID = id - } else { - sshKey.Fingerprint = sshKeyRef - } - - opts.SSHKeys = append(opts.SSHKeys, sshKey) - } - } - - log.Printf("[DEBUG] Droplet create configuration: %#v", opts) - - droplet, _, err := client.Droplets.Create(context.Background(), opts) - - if err != nil { - return fmt.Errorf("Error creating droplet: %s", err) - } - - // Assign the droplets id - d.SetId(strconv.Itoa(droplet.ID)) - - log.Printf("[INFO] Droplet ID: %s", d.Id()) - - _, err = WaitForDropletAttribute(d, "active", []string{"new"}, "status", meta) - if err != nil { - return fmt.Errorf( - "Error waiting for droplet (%s) to become ready: %s", d.Id(), err) - } - - // droplet needs to be active in order to set tags - err = setTags(client, d) - if err != nil { - return fmt.Errorf("Error setting tags: %s", err) - } - - return resourceDigitalOceanDropletRead(d, meta) -} - -func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error { - 
client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid droplet id: %v", err) - } - - // Retrieve the droplet properties for updating the state - droplet, resp, err := client.Droplets.Get(context.Background(), id) - if err != nil { - // check if the droplet no longer exists. - if resp != nil && resp.StatusCode == 404 { - log.Printf("[WARN] DigitalOcean Droplet (%s) not found", d.Id()) - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving droplet: %s", err) - } - - _, err = strconv.Atoi(d.Get("image").(string)) - if err == nil || droplet.Image.Slug == "" { - // The image field is provided as an ID (number), or - // the image bash no slug. In both cases we store it as an ID. - d.Set("image", droplet.Image.ID) - } else { - d.Set("image", droplet.Image.Slug) - } - - d.Set("name", droplet.Name) - d.Set("region", droplet.Region.Slug) - d.Set("size", droplet.Size.Slug) - d.Set("price_hourly", droplet.Size.PriceHourly) - d.Set("price_monthly", droplet.Size.PriceMonthly) - d.Set("disk", droplet.Disk) - d.Set("vcpus", droplet.Vcpus) - d.Set("status", droplet.Status) - d.Set("locked", strconv.FormatBool(droplet.Locked)) - - if len(droplet.VolumeIDs) > 0 { - vlms := make([]interface{}, 0, len(droplet.VolumeIDs)) - for _, vid := range droplet.VolumeIDs { - vlms = append(vlms, vid) - } - d.Set("volume_ids", vlms) - } - - if publicIPv6 := findIPv6AddrByType(droplet, "public"); publicIPv6 != "" { - d.Set("ipv6", true) - d.Set("ipv6_address", strings.ToLower(publicIPv6)) - d.Set("ipv6_address_private", findIPv6AddrByType(droplet, "private")) - } - - d.Set("ipv4_address", findIPv4AddrByType(droplet, "public")) - - if privateIPv4 := findIPv4AddrByType(droplet, "private"); privateIPv4 != "" { - d.Set("private_networking", true) - d.Set("ipv4_address_private", privateIPv4) - } - - // Initialize the connection info - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": findIPv4AddrByType(droplet, "public"), 
- }) - - d.Set("tags", droplet.Tags) - - return nil -} - -func findIPv6AddrByType(d *godo.Droplet, addrType string) string { - for _, addr := range d.Networks.V6 { - if addr.Type == addrType { - return addr.IPAddress - } - } - return "" -} - -func findIPv4AddrByType(d *godo.Droplet, addrType string) string { - for _, addr := range d.Networks.V4 { - if addr.Type == addrType { - return addr.IPAddress - } - } - return "" -} - -func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid droplet id: %v", err) - } - - resize_disk := d.Get("resize_disk").(bool) - if d.HasChange("size") || d.HasChange("resize_disk") && resize_disk { - newSize := d.Get("size") - - _, _, err = client.DropletActions.PowerOff(context.Background(), id) - if err != nil && !strings.Contains(err.Error(), "Droplet is already powered off") { - return fmt.Errorf( - "Error powering off droplet (%s): %s", d.Id(), err) - } - - // Wait for power off - _, err = WaitForDropletAttribute(d, "off", []string{"active"}, "status", client) - if err != nil { - return fmt.Errorf( - "Error waiting for droplet (%s) to become powered off: %s", d.Id(), err) - } - - // Resize the droplet - action, _, err := client.DropletActions.Resize(context.Background(), id, newSize.(string), resize_disk) - if err != nil { - newErr := powerOnAndWait(d, meta) - if newErr != nil { - return fmt.Errorf( - "Error powering on droplet (%s) after failed resize: %s", d.Id(), err) - } - return fmt.Errorf( - "Error resizing droplet (%s): %s", d.Id(), err) - } - - // Wait for the resize action to complete. 
- if err := waitForAction(client, action); err != nil { - newErr := powerOnAndWait(d, meta) - if newErr != nil { - return fmt.Errorf( - "Error powering on droplet (%s) after waiting for resize to finish: %s", d.Id(), err) - } - return fmt.Errorf( - "Error waiting for resize droplet (%s) to finish: %s", d.Id(), err) - } - - _, _, err = client.DropletActions.PowerOn(context.Background(), id) - - if err != nil { - return fmt.Errorf( - "Error powering on droplet (%s) after resize: %s", d.Id(), err) - } - - // Wait for power off - _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", meta) - if err != nil { - return err - } - } - - if d.HasChange("name") { - oldName, newName := d.GetChange("name") - - // Rename the droplet - _, _, err = client.DropletActions.Rename(context.Background(), id, newName.(string)) - - if err != nil { - return fmt.Errorf( - "Error renaming droplet (%s): %s", d.Id(), err) - } - - // Wait for the name to change - _, err = WaitForDropletAttribute( - d, newName.(string), []string{"", oldName.(string)}, "name", meta) - - if err != nil { - return fmt.Errorf( - "Error waiting for rename droplet (%s) to finish: %s", d.Id(), err) - } - } - - // As there is no way to disable private networking, - // we only check if it needs to be enabled - if d.HasChange("private_networking") && d.Get("private_networking").(bool) { - _, _, err = client.DropletActions.EnablePrivateNetworking(context.Background(), id) - - if err != nil { - return fmt.Errorf( - "Error enabling private networking for droplet (%s): %s", d.Id(), err) - } - - // Wait for the private_networking to turn on - _, err = WaitForDropletAttribute( - d, "true", []string{"", "false"}, "private_networking", meta) - - return fmt.Errorf( - "Error waiting for private networking to be enabled on for droplet (%s): %s", d.Id(), err) - } - - // As there is no way to disable IPv6, we only check if it needs to be enabled - if d.HasChange("ipv6") && d.Get("ipv6").(bool) { - _, _, err = 
client.DropletActions.EnableIPv6(context.Background(), id) - - if err != nil { - return fmt.Errorf( - "Error turning on ipv6 for droplet (%s): %s", d.Id(), err) - } - - // Wait for ipv6 to turn on - _, err = WaitForDropletAttribute( - d, "true", []string{"", "false"}, "ipv6", meta) - - if err != nil { - return fmt.Errorf( - "Error waiting for ipv6 to be turned on for droplet (%s): %s", d.Id(), err) - } - } - - if d.HasChange("tags") { - err = setTags(client, d) - if err != nil { - return fmt.Errorf("Error updating tags: %s", err) - } - } - - if d.HasChange("volume_ids") { - oldIDs, newIDs := d.GetChange("volume_ids") - newSet := func(ids []interface{}) map[string]struct{} { - out := make(map[string]struct{}, len(ids)) - for _, id := range ids { - out[id.(string)] = struct{}{} - } - return out - } - // leftDiff returns all elements in Left that are not in Right - leftDiff := func(left, right map[string]struct{}) map[string]struct{} { - out := make(map[string]struct{}) - for l := range left { - if _, ok := right[l]; !ok { - out[l] = struct{}{} - } - } - return out - } - oldIDSet := newSet(oldIDs.([]interface{})) - newIDSet := newSet(newIDs.([]interface{})) - for volumeID := range leftDiff(newIDSet, oldIDSet) { - action, _, err := client.StorageActions.Attach(context.Background(), volumeID, id) - if err != nil { - return fmt.Errorf("Error attaching volume %q to droplet (%s): %s", volumeID, d.Id(), err) - } - // can't fire >1 action at a time, so waiting for each is OK - if err := waitForAction(client, action); err != nil { - return fmt.Errorf("Error waiting for volume %q to attach to droplet (%s): %s", volumeID, d.Id(), err) - } - } - for volumeID := range leftDiff(oldIDSet, newIDSet) { - action, _, err := client.StorageActions.DetachByDropletID(context.Background(), volumeID, id) - if err != nil { - return fmt.Errorf("Error detaching volume %q from droplet (%s): %s", volumeID, d.Id(), err) - } - // can't fire >1 action at a time, so waiting for each is OK - if err := 
waitForAction(client, action); err != nil { - return fmt.Errorf("Error waiting for volume %q to detach from droplet (%s): %s", volumeID, d.Id(), err) - } - } - } - - return resourceDigitalOceanDropletRead(d, meta) -} - -func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid droplet id: %v", err) - } - - _, err = WaitForDropletAttribute( - d, "false", []string{"", "true"}, "locked", meta) - - if err != nil { - return fmt.Errorf( - "Error waiting for droplet to be unlocked for destroy (%s): %s", d.Id(), err) - } - - log.Printf("[INFO] Deleting droplet: %s", d.Id()) - - // Destroy the droplet - _, err = client.Droplets.Delete(context.Background(), id) - - // Handle remotely destroyed droplets - if err != nil && strings.Contains(err.Error(), "404 Not Found") { - return nil - } - - if err != nil { - return fmt.Errorf("Error deleting droplet: %s", err) - } - - return nil -} - -func WaitForDropletAttribute( - d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { - // Wait for the droplet so we can get the networking attributes - // that show up after a while - log.Printf( - "[INFO] Waiting for droplet (%s) to have %s of %s", - d.Id(), attribute, target) - - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newDropletStateRefreshFunc(d, attribute, meta), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - - // This is a hack around DO API strangeness. 
- // https://github.com/hashicorp/terraform/issues/481 - // - NotFoundChecks: 60, - } - - return stateConf.WaitForState() -} - -// TODO This function still needs a little more refactoring to make it -// cleaner and more efficient -func newDropletStateRefreshFunc( - d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*godo.Client) - return func() (interface{}, string, error) { - id, err := strconv.Atoi(d.Id()) - if err != nil { - return nil, "", err - } - - err = resourceDigitalOceanDropletRead(d, meta) - if err != nil { - return nil, "", err - } - - // If the droplet is locked, continue waiting. We can - // only perform actions on unlocked droplets, so it's - // pointless to look at that status - if d.Get("locked").(string) == "true" { - log.Println("[DEBUG] Droplet is locked, skipping status check and retrying") - return nil, "", nil - } - - // See if we can access our attribute - if attr, ok := d.GetOk(attribute); ok { - // Retrieve the droplet properties - droplet, _, err := client.Droplets.Get(context.Background(), id) - if err != nil { - return nil, "", fmt.Errorf("Error retrieving droplet: %s", err) - } - - return &droplet, attr.(string), nil - } - - return nil, "", nil - } -} - -// Powers on the droplet and waits for it to be active -func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid droplet id: %v", err) - } - - client := meta.(*godo.Client) - _, _, err = client.DropletActions.PowerOn(context.Background(), id) - if err != nil { - return err - } - - // Wait for power on - _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", client) - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go deleted file mode 100644 index 73290400c..000000000 
--- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ /dev/null @@ -1,631 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strconv" - "strings" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanDroplet_Basic(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "512mb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "price_hourly", "0.00744"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "price_monthly", "5"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "image", "centos-7-x64"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "user_data", "foobar"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_WithID(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - // TODO: not hardcode this as it will change over time - centosID := 22995941 - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { 
- Config: testAccCheckDigitalOceanDropletConfig_withID(centosID, rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - ), - }, - }, - }) -} -func TestAccDigitalOceanDroplet_withSSH(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_withSSH(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "512mb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "image", "centos-7-x64"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "user_data", "foobar"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_Update(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - ), - }, - - { - Config: 
testAccCheckDigitalOceanDropletConfig_RenameAndResize(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletRenamedAndResized(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("baz-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "1gb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "disk", "30"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_ResizeWithOutDisk(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - ), - }, - - { - Config: testAccCheckDigitalOceanDropletConfig_resize_without_disk(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletResizeWithOutDisk(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "1gb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "disk", "20"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_ResizeOnlyDisk(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - ), - }, - - { - Config: testAccCheckDigitalOceanDropletConfig_resize_without_disk(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletResizeWithOutDisk(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "1gb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "disk", "20"), - ), - }, - - { - Config: testAccCheckDigitalOceanDropletConfig_resize_only_disk(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletResizeOnlyDisk(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "size", "1gb"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "disk", "30"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_UpdateUserData(t *testing.T) { - var afterCreate, afterUpdate godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterCreate), - testAccCheckDigitalOceanDropletAttributes(&afterCreate), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - ), - }, - - { - Config: testAccCheckDigitalOceanDropletConfig_userdata_update(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterUpdate), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", - "user_data", - "foobar foobar"), - testAccCheckDigitalOceanDropletRecreated( - t, &afterCreate, &afterUpdate), - ), - }, - }, - }) -} - -func TestAccDigitalOceanDroplet_UpdateTags(t *testing.T) { - var afterCreate, afterUpdate godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterCreate), - testAccCheckDigitalOceanDropletAttributes(&afterCreate), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - ), - }, - - { - Config: testAccCheckDigitalOceanDropletConfig_tag_update(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterUpdate), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "name", fmt.Sprintf("foo-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", - "tags.#", - "1"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", - "tags.0", - "barbaz"), - ), - }, - }, - 
}) -} - -func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) { - var droplet godo.Droplet - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanDropletDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanDropletConfig_PrivateNetworkingIpv6(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(&droplet), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "private_networking", "true"), - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "ipv6", "true"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_droplet" { - continue - } - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - // Try to find the Droplet - _, _, err = client.Droplets.Get(context.Background(), id) - - // Wait - - if err != nil && !strings.Contains(err.Error(), "404") { - return fmt.Errorf( - "Error waiting for droplet (%s) to be destroyed: %s", - rs.Primary.ID, err) - } - } - - return nil -} - -func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if droplet.Image.Slug != "centos-7-x64" { - return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) - } - - if droplet.Size.Slug != "512mb" { - return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug) - } - - if droplet.Size.PriceHourly != 0.00744 { - return fmt.Errorf("Bad price_hourly: %v", droplet.Size.PriceHourly) - } - - if droplet.Size.PriceMonthly != 5.0 { - return fmt.Errorf("Bad price_monthly: %v", 
droplet.Size.PriceMonthly) - } - - if droplet.Region.Slug != "nyc3" { - return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug) - } - - return nil - } -} - -func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if droplet.Size.Slug != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) - } - - if droplet.Disk != 30 { - return fmt.Errorf("Bad disk: %d", droplet.Disk) - } - - return nil - } -} - -func testAccCheckDigitalOceanDropletResizeWithOutDisk(droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if droplet.Size.Slug != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) - } - - if droplet.Disk != 20 { - return fmt.Errorf("Bad disk: %d", droplet.Disk) - } - - return nil - } -} - -func testAccCheckDigitalOceanDropletResizeOnlyDisk(droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if droplet.Size.Slug != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) - } - - if droplet.Disk != 30 { - return fmt.Errorf("Bad disk: %d", droplet.Disk) - } - - return nil - } -} - -func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if droplet.Image.Slug != "centos-7-x64" { - return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) - } - - if droplet.Size.Slug != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug) - } - - if droplet.Region.Slug != "sgp1" { - return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug) - } - - if findIPv4AddrByType(droplet, "private") == "" { - return fmt.Errorf("No ipv4 private: %s", findIPv4AddrByType(droplet, "private")) - } - - // if droplet.IPV6Address("private") == "" { - // return fmt.Errorf("No ipv6 private: %s", droplet.IPV6Address("private")) - // } - - if findIPv4AddrByType(droplet, 
"public") == "" { - return fmt.Errorf("No ipv4 public: %s", findIPv4AddrByType(droplet, "public")) - } - - if findIPv6AddrByType(droplet, "public") == "" { - return fmt.Errorf("No ipv6 public: %s", findIPv6AddrByType(droplet, "public")) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_droplet" { - continue - } - if rs.Primary.Attributes["ipv6_address"] != strings.ToLower(findIPv6AddrByType(droplet, "public")) { - return fmt.Errorf("IPV6 Address should be lowercase") - } - - } - - return nil - } -} - -func testAccCheckDigitalOceanDropletExists(n string, droplet *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Droplet ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - // Try to find the Droplet - retrieveDroplet, _, err := client.Droplets.Get(context.Background(), id) - - if err != nil { - return err - } - - if strconv.Itoa(retrieveDroplet.ID) != rs.Primary.ID { - return fmt.Errorf("Droplet not found") - } - - *droplet = *retrieveDroplet - - return nil - } -} - -func testAccCheckDigitalOceanDropletRecreated(t *testing.T, - before, after *godo.Droplet) resource.TestCheckFunc { - return func(s *terraform.State) error { - if before.ID == after.ID { - t.Fatalf("Expected change of droplet IDs, but both were %v", before.ID) - } - return nil - } -} - -func testAccCheckDigitalOceanDropletConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar" -}`, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_withID(imageID, rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - 
size = "512mb" - image = "%d" - region = "nyc3" - user_data = "foobar" -}`, rInt, imageID) -} - -func testAccCheckDigitalOceanDropletConfig_withSSH(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_ssh_key" "foobar" { - name = "foobar-%d" - public_key = "%s" -} - -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar" - ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] -}`, rInt, testAccValidPublicKey, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_tag_update(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_tag" "barbaz" { - name = "barbaz" -} - -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar" - tags = ["${digitalocean_tag.barbaz.id}"] -} -`, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_userdata_update(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar foobar" -} -`, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_RenameAndResize(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "baz-%d" - size = "1gb" - image = "centos-7-x64" - region = "nyc3" -} -`, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_resize_without_disk(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "1gb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar" - resize_disk = false -} -`, rInt) -} - -func testAccCheckDigitalOceanDropletConfig_resize_only_disk(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "1gb" - image = "centos-7-x64" - region = "nyc3" - user_data = "foobar" - resize_disk = true -} -`, rInt) -} - -// IPV6 only in singapore 
-func testAccCheckDigitalOceanDropletConfig_PrivateNetworkingIpv6(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "baz-%d" - size = "1gb" - image = "centos-7-x64" - region = "sgp1" - ipv6 = true - private_networking = true -} -`, rInt) -} - -var testAccValidPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR` diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go deleted file mode 100644 index 0f4a8d346..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go +++ /dev/null @@ -1,201 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanFloatingIp() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanFloatingIpCreate, - Update: resourceDigitalOceanFloatingIpUpdate, - Read: resourceDigitalOceanFloatingIpRead, - Delete: resourceDigitalOceanFloatingIpDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "droplet_id": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } -} - -func resourceDigitalOceanFloatingIpCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - 
log.Printf("[INFO] Create a FloatingIP In a Region") - regionOpts := &godo.FloatingIPCreateRequest{ - Region: d.Get("region").(string), - } - - log.Printf("[DEBUG] FloatingIP Create: %#v", regionOpts) - floatingIp, _, err := client.FloatingIPs.Create(context.Background(), regionOpts) - if err != nil { - return fmt.Errorf("Error creating FloatingIP: %s", err) - } - - d.SetId(floatingIp.IP) - - if v, ok := d.GetOk("droplet_id"); ok { - - log.Printf("[INFO] Assigning the Floating IP to the Droplet %d", v.(int)) - action, _, err := client.FloatingIPActions.Assign(context.Background(), d.Id(), v.(int)) - if err != nil { - return fmt.Errorf( - "Error Assigning FloatingIP (%s) to the droplet: %s", d.Id(), err) - } - - _, unassignedErr := waitForFloatingIPReady(d, "completed", []string{"new", "in-progress"}, "status", meta, action.ID) - if unassignedErr != nil { - return fmt.Errorf( - "Error waiting for FloatingIP (%s) to be Assigned: %s", d.Id(), unassignedErr) - } - } - - return resourceDigitalOceanFloatingIpRead(d, meta) -} - -func resourceDigitalOceanFloatingIpUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - if d.HasChange("droplet_id") { - if v, ok := d.GetOk("droplet_id"); ok { - log.Printf("[INFO] Assigning the Floating IP %s to the Droplet %d", d.Id(), v.(int)) - action, _, err := client.FloatingIPActions.Assign(context.Background(), d.Id(), v.(int)) - if err != nil { - return fmt.Errorf( - "Error Assigning FloatingIP (%s) to the droplet: %s", d.Id(), err) - } - - _, unassignedErr := waitForFloatingIPReady(d, "completed", []string{"new", "in-progress"}, "status", meta, action.ID) - if unassignedErr != nil { - return fmt.Errorf( - "Error waiting for FloatingIP (%s) to be Assigned: %s", d.Id(), unassignedErr) - } - } else { - log.Printf("[INFO] Unassigning the Floating IP %s", d.Id()) - action, _, err := client.FloatingIPActions.Unassign(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf( - "Error 
Unassigning FloatingIP (%s): %s", d.Id(), err) - } - - _, unassignedErr := waitForFloatingIPReady(d, "completed", []string{"new", "in-progress"}, "status", meta, action.ID) - if unassignedErr != nil { - return fmt.Errorf( - "Error waiting for FloatingIP (%s) to be Unassigned: %s", d.Id(), unassignedErr) - } - } - } - - return resourceDigitalOceanFloatingIpRead(d, meta) -} - -func resourceDigitalOceanFloatingIpRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Reading the details of the FloatingIP %s", d.Id()) - floatingIp, _, err := client.FloatingIPs.Get(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving FloatingIP: %s", err) - } - - if floatingIp.Droplet != nil { - log.Printf("[INFO] A droplet was detected on the FloatingIP so setting the Region based on the Droplet") - log.Printf("[INFO] The region of the Droplet is %s", floatingIp.Droplet.Region.Slug) - d.Set("region", floatingIp.Droplet.Region.Slug) - d.Set("droplet_id", floatingIp.Droplet.ID) - } else { - d.Set("region", floatingIp.Region.Slug) - } - - d.Set("ip_address", floatingIp.IP) - - return nil -} - -func resourceDigitalOceanFloatingIpDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - if _, ok := d.GetOk("droplet_id"); ok { - log.Printf("[INFO] Unassigning the Floating IP from the Droplet") - action, _, err := client.FloatingIPActions.Unassign(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf( - "Error Unassigning FloatingIP (%s) from the droplet: %s", d.Id(), err) - } - - _, unassignedErr := waitForFloatingIPReady(d, "completed", []string{"new", "in-progress"}, "status", meta, action.ID) - if unassignedErr != nil { - return fmt.Errorf( - "Error waiting for FloatingIP (%s) to be unassigned: %s", d.Id(), unassignedErr) - } - } - - log.Printf("[INFO] Deleting FloatingIP: %s", d.Id()) - _, err := client.FloatingIPs.Delete(context.Background(), d.Id()) - 
if err != nil { - return fmt.Errorf("Error deleting FloatingIP: %s", err) - } - - d.SetId("") - return nil -} - -func waitForFloatingIPReady( - d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}, actionId int) (interface{}, error) { - log.Printf( - "[INFO] Waiting for FloatingIP (%s) to have %s of %s", - d.Id(), attribute, target) - - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newFloatingIPStateRefreshFunc(d, attribute, meta, actionId), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - - NotFoundChecks: 60, - } - - return stateConf.WaitForState() -} - -func newFloatingIPStateRefreshFunc( - d *schema.ResourceData, attribute string, meta interface{}, actionId int) resource.StateRefreshFunc { - client := meta.(*godo.Client) - return func() (interface{}, string, error) { - - log.Printf("[INFO] Assigning the Floating IP to the Droplet") - action, _, err := client.FloatingIPActions.Get(context.Background(), d.Id(), actionId) - if err != nil { - return nil, "", fmt.Errorf("Error retrieving FloatingIP (%s) ActionId (%d): %s", d.Id(), actionId, err) - } - - log.Printf("[INFO] The FloatingIP Action Status is %s", action.Status) - return &action, action.Status, nil - } -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go deleted file mode 100644 index cd05aae55..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanFloatingIP_Region(t *testing.T) { - var floatingIP godo.FloatingIP - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanFloatingIPConfig_region, - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanFloatingIPExists("digitalocean_floating_ip.foobar", &floatingIP), - resource.TestCheckResourceAttr( - "digitalocean_floating_ip.foobar", "region", "nyc3"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanFloatingIP_Droplet(t *testing.T) { - var floatingIP godo.FloatingIP - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanFloatingIPConfig_droplet(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanFloatingIPExists("digitalocean_floating_ip.foobar", &floatingIP), - resource.TestCheckResourceAttr( - "digitalocean_floating_ip.foobar", "region", "nyc3"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanFloatingIPDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_floating_ip" { - continue - } - - // Try to find the key - _, _, err := client.FloatingIPs.Get(context.Background(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Floating IP still exists") - } - } - - return nil -} - -func testAccCheckDigitalOceanFloatingIPExists(n string, floatingIP *godo.FloatingIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - // Try to find the FloatingIP - 
foundFloatingIP, _, err := client.FloatingIPs.Get(context.Background(), rs.Primary.ID) - - if err != nil { - return err - } - - if foundFloatingIP.IP != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *floatingIP = *foundFloatingIP - - return nil - } -} - -var testAccCheckDigitalOceanFloatingIPConfig_region = ` -resource "digitalocean_floating_ip" "foobar" { - region = "nyc3" -}` - -func testAccCheckDigitalOceanFloatingIPConfig_droplet(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "baz-%d" - size = "1gb" - image = "centos-7-x64" - region = "nyc3" - ipv6 = true - private_networking = true -} - -resource "digitalocean_floating_ip" "foobar" { - droplet_id = "${digitalocean_droplet.foobar.id}" - region = "${digitalocean_droplet.foobar.region}" -}`, rInt) -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_loadbalancer.go b/builtin/providers/digitalocean/resource_digitalocean_loadbalancer.go deleted file mode 100644 index f6ff03a69..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_loadbalancer.go +++ /dev/null @@ -1,303 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - "strconv" - "time" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanLoadbalancer() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanLoadbalancerCreate, - Read: resourceDigitalOceanLoadbalancerRead, - Update: resourceDigitalOceanLoadbalancerUpdate, - Delete: resourceDigitalOceanLoadbalancerDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "algorithm": { - Type: schema.TypeString, - Optional: true, - Default: "round_robin", - }, - - "forwarding_rule": { - Type: schema.TypeList, - Required: true, - 
MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "entry_protocol": { - Type: schema.TypeString, - Required: true, - }, - "entry_port": { - Type: schema.TypeInt, - Required: true, - }, - "target_protocol": { - Type: schema.TypeString, - Required: true, - }, - "target_port": { - Type: schema.TypeInt, - Required: true, - }, - "certificate_id": { - Type: schema.TypeString, - Optional: true, - }, - "tls_passthrough": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - - "healthcheck": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "port": { - Type: schema.TypeInt, - Required: true, - }, - "path": { - Type: schema.TypeString, - Optional: true, - }, - "check_interval_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 10, - }, - "response_timeout_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - "unhealthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Default: 3, - }, - "healthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - }, - }, - }, - - "sticky_sessions": { - Type: schema.TypeList, - Optional: true, - Computed: true, //this needs to be computed as the API returns a struct with none as the type - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "none", - }, - "cookie_name": { - Type: schema.TypeString, - Optional: true, - }, - "cookie_ttl_seconds": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - - "droplet_ids": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - - "droplet_tag": { - Type: schema.TypeString, - Optional: true, - }, - - "redirect_http_to_https": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - 
- "ip": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func buildLoadBalancerRequest(d *schema.ResourceData) (*godo.LoadBalancerRequest, error) { - opts := &godo.LoadBalancerRequest{ - Name: d.Get("name").(string), - Region: d.Get("region").(string), - Algorithm: d.Get("algorithm").(string), - RedirectHttpToHttps: d.Get("redirect_http_to_https").(bool), - ForwardingRules: expandForwardingRules(d.Get("forwarding_rule").([]interface{})), - } - - if v, ok := d.GetOk("droplet_ids"); ok { - var droplets []int - for _, id := range v.([]interface{}) { - i, err := strconv.Atoi(id.(string)) - if err != nil { - return nil, err - } - droplets = append(droplets, i) - } - - opts.DropletIDs = droplets - } - - if v, ok := d.GetOk("droplet_tag"); ok { - opts.Tag = v.(string) - } - - if v, ok := d.GetOk("healthcheck"); ok { - opts.HealthCheck = expandHealthCheck(v.([]interface{})) - } - - if v, ok := d.GetOk("sticky_sessions"); ok { - opts.StickySessions = expandStickySessions(v.([]interface{})) - } - - return opts, nil -} - -func resourceDigitalOceanLoadbalancerCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Create a Loadbalancer Request") - - lbOpts, err := buildLoadBalancerRequest(d) - if err != nil { - return err - } - - log.Printf("[DEBUG] Loadbalancer Create: %#v", lbOpts) - loadbalancer, _, err := client.LoadBalancers.Create(context.Background(), lbOpts) - if err != nil { - return fmt.Errorf("Error creating Load Balancer: %s", err) - } - - d.SetId(loadbalancer.ID) - - log.Printf("[DEBUG] Waiting for Load Balancer (%s) to become active", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"new"}, - Target: []string{"active"}, - Refresh: loadbalancerStateRefreshFunc(client, d.Id()), - Timeout: 10 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Load Balancer (%s) to become 
active: %s", d.Get("name"), err) - } - - return resourceDigitalOceanLoadbalancerRead(d, meta) -} - -func resourceDigitalOceanLoadbalancerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Reading the details of the Loadbalancer %s", d.Id()) - loadbalancer, resp, err := client.LoadBalancers.Get(context.Background(), d.Id()) - if err != nil { - if resp != nil && resp.StatusCode == 404 { - log.Printf("[WARN] DigitalOcean Load Balancer (%s) not found", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("Error retrieving Loadbalancer: %s", err) - } - - d.Set("name", loadbalancer.Name) - d.Set("ip", loadbalancer.IP) - d.Set("algorithm", loadbalancer.Algorithm) - d.Set("region", loadbalancer.Region.Slug) - d.Set("redirect_http_to_https", loadbalancer.RedirectHttpToHttps) - d.Set("droplet_ids", flattenDropletIds(loadbalancer.DropletIDs)) - d.Set("droplet_tag", loadbalancer.Tag) - - if err := d.Set("sticky_sessions", flattenStickySessions(loadbalancer.StickySessions)); err != nil { - return fmt.Errorf("[DEBUG] Error setting Load Balancer sticky_sessions - error: %#v", err) - } - - if err := d.Set("healthcheck", flattenHealthChecks(loadbalancer.HealthCheck)); err != nil { - return fmt.Errorf("[DEBUG] Error setting Load Balancer healthcheck - error: %#v", err) - } - - if err := d.Set("forwarding_rule", flattenForwardingRules(loadbalancer.ForwardingRules)); err != nil { - return fmt.Errorf("[DEBUG] Error setting Load Balancer forwarding_rule - error: %#v", err) - } - - return nil - -} - -func resourceDigitalOceanLoadbalancerUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - lbOpts, err := buildLoadBalancerRequest(d) - if err != nil { - return err - } - - log.Printf("[DEBUG] Load Balancer Update: %#v", lbOpts) - _, _, err = client.LoadBalancers.Update(context.Background(), d.Id(), lbOpts) - if err != nil { - return fmt.Errorf("Error updating Load Balancer: %s", err) - } 
- - return resourceDigitalOceanLoadbalancerRead(d, meta) -} - -func resourceDigitalOceanLoadbalancerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Deleting Load Balancer: %s", d.Id()) - _, err := client.LoadBalancers.Delete(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting Load Balancer: %s", err) - } - - d.SetId("") - return nil - -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_loadbalancer_test.go b/builtin/providers/digitalocean/resource_digitalocean_loadbalancer_test.go deleted file mode 100644 index b4955d720..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_loadbalancer_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanLoadbalancer_Basic(t *testing.T) { - var loadbalancer godo.LoadBalancer - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanLoadbalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanLoadbalancerConfig_basic(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDigitalOceanLoadbalancerExists("digitalocean_loadbalancer.foobar", &loadbalancer), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "name", fmt.Sprintf("loadbalancer-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_port", "80"), - 
resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_port", "80"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.port", "22"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "droplet_ids.#", "1"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanLoadbalancer_Updated(t *testing.T) { - var loadbalancer godo.LoadBalancer - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanLoadbalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanLoadbalancerConfig_basic(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDigitalOceanLoadbalancerExists("digitalocean_loadbalancer.foobar", &loadbalancer), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "name", fmt.Sprintf("loadbalancer-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_port", "80"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_port", "80"), - resource.TestCheckResourceAttr( - 
"digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.port", "22"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "droplet_ids.#", "1"), - ), - }, - { - Config: testAccCheckDigitalOceanLoadbalancerConfig_updated(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDigitalOceanLoadbalancerExists("digitalocean_loadbalancer.foobar", &loadbalancer), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "name", fmt.Sprintf("loadbalancer-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_port", "81"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_port", "81"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.port", "22"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "droplet_ids.#", "2"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanLoadbalancer_dropletTag(t *testing.T) { - var loadbalancer godo.LoadBalancer - 
rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanLoadbalancerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanLoadbalancerConfig_dropletTag(rInt), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDigitalOceanLoadbalancerExists("digitalocean_loadbalancer.foobar", &loadbalancer), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "name", fmt.Sprintf("loadbalancer-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "region", "nyc3"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_port", "80"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.entry_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_port", "80"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "forwarding_rule.0.target_protocol", "http"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.#", "1"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.port", "22"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "healthcheck.0.protocol", "tcp"), - resource.TestCheckResourceAttr( - "digitalocean_loadbalancer.foobar", "droplet_tag", "sample"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanLoadbalancerDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_loadbalancer" { - continue - } - - _, _, err := client.LoadBalancers.Get(context.Background(), rs.Primary.ID) - - if err != nil && 
!strings.Contains(err.Error(), "404") { - return fmt.Errorf( - "Error waiting for loadbalancer (%s) to be destroyed: %s", - rs.Primary.ID, err) - } - } - - return nil -} - -func testAccCheckDigitalOceanLoadbalancerExists(n string, loadbalancer *godo.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Loadbalancer ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - lb, _, err := client.LoadBalancers.Get(context.Background(), rs.Primary.ID) - - if err != nil { - return err - } - - if lb.ID != rs.Primary.ID { - return fmt.Errorf("Loabalancer not found") - } - - *loadbalancer = *lb - - return nil - } -} - -func testAccCheckDigitalOceanLoadbalancerConfig_basic(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" -} - -resource "digitalocean_loadbalancer" "foobar" { - name = "loadbalancer-%d" - region = "nyc3" - - forwarding_rule { - entry_port = 80 - entry_protocol = "http" - - target_port = 80 - target_protocol = "http" - } - - healthcheck { - port = 22 - protocol = "tcp" - } - - droplet_ids = ["${digitalocean_droplet.foobar.id}"] -}`, rInt, rInt) -} - -func testAccCheckDigitalOceanLoadbalancerConfig_updated(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" -} - -resource "digitalocean_droplet" "foo" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" -} - -resource "digitalocean_loadbalancer" "foobar" { - name = "loadbalancer-%d" - region = "nyc3" - - forwarding_rule { - entry_port = 81 - entry_protocol = "http" - - target_port = 81 - target_protocol = "http" - } - - healthcheck { - port = 22 - protocol = "tcp" - } - - 
droplet_ids = ["${digitalocean_droplet.foobar.id}","${digitalocean_droplet.foo.id}"] -}`, rInt, rInt, rInt) -} - -func testAccCheckDigitalOceanLoadbalancerConfig_dropletTag(rInt int) string { - return fmt.Sprintf(` -resource "digitalocean_tag" "barbaz" { - name = "sample" -} - -resource "digitalocean_droplet" "foobar" { - name = "foo-%d" - size = "512mb" - image = "centos-7-x64" - region = "nyc3" - tags = ["${digitalocean_tag.barbaz.id}"] -} - -resource "digitalocean_loadbalancer" "foobar" { - name = "loadbalancer-%d" - region = "nyc3" - - forwarding_rule { - entry_port = 80 - entry_protocol = "http" - - target_port = 80 - target_protocol = "http" - } - - healthcheck { - port = 22 - protocol = "tcp" - } - - droplet_tag = "${digitalocean_tag.barbaz.name}" - - depends_on = ["digitalocean_droplet.foobar"] -}`, rInt, rInt) -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_record.go b/builtin/providers/digitalocean/resource_digitalocean_record.go deleted file mode 100644 index 11d8a8e86..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_record.go +++ /dev/null @@ -1,233 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - "strconv" - "strings" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanRecordCreate, - Read: resourceDigitalOceanRecordRead, - Update: resourceDigitalOceanRecordUpdate, - Delete: resourceDigitalOceanRecordDelete, - - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "domain": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": { - Type: schema.TypeString, - Optional: true, - }, - - "port": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "priority": { - Type: schema.TypeString, - Optional: true, - Computed: 
true, - ForceNew: true, - }, - - "weight": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "ttl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "value": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - newRecord := godo.DomainRecordEditRequest{ - Type: d.Get("type").(string), - Name: d.Get("name").(string), - Data: d.Get("value").(string), - } - - var err error - if priority := d.Get("priority").(string); priority != "" { - newRecord.Priority, err = strconv.Atoi(priority) - if err != nil { - return fmt.Errorf("Failed to parse priority as an integer: %v", err) - } - } - if port := d.Get("port").(string); port != "" { - newRecord.Port, err = strconv.Atoi(port) - if err != nil { - return fmt.Errorf("Failed to parse port as an integer: %v", err) - } - } - if ttl := d.Get("ttl").(string); ttl != "" { - newRecord.TTL, err = strconv.Atoi(ttl) - if err != nil { - return fmt.Errorf("Failed to parse ttl as an integer: %v", err) - } - } - if weight := d.Get("weight").(string); weight != "" { - newRecord.Weight, err = strconv.Atoi(weight) - if err != nil { - return fmt.Errorf("Failed to parse weight as an integer: %v", err) - } - } - - log.Printf("[DEBUG] record create configuration: %#v", newRecord) - rec, _, err := client.Domains.CreateRecord(context.Background(), d.Get("domain").(string), &newRecord) - if err != nil { - return fmt.Errorf("Failed to create record: %s", err) - } - - d.SetId(strconv.Itoa(rec.ID)) - log.Printf("[INFO] Record ID: %s", d.Id()) - - return resourceDigitalOceanRecordRead(d, meta) -} - -func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - domain := d.Get("domain").(string) - 
id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid record ID: %v", err) - } - - rec, resp, err := client.Domains.Record(context.Background(), domain, id) - if err != nil { - // If the record is somehow already destroyed, mark as - // successfully gone - if resp.StatusCode == 404 { - d.SetId("") - return nil - } - - return err - } - - if t := rec.Type; t == "CNAME" || t == "MX" || t == "NS" || t == "SRV" { - if rec.Data == "@" { - rec.Data = domain - } - rec.Data += "." - } - - d.Set("name", rec.Name) - d.Set("type", rec.Type) - d.Set("value", rec.Data) - d.Set("weight", strconv.Itoa(rec.Weight)) - d.Set("priority", strconv.Itoa(rec.Priority)) - d.Set("port", strconv.Itoa(rec.Port)) - d.Set("ttl", strconv.Itoa(rec.TTL)) - - en := constructFqdn(rec.Name, d.Get("domain").(string)) - log.Printf("[DEBUG] Constructed FQDN: %s", en) - d.Set("fqdn", en) - - return nil -} - -func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - domain := d.Get("domain").(string) - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid record ID: %v", err) - } - - var editRecord godo.DomainRecordEditRequest - if v, ok := d.GetOk("name"); ok { - editRecord.Name = v.(string) - } - - if d.HasChange("ttl") { - newTTL := d.Get("ttl").(string) - editRecord.TTL, err = strconv.Atoi(newTTL) - if err != nil { - return fmt.Errorf("Failed to parse ttl as an integer: %v", err) - } - } - - log.Printf("[DEBUG] record update configuration: %#v", editRecord) - _, _, err = client.Domains.EditRecord(context.Background(), domain, id, &editRecord) - if err != nil { - return fmt.Errorf("Failed to update record: %s", err) - } - - return resourceDigitalOceanRecordRead(d, meta) -} - -func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - domain := d.Get("domain").(string) - id, err := strconv.Atoi(d.Id()) - if err != nil { - 
return fmt.Errorf("invalid record ID: %v", err) - } - - log.Printf("[INFO] Deleting record: %s, %d", domain, id) - - resp, delErr := client.Domains.DeleteRecord(context.Background(), domain, id) - if delErr != nil { - // If the record is somehow already destroyed, mark as - // successfully gone - if resp.StatusCode == 404 { - return nil - } - - return fmt.Errorf("Error deleting record: %s", delErr) - } - - return nil -} - -func constructFqdn(name, domain string) string { - rn := strings.ToLower(strings.TrimSuffix(name, ".")) - domain = strings.TrimSuffix(domain, ".") - if !strings.HasSuffix(rn, domain) { - rn = strings.Join([]string{name, domain}, ".") - } - return rn -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_record_test.go b/builtin/providers/digitalocean/resource_digitalocean_record_test.go deleted file mode 100644 index b3def11b9..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_record_test.go +++ /dev/null @@ -1,407 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strconv" - "testing" - - "strings" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDigitalOceanRecordConstructFqdn(t *testing.T) { - cases := []struct { - Input, Output string - }{ - {"www", "www.nonexample.com"}, - {"dev.www", "dev.www.nonexample.com"}, - {"*", "*.nonexample.com"}, - {"nonexample.com", "nonexample.com"}, - {"test.nonexample.com", "test.nonexample.com"}, - {"test.nonexample.com.", "test.nonexample.com"}, - } - - domain := "nonexample.com" - for _, tc := range cases { - actual := constructFqdn(tc.Input, domain) - if actual != tc.Output { - t.Fatalf("input: %s\noutput: %s", tc.Input, actual) - } - } -} - -func TestAccDigitalOceanRecord_Basic(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDigitalOceanRecordConfig_basic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record), - testAccCheckDigitalOceanRecordAttributes(&record), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "value", "192.168.0.10"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "fqdn", strings.Join([]string{"terraform", domain}, ".")), - ), - }, - }, - }) -} - -func TestAccDigitalOceanRecord_Updated(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDigitalOceanRecordConfig_basic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record), - testAccCheckDigitalOceanRecordAttributes(&record), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "value", "192.168.0.10"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "type", "A"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "ttl", "1800"), - ), - }, - { - Config: fmt.Sprintf( - testAccCheckDigitalOceanRecordConfig_new_value, 
domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record), - testAccCheckDigitalOceanRecordAttributesUpdated(&record), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "value", "192.168.0.11"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "type", "A"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "ttl", "90"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckDigitalOceanRecordConfig_cname, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record), - testAccCheckDigitalOceanRecordAttributesHostname("a.foobar-test-terraform.com", &record), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "value", "a.foobar-test-terraform.com."), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "type", "CNAME"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckDigitalOceanRecordConfig_external_cname, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record), - testAccCheckDigitalOceanRecordAttributesHostname("a.foobar-test-terraform.net", &record), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "value", "a.foobar-test-terraform.net."), - resource.TestCheckResourceAttr( - "digitalocean_record.foobar", "type", "CNAME"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanRecord_MX(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckDigitalOceanRecordConfig_mx, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foo_record", &record), - testAccCheckDigitalOceanRecordAttributesHostname("foobar."+domain, &record), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "value", "foobar."+domain+"."), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "type", "MX"), - ), - }, - }, - }) -} - -func TestAccDigitalOceanRecord_MX_at(t *testing.T) { - var record godo.DomainRecord - domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10)) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckDigitalOceanRecordConfig_mx_at, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanRecordExists("digitalocean_record.foo_record", &record), - testAccCheckDigitalOceanRecordAttributesHostname("@", &record), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "name", "terraform"), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "domain", domain), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "value", domain+"."), - resource.TestCheckResourceAttr( - "digitalocean_record.foo_record", "type", "MX"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_record" { - continue - } - domain := rs.Primary.Attributes["domain"] - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - _, _, err = client.Domains.Record(context.Background(), domain, id) - - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckDigitalOceanRecordAttributes(record *godo.DomainRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Data != "192.168.0.10" { - return fmt.Errorf("Bad value: %s", record.Data) - } - - return nil - } -} - -func testAccCheckDigitalOceanRecordAttributesUpdated(record *godo.DomainRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Data != "192.168.0.11" { - return fmt.Errorf("Bad value: %s", record.Data) - } - - return nil - } -} - -func testAccCheckDigitalOceanRecordExists(n string, record *godo.DomainRecord) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - domain := rs.Primary.Attributes["domain"] - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - foundRecord, _, err := client.Domains.Record(context.Background(), domain, id) - - if err != nil { - return err - } - - if strconv.Itoa(foundRecord.ID) != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *record = *foundRecord - - return nil - } -} - -func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *godo.DomainRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Data != data { - return fmt.Errorf("Bad value: expected %s, got %s", data, record.Data) - } - - return nil - } -} - -const testAccCheckDigitalOceanRecordConfig_basic = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foobar" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "192.168.0.10" - type = "A" -}` - -const testAccCheckDigitalOceanRecordConfig_new_value = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foobar" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "192.168.0.11" - type = "A" - ttl = 90 -}` - -const testAccCheckDigitalOceanRecordConfig_cname = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foobar" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "a.foobar-test-terraform.com." 
- type = "CNAME" -}` - -const testAccCheckDigitalOceanRecordConfig_mx_at = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foo_record" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "${digitalocean_domain.foobar.name}." - type = "MX" - priority = "10" -}` - -const testAccCheckDigitalOceanRecordConfig_mx = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foo_record" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "foobar.${digitalocean_domain.foobar.name}." - type = "MX" - priority = "10" -}` - -const testAccCheckDigitalOceanRecordConfig_external_cname = ` -resource "digitalocean_domain" "foobar" { - name = "%s" - ip_address = "192.168.0.10" -} - -resource "digitalocean_record" "foobar" { - domain = "${digitalocean_domain.foobar.name}" - - name = "terraform" - value = "a.foobar-test-terraform.net." 
- type = "CNAME" -}` diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go deleted file mode 100644 index 65d7743df..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go +++ /dev/null @@ -1,143 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - "strconv" - "strings" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanSSHKey() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanSSHKeyCreate, - Read: resourceDigitalOceanSSHKeyRead, - Update: resourceDigitalOceanSSHKeyUpdate, - Delete: resourceDigitalOceanSSHKeyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "public_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: resourceDigitalOceanSSHKeyPublicKeyDiffSuppress, - }, - - "fingerprint": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceDigitalOceanSSHKeyPublicKeyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return strings.TrimSpace(old) == strings.TrimSpace(new) -} - -func resourceDigitalOceanSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - // Build up our creation options - opts := &godo.KeyCreateRequest{ - Name: d.Get("name").(string), - PublicKey: d.Get("public_key").(string), - } - - log.Printf("[DEBUG] SSH Key create configuration: %#v", opts) - key, _, err := client.Keys.Create(context.Background(), opts) - if err != nil { - return fmt.Errorf("Error creating SSH Key: %s", err) - } - - d.SetId(strconv.Itoa(key.ID)) - log.Printf("[INFO] SSH Key: %d", key.ID) - - return 
resourceDigitalOceanSSHKeyRead(d, meta) -} - -func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid SSH key id: %v", err) - } - - key, resp, err := client.Keys.GetByID(context.Background(), id) - if err != nil { - // If the key is somehow already destroyed, mark as - // successfully gone - if resp != nil && resp.StatusCode == 404 { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving SSH key: %s", err) - } - - d.Set("name", key.Name) - d.Set("fingerprint", key.Fingerprint) - d.Set("public_key", key.PublicKey) - - return nil -} - -func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid SSH key id: %v", err) - } - - var newName string - if v, ok := d.GetOk("name"); ok { - newName = v.(string) - } - - log.Printf("[DEBUG] SSH key update name: %#v", newName) - opts := &godo.KeyUpdateRequest{ - Name: newName, - } - _, _, err = client.Keys.UpdateByID(context.Background(), id, opts) - if err != nil { - return fmt.Errorf("Failed to update SSH key: %s", err) - } - - return resourceDigitalOceanSSHKeyRead(d, meta) -} - -func resourceDigitalOceanSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("invalid SSH key id: %v", err) - } - - log.Printf("[INFO] Deleting SSH key: %d", id) - _, err = client.Keys.DeleteByID(context.Background(), id) - if err != nil { - return fmt.Errorf("Error deleting SSH key: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go deleted file mode 100644 index 3b518670d..000000000 --- 
a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanSSHKey_Basic(t *testing.T) { - var key godo.Key - rInt := acctest.RandInt() - publicKeyMaterial, _, err := acctest.RandSSHKeyPair("digitalocean@ssh-acceptance-test") - if err != nil { - t.Fatalf("Cannot generate test SSH key pair: %s", err) - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanSSHKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanSSHKeyConfig_basic(rInt, publicKeyMaterial), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanSSHKeyExists("digitalocean_ssh_key.foobar", &key), - resource.TestCheckResourceAttr( - "digitalocean_ssh_key.foobar", "name", fmt.Sprintf("foobar-%d", rInt)), - resource.TestCheckResourceAttr( - "digitalocean_ssh_key.foobar", "public_key", publicKeyMaterial), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_ssh_key" { - continue - } - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - // Try to find the key - _, _, err = client.Keys.GetByID(context.Background(), id) - - if err == nil { - return fmt.Errorf("SSH key still exists") - } - } - - return nil -} - -func testAccCheckDigitalOceanSSHKeyExists(n string, key *godo.Key) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if 
rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - // Try to find the key - foundKey, _, err := client.Keys.GetByID(context.Background(), id) - - if err != nil { - return err - } - - if strconv.Itoa(foundKey.ID) != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *key = *foundKey - - return nil - } -} - -func testAccCheckDigitalOceanSSHKeyConfig_basic(rInt int, key string) string { - return fmt.Sprintf(` -resource "digitalocean_ssh_key" "foobar" { - name = "foobar-%d" - public_key = "%s" -}`, rInt, key) -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_tag.go b/builtin/providers/digitalocean/resource_digitalocean_tag.go deleted file mode 100644 index d82550af7..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_tag.go +++ /dev/null @@ -1,82 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanTag() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanTagCreate, - Read: resourceDigitalOceanTagRead, - Delete: resourceDigitalOceanTagDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceDigitalOceanTagCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - // Build up our creation options - opts := &godo.TagCreateRequest{ - Name: d.Get("name").(string), - } - - log.Printf("[DEBUG] Tag create configuration: %#v", opts) - tag, _, err := client.Tags.Create(context.Background(), opts) - if err != nil { - return fmt.Errorf("Error creating tag: %s", err) - } - - d.SetId(tag.Name) - log.Printf("[INFO] Tag: 
%s", tag.Name) - - return resourceDigitalOceanTagRead(d, meta) -} - -func resourceDigitalOceanTagRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - tag, resp, err := client.Tags.Get(context.Background(), d.Id()) - if err != nil { - // If the tag is somehow already destroyed, mark as - // successfully gone - if resp != nil && resp.StatusCode == 404 { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving tag: %s", err) - } - - d.Set("name", tag.Name) - - return nil -} - -func resourceDigitalOceanTagDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Deleting tag: %s", d.Id()) - _, err := client.Tags.Delete(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting tag: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_tag_test.go b/builtin/providers/digitalocean/resource_digitalocean_tag_test.go deleted file mode 100644 index bf2ee7eb1..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_tag_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanTag_Basic(t *testing.T) { - var tag godo.Tag - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanTagDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanTagConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanTagExists("digitalocean_tag.foobar", &tag), - testAccCheckDigitalOceanTagAttributes(&tag), - resource.TestCheckResourceAttr( - "digitalocean_tag.foobar", "name", "foobar"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanTagDestroy(s 
*terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_tag" { - continue - } - - // Try to find the key - _, _, err := client.Tags.Get(context.Background(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Tag still exists") - } - } - - return nil -} - -func testAccCheckDigitalOceanTagAttributes(tag *godo.Tag) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if tag.Name != "foobar" { - return fmt.Errorf("Bad name: %s", tag.Name) - } - - return nil - } -} - -func testAccCheckDigitalOceanTagExists(n string, tag *godo.Tag) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - // Try to find the tag - foundTag, _, err := client.Tags.Get(context.Background(), rs.Primary.ID) - - if err != nil { - return err - } - - *tag = *foundTag - - return nil - } -} - -var testAccCheckDigitalOceanTagConfig_basic = fmt.Sprintf(` -resource "digitalocean_tag" "foobar" { - name = "foobar" -}`) diff --git a/builtin/providers/digitalocean/resource_digitalocean_volume.go b/builtin/providers/digitalocean/resource_digitalocean_volume.go deleted file mode 100644 index 1040fe9ca..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_volume.go +++ /dev/null @@ -1,147 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "log" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDigitalOceanVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceDigitalOceanVolumeCreate, - Read: resourceDigitalOceanVolumeRead, - Delete: resourceDigitalOceanVolumeDelete, - Importer: &schema.ResourceImporter{ - State: resourceDigitalOceanVolumeImport, - }, - - 
Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "id": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "droplet_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeInt}, - Computed: true, - }, - - "size": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, // Update-ability Coming Soon ™ - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, // Update-ability Coming Soon ™ - }, - }, - } -} - -func resourceDigitalOceanVolumeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - opts := &godo.VolumeCreateRequest{ - Region: d.Get("region").(string), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - SizeGigaBytes: int64(d.Get("size").(int)), - } - - log.Printf("[DEBUG] Volume create configuration: %#v", opts) - volume, _, err := client.Storage.CreateVolume(context.Background(), opts) - if err != nil { - return fmt.Errorf("Error creating Volume: %s", err) - } - - d.SetId(volume.ID) - log.Printf("[INFO] Volume name: %s", volume.Name) - - return resourceDigitalOceanVolumeRead(d, meta) -} - -func resourceDigitalOceanVolumeRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - volume, resp, err := client.Storage.GetVolume(context.Background(), d.Id()) - if err != nil { - // If the volume is somehow already destroyed, mark as - // successfully gone - if resp.StatusCode == 404 { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving volume: %s", err) - } - - d.Set("id", volume.ID) - - dids := make([]interface{}, 0, len(volume.DropletIDs)) - for _, did := range volume.DropletIDs { - dids = append(dids, did) - } - d.Set("droplet_ids", schema.NewSet( - func(dropletID interface{}) int { return dropletID.(int) }, - dids, - )) - - return nil 
-} - -func resourceDigitalOceanVolumeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*godo.Client) - - log.Printf("[INFO] Deleting volume: %s", d.Id()) - _, err := client.Storage.DeleteVolume(context.Background(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting volume: %s", err) - } - - d.SetId("") - return nil -} - -func resourceDigitalOceanVolumeImport(rs *schema.ResourceData, v interface{}) ([]*schema.ResourceData, error) { - client := v.(*godo.Client) - volume, _, err := client.Storage.GetVolume(context.Background(), rs.Id()) - if err != nil { - return nil, err - } - - rs.Set("id", volume.ID) - rs.Set("name", volume.Name) - rs.Set("region", volume.Region.Slug) - rs.Set("description", volume.Description) - rs.Set("size", int(volume.SizeGigaBytes)) - - dids := make([]interface{}, 0, len(volume.DropletIDs)) - for _, did := range volume.DropletIDs { - dids = append(dids, did) - } - rs.Set("droplet_ids", schema.NewSet( - func(dropletID interface{}) int { return dropletID.(int) }, - dids, - )) - - return []*schema.ResourceData{rs}, nil -} diff --git a/builtin/providers/digitalocean/resource_digitalocean_volume_test.go b/builtin/providers/digitalocean/resource_digitalocean_volume_test.go deleted file mode 100644 index be876ab93..000000000 --- a/builtin/providers/digitalocean/resource_digitalocean_volume_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package digitalocean - -import ( - "context" - "fmt" - "testing" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDigitalOceanVolume_Basic(t *testing.T) { - name := fmt.Sprintf("volume-%s", acctest.RandString(10)) - - volume := godo.Volume{ - Name: name, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanVolumeDestroy, - Steps: 
[]resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDigitalOceanVolumeConfig_basic, name), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanVolumeExists("digitalocean_volume.foobar", &volume), - resource.TestCheckResourceAttr( - "digitalocean_volume.foobar", "name", name), - resource.TestCheckResourceAttr( - "digitalocean_volume.foobar", "size", "100"), - resource.TestCheckResourceAttr( - "digitalocean_volume.foobar", "region", "nyc1"), - resource.TestCheckResourceAttr( - "digitalocean_volume.foobar", "description", "peace makes plenty"), - ), - }, - }, - }) -} - -const testAccCheckDigitalOceanVolumeConfig_basic = ` -resource "digitalocean_volume" "foobar" { - region = "nyc1" - name = "%s" - size = 100 - description = "peace makes plenty" -}` - -func testAccCheckDigitalOceanVolumeExists(rn string, volume *godo.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("not found: %s", rn) - } - if rs.Primary.ID == "" { - return fmt.Errorf("no volume ID is set") - } - - client := testAccProvider.Meta().(*godo.Client) - - got, _, err := client.Storage.GetVolume(context.Background(), rs.Primary.ID) - if err != nil { - return err - } - if got.Name != volume.Name { - return fmt.Errorf("wrong volume found, want %q got %q", volume.Name, got.Name) - } - // get the computed volume details - *volume = *got - return nil - } -} - -func testAccCheckDigitalOceanVolumeDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*godo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "digitalocean_volume" { - continue - } - - // Try to find the volume - _, _, err := client.Storage.GetVolume(context.Background(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Volume still exists") - } - } - - return nil -} - -func TestAccDigitalOceanVolume_Droplet(t *testing.T) { - var ( - volume = godo.Volume{Name: fmt.Sprintf("volume-%s", 
acctest.RandString(10))} - droplet godo.Droplet - ) - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDigitalOceanVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckDigitalOceanVolumeConfig_droplet(rInt, volume.Name), - Check: resource.ComposeTestCheckFunc( - testAccCheckDigitalOceanVolumeExists("digitalocean_volume.foobar", &volume), - testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet), - // the droplet should see an attached volume - resource.TestCheckResourceAttr( - "digitalocean_droplet.foobar", "volume_ids.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckDigitalOceanVolumeConfig_droplet(rInt int, vName string) string { - return fmt.Sprintf(` -resource "digitalocean_volume" "foobar" { - region = "nyc1" - name = "%s" - size = 100 - description = "peace makes plenty" -} - -resource "digitalocean_droplet" "foobar" { - name = "baz-%d" - size = "1gb" - image = "centos-7-x64" - region = "nyc1" - ipv6 = true - private_networking = true - volume_ids = ["${digitalocean_volume.foobar.id}"] -}`, vName, rInt) -} diff --git a/builtin/providers/digitalocean/tags.go b/builtin/providers/digitalocean/tags.go deleted file mode 100644 index e3a990e79..000000000 --- a/builtin/providers/digitalocean/tags.go +++ /dev/null @@ -1,73 +0,0 @@ -package digitalocean - -import ( - "context" - "log" - - "github.com/digitalocean/godo" - "github.com/hashicorp/terraform/helper/schema" -) - -// setTags is a helper to set the tags for a resource. 
It expects the -// tags field to be named "tags" -func setTags(conn *godo.Client, d *schema.ResourceData) error { - oraw, nraw := d.GetChange("tags") - remove, create := diffTags(tagsFromSchema(oraw), tagsFromSchema(nraw)) - - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - for _, tag := range remove { - _, err := conn.Tags.UntagResources(context.Background(), tag, &godo.UntagResourcesRequest{ - Resources: []godo.Resource{ - { - ID: d.Id(), - Type: godo.DropletResourceType, - }, - }, - }) - if err != nil { - return err - } - } - - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - for _, tag := range create { - _, err := conn.Tags.TagResources(context.Background(), tag, &godo.TagResourcesRequest{ - Resources: []godo.Resource{ - { - ID: d.Id(), - Type: godo.DropletResourceType, - }, - }, - }) - if err != nil { - return err - } - } - - return nil -} - -// tagsFromSchema takes the raw schema tags and returns them as a -// properly asserted map[string]string -func tagsFromSchema(raw interface{}) map[string]string { - result := make(map[string]string) - for _, t := range raw.([]interface{}) { - result[t.(string)] = t.(string) - } - - return result -} - -// diffTags takes the old and the new tag sets and returns the difference of -// both. 
The remaining tags are those that need to be removed and created -func diffTags(oldTags, newTags map[string]string) (map[string]string, map[string]string) { - for k := range oldTags { - _, ok := newTags[k] - if ok { - delete(newTags, k) - delete(oldTags, k) - } - } - - return oldTags, newTags -} diff --git a/builtin/providers/digitalocean/tags_test.go b/builtin/providers/digitalocean/tags_test.go deleted file mode 100644 index 02686ed2d..000000000 --- a/builtin/providers/digitalocean/tags_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package digitalocean - -import ( - "reflect" - "testing" -) - -func TestDiffTags(t *testing.T) { - cases := []struct { - Old, New []interface{} - Create, Remove map[string]string - }{ - // Basic add/remove - { - Old: []interface{}{ - "foo", - }, - New: []interface{}{ - "bar", - }, - Create: map[string]string{ - "bar": "bar", - }, - Remove: map[string]string{ - "foo": "foo", - }, - }, - - // Noop - { - Old: []interface{}{ - "foo", - }, - New: []interface{}{ - "foo", - }, - Create: map[string]string{}, - Remove: map[string]string{}, - }, - } - - for i, tc := range cases { - r, c := diffTags(tagsFromSchema(tc.Old), tagsFromSchema(tc.New)) - if !reflect.DeepEqual(r, tc.Remove) { - t.Fatalf("%d: bad remove: %#v", i, r) - } - if !reflect.DeepEqual(c, tc.Create) { - t.Fatalf("%d: bad create: %#v", i, c) - } - } -} diff --git a/builtin/providers/dme/config.go b/builtin/providers/dme/config.go deleted file mode 100644 index cc75f120c..000000000 --- a/builtin/providers/dme/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package dme - -import ( - "fmt" - "log" - - "github.com/hashicorp/go-cleanhttp" - "github.com/soniah/dnsmadeeasy" -) - -// Config contains DNSMadeEasy provider settings -type Config struct { - AKey string - SKey string - UseSandbox bool -} - -// Client returns a new client for accessing DNSMadeEasy -func (c *Config) Client() (*dnsmadeeasy.Client, error) { - client, err := dnsmadeeasy.NewClient(c.AKey, c.SKey) - if err != nil { - return nil, 
fmt.Errorf("Error setting up client: %s", err) - } - - client.HTTP = cleanhttp.DefaultClient() - - if c.UseSandbox { - client.URL = dnsmadeeasy.SandboxURL - } - - log.Printf("[INFO] DNSMadeEasy Client configured for AKey: %s", client.AKey) - - return client, nil -} diff --git a/builtin/providers/dme/provider.go b/builtin/providers/dme/provider.go deleted file mode 100644 index ea134520a..000000000 --- a/builtin/providers/dme/provider.go +++ /dev/null @@ -1,63 +0,0 @@ -package dme - -import ( - "os" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider provides a Provider... -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "akey": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: envDefaultFunc("DME_AKEY"), - Description: "A DNSMadeEasy API Key.", - }, - "skey": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: envDefaultFunc("DME_SKEY"), - Description: "The Secret Key for API operations.", - }, - "usesandbox": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - DefaultFunc: envDefaultFunc("DME_USESANDBOX"), - Description: "If true, use the DME Sandbox.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "dme_record": resourceDMERecord(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func envDefaultFunc(k string) schema.SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - if v == "true" { - return true, nil - } else if v == "false" { - return false, nil - } - return v, nil - } - return nil, nil - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - AKey: d.Get("akey").(string), - SKey: d.Get("skey").(string), - UseSandbox: d.Get("usesandbox").(bool), - } - return config.Client() -} diff --git a/builtin/providers/dme/provider_test.go b/builtin/providers/dme/provider_test.go deleted 
file mode 100644 index 76b51712b..000000000 --- a/builtin/providers/dme/provider_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package dme - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - // provider is called terraform-provider-dme ie dme - "dme": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProviderImpl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DME_SKEY"); v == "" { - t.Fatal("DME_SKEY must be set for acceptance tests") - } - - if v := os.Getenv("DME_AKEY"); v == "" { - t.Fatal("DME_AKEY must be set for acceptance tests") - } - - if v := os.Getenv("DME_DOMAINID"); v == "" { - t.Fatal("DME_DOMAINID must be set for acceptance tests") - } - - if v := os.Getenv("DME_USESANDBOX"); v == "" { - t.Fatal("DME_USESANDBOX must be set for acceptance tests. Use the strings 'true' or 'false'.") - } -} diff --git a/builtin/providers/dme/resource_dme_record.go b/builtin/providers/dme/resource_dme_record.go deleted file mode 100644 index adc7cee6e..000000000 --- a/builtin/providers/dme/resource_dme_record.go +++ /dev/null @@ -1,253 +0,0 @@ -package dme - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/soniah/dnsmadeeasy" -) - -func resourceDMERecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDMERecordCreate, - Read: resourceDMERecordRead, - Update: resourceDMERecordUpdate, - Delete: resourceDMERecordDelete, - - Schema: map[string]*schema.Schema{ - // Use recordid for TF ID. 
- "domainid": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - StateFunc: func(value interface{}) string { - return strings.ToLower(value.(string)) - }, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "mxLevel": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "priority": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "keywords": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "title": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "hardLink": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "redirectType": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "gtdLocation": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceDMERecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dnsmadeeasy.Client) - - domainid := d.Get("domainid").(string) - log.Printf("[INFO] Creating record for domainid: %s", domainid) - - cr := make(map[string]interface{}) - if err := getAll(d, cr); err != nil { - return err - } - log.Printf("[DEBUG] record create configuration: %#v", cr) - - result, err := client.CreateRecord(domainid, cr) - if err != nil { - return fmt.Errorf("Failed to create record: %s", err) - } - - d.SetId(result) - log.Printf("[INFO] record ID: %s", d.Id()) - - return resourceDMERecordRead(d, meta) -} - -func resourceDMERecordRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*dnsmadeeasy.Client) - - domainid := d.Get("domainid").(string) - recordid := d.Id() - log.Printf("[INFO] Reading record for domainid: %s recordid: %s", domainid, recordid) - - rec, err := client.ReadRecord(domainid, recordid) - if err != nil { - if strings.Contains(err.Error(), "Unable to find") { - d.SetId("") - return nil - } - - return fmt.Errorf("Couldn't find record: %s", err) - } - - return setAll(d, rec) -} - -func resourceDMERecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dnsmadeeasy.Client) - - domainid := d.Get("domainid").(string) - recordid := d.Id() - - cr := make(map[string]interface{}) - if err := getAll(d, cr); err != nil { - return err - } - log.Printf("[DEBUG] record update configuration: %+#v", cr) - - if _, err := client.UpdateRecord(domainid, recordid, cr); err != nil { - return fmt.Errorf("Error updating record: %s", err) - } - - return resourceDMERecordRead(d, meta) -} - -func resourceDMERecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dnsmadeeasy.Client) - - domainid := d.Get("domainid").(string) - recordid := d.Id() - log.Printf("[INFO] Deleting record for domainid: %s recordid: %s", domainid, recordid) - - if err := client.DeleteRecord(domainid, recordid); err != nil { - return fmt.Errorf("Error deleting record: %s", err) - } - - return nil -} - -func getAll(d *schema.ResourceData, cr map[string]interface{}) error { - - if attr, ok := d.GetOk("name"); ok { - cr["name"] = attr.(string) - } - if attr, ok := d.GetOk("type"); ok { - cr["type"] = attr.(string) - } - if attr, ok := d.GetOk("ttl"); ok { - cr["ttl"] = int64(attr.(int)) - } - if attr, ok := d.GetOk("value"); ok { - cr["value"] = attr.(string) - } - if attr, ok := d.GetOk("gtdLocation"); ok { - cr["gtdLocation"] = attr.(string) - } - - switch strings.ToUpper(d.Get("type").(string)) { - case "A", "CNAME", "ANAME", "TXT", "SPF", "NS", "PTR", "AAAA": - // all done - case "MX": - if attr, ok := d.GetOk("mxLevel"); 
ok { - cr["mxLevel"] = int64(attr.(int)) - } - case "SRV": - if attr, ok := d.GetOk("priority"); ok { - cr["priority"] = int64(attr.(int)) - } - if attr, ok := d.GetOk("weight"); ok { - cr["weight"] = int64(attr.(int)) - } - if attr, ok := d.GetOk("port"); ok { - cr["port"] = int64(attr.(int)) - } - case "HTTPRED": - if attr, ok := d.GetOk("hardLink"); ok && attr.(bool) { - cr["hardLink"] = "true" - } - if attr, ok := d.GetOk("redirectType"); ok { - cr["redirectType"] = attr.(string) - } - if attr, ok := d.GetOk("title"); ok { - cr["title"] = attr.(string) - } - if attr, ok := d.GetOk("keywords"); ok { - cr["keywords"] = attr.(string) - } - if attr, ok := d.GetOk("description"); ok { - cr["description"] = attr.(string) - } - default: - return fmt.Errorf("getAll: type not found") - } - return nil -} - -func setAll(d *schema.ResourceData, rec *dnsmadeeasy.Record) error { - d.Set("type", rec.Type) - d.Set("name", rec.Name) - d.Set("ttl", rec.TTL) - d.Set("value", rec.Value) - // only set gtdLocation if it is given as this is optional. 
- if rec.GtdLocation != "" { - d.Set("gtdLocation", rec.GtdLocation) - } - - switch rec.Type { - case "A", "CNAME", "ANAME", "TXT", "SPF", "NS", "PTR": - // all done - case "AAAA": - // overwrite value set above - DME ipv6 is lower case - d.Set("value", strings.ToLower(rec.Value)) - case "MX": - d.Set("mxLevel", rec.MXLevel) - case "SRV": - d.Set("priority", rec.Priority) - d.Set("weight", rec.Weight) - d.Set("port", rec.Port) - case "HTTPRED": - d.Set("hardLink", rec.HardLink) - d.Set("redirectType", rec.RedirectType) - d.Set("title", rec.Title) - d.Set("keywords", rec.Keywords) - d.Set("description", rec.Description) - default: - return fmt.Errorf("setAll: type not found") - } - return nil -} diff --git a/builtin/providers/dme/resource_dme_record_test.go b/builtin/providers/dme/resource_dme_record_test.go deleted file mode 100644 index 9c7c0ffb7..000000000 --- a/builtin/providers/dme/resource_dme_record_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package dme - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/soniah/dnsmadeeasy" -) - -var _ = fmt.Sprintf("dummy") // dummy -var _ = os.DevNull // dummy - -func TestAccDMERecord_basic(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigA, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testa"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "A"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "1.1.1.1"), - 
resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordCName(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigCName, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testcname"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "CNAME"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "foo"), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordMX(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigMX, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testmx"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "MX"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "foo"), - resource.TestCheckResourceAttr( - "dme_record.test", "mxLevel", "10"), - 
resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordHTTPRED(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigHTTPRED, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testhttpred"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "HTTPRED"), - - resource.TestCheckResourceAttr( - "dme_record.test", "value", "https://github.com/soniah/terraform-provider-dme"), - resource.TestCheckResourceAttr( - "dme_record.test", "hardLink", "true"), - resource.TestCheckResourceAttr( - "dme_record.test", "redirectType", "Hidden Frame Masked"), - resource.TestCheckResourceAttr( - "dme_record.test", "title", "An Example"), - resource.TestCheckResourceAttr( - "dme_record.test", "keywords", "terraform example"), - resource.TestCheckResourceAttr( - "dme_record.test", "description", "This is a description"), - - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordTXT(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
fmt.Sprintf(testDMERecordConfigTXT, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testtxt"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "TXT"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "\"foo\""), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordSPF(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigSPF, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testspf"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "SPF"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "\"foo\""), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordPTR(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigPTR, domainid), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testptr"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "PTR"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "foo"), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordNS(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigNS, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testns"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "NS"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "foo"), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordAAAA(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigAAAA, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - 
resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testaaaa"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "AAAA"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "fe80::0202:b3ff:fe1e:8329"), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func TestAccDMERecordSRV(t *testing.T) { - var record dnsmadeeasy.Record - domainid := os.Getenv("DME_DOMAINID") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDMERecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDMERecordConfigSRV, domainid), - Check: resource.ComposeTestCheckFunc( - testAccCheckDMERecordExists("dme_record.test", &record), - resource.TestCheckResourceAttr( - "dme_record.test", "domainid", domainid), - resource.TestCheckResourceAttr( - "dme_record.test", "name", "testsrv"), - resource.TestCheckResourceAttr( - "dme_record.test", "type", "SRV"), - resource.TestCheckResourceAttr( - "dme_record.test", "value", "foo"), - resource.TestCheckResourceAttr( - "dme_record.test", "priority", "10"), - resource.TestCheckResourceAttr( - "dme_record.test", "weight", "20"), - resource.TestCheckResourceAttr( - "dme_record.test", "port", "30"), - resource.TestCheckResourceAttr( - "dme_record.test", "ttl", "2000"), - resource.TestCheckResourceAttr( - "dme_record.test", "gtdLocation", "DEFAULT"), - ), - }, - }, - }) -} - -func testAccCheckDMERecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*dnsmadeeasy.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "dnsmadeeasy_record" { - continue - } - - _, err := client.ReadRecord(rs.Primary.Attributes["domainid"], rs.Primary.ID) - - if err == nil { - 
return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckDMERecordExists(n string, record *dnsmadeeasy.Record) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*dnsmadeeasy.Client) - - foundRecord, err := client.ReadRecord(rs.Primary.Attributes["domainid"], rs.Primary.ID) - - if err != nil { - return err - } - - if foundRecord.StringRecordID() != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *record = *foundRecord - - return nil - } -} - -const testDMERecordConfigA = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testa" - type = "A" - value = "1.1.1.1" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigCName = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testcname" - type = "CNAME" - value = "foo" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigAName = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testaname" - type = "ANAME" - value = "foo" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigMX = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testmx" - type = "MX" - value = "foo" - mxLevel = 10 - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigHTTPRED = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testhttpred" - type = "HTTPRED" - value = "https://github.com/soniah/terraform-provider-dme" - hardLink = true - redirectType = "Hidden Frame Masked" - title = "An Example" - keywords = "terraform example" - description = "This is a description" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigTXT = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testtxt" - type = "TXT" - value = "foo" - ttl = 2000 - 
gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigSPF = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testspf" - type = "SPF" - value = "foo" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigPTR = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testptr" - type = "PTR" - value = "foo" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigNS = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testns" - type = "NS" - value = "foo" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigAAAA = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testaaaa" - type = "AAAA" - value = "FE80::0202:B3FF:FE1E:8329" - ttl = 2000 - gtdLocation = "DEFAULT" -}` - -const testDMERecordConfigSRV = ` -resource "dme_record" "test" { - domainid = "%s" - name = "testsrv" - type = "SRV" - value = "foo" - priority = 10 - weight = 20 - port = 30 - ttl = 2000 - gtdLocation = "DEFAULT" -}` diff --git a/builtin/providers/dns/acceptance.sh b/builtin/providers/dns/acceptance.sh deleted file mode 100755 index a86aea43e..000000000 --- a/builtin/providers/dns/acceptance.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -eu -set -x - -# Test domains -export DNS_DOMAIN_FORWARD="example.com." -export DNS_DOMAIN_REVERSE="1.168.192.in-addr.arpa." 
- -# Run with no authentication - -export DNS_UPDATE_SERVER=127.0.0.1 -docker run -d -p 53:53/udp \ - -e BIND_DOMAIN_FORWARD=${DNS_DOMAIN_FORWARD} \ - -e BIND_DOMAIN_REVERSE=${DNS_DOMAIN_REVERSE} \ - -e BIND_INSECURE=true \ - --name bind_insecure drebes/bind -make testacc TEST=./builtin/providers/dns -docker stop bind_insecure -docker rm bind_insecure - -# Run with authentication - -export DNS_UPDATE_KEYNAME=${DNS_DOMAIN_FORWARD} -export DNS_UPDATE_KEYALGORITHM="hmac-md5" -export DNS_UPDATE_KEYSECRET="c3VwZXJzZWNyZXQ=" -docker run -d -p 53:53/udp \ - -e BIND_DOMAIN_FORWARD=${DNS_DOMAIN_FORWARD} \ - -e BIND_DOMAIN_REVERSE=${DNS_DOMAIN_REVERSE} \ - -e BIND_KEY_NAME=${DNS_UPDATE_KEYNAME} \ - -e BIND_KEY_ALGORITHM=${DNS_UPDATE_KEYALGORITHM} \ - -e BIND_KEY_SECRET=${DNS_UPDATE_KEYSECRET} \ - --name bind_secure drebes/bind -make testacc TEST=./builtin/providers/dns -docker stop bind_secure -docker rm bind_secure diff --git a/builtin/providers/dns/config.go b/builtin/providers/dns/config.go deleted file mode 100644 index 8921ebb84..000000000 --- a/builtin/providers/dns/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package dns - -import ( - "fmt" - "github.com/miekg/dns" - "log" -) - -type Config struct { - server string - port int - keyname string - keyalgo string - keysecret string -} - -type DNSClient struct { - c *dns.Client - srv_addr string - keyname string - keysecret string - keyalgo string -} - -// Configures and returns a fully initialized DNSClient -func (c *Config) Client() (interface{}, error) { - log.Println("[INFO] Building DNSClient config structure") - - var client DNSClient - client.srv_addr = fmt.Sprintf("%s:%d", c.server, c.port) - authCfgOk := false - if (c.keyname == "" && c.keysecret == "" && c.keyalgo == "") || - (c.keyname != "" && c.keysecret != "" && c.keyalgo != "") { - authCfgOk = true - } - if !authCfgOk { - return nil, fmt.Errorf("Error configuring provider: when using authentication, \"key_name\", \"key_secret\" and \"key_algorithm\" should be 
non empty") - } - client.c = new(dns.Client) - if c.keyname != "" { - client.keyname = c.keyname - client.keysecret = c.keysecret - keyalgo, err := convertHMACAlgorithm(c.keyalgo) - if err != nil { - return nil, fmt.Errorf("Error configuring provider: %s", err) - } - client.keyalgo = keyalgo - client.c.TsigSecret = map[string]string{c.keyname: c.keysecret} - } - return &client, nil -} - -// Validates and converts HMAC algorithm -func convertHMACAlgorithm(name string) (string, error) { - switch name { - case "hmac-md5": - return dns.HmacMD5, nil - case "hmac-sha1": - return dns.HmacSHA1, nil - case "hmac-sha256": - return dns.HmacSHA256, nil - case "hmac-sha512": - return dns.HmacSHA512, nil - default: - return "", fmt.Errorf("Unknown HMAC algorithm: %s", name) - } -} diff --git a/builtin/providers/dns/data_dns_a_record_set.go b/builtin/providers/dns/data_dns_a_record_set.go deleted file mode 100644 index 4a08a179e..000000000 --- a/builtin/providers/dns/data_dns_a_record_set.go +++ /dev/null @@ -1,52 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "sort" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceDnsARecordSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDnsARecordSetRead, - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "addrs": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - }, - } -} - -func dataSourceDnsARecordSetRead(d *schema.ResourceData, meta interface{}) error { - host := d.Get("host").(string) - - records, err := net.LookupIP(host) - if err != nil { - return fmt.Errorf("error looking up A records for %q: %s", host, err) - } - - addrs := make([]string, 0) - - for _, ip := range records { - // LookupIP returns A (IPv4) and AAAA (IPv6) records - // Filter out AAAA records - if ipv4 := ip.To4(); ipv4 != nil { - addrs = append(addrs, ipv4.String()) - } - } - - 
sort.Strings(addrs) - - d.Set("addrs", addrs) - d.SetId(host) - - return nil -} diff --git a/builtin/providers/dns/data_dns_a_record_set_test.go b/builtin/providers/dns/data_dns_a_record_set_test.go deleted file mode 100644 index 34f72f6e7..000000000 --- a/builtin/providers/dns/data_dns_a_record_set_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataDnsARecordSet_Basic(t *testing.T) { - tests := []struct { - DataSourceBlock string - DataSourceName string - Expected []string - Host string - }{ - { - ` - data "dns_a_record_set" "foo" { - host = "127.0.0.1.nip.io" - } - `, - "foo", - []string{ - "127.0.0.1", - }, - "127.0.0.1.nip.io", - }, - { - ` - data "dns_a_record_set" "ntp" { - host = "time-c.nist.gov" - } - `, - "ntp", - []string{ - "129.6.15.30", - }, - "time-c.nist.gov", - }, - } - - for _, test := range tests { - recordName := fmt.Sprintf("data.dns_a_record_set.%s", test.DataSourceName) - - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - testCheckAttrStringArray(recordName, "addrs", test.Expected), - ), - }, - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(recordName, "id", test.Host), - ), - }, - }, - }) - } - -} diff --git a/builtin/providers/dns/data_dns_cname_record_set.go b/builtin/providers/dns/data_dns_cname_record_set.go deleted file mode 100644 index 282d9846b..000000000 --- a/builtin/providers/dns/data_dns_cname_record_set.go +++ /dev/null @@ -1,41 +0,0 @@ -package dns - -import ( - "fmt" - "net" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceDnsCnameRecordSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDnsCnameRecordSetRead, - - Schema: map[string]*schema.Schema{ - "host": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceDnsCnameRecordSetRead(d *schema.ResourceData, meta interface{}) error { - host := d.Get("host").(string) - - cname, err := net.LookupCNAME(host) - if err != nil { - return fmt.Errorf("error looking up CNAME records for %q: %s", host, err) - } - - d.Set("cname", cname) - d.SetId(host) - - return nil -} diff --git a/builtin/providers/dns/data_dns_cname_record_set_test.go b/builtin/providers/dns/data_dns_cname_record_set_test.go deleted file mode 100644 index 6bbdbdfa0..000000000 --- a/builtin/providers/dns/data_dns_cname_record_set_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package dns - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDnsCnameRecordSet_Basic(t *testing.T) { - tests := []struct { - DataSourceBlock string - Expected string - Host string - }{ - { - ` - data "dns_cname_record_set" "foo" { - host = "www.hashicorp.com" - } - `, - "dualstack.s.shared.global.fastly.net.", - "www.hashicorp.com", - }, - } - - for _, test := range tests { - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.dns_cname_record_set.foo", "cname", test.Expected), - ), - }, - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.dns_cname_record_set.foo", "id", test.Host), - ), - }, - }, - }) - } -} diff --git a/builtin/providers/dns/data_dns_txt_record_set.go b/builtin/providers/dns/data_dns_txt_record_set.go deleted file mode 100644 index ad03ccb24..000000000 --- a/builtin/providers/dns/data_dns_txt_record_set.go +++ /dev/null @@ -1,52 +0,0 @@ -package dns - -import ( - "fmt" - "net" - - 
"github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceDnsTxtRecordSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDnsTxtRecordSetRead, - - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "record": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "records": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - }, - } -} - -func dataSourceDnsTxtRecordSetRead(d *schema.ResourceData, meta interface{}) error { - host := d.Get("host").(string) - - records, err := net.LookupTXT(host) - if err != nil { - return fmt.Errorf("error looking up TXT records for %q: %s", host, err) - } - - if len(records) > 0 { - d.Set("record", records[0]) - } else { - d.Set("record", "") - } - d.Set("records", records) - d.SetId(host) - - return nil -} diff --git a/builtin/providers/dns/data_dns_txt_record_set_test.go b/builtin/providers/dns/data_dns_txt_record_set_test.go deleted file mode 100644 index 6958ee269..000000000 --- a/builtin/providers/dns/data_dns_txt_record_set_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDataDnsTxtRecordSet_Basic(t *testing.T) { - tests := []struct { - DataSourceBlock string - DataSourceName string - Expected []string - Host string - }{ - { - ` - data "dns_txt_record_set" "foo" { - host = "hashicorp.com" - } - `, - "foo", - []string{ - "google-site-verification=oqoe6Z7OB_726BNm33g4OdKK57KDtCfH266f8wAvLBo", - "v=spf1 include:_spf.google.com include:spf.mail.intercom.io include:stspg-customer.com include:mail.zendesk.com ~all", - "status-page-domain-verification=dgtdvzlp8tfn", - }, - "hashicorp.com", - }, - } - - for _, test := range tests { - recordName := fmt.Sprintf("data.dns_txt_record_set.%s", test.DataSourceName) - resource.UnitTest(t, 
resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - testCheckAttrStringArray(recordName, "records", test.Expected), - ), - }, - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - testCheckAttrStringArrayMember(recordName, "record", test.Expected), - ), - }, - resource.TestStep{ - Config: test.DataSourceBlock, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(recordName, "id", test.Host), - ), - }, - }, - }) - } -} diff --git a/builtin/providers/dns/provider.go b/builtin/providers/dns/provider.go deleted file mode 100644 index 8d960dac6..000000000 --- a/builtin/providers/dns/provider.go +++ /dev/null @@ -1,171 +0,0 @@ -package dns - -import ( - "fmt" - "os" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/miekg/dns" -) - -// Provider returns a schema.Provider for DNS dynamic updates. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "update": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "server": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_SERVER", nil), - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 53, - }, - "key_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYNAME", nil), - }, - "key_algorithm": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYALGORITHM", nil), - }, - "key_secret": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DNS_UPDATE_KEYSECRET", nil), - }, - }, - }, - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "dns_a_record_set": dataSourceDnsARecordSet(), - "dns_cname_record_set": dataSourceDnsCnameRecordSet(), - "dns_txt_record_set": dataSourceDnsTxtRecordSet(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "dns_a_record_set": resourceDnsARecordSet(), - "dns_aaaa_record_set": resourceDnsAAAARecordSet(), - "dns_cname_record": resourceDnsCnameRecord(), - "dns_ptr_record": resourceDnsPtrRecord(), - }, - - ConfigureFunc: configureProvider, - } -} - -func configureProvider(d *schema.ResourceData) (interface{}, error) { - - var server, keyname, keyalgo, keysecret string - var port int - - // if the update block is missing, schema.EnvDefaultFunc is not called - if v, ok := d.GetOk("update"); ok { - update := v.([]interface{})[0].(map[string]interface{}) - if val, ok := update["port"]; ok { - port = int(val.(int)) - } - if val, ok := update["server"]; ok { - server = val.(string) - } - if val, ok := update["key_name"]; ok { - keyname = val.(string) - } - if val, ok := 
update["key_algorithm"]; ok { - keyalgo = val.(string) - } - if val, ok := update["key_secret"]; ok { - keysecret = val.(string) - } - } else { - if len(os.Getenv("DNS_UPDATE_SERVER")) > 0 { - server = os.Getenv("DNS_UPDATE_SERVER") - } else { - return nil, nil - } - port = 53 - if len(os.Getenv("DNS_UPDATE_KEYNAME")) > 0 { - keyname = os.Getenv("DNS_UPDATE_KEYNAME") - } - if len(os.Getenv("DNS_UPDATE_KEYALGORITHM")) > 0 { - keyalgo = os.Getenv("DNS_UPDATE_KEYALGORITHM") - } - if len(os.Getenv("DNS_UPDATE_KEYSECRET")) > 0 { - keysecret = os.Getenv("DNS_UPDATE_KEYSECRET") - } - } - - config := Config{ - server: server, - port: port, - keyname: keyname, - keyalgo: keyalgo, - keysecret: keysecret, - } - - return config.Client() -} - -func getAVal(record interface{}) (string, error) { - - recstr := record.(*dns.A).String() - var name, ttl, class, typ, addr string - - _, err := fmt.Sscanf(recstr, "%s\t%s\t%s\t%s\t%s", &name, &ttl, &class, &typ, &addr) - if err != nil { - return "", fmt.Errorf("Error parsing record: %s", err) - } - - return addr, nil -} - -func getAAAAVal(record interface{}) (string, error) { - - recstr := record.(*dns.AAAA).String() - var name, ttl, class, typ, addr string - - _, err := fmt.Sscanf(recstr, "%s\t%s\t%s\t%s\t%s", &name, &ttl, &class, &typ, &addr) - if err != nil { - return "", fmt.Errorf("Error parsing record: %s", err) - } - - return addr, nil -} - -func getCnameVal(record interface{}) (string, error) { - - recstr := record.(*dns.CNAME).String() - var name, ttl, class, typ, cname string - - _, err := fmt.Sscanf(recstr, "%s\t%s\t%s\t%s\t%s", &name, &ttl, &class, &typ, &cname) - if err != nil { - return "", fmt.Errorf("Error parsing record: %s", err) - } - - return cname, nil -} - -func getPtrVal(record interface{}) (string, error) { - - recstr := record.(*dns.PTR).String() - var name, ttl, class, typ, ptr string - - _, err := fmt.Sscanf(recstr, "%s\t%s\t%s\t%s\t%s", &name, &ttl, &class, &typ, &ptr) - if err != nil { - return "", 
fmt.Errorf("Error parsing record: %s", err) - } - - return ptr, nil -} diff --git a/builtin/providers/dns/provider_test.go b/builtin/providers/dns/provider_test.go deleted file mode 100644 index 52e1c6c26..000000000 --- a/builtin/providers/dns/provider_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package dns - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "dns": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - v := os.Getenv("DNS_UPDATE_SERVER") - if v == "" { - t.Fatal("DNS_UPDATE_SERVER must be set for acceptance tests") - } -} diff --git a/builtin/providers/dns/resource_dns_a_record_set.go b/builtin/providers/dns/resource_dns_a_record_set.go deleted file mode 100644 index 75991edfd..000000000 --- a/builtin/providers/dns/resource_dns_a_record_set.go +++ /dev/null @@ -1,212 +0,0 @@ -package dns - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/miekg/dns" -) - -func resourceDnsARecordSet() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsARecordSetCreate, - Read: resourceDnsARecordSetRead, - Update: resourceDnsARecordSetUpdate, - Delete: resourceDnsARecordSetDelete, - - Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "addresses": &schema.Schema{ - Type: schema.TypeSet, - 
Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 3600, - }, - }, - } -} - -func resourceDnsARecordSetCreate(d *schema.ResourceData, meta interface{}) error { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error creating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - d.SetId(rec_fqdn) - - return resourceDnsARecordSetUpdate(d, meta) -} - -func resourceDnsARecordSetRead(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeA) - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record: %v", r.Rcode) - } - - addresses := schema.NewSet(schema.HashString, nil) - for _, record := range r.Answer { - addr, err := getAVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - addresses.Add(addr) - } - if !addresses.Equal(d.Get("addresses")) { - d.SetId("") - return fmt.Errorf("DNS record differs") - } - return nil - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsARecordSetUpdate(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - ttl := d.Get("ttl").(int) - - if rec_zone != 
dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - if d.HasChange("addresses") { - o, n := d.GetChange("addresses") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - - // Loop through all the old addresses and remove them - for _, addr := range remove { - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d A %s", rec_fqdn, ttl, addr.(string))) - msg.Remove([]dns.RR{rr_remove}) - } - // Loop through all the new addresses and insert them - for _, addr := range add { - rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d A %s", rec_fqdn, ttl, addr.(string))) - msg.Insert([]dns.RR{rr_insert}) - } - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %v", r.Rcode) - } - - addresses := ns - d.Set("addresses", addresses) - } - - return resourceDnsARecordSetRead(d, meta) - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsARecordSetDelete(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := 
new(dns.Msg) - - msg.SetUpdate(rec_zone) - - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s 0 A", rec_fqdn)) - msg.RemoveRRset([]dns.RR{rr_remove}) - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error deleting DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error deleting DNS record: %v", r.Rcode) - } - - return nil - } else { - return fmt.Errorf("update server is not set") - } -} diff --git a/builtin/providers/dns/resource_dns_a_record_set_test.go b/builtin/providers/dns/resource_dns_a_record_set_test.go deleted file mode 100644 index 45fc1a525..000000000 --- a/builtin/providers/dns/resource_dns_a_record_set_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/miekg/dns" -) - -func TestAccDnsARecordSet_Basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsARecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsARecordSet_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("dns_a_record_set.foo", "addresses.#", "2"), - testAccCheckDnsARecordSetExists(t, "dns_a_record_set.foo", []interface{}{"192.168.0.2", "192.168.0.1"}), - ), - }, - resource.TestStep{ - Config: testAccDnsARecordSet_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("dns_a_record_set.foo", "addresses.#", "3"), - testAccCheckDnsARecordSetExists(t, "dns_a_record_set.foo", []interface{}{"10.0.0.3", "10.0.0.2", "10.0.0.1"}), - ), - }, - }, - }) -} - -func testAccCheckDnsARecordSetDestroy(s *terraform.State) error { - meta := testAccProvider.Meta() - c := 
meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - for _, rs := range s.RootModule().Resources { - if rs.Type != "dns_a_record_set" { - continue - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeA) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeNameError { - return fmt.Errorf("DNS record still exists: %v", r.Rcode) - } - } - - return nil -} - -func testAccCheckDnsARecordSetExists(t *testing.T, n string, addr []interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeA) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record") - } - - addresses := schema.NewSet(schema.HashString, nil) - expected := schema.NewSet(schema.HashString, addr) - for _, record := range r.Answer { - addr, err := getAVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - addresses.Add(addr) - } - if 
!addresses.Equal(expected) { - return fmt.Errorf("DNS record differs: expected %v, found %v", expected, addresses) - } - return nil - } -} - -var testAccDnsARecordSet_basic = fmt.Sprintf(` - resource "dns_a_record_set" "foo" { - zone = "example.com." - name = "foo" - addresses = ["192.168.0.1", "192.168.0.2"] - ttl = 300 - }`) - -var testAccDnsARecordSet_update = fmt.Sprintf(` - resource "dns_a_record_set" "foo" { - zone = "example.com." - name = "foo" - addresses = ["10.0.0.1", "10.0.0.2", "10.0.0.3"] - ttl = 300 - }`) diff --git a/builtin/providers/dns/resource_dns_aaaa_record_set.go b/builtin/providers/dns/resource_dns_aaaa_record_set.go deleted file mode 100644 index 96bcc4c30..000000000 --- a/builtin/providers/dns/resource_dns_aaaa_record_set.go +++ /dev/null @@ -1,212 +0,0 @@ -package dns - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/miekg/dns" -) - -func resourceDnsAAAARecordSet() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsAAAARecordSetCreate, - Read: resourceDnsAAAARecordSetRead, - Update: resourceDnsAAAARecordSetUpdate, - Delete: resourceDnsAAAARecordSetDelete, - - Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "addresses": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 3600, - }, - }, - } -} - -func resourceDnsAAAARecordSetCreate(d *schema.ResourceData, meta interface{}) error { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error creating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, 
rec_zone) - - d.SetId(rec_fqdn) - - return resourceDnsAAAARecordSetUpdate(d, meta) -} - -func resourceDnsAAAARecordSetRead(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeAAAA) - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record: %v", r.Rcode) - } - - addresses := schema.NewSet(schema.HashString, nil) - for _, record := range r.Answer { - addr, err := getAAAAVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - addresses.Add(addr) - } - if !addresses.Equal(d.Get("addresses")) { - d.SetId("") - return fmt.Errorf("DNS record differs") - } - return nil - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsAAAARecordSetUpdate(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - ttl := d.Get("ttl").(int) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - if d.HasChange("addresses") { - o, n := d.GetChange("addresses") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := os.Difference(ns).List() - add := 
ns.Difference(os).List() - - // Loop through all the old addresses and remove them - for _, addr := range remove { - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d AAAA %s", rec_fqdn, ttl, addr.(string))) - msg.Remove([]dns.RR{rr_remove}) - } - // Loop through all the new addresses and insert them - for _, addr := range add { - rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d AAAA %s", rec_fqdn, ttl, addr.(string))) - msg.Insert([]dns.RR{rr_insert}) - } - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %v", r.Rcode) - } - - addresses := ns - d.Set("addresses", addresses) - } - - return resourceDnsAAAARecordSetRead(d, meta) - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsAAAARecordSetDelete(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s 0 AAAA", rec_fqdn)) - msg.RemoveRRset([]dns.RR{rr_remove}) - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error deleting DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error deleting DNS record: %v", r.Rcode) - } - - return nil - } else { - return fmt.Errorf("update server is 
not set") - } -} diff --git a/builtin/providers/dns/resource_dns_aaaa_record_set_test.go b/builtin/providers/dns/resource_dns_aaaa_record_set_test.go deleted file mode 100644 index ec8ea1571..000000000 --- a/builtin/providers/dns/resource_dns_aaaa_record_set_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/miekg/dns" -) - -func TestAccDnsAAAARecordSet_basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsAAAARecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsAAAARecordSet_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("dns_aaaa_record_set.bar", "addresses.#", "2"), - testAccCheckDnsAAAARecordSetExists(t, "dns_aaaa_record_set.bar", []interface{}{"fdd5:e282:43b8:5303:dead:beef:cafe:babe", "fdd5:e282:43b8:5303:cafe:babe:dead:beef"}), - ), - }, - resource.TestStep{ - Config: testAccDnsAAAARecordSet_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("dns_aaaa_record_set.bar", "addresses.#", "2"), - testAccCheckDnsAAAARecordSetExists(t, "dns_aaaa_record_set.bar", []interface{}{"fdd5:e282:43b8:5303:beef:dead:babe:cafe", "fdd5:e282:43b8:5303:babe:cafe:beef:dead"}), - ), - }, - }, - }) -} - -func testAccCheckDnsAAAARecordSetDestroy(s *terraform.State) error { - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - for _, rs := range s.RootModule().Resources { - if rs.Type != "dns_aaaa_record_set" { - continue - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - 
rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeAAAA) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeNameError { - return fmt.Errorf("DNS record still exists: %v", r.Rcode) - } - } - - return nil -} - -func testAccCheckDnsAAAARecordSetExists(t *testing.T, n string, addr []interface{}) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeAAAA) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record") - } - - addresses := schema.NewSet(schema.HashString, nil) - expected := schema.NewSet(schema.HashString, addr) - for _, record := range r.Answer { - addr, err := getAAAAVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - addresses.Add(addr) - } - if !addresses.Equal(expected) { - return fmt.Errorf("DNS record differs: expected %v, found %v", expected, addresses) - } - return nil - } -} - -var testAccDnsAAAARecordSet_basic = fmt.Sprintf(` - resource "dns_aaaa_record_set" "bar" { - zone = "example.com." 
- name = "bar" - addresses = ["fdd5:e282:43b8:5303:dead:beef:cafe:babe", "fdd5:e282:43b8:5303:cafe:babe:dead:beef"] - ttl = 300 - }`) - -var testAccDnsAAAARecordSet_update = fmt.Sprintf(` - resource "dns_aaaa_record_set" "bar" { - zone = "example.com." - name = "bar" - addresses = ["fdd5:e282:43b8:5303:beef:dead:babe:cafe", "fdd5:e282:43b8:5303:babe:cafe:beef:dead"] - ttl = 300 - }`) diff --git a/builtin/providers/dns/resource_dns_cname_record.go b/builtin/providers/dns/resource_dns_cname_record.go deleted file mode 100644 index 25851ec1a..000000000 --- a/builtin/providers/dns/resource_dns_cname_record.go +++ /dev/null @@ -1,219 +0,0 @@ -package dns - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/miekg/dns" -) - -func resourceDnsCnameRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsCnameRecordCreate, - Read: resourceDnsCnameRecordRead, - Update: resourceDnsCnameRecordUpdate, - Delete: resourceDnsCnameRecordDelete, - - Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 3600, - }, - }, - } -} - -func resourceDnsCnameRecordCreate(d *schema.ResourceData, meta interface{}) error { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_cname := d.Get("cname").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error creating DNS record: \"zone\" should be an FQDN") - } - - if rec_cname != dns.Fqdn(rec_cname) { - return fmt.Errorf("Error creating DNS record: \"cname\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - d.SetId(rec_fqdn) - - return resourceDnsCnameRecordUpdate(d, meta) 
-} - -func resourceDnsCnameRecordRead(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_cname := d.Get("cname").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - if rec_cname != dns.Fqdn(rec_cname) { - return fmt.Errorf("Error reading DNS record: \"cname\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeCNAME) - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record: %v", r.Rcode) - } - - if len(r.Answer) > 1 { - return fmt.Errorf("Error querying DNS record: multiple responses received") - } - record := r.Answer[0] - cname, err := getCnameVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if rec_cname != cname { - d.SetId("") - return fmt.Errorf("DNS record differs") - } - return nil - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsCnameRecordUpdate(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_cname := d.Get("cname").(string) - ttl := d.Get("ttl").(int) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - if rec_cname != dns.Fqdn(rec_cname) { - return fmt.Errorf("Error updating DNS record: \"cname\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo 
- - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - if d.HasChange("cname") { - o, n := d.GetChange("cname") - - if o != "" { - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d CNAME %s", rec_fqdn, ttl, o)) - msg.Remove([]dns.RR{rr_remove}) - } - if n != "" { - rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d CNAME %s", rec_fqdn, ttl, n)) - msg.Insert([]dns.RR{rr_insert}) - } - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %v", r.Rcode) - } - - cname := n - d.Set("cname", cname) - } - - return resourceDnsCnameRecordRead(d, meta) - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsCnameRecordDelete(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s 0 CNAME", rec_fqdn)) - msg.RemoveRRset([]dns.RR{rr_remove}) - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error deleting DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error deleting DNS record: %v", r.Rcode) - } - - return nil - } else { - return fmt.Errorf("update server is not set") - } -} diff --git a/builtin/providers/dns/resource_dns_cname_record_test.go 
b/builtin/providers/dns/resource_dns_cname_record_test.go deleted file mode 100644 index ef000933d..000000000 --- a/builtin/providers/dns/resource_dns_cname_record_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/miekg/dns" -) - -func TestAccDnsCnameRecord_basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsCnameRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsCnameRecord_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsCnameRecordExists(t, "dns_cname_record.foo", "bar.example.com."), - ), - }, - resource.TestStep{ - Config: testAccDnsCnameRecord_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsCnameRecordExists(t, "dns_cname_record.foo", "baz.example.com."), - ), - }, - }, - }) -} - -func testAccCheckDnsCnameRecordDestroy(s *terraform.State) error { - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - for _, rs := range s.RootModule().Resources { - if rs.Type != "dns_cname_record" { - continue - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeCNAME) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeNameError { - return fmt.Errorf("DNS record still exists: %v", r.Rcode) - } - } - - return nil -} - -func testAccCheckDnsCnameRecordExists(t *testing.T, n string, expected string) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypeCNAME) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record") - } - - if len(r.Answer) > 1 { - return fmt.Errorf("Error querying DNS record: multiple responses received") - } - record := r.Answer[0] - cname, err := getCnameVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if expected != cname { - return fmt.Errorf("DNS record differs: expected %v, found %v", expected, cname) - } - return nil - } -} - -var testAccDnsCnameRecord_basic = fmt.Sprintf(` - resource "dns_cname_record" "foo" { - zone = "example.com." - name = "foo" - cname = "bar.example.com." - ttl = 300 - }`) - -var testAccDnsCnameRecord_update = fmt.Sprintf(` - resource "dns_cname_record" "foo" { - zone = "example.com." - name = "baz" - cname = "baz.example.com." 
- ttl = 300 - }`) diff --git a/builtin/providers/dns/resource_dns_ptr_record.go b/builtin/providers/dns/resource_dns_ptr_record.go deleted file mode 100644 index 7515bd2f8..000000000 --- a/builtin/providers/dns/resource_dns_ptr_record.go +++ /dev/null @@ -1,219 +0,0 @@ -package dns - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/miekg/dns" -) - -func resourceDnsPtrRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsPtrRecordCreate, - Read: resourceDnsPtrRecordRead, - Update: resourceDnsPtrRecordUpdate, - Delete: resourceDnsPtrRecordDelete, - - Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ptr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 3600, - }, - }, - } -} - -func resourceDnsPtrRecordCreate(d *schema.ResourceData, meta interface{}) error { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_ptr := d.Get("ptr").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error creating DNS record: \"zone\" should be an FQDN") - } - - if rec_ptr != dns.Fqdn(rec_ptr) { - return fmt.Errorf("Error creating DNS record: \"ptr\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - d.SetId(rec_fqdn) - - return resourceDnsPtrRecordUpdate(d, meta) -} - -func resourceDnsPtrRecordRead(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_ptr := d.Get("ptr").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - if rec_ptr != dns.Fqdn(rec_ptr) { - return 
fmt.Errorf("Error reading DNS record: \"ptr\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypePTR) - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record: %v", r.Rcode) - } - - if len(r.Answer) > 1 { - return fmt.Errorf("Error querying DNS record: multiple responses received") - } - record := r.Answer[0] - ptr, err := getPtrVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if rec_ptr != ptr { - d.SetId("") - return fmt.Errorf("DNS record differs") - } - return nil - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsPtrRecordUpdate(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - rec_ptr := d.Get("ptr").(string) - ttl := d.Get("ttl").(int) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - if rec_ptr != dns.Fqdn(rec_ptr) { - return fmt.Errorf("Error updating DNS record: \"ptr\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - if d.HasChange("ptr") { - o, n := d.GetChange("ptr") - - if o != "" { - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s %d PTR %s", rec_fqdn, ttl, o)) - msg.Remove([]dns.RR{rr_remove}) - } - if n != "" { - rr_insert, _ := dns.NewRR(fmt.Sprintf("%s %d PTR %s", rec_fqdn, ttl, n)) - msg.Insert([]dns.RR{rr_insert}) - } - - if keyname != "" { - msg.SetTsig(keyname, 
keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - d.SetId("") - return fmt.Errorf("Error updating DNS record: %v", r.Rcode) - } - - ptr := n - d.Set("ptr", ptr) - } - - return resourceDnsPtrRecordRead(d, meta) - } else { - return fmt.Errorf("update server is not set") - } -} - -func resourceDnsPtrRecordDelete(d *schema.ResourceData, meta interface{}) error { - - if meta != nil { - - rec_name := d.Get("name").(string) - rec_zone := d.Get("zone").(string) - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error updating DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - keyname := meta.(*DNSClient).keyname - keyalgo := meta.(*DNSClient).keyalgo - - msg := new(dns.Msg) - - msg.SetUpdate(rec_zone) - - rr_remove, _ := dns.NewRR(fmt.Sprintf("%s 0 PTR", rec_fqdn)) - msg.RemoveRRset([]dns.RR{rr_remove}) - - if keyname != "" { - msg.SetTsig(keyname, keyalgo, 300, time.Now().Unix()) - } - - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error deleting DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error deleting DNS record: %v", r.Rcode) - } - - return nil - } else { - return fmt.Errorf("update server is not set") - } -} diff --git a/builtin/providers/dns/resource_dns_ptr_record_test.go b/builtin/providers/dns/resource_dns_ptr_record_test.go deleted file mode 100644 index 4729bf1e5..000000000 --- a/builtin/providers/dns/resource_dns_ptr_record_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package dns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/miekg/dns" -) - -func TestAccDnsPtrRecord_basic(t *testing.T) { - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsPtrRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsPtrRecord_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsPtrRecordExists(t, "dns_ptr_record.foo", "bar.example.com."), - ), - }, - resource.TestStep{ - Config: testAccDnsPtrRecord_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsPtrRecordExists(t, "dns_ptr_record.foo", "baz.example.com."), - ), - }, - }, - }) -} - -func testAccCheckDnsPtrRecordDestroy(s *terraform.State) error { - meta := testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - for _, rs := range s.RootModule().Resources { - if rs.Type != "dns_ptr_record" { - continue - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypePTR) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeNameError { - return fmt.Errorf("DNS record still exists: %v", r.Rcode) - } - } - - return nil -} - -func testAccCheckDnsPtrRecordExists(t *testing.T, n string, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - rec_name := rs.Primary.Attributes["name"] - rec_zone := rs.Primary.Attributes["zone"] - - if rec_zone != dns.Fqdn(rec_zone) { - return fmt.Errorf("Error reading DNS record: \"zone\" should be an FQDN") - } - - rec_fqdn := fmt.Sprintf("%s.%s", rec_name, rec_zone) - - meta := 
testAccProvider.Meta() - c := meta.(*DNSClient).c - srv_addr := meta.(*DNSClient).srv_addr - - msg := new(dns.Msg) - msg.SetQuestion(rec_fqdn, dns.TypePTR) - r, _, err := c.Exchange(msg, srv_addr) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if r.Rcode != dns.RcodeSuccess { - return fmt.Errorf("Error querying DNS record") - } - - if len(r.Answer) > 1 { - return fmt.Errorf("Error querying DNS record: multiple responses received") - } - record := r.Answer[0] - ptr, err := getPtrVal(record) - if err != nil { - return fmt.Errorf("Error querying DNS record: %s", err) - } - if expected != ptr { - return fmt.Errorf("DNS record differs: expected %v, found %v", expected, ptr) - } - return nil - } -} - -var testAccDnsPtrRecord_basic = fmt.Sprintf(` - resource "dns_ptr_record" "foo" { - zone = "example.com." - name = "r._dns-sd._udp" - ptr = "bar.example.com." - ttl = 300 - }`) - -var testAccDnsPtrRecord_update = fmt.Sprintf(` - resource "dns_ptr_record" "foo" { - zone = "example.com." - name = "r._dns-sd._udp" - ptr = "baz.example.com." 
- ttl = 300 - }`) diff --git a/builtin/providers/dns/test_check_attr_string_array.go b/builtin/providers/dns/test_check_attr_string_array.go deleted file mode 100644 index 978b62a00..000000000 --- a/builtin/providers/dns/test_check_attr_string_array.go +++ /dev/null @@ -1,55 +0,0 @@ -package dns - -import ( - "fmt" - "strconv" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testCheckAttrStringArray(name, key string, value []string) r.TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - is := rs.Primary - if is == nil { - return fmt.Errorf("No primary instance: %s", name) - } - - attrKey := fmt.Sprintf("%s.#", key) - count, ok := is.Attributes[attrKey] - if !ok { - return fmt.Errorf("Attributes not found for %s", attrKey) - } - - gotCount, _ := strconv.Atoi(count) - if gotCount != len(value) { - return fmt.Errorf("Mismatch array count for %s: got %s, wanted %d", key, count, len(value)) - } - - Next: - for i := 0; i < gotCount; i++ { - attrKey = fmt.Sprintf("%s.%d", key, i) - got, ok := is.Attributes[attrKey] - if !ok { - return fmt.Errorf("Missing array item for %s", attrKey) - } - for _, want := range value { - if got == want { - continue Next - } - } - return fmt.Errorf( - "Unexpected array item for %s: got %s", - attrKey, - got) - } - - return nil - } -} diff --git a/builtin/providers/dns/test_check_attr_string_array_member.go b/builtin/providers/dns/test_check_attr_string_array_member.go deleted file mode 100644 index e246ed32e..000000000 --- a/builtin/providers/dns/test_check_attr_string_array_member.go +++ /dev/null @@ -1,39 +0,0 @@ -package dns - -import ( - "fmt" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testCheckAttrStringArrayMember(name, key string, value []string) r.TestCheckFunc { - return func(s 
*terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - is := rs.Primary - if is == nil { - return fmt.Errorf("No primary instance: %s", name) - } - - got, ok := is.Attributes[key] - if !ok { - return fmt.Errorf("Attributes not found for %s", key) - } - - for _, want := range value { - if got == want { - return nil - } - } - - return fmt.Errorf( - "Unexpected value for %s: got %s", - key, - got) - } -} diff --git a/builtin/providers/dnsimple/config.go b/builtin/providers/dnsimple/config.go deleted file mode 100644 index 7992e6993..000000000 --- a/builtin/providers/dnsimple/config.go +++ /dev/null @@ -1,36 +0,0 @@ -package dnsimple - -import ( - "log" - - "github.com/dnsimple/dnsimple-go/dnsimple" - "github.com/hashicorp/terraform/terraform" -) - -type Config struct { - Email string - Account string - Token string -} - -// Client represents the DNSimple provider client. -// This is a convenient container for the configuration and the underlying API client. -type Client struct { - client *dnsimple.Client - config *Config -} - -// Client() returns a new client for accessing dnsimple. 
-func (c *Config) Client() (*Client, error) { - client := dnsimple.NewClient(dnsimple.NewOauthTokenCredentials(c.Token)) - client.UserAgent = "HashiCorp-Terraform/" + terraform.VersionString() - - provider := &Client{ - client: client, - config: c, - } - - log.Printf("[INFO] DNSimple Client configured for account: %s", c.Account) - - return provider, nil -} diff --git a/builtin/providers/dnsimple/import_dnsimple_record_test.go b/builtin/providers/dnsimple/import_dnsimple_record_test.go deleted file mode 100644 index 420a6e4c2..000000000 --- a/builtin/providers/dnsimple/import_dnsimple_record_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package dnsimple - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDnsimpleRecord_import(t *testing.T) { - resourceName := "dnsimple_record.foobar" - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_import, domain), - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateIdPrefix: fmt.Sprintf("%s_", domain), - }, - }, - }) -} - -const testAccCheckDNSimpleRecordConfig_import = ` -resource "dnsimple_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "192.168.0.10" - type = "A" - ttl = 3600 -}` diff --git a/builtin/providers/dnsimple/provider.go b/builtin/providers/dnsimple/provider.go deleted file mode 100644 index 1c73c1a5b..000000000 --- a/builtin/providers/dnsimple/provider.go +++ /dev/null @@ -1,63 +0,0 @@ -package dnsimple - -import ( - "errors" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DNSIMPLE_EMAIL", ""), - Description: "The DNSimple account email address.", - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DNSIMPLE_TOKEN", nil), - Description: "The API v2 token for API operations.", - }, - - "account": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DNSIMPLE_ACCOUNT", nil), - Description: "The account for API operations.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "dnsimple_record": resourceDNSimpleRecord(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - // DNSimple API v1 requires email+token to authenticate. - // DNSimple API v2 requires only an OAuth token and in this particular case - // the reference of the account for API operations (to avoid fetching it in real time). - // - // v2 is not backward compatible with v1, therefore return an error in case email is set, - // to inform the user to upgrade to v2. Also, v1 token is not the same of v2. - if email := d.Get("email").(string); email != "" { - return nil, errors.New( - "DNSimple API v2 requires an account identifier and the new OAuth token. 
" + - "Please upgrade your configuration.") - } - - config := Config{ - Token: d.Get("token").(string), - Account: d.Get("account").(string), - } - - return config.Client() -} diff --git a/builtin/providers/dnsimple/provider_test.go b/builtin/providers/dnsimple/provider_test.go deleted file mode 100644 index b45e3e464..000000000 --- a/builtin/providers/dnsimple/provider_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package dnsimple - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "dnsimple": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DNSIMPLE_EMAIL"); v != "" { - t.Fatal("DNSIMPLE_EMAIL is no longer required for DNSimple API v2") - } - - if v := os.Getenv("DNSIMPLE_TOKEN"); v == "" { - t.Fatal("DNSIMPLE_TOKEN must be set for acceptance tests") - } - - if v := os.Getenv("DNSIMPLE_ACCOUNT"); v == "" { - t.Fatal("DNSIMPLE_ACCOUNT must be set for acceptance tests") - } - - if v := os.Getenv("DNSIMPLE_DOMAIN"); v == "" { - t.Fatal("DNSIMPLE_DOMAIN must be set for acceptance tests. 
The domain is used to create and destroy record against.") - } -} diff --git a/builtin/providers/dnsimple/resource_dnsimple_record.go b/builtin/providers/dnsimple/resource_dnsimple_record.go deleted file mode 100644 index af1c26de9..000000000 --- a/builtin/providers/dnsimple/resource_dnsimple_record.go +++ /dev/null @@ -1,205 +0,0 @@ -package dnsimple - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/dnsimple/dnsimple-go/dnsimple" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDNSimpleRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSimpleRecordCreate, - Read: resourceDNSimpleRecordRead, - Update: resourceDNSimpleRecordUpdate, - Delete: resourceDNSimpleRecordDelete, - Importer: &schema.ResourceImporter{ - State: resourceDNSimpleRecordImport, - }, - - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "domain_id": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "value": { - Type: schema.TypeString, - Required: true, - }, - - "ttl": { - Type: schema.TypeString, - Optional: true, - Default: "3600", - }, - - "priority": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func resourceDNSimpleRecordCreate(d *schema.ResourceData, meta interface{}) error { - provider := meta.(*Client) - - // Create the new record - newRecord := dnsimple.ZoneRecord{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Content: d.Get("value").(string), - } - if attr, ok := d.GetOk("ttl"); ok { - newRecord.TTL, _ = strconv.Atoi(attr.(string)) - } - - if attr, ok := d.GetOk("priority"); ok { - newRecord.Priority, _ = strconv.Atoi(attr.(string)) - } - - log.Printf("[DEBUG] DNSimple Record create 
configuration: %#v", newRecord) - - resp, err := provider.client.Zones.CreateRecord(provider.config.Account, d.Get("domain").(string), newRecord) - if err != nil { - return fmt.Errorf("Failed to create DNSimple Record: %s", err) - } - - d.SetId(strconv.Itoa(resp.Data.ID)) - log.Printf("[INFO] DNSimple Record ID: %s", d.Id()) - - return resourceDNSimpleRecordRead(d, meta) -} - -func resourceDNSimpleRecordRead(d *schema.ResourceData, meta interface{}) error { - provider := meta.(*Client) - - recordID, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Error converting Record ID: %s", err) - } - - resp, err := provider.client.Zones.GetRecord(provider.config.Account, d.Get("domain").(string), recordID) - if err != nil { - if err != nil && strings.Contains(err.Error(), "404") { - log.Printf("DNSimple Record Not Found - Refreshing from State") - d.SetId("") - return nil - } - return fmt.Errorf("Couldn't find DNSimple Record: %s", err) - } - - record := resp.Data - d.Set("domain_id", record.ZoneID) - d.Set("name", record.Name) - d.Set("type", record.Type) - d.Set("value", record.Content) - d.Set("ttl", strconv.Itoa(record.TTL)) - d.Set("priority", strconv.Itoa(record.Priority)) - - if record.Name == "" { - d.Set("hostname", d.Get("domain").(string)) - } else { - d.Set("hostname", fmt.Sprintf("%s.%s", record.Name, d.Get("domain").(string))) - } - - return nil -} - -func resourceDNSimpleRecordUpdate(d *schema.ResourceData, meta interface{}) error { - provider := meta.(*Client) - - recordID, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Error converting Record ID: %s", err) - } - - updateRecord := dnsimple.ZoneRecord{} - - if attr, ok := d.GetOk("name"); ok { - updateRecord.Name = attr.(string) - } - if attr, ok := d.GetOk("type"); ok { - updateRecord.Type = attr.(string) - } - if attr, ok := d.GetOk("value"); ok { - updateRecord.Content = attr.(string) - } - if attr, ok := d.GetOk("ttl"); ok { - updateRecord.TTL, _ = 
strconv.Atoi(attr.(string)) - } - - if attr, ok := d.GetOk("priority"); ok { - updateRecord.Priority, _ = strconv.Atoi(attr.(string)) - } - - log.Printf("[DEBUG] DNSimple Record update configuration: %#v", updateRecord) - - _, err = provider.client.Zones.UpdateRecord(provider.config.Account, d.Get("domain").(string), recordID, updateRecord) - if err != nil { - return fmt.Errorf("Failed to update DNSimple Record: %s", err) - } - - return resourceDNSimpleRecordRead(d, meta) -} - -func resourceDNSimpleRecordDelete(d *schema.ResourceData, meta interface{}) error { - provider := meta.(*Client) - - log.Printf("[INFO] Deleting DNSimple Record: %s, %s", d.Get("domain").(string), d.Id()) - - recordID, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Error converting Record ID: %s", err) - } - - _, err = provider.client.Zones.DeleteRecord(provider.config.Account, d.Get("domain").(string), recordID) - if err != nil { - return fmt.Errorf("Error deleting DNSimple Record: %s", err) - } - - return nil -} - -func resourceDNSimpleRecordImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "_") - - if len(parts) != 2 { - return nil, fmt.Errorf("Error Importing dnsimple_record. Please make sure the record ID is in the form DOMAIN_RECORDID (i.e. 
example.com_1234") - } - - d.SetId(parts[1]) - d.Set("domain", parts[0]) - - if err := resourceDNSimpleRecordRead(d, meta); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/dnsimple/resource_dnsimple_record_test.go b/builtin/providers/dnsimple/resource_dnsimple_record_test.go deleted file mode 100644 index 8b19697bd..000000000 --- a/builtin/providers/dnsimple/resource_dnsimple_record_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package dnsimple - -import ( - "fmt" - "os" - "strconv" - "testing" - - "github.com/dnsimple/dnsimple-go/dnsimple" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDNSimpleRecord_Basic(t *testing.T) { - var record dnsimple.ZoneRecord - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - testAccCheckDNSimpleRecordAttributes(&record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "192.168.0.10"), - ), - }, - }, - }) -} - -func TestAccDNSimpleRecord_CreateMxWithPriority(t *testing.T) { - var record dnsimple.ZoneRecord - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx, domain), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", ""), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "mx.example.com"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "priority", "5"), - ), - }, - }, - }) -} - -func TestAccDNSimpleRecord_Updated(t *testing.T) { - var record dnsimple.ZoneRecord - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - testAccCheckDNSimpleRecordAttributes(&record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "192.168.0.10"), - ), - }, - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_new_value, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - testAccCheckDNSimpleRecordAttributesUpdated(&record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "192.168.0.11"), - ), - }, - }, - }) -} - -func TestAccDNSimpleRecord_disappears(t *testing.T) { - var record dnsimple.ZoneRecord - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - testAccCheckDNSimpleRecordDisappears(&record, domain), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccDNSimpleRecord_UpdatedMx(t *testing.T) { - var record dnsimple.ZoneRecord - domain := os.Getenv("DNSIMPLE_DOMAIN") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSimpleRecordDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", ""), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "mx.example.com"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "priority", "5"), - ), - }, - { - Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_mx_new_value, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "name", ""), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "domain", domain), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "value", "mx2.example.com"), - resource.TestCheckResourceAttr( - "dnsimple_record.foobar", "priority", "10"), - ), - }, - }, - }) -} - -func testAccCheckDNSimpleRecordDisappears(record *dnsimple.ZoneRecord, domain string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - provider := testAccProvider.Meta().(*Client) - - _, err := 
provider.client.Zones.DeleteRecord(provider.config.Account, domain, record.ID) - if err != nil { - return err - } - - return nil - } - -} - -func testAccCheckDNSimpleRecordDestroy(s *terraform.State) error { - provider := testAccProvider.Meta().(*Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "dnsimple_record" { - continue - } - - recordID, _ := strconv.Atoi(rs.Primary.ID) - _, err := provider.client.Zones.GetRecord(provider.config.Account, rs.Primary.Attributes["domain"], recordID) - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckDNSimpleRecordAttributes(record *dnsimple.ZoneRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Content != "192.168.0.10" { - return fmt.Errorf("Bad content: %s", record.Content) - } - - return nil - } -} - -func testAccCheckDNSimpleRecordAttributesUpdated(record *dnsimple.ZoneRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Content != "192.168.0.11" { - return fmt.Errorf("Bad content: %s", record.Content) - } - - return nil - } -} - -func testAccCheckDNSimpleRecordExists(n string, record *dnsimple.ZoneRecord) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - provider := testAccProvider.Meta().(*Client) - - recordID, _ := strconv.Atoi(rs.Primary.ID) - resp, err := provider.client.Zones.GetRecord(provider.config.Account, rs.Primary.Attributes["domain"], recordID) - if err != nil { - return err - } - - foundRecord := resp.Data - if foundRecord.ID != recordID { - return fmt.Errorf("Record not found") - } - - *record = *foundRecord - - return nil - } -} - -const testAccCheckDNSimpleRecordConfig_basic = ` -resource "dnsimple_record" "foobar" { - domain = "%s" - - name = "terraform" - value = 
"192.168.0.10" - type = "A" - ttl = 3600 -}` - -const testAccCheckDNSimpleRecordConfig_new_value = ` -resource "dnsimple_record" "foobar" { - domain = "%s" - - name = "terraform" - value = "192.168.0.11" - type = "A" - ttl = 3600 -}` - -const testAccCheckDNSimpleRecordConfig_mx = ` -resource "dnsimple_record" "foobar" { - domain = "%s" - - name = "" - value = "mx.example.com" - type = "MX" - ttl = 3600 - priority = 5 -}` - -const testAccCheckDNSimpleRecordConfig_mx_new_value = ` -resource "dnsimple_record" "foobar" { - domain = "%s" - - name = "" - value = "mx2.example.com" - type = "MX" - ttl = 3600 - priority = 10 -}` diff --git a/builtin/providers/docker/config.go b/builtin/providers/docker/config.go deleted file mode 100644 index ad05d5409..000000000 --- a/builtin/providers/docker/config.go +++ /dev/null @@ -1,49 +0,0 @@ -package docker - -import ( - "fmt" - "path/filepath" - - dc "github.com/fsouza/go-dockerclient" -) - -// Config is the structure that stores the configuration to talk to a -// Docker API compatible host. -type Config struct { - Host string - Ca string - Cert string - Key string - CertPath string -} - -// NewClient() returns a new Docker client. -func (c *Config) NewClient() (*dc.Client, error) { - if c.Ca != "" || c.Cert != "" || c.Key != "" { - if c.Ca == "" || c.Cert == "" || c.Key == "" { - return nil, fmt.Errorf("ca_material, cert_material, and key_material must be specified") - } - - if c.CertPath != "" { - return nil, fmt.Errorf("cert_path must not be specified") - } - - return dc.NewTLSClientFromBytes(c.Host, []byte(c.Cert), []byte(c.Key), []byte(c.Ca)) - } - - if c.CertPath != "" { - // If there is cert information, load it and use it. 
- ca := filepath.Join(c.CertPath, "ca.pem") - cert := filepath.Join(c.CertPath, "cert.pem") - key := filepath.Join(c.CertPath, "key.pem") - return dc.NewTLSClient(c.Host, cert, key, ca) - } - - // If there is no cert information, then just return the direct client - return dc.NewClient(c.Host) -} - -// Data ia structure for holding data that we fetch from Docker. -type Data struct { - DockerImages map[string]*dc.APIImages -} diff --git a/builtin/providers/docker/data_source_docker_registry_image.go b/builtin/providers/docker/data_source_docker_registry_image.go deleted file mode 100644 index 9898c8ac8..000000000 --- a/builtin/providers/docker/data_source_docker_registry_image.go +++ /dev/null @@ -1,166 +0,0 @@ -package docker - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceDockerRegistryImage() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDockerRegistryImageRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "sha256_digest": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceDockerRegistryImageRead(d *schema.ResourceData, meta interface{}) error { - pullOpts := parseImageOptions(d.Get("name").(string)) - - // Use the official Docker Hub if a registry isn't specified - if pullOpts.Registry == "" { - pullOpts.Registry = "registry.hub.docker.com" - } else { - // Otherwise, filter the registry name out of the repo name - pullOpts.Repository = strings.Replace(pullOpts.Repository, pullOpts.Registry+"/", "", 1) - } - - // Docker prefixes 'library' to official images in the path; 'consul' becomes 'library/consul' - if !strings.Contains(pullOpts.Repository, "/") { - pullOpts.Repository = "library/" + pullOpts.Repository - } - - if pullOpts.Tag == "" { - pullOpts.Tag = "latest" - } - - digest, err := 
getImageDigest(pullOpts.Registry, pullOpts.Repository, pullOpts.Tag, "", "") - - if err != nil { - return fmt.Errorf("Got error when attempting to fetch image version from registry: %s", err) - } - - d.SetId(digest) - d.Set("sha256_digest", digest) - - return nil -} - -func getImageDigest(registry, image, tag, username, password string) (string, error) { - client := http.DefaultClient - - req, err := http.NewRequest("GET", "https://"+registry+"/v2/"+image+"/manifests/"+tag, nil) - - if err != nil { - return "", fmt.Errorf("Error creating registry request: %s", err) - } - - if username != "" { - req.SetBasicAuth(username, password) - } - - resp, err := client.Do(req) - - if err != nil { - return "", fmt.Errorf("Error during registry request: %s", err) - } - - switch resp.StatusCode { - // Basic auth was valid or not needed - case http.StatusOK: - return resp.Header.Get("Docker-Content-Digest"), nil - - // Either OAuth is required or the basic auth creds were invalid - case http.StatusUnauthorized: - if strings.HasPrefix(resp.Header.Get("www-authenticate"), "Bearer") { - auth := parseAuthHeader(resp.Header.Get("www-authenticate")) - params := url.Values{} - params.Set("service", auth["service"]) - params.Set("scope", auth["scope"]) - tokenRequest, err := http.NewRequest("GET", auth["realm"]+"?"+params.Encode(), nil) - - if err != nil { - return "", fmt.Errorf("Error creating registry request: %s", err) - } - - if username != "" { - tokenRequest.SetBasicAuth(username, password) - } - - tokenResponse, err := client.Do(tokenRequest) - - if err != nil { - return "", fmt.Errorf("Error during registry request: %s", err) - } - - if tokenResponse.StatusCode != http.StatusOK { - return "", fmt.Errorf("Got bad response from registry: " + tokenResponse.Status) - } - - body, err := ioutil.ReadAll(tokenResponse.Body) - if err != nil { - return "", fmt.Errorf("Error reading response body: %s", err) - } - - token := &TokenResponse{} - err = json.Unmarshal(body, token) - if err != 
nil { - return "", fmt.Errorf("Error parsing OAuth token response: %s", err) - } - - req.Header.Set("Authorization", "Bearer "+token.Token) - digestResponse, err := client.Do(req) - - if err != nil { - return "", fmt.Errorf("Error during registry request: %s", err) - } - - if digestResponse.StatusCode != http.StatusOK { - return "", fmt.Errorf("Got bad response from registry: " + digestResponse.Status) - } - - return digestResponse.Header.Get("Docker-Content-Digest"), nil - } else { - return "", fmt.Errorf("Bad credentials: " + resp.Status) - } - - // Some unexpected status was given, return an error - default: - return "", fmt.Errorf("Got bad response from registry: " + resp.Status) - } -} - -type TokenResponse struct { - Token string -} - -// Parses key/value pairs from a WWW-Authenticate header -func parseAuthHeader(header string) map[string]string { - parts := strings.SplitN(header, " ", 2) - parts = strings.Split(parts[1], ",") - opts := make(map[string]string) - - for _, part := range parts { - vals := strings.SplitN(part, "=", 2) - key := vals[0] - val := strings.Trim(vals[1], "\", ") - opts[key] = val - } - - return opts -} diff --git a/builtin/providers/docker/data_source_docker_registry_image_test.go b/builtin/providers/docker/data_source_docker_registry_image_test.go deleted file mode 100644 index aa34b004b..000000000 --- a/builtin/providers/docker/data_source_docker_registry_image_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package docker - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -var registryDigestRegexp = regexp.MustCompile(`\A[A-Za-z0-9_\+\.-]+:[A-Fa-f0-9]+\z`) - -func TestAccDockerRegistryImage_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageDataSourceConfig, - Check: resource.ComposeTestCheckFunc( - 
resource.TestMatchResourceAttr("data.docker_registry_image.foo", "sha256_digest", registryDigestRegexp), - ), - }, - }, - }) -} - -func TestAccDockerRegistryImage_private(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageDataSourcePrivateConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("data.docker_registry_image.bar", "sha256_digest", registryDigestRegexp), - ), - }, - }, - }) -} - -const testAccDockerImageDataSourceConfig = ` -data "docker_registry_image" "foo" { - name = "alpine:latest" -} -` - -const testAccDockerImageDataSourcePrivateConfig = ` -data "docker_registry_image" "bar" { - name = "gcr.io:443/google_containers/pause:0.8.0" -} -` diff --git a/builtin/providers/docker/provider.go b/builtin/providers/docker/provider.go deleted file mode 100644 index 1da7ffbea..000000000 --- a/builtin/providers/docker/provider.go +++ /dev/null @@ -1,82 +0,0 @@ -package docker - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DOCKER_HOST", "unix:///var/run/docker.sock"), - Description: "The Docker daemon address", - }, - - "ca_material": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DOCKER_CA_MATERIAL", ""), - Description: "PEM-encoded content of Docker host CA certificate", - }, - "cert_material": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DOCKER_CERT_MATERIAL", ""), - Description: "PEM-encoded content of Docker client certificate", - }, - "key_material": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DOCKER_KEY_MATERIAL", ""), - Description: "PEM-encoded content of Docker client private key", - }, - - "cert_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DOCKER_CERT_PATH", ""), - Description: "Path to directory with Docker TLS config", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "docker_container": resourceDockerContainer(), - "docker_image": resourceDockerImage(), - "docker_network": resourceDockerNetwork(), - "docker_volume": resourceDockerVolume(), - }, - - DataSourcesMap: map[string]*schema.Resource{ - "docker_registry_image": dataSourceDockerRegistryImage(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Host: d.Get("host").(string), - Ca: d.Get("ca_material").(string), - Cert: d.Get("cert_material").(string), - Key: d.Get("key_material").(string), - CertPath: d.Get("cert_path").(string), - } - - client, err := config.NewClient() - if err != nil { - return nil, fmt.Errorf("Error initializing Docker client: %s", err) - } - - err = client.Ping() - if err != nil { - return nil, fmt.Errorf("Error pinging Docker server: %s", err) - } - - return client, nil -} diff --git a/builtin/providers/docker/provider_test.go b/builtin/providers/docker/provider_test.go deleted file mode 100644 index d09104889..000000000 --- a/builtin/providers/docker/provider_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package docker - -import ( - "os/exec" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "docker": testAccProvider, - } -} - -func TestProvider(t 
*testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - cmd := exec.Command("docker", "version") - if err := cmd.Run(); err != nil { - t.Fatalf("Docker must be available: %s", err) - } -} diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go deleted file mode 100644 index 543dc930c..000000000 --- a/builtin/providers/docker/resource_docker_container.go +++ /dev/null @@ -1,528 +0,0 @@ -package docker - -import ( - "bytes" - "fmt" - - "regexp" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerContainer() *schema.Resource { - return &schema.Resource{ - Create: resourceDockerContainerCreate, - Read: resourceDockerContainerRead, - Update: resourceDockerContainerUpdate, - Delete: resourceDockerContainerDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - // Indicates whether the container must be running. - // - // An assumption is made that configured containers - // should be running; if not, they should not be in - // the configuration. Therefore a stopped container - // should be started. Set to false to have the - // provider leave the container alone. - // - // Actively-debugged containers are likely to be - // stopped and started manually, and Docker has - // some provisions for restarting containers that - // stop. The utility here comes from the fact that - // this will delete and re-create the container - // following the principle that the containers - // should be pristine when started. 
- "must_run": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - // ForceNew is not true for image because we need to - // sane this against Docker image IDs, as each image - // can have multiple names/tags attached do it. - "image": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "hostname": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domainname": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "command": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "entrypoint": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "user": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "dns": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "dns_opts": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "dns_search": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "publish_all_ports": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "restart": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "no", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(no|on-failure|always|unless-stopped)$`).MatchString(value) { - es = append(es, fmt.Errorf( - "%q must be one of \"no\", \"on-failure\", \"always\" or 
\"unless-stopped\"", k)) - } - return - }, - }, - - "max_retry_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "capabilities": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "add": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "drop": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - Set: resourceDockerCapabilitiesHash, - }, - - "volumes": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_container": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "container_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "host_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateDockerContainerPath, - }, - - "volume_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "read_only": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - }, - Set: resourceDockerVolumesHash, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "internal": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "external": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Default: "tcp", - 
Optional: true, - ForceNew: true, - }, - }, - }, - Set: resourceDockerPortsHash, - }, - - "host": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - Set: resourceDockerHostsHash, - }, - - "env": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "links": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ip_prefix_length": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "gateway": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "bridge": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "privileged": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "destroy_grace_seconds": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "labels": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "memory": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value < 0 { - es = append(es, fmt.Errorf("%q must be greater than or equal to 0", k)) - } - return - }, - }, - - "memory_swap": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value < -1 { - es = append(es, fmt.Errorf("%q must be greater than or equal to 
-1", k)) - } - return - }, - }, - - "cpu_shares": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(int) - if value < 0 { - es = append(es, fmt.Errorf("%q must be greater than or equal to 0", k)) - } - return - }, - }, - - "log_driver": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "json-file", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(json-file|syslog|journald|gelf|fluentd)$`).MatchString(value) { - es = append(es, fmt.Errorf( - "%q must be one of \"json-file\", \"syslog\", \"journald\", \"gelf\", or \"fluentd\"", k)) - } - return - }, - }, - - "log_opts": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "network_alias": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "network_mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "networks": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "upload": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": &schema.Schema{ - Type: schema.TypeString, - Required: true, - // This is intentional. The container is mutated once, and never updated later. - // New configuration forces a new deployment, even with the same binaries. 
- ForceNew: true, - }, - "file": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - Set: resourceDockerUploadHash, - }, - }, - } -} - -func resourceDockerCapabilitiesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["add"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v)) - } - - if v, ok := m["remove"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v)) - } - - return hashcode.String(buf.String()) -} - -func resourceDockerPortsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%v-", m["internal"].(int))) - - if v, ok := m["external"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(int))) - } - - if v, ok := m["ip"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["protocol"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceDockerHostsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["ip"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["host"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceDockerVolumesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["from_container"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["container_path"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["host_path"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["volume_name"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["read_only"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(bool))) - } - - return hashcode.String(buf.String()) -} - -func resourceDockerUploadHash(v interface{}) int { - var buf bytes.Buffer - m := 
v.(map[string]interface{}) - - if v, ok := m["content"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["file"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - return hashcode.String(buf.String()) -} - -func validateDockerContainerPath(v interface{}, k string) (ws []string, errors []error) { - - value := v.(string) - if !regexp.MustCompile(`^[a-zA-Z]:\\|^/`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q must be an absolute path", k)) - } - - return -} diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go deleted file mode 100644 index 4a494c5bd..000000000 --- a/builtin/providers/docker/resource_docker_container_funcs.go +++ /dev/null @@ -1,465 +0,0 @@ -package docker - -import ( - "archive/tar" - "bytes" - "errors" - "fmt" - "strconv" - "time" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/schema" -) - -var ( - creationTime time.Time -) - -func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error { - var err error - client := meta.(*dc.Client) - - var data Data - if err := fetchLocalImages(&data, client); err != nil { - return err - } - - image := d.Get("image").(string) - if _, ok := data.DockerImages[image]; !ok { - if _, ok := data.DockerImages[image+":latest"]; !ok { - return fmt.Errorf("Unable to find image %s", image) - } - image = image + ":latest" - } - - // The awesome, wonderful, splendiferous, sensical - // Docker API now lets you specify a HostConfig in - // CreateContainerOptions, but in my testing it still only - // actually applies HostConfig options set in StartContainer. - // How cool is that? 
- createOpts := dc.CreateContainerOptions{ - Name: d.Get("name").(string), - Config: &dc.Config{ - Image: image, - Hostname: d.Get("hostname").(string), - Domainname: d.Get("domainname").(string), - }, - } - - if v, ok := d.GetOk("env"); ok { - createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set)) - } - - if v, ok := d.GetOk("command"); ok { - createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{})) - for _, v := range createOpts.Config.Cmd { - if v == "" { - return fmt.Errorf("values for command may not be empty") - } - } - } - - if v, ok := d.GetOk("entrypoint"); ok { - createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{})) - } - - if v, ok := d.GetOk("user"); ok { - createOpts.Config.User = v.(string) - } - - exposedPorts := map[dc.Port]struct{}{} - portBindings := map[dc.Port][]dc.PortBinding{} - - if v, ok := d.GetOk("ports"); ok { - exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set)) - } - if len(exposedPorts) != 0 { - createOpts.Config.ExposedPorts = exposedPorts - } - - extraHosts := []string{} - if v, ok := d.GetOk("host"); ok { - extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set)) - } - - volumes := map[string]struct{}{} - binds := []string{} - volumesFrom := []string{} - - if v, ok := d.GetOk("volumes"); ok { - volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set)) - if err != nil { - return fmt.Errorf("Unable to parse volumes: %s", err) - } - } - if len(volumes) != 0 { - createOpts.Config.Volumes = volumes - } - - if v, ok := d.GetOk("labels"); ok { - createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{})) - } - - hostConfig := &dc.HostConfig{ - Privileged: d.Get("privileged").(bool), - PublishAllPorts: d.Get("publish_all_ports").(bool), - RestartPolicy: dc.RestartPolicy{ - Name: d.Get("restart").(string), - MaximumRetryCount: d.Get("max_retry_count").(int), - }, - LogConfig: dc.LogConfig{ - Type: d.Get("log_driver").(string), - }, - } - - if 
len(portBindings) != 0 { - hostConfig.PortBindings = portBindings - } - if len(extraHosts) != 0 { - hostConfig.ExtraHosts = extraHosts - } - if len(binds) != 0 { - hostConfig.Binds = binds - } - if len(volumesFrom) != 0 { - hostConfig.VolumesFrom = volumesFrom - } - - if v, ok := d.GetOk("capabilities"); ok { - for _, capInt := range v.(*schema.Set).List() { - capa := capInt.(map[string]interface{}) - hostConfig.CapAdd = stringSetToStringSlice(capa["add"].(*schema.Set)) - hostConfig.CapDrop = stringSetToStringSlice(capa["drop"].(*schema.Set)) - break - } - } - - if v, ok := d.GetOk("dns"); ok { - hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set)) - } - - if v, ok := d.GetOk("dns_opts"); ok { - hostConfig.DNSOptions = stringSetToStringSlice(v.(*schema.Set)) - } - - if v, ok := d.GetOk("dns_search"); ok { - hostConfig.DNSSearch = stringSetToStringSlice(v.(*schema.Set)) - } - - if v, ok := d.GetOk("links"); ok { - hostConfig.Links = stringSetToStringSlice(v.(*schema.Set)) - } - - if v, ok := d.GetOk("memory"); ok { - hostConfig.Memory = int64(v.(int)) * 1024 * 1024 - } - - if v, ok := d.GetOk("memory_swap"); ok { - swap := int64(v.(int)) - if swap > 0 { - swap = swap * 1024 * 1024 - } - hostConfig.MemorySwap = swap - } - - if v, ok := d.GetOk("cpu_shares"); ok { - hostConfig.CPUShares = int64(v.(int)) - } - - if v, ok := d.GetOk("log_opts"); ok { - hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{})) - } - - if v, ok := d.GetOk("network_mode"); ok { - hostConfig.NetworkMode = v.(string) - } - - createOpts.HostConfig = hostConfig - - var retContainer *dc.Container - if retContainer, err = client.CreateContainer(createOpts); err != nil { - return fmt.Errorf("Unable to create container: %s", err) - } - if retContainer == nil { - return fmt.Errorf("Returned container is nil") - } - - d.SetId(retContainer.ID) - - if v, ok := d.GetOk("networks"); ok { - var connectionOpts dc.NetworkConnectionOptions - if v, ok := d.GetOk("network_alias"); 
ok { - endpointConfig := &dc.EndpointConfig{} - endpointConfig.Aliases = stringSetToStringSlice(v.(*schema.Set)) - connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID, EndpointConfig: endpointConfig} - } else { - connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID} - } - - for _, rawNetwork := range v.(*schema.Set).List() { - network := rawNetwork.(string) - if err := client.ConnectNetwork(network, connectionOpts); err != nil { - return fmt.Errorf("Unable to connect to network '%s': %s", network, err) - } - } - } - - if v, ok := d.GetOk("upload"); ok { - for _, upload := range v.(*schema.Set).List() { - content := upload.(map[string]interface{})["content"].(string) - file := upload.(map[string]interface{})["file"].(string) - - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - hdr := &tar.Header{ - Name: file, - Mode: 0644, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return fmt.Errorf("Error creating tar archive: %s", err) - } - if _, err := tw.Write([]byte(content)); err != nil { - return fmt.Errorf("Error creating tar archive: %s", err) - } - if err := tw.Close(); err != nil { - return fmt.Errorf("Error creating tar archive: %s", err) - } - - uploadOpts := dc.UploadToContainerOptions{ - InputStream: bytes.NewReader(buf.Bytes()), - Path: "/", - } - - if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil { - return fmt.Errorf("Unable to upload volume content: %s", err) - } - } - } - - creationTime = time.Now() - if err := client.StartContainer(retContainer.ID, nil); err != nil { - return fmt.Errorf("Unable to start container: %s", err) - } - - return resourceDockerContainerRead(d, meta) -} - -func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - apiContainer, err := fetchDockerContainer(d.Id(), client) - if err != nil { - return err - } - if apiContainer == nil { - // This container doesn't exist 
anymore - d.SetId("") - return nil - } - - var container *dc.Container - - loops := 1 // if it hasn't just been created, don't delay - if !creationTime.IsZero() { - loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty - } - sleepTime := 500 * time.Millisecond - - for i := loops; i > 0; i-- { - container, err = client.InspectContainer(apiContainer.ID) - if err != nil { - return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err) - } - - if container.State.Running || - !container.State.Running && !d.Get("must_run").(bool) { - break - } - - if creationTime.IsZero() { // We didn't just create it, so don't wait around - return resourceDockerContainerDelete(d, meta) - } - - if container.State.FinishedAt.After(creationTime) { - // It exited immediately, so error out so dependent containers - // aren't started - resourceDockerContainerDelete(d, meta) - return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error) - } - - time.Sleep(sleepTime) - } - - // Handle the case of the for loop above running its course - if !container.State.Running && d.Get("must_run").(bool) { - resourceDockerContainerDelete(d, meta) - return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID) - } - - // Read Network Settings - if container.NetworkSettings != nil { - d.Set("ip_address", container.NetworkSettings.IPAddress) - d.Set("ip_prefix_length", container.NetworkSettings.IPPrefixLen) - d.Set("gateway", container.NetworkSettings.Gateway) - d.Set("bridge", container.NetworkSettings.Bridge) - } - - return nil -} - -func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - // Stop the container before removing if destroy_grace_seconds is defined - if d.Get("destroy_grace_seconds").(int) > 0 { - var timeout = 
uint(d.Get("destroy_grace_seconds").(int)) - if err := client.StopContainer(d.Id(), timeout); err != nil { - return fmt.Errorf("Error stopping container %s: %s", d.Id(), err) - } - } - - removeOpts := dc.RemoveContainerOptions{ - ID: d.Id(), - RemoveVolumes: true, - Force: true, - } - - if err := client.RemoveContainer(removeOpts); err != nil { - return fmt.Errorf("Error deleting container %s: %s", d.Id(), err) - } - - d.SetId("") - return nil -} - -func stringListToStringSlice(stringList []interface{}) []string { - ret := []string{} - for _, v := range stringList { - if v == nil { - ret = append(ret, "") - continue - } - ret = append(ret, v.(string)) - } - return ret -} - -func stringSetToStringSlice(stringSet *schema.Set) []string { - ret := []string{} - if stringSet == nil { - return ret - } - for _, envVal := range stringSet.List() { - ret = append(ret, envVal.(string)) - } - return ret -} - -func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string { - mapped := make(map[string]string, len(typeMap)) - for k, v := range typeMap { - mapped[k] = v.(string) - } - return mapped -} - -func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) { - apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true}) - - if err != nil { - return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err) - } - - for _, apiContainer := range apiContainers { - if apiContainer.ID == ID { - return &apiContainer, nil - } - } - - return nil, nil -} - -func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) { - retExposedPorts := map[dc.Port]struct{}{} - retPortBindings := map[dc.Port][]dc.PortBinding{} - - for _, portInt := range ports.List() { - port := portInt.(map[string]interface{}) - internal := port["internal"].(int) - protocol := port["protocol"].(string) - - exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol) - retExposedPorts[exposedPort] 
= struct{}{} - - external, extOk := port["external"].(int) - ip, ipOk := port["ip"].(string) - - if extOk { - portBinding := dc.PortBinding{ - HostPort: strconv.Itoa(external), - } - if ipOk { - portBinding.HostIP = ip - } - retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding) - } - } - - return retExposedPorts, retPortBindings -} - -func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string { - retExtraHosts := []string{} - - for _, hostInt := range extraHosts.List() { - host := hostInt.(map[string]interface{}) - ip := host["ip"].(string) - hostname := host["host"].(string) - retExtraHosts = append(retExtraHosts, hostname+":"+ip) - } - - return retExtraHosts -} - -func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) { - retVolumeMap := map[string]struct{}{} - retHostConfigBinds := []string{} - retVolumeFromContainers := []string{} - - for _, volumeInt := range volumes.List() { - volume := volumeInt.(map[string]interface{}) - fromContainer := volume["from_container"].(string) - containerPath := volume["container_path"].(string) - volumeName := volume["volume_name"].(string) - if len(volumeName) == 0 { - volumeName = volume["host_path"].(string) - } - readOnly := volume["read_only"].(bool) - - switch { - case len(fromContainer) == 0 && len(containerPath) == 0: - return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container") - case len(fromContainer) != 0 && len(containerPath) != 0: - return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry") - case len(fromContainer) != 0: - retVolumeFromContainers = append(retVolumeFromContainers, fromContainer) - case len(volumeName) != 0: - readWrite := "rw" - if readOnly { - readWrite = "ro" - } - retVolumeMap[containerPath] = struct{}{} - retHostConfigBinds = append(retHostConfigBinds, 
volumeName+":"+containerPath+":"+readWrite) - default: - retVolumeMap[containerPath] = struct{}{} - } - } - - return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil -} diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go deleted file mode 100644 index dcc6affa8..000000000 --- a/builtin/providers/docker/resource_docker_container_test.go +++ /dev/null @@ -1,410 +0,0 @@ -package docker - -import ( - "archive/tar" - "bytes" - "fmt" - "testing" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDockerContainer_basic(t *testing.T) { - var c dc.Container - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerContainerConfig, - Check: resource.ComposeTestCheckFunc( - testAccContainerRunning("docker_container.foo", &c), - ), - }, - }, - }) -} - -func TestAccDockerContainerPath_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - {Value: "/var/log", ErrCount: 0}, - {Value: "/tmp", ErrCount: 0}, - {Value: "C:\\Windows\\System32", ErrCount: 0}, - {Value: "C:\\Program Files\\MSBuild", ErrCount: 0}, - {Value: "test", ErrCount: 1}, - {Value: "C:Test", ErrCount: 1}, - {Value: "", ErrCount: 1}, - } - - for _, tc := range cases { - _, errors := validateDockerContainerPath(tc.Value, "docker_container") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the Docker Container Path to trigger a validation error") - } - } -} - -func TestAccDockerContainer_volume(t *testing.T) { - var c dc.Container - - testCheck := func(*terraform.State) error { - if len(c.Mounts) != 1 { - return fmt.Errorf("Incorrect number of mounts: expected 1, got %d", len(c.Mounts)) - } - - for _, v := range c.Mounts { - if v.Name != 
"testAccDockerContainerVolume_volume" { - continue - } - - if v.Destination != "/tmp/volume" { - return fmt.Errorf("Bad destination on mount: expected /tmp/volume, got %q", v.Destination) - } - - if v.Mode != "rw" { - return fmt.Errorf("Bad mode on mount: expected rw, got %q", v.Mode) - } - - return nil - } - - return fmt.Errorf("Mount for testAccDockerContainerVolume_volume not found") - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerContainerVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccContainerRunning("docker_container.foo", &c), - testCheck, - ), - }, - }, - }) -} - -func TestAccDockerContainer_customized(t *testing.T) { - var c dc.Container - - testCheck := func(*terraform.State) error { - if len(c.Config.Entrypoint) < 3 || - (c.Config.Entrypoint[0] != "/bin/bash" && - c.Config.Entrypoint[1] != "-c" && - c.Config.Entrypoint[2] != "ping localhost") { - return fmt.Errorf("Container wrong entrypoint: %s", c.Config.Entrypoint) - } - - if c.Config.User != "root:root" { - return fmt.Errorf("Container wrong user: %s", c.Config.User) - } - - if c.HostConfig.RestartPolicy.Name == "on-failure" { - if c.HostConfig.RestartPolicy.MaximumRetryCount != 5 { - return fmt.Errorf("Container has wrong restart policy max retry count: %d", c.HostConfig.RestartPolicy.MaximumRetryCount) - } - } else { - return fmt.Errorf("Container has wrong restart policy: %s", c.HostConfig.RestartPolicy.Name) - } - - if c.HostConfig.Memory != (512 * 1024 * 1024) { - return fmt.Errorf("Container has wrong memory setting: %d", c.HostConfig.Memory) - } - - if c.HostConfig.MemorySwap != (2048 * 1024 * 1024) { - return fmt.Errorf("Container has wrong memory swap setting: %d\n\r\tPlease check that you machine supports memory swap (you can do that by running 'docker info' command).", c.HostConfig.MemorySwap) - } - - if c.HostConfig.CPUShares != 32 
{ - return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares) - } - - if len(c.HostConfig.DNS) != 1 { - return fmt.Errorf("Container does not have the correct number of dns entries: %d", len(c.HostConfig.DNS)) - } - - if c.HostConfig.DNS[0] != "8.8.8.8" { - return fmt.Errorf("Container has wrong dns setting: %v", c.HostConfig.DNS[0]) - } - - if len(c.HostConfig.DNSOptions) != 1 { - return fmt.Errorf("Container does not have the correct number of dns option entries: %d", len(c.HostConfig.DNS)) - } - - if c.HostConfig.DNSOptions[0] != "rotate" { - return fmt.Errorf("Container has wrong dns option setting: %v", c.HostConfig.DNS[0]) - } - - if len(c.HostConfig.DNSSearch) != 1 { - return fmt.Errorf("Container does not have the correct number of dns search entries: %d", len(c.HostConfig.DNS)) - } - - if c.HostConfig.DNSSearch[0] != "example.com" { - return fmt.Errorf("Container has wrong dns search setting: %v", c.HostConfig.DNS[0]) - } - - if len(c.HostConfig.CapAdd) != 1 { - return fmt.Errorf("Container does not have the correct number of Capabilities in ADD: %d", len(c.HostConfig.CapAdd)) - } - - if c.HostConfig.CapAdd[0] != "ALL" { - return fmt.Errorf("Container has wrong CapAdd setting: %v", c.HostConfig.CapAdd[0]) - } - - if len(c.HostConfig.CapDrop) != 1 { - return fmt.Errorf("Container does not have the correct number of Capabilities in Drop: %d", len(c.HostConfig.CapDrop)) - } - - if c.HostConfig.CapDrop[0] != "SYS_ADMIN" { - return fmt.Errorf("Container has wrong CapDrop setting: %v", c.HostConfig.CapDrop[0]) - } - - if c.HostConfig.CPUShares != 32 { - return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares) - } - - if c.HostConfig.CPUShares != 32 { - return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares) - } - - if c.Config.Labels["env"] != "prod" || c.Config.Labels["role"] != "test" { - return fmt.Errorf("Container does not have the correct labels") - } - - if 
c.HostConfig.LogConfig.Type != "json-file" { - return fmt.Errorf("Container does not have the correct log config: %s", c.HostConfig.LogConfig.Type) - } - - if c.HostConfig.LogConfig.Config["max-size"] != "10m" { - return fmt.Errorf("Container does not have the correct max-size log option: %v", c.HostConfig.LogConfig.Config["max-size"]) - } - - if c.HostConfig.LogConfig.Config["max-file"] != "20" { - return fmt.Errorf("Container does not have the correct max-file log option: %v", c.HostConfig.LogConfig.Config["max-file"]) - } - - if len(c.HostConfig.ExtraHosts) != 2 { - return fmt.Errorf("Container does not have correct number of extra host entries, got %d", len(c.HostConfig.ExtraHosts)) - } - - if c.HostConfig.ExtraHosts[0] != "testhost2:10.0.2.0" { - return fmt.Errorf("Container has incorrect extra host string: %q", c.HostConfig.ExtraHosts[0]) - } - - if c.HostConfig.ExtraHosts[1] != "testhost:10.0.1.0" { - return fmt.Errorf("Container has incorrect extra host string: %q", c.HostConfig.ExtraHosts[1]) - } - - if _, ok := c.NetworkSettings.Networks["test"]; !ok { - return fmt.Errorf("Container is not connected to the right user defined network: test") - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerContainerCustomizedConfig, - Check: resource.ComposeTestCheckFunc( - testAccContainerRunning("docker_container.foo", &c), - testCheck, - ), - }, - }, - }) -} - -func TestAccDockerContainer_upload(t *testing.T) { - var c dc.Container - - testCheck := func(*terraform.State) error { - client := testAccProvider.Meta().(*dc.Client) - - buf := new(bytes.Buffer) - opts := dc.DownloadFromContainerOptions{ - OutputStream: buf, - Path: "/terraform/test.txt", - } - - if err := client.DownloadFromContainer(c.ID, opts); err != nil { - return fmt.Errorf("Unable to download a file from container: %s", err) - } - - r := 
bytes.NewReader(buf.Bytes()) - tr := tar.NewReader(r) - - if _, err := tr.Next(); err != nil { - return fmt.Errorf("Unable to read content of tar archive: %s", err) - } - - fbuf := new(bytes.Buffer) - fbuf.ReadFrom(tr) - content := fbuf.String() - - if content != "foo" { - return fmt.Errorf("file content is invalid") - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerContainerUploadConfig, - Check: resource.ComposeTestCheckFunc( - testAccContainerRunning("docker_container.foo", &c), - testCheck, - ), - }, - }, - }) -} - -func testAccContainerRunning(n string, container *dc.Container) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*dc.Client) - containers, err := client.ListContainers(dc.ListContainersOptions{}) - if err != nil { - return err - } - - for _, c := range containers { - if c.ID == rs.Primary.ID { - inspected, err := client.InspectContainer(c.ID) - if err != nil { - return fmt.Errorf("Container could not be inspected: %s", err) - } - *container = *inspected - return nil - } - } - - return fmt.Errorf("Container not found: %s", rs.Primary.ID) - } -} - -const testAccDockerContainerConfig = ` -resource "docker_image" "foo" { - name = "nginx:latest" -} - -resource "docker_container" "foo" { - name = "tf-test" - image = "${docker_image.foo.latest}" -} -` - -const testAccDockerContainerVolumeConfig = ` -resource "docker_image" "foo" { - name = "nginx:latest" -} - -resource "docker_volume" "foo" { - name = "testAccDockerContainerVolume_volume" -} - -resource "docker_container" "foo" { - name = "tf-test" - image = "${docker_image.foo.latest}" - - volumes { - volume_name = 
"${docker_volume.foo.name}" - container_path = "/tmp/volume" - read_only = false - } -} -` - -const testAccDockerContainerCustomizedConfig = ` -resource "docker_image" "foo" { - name = "nginx:latest" -} - -resource "docker_container" "foo" { - name = "tf-test" - image = "${docker_image.foo.latest}" - entrypoint = ["/bin/bash", "-c", "ping localhost"] - user = "root:root" - restart = "on-failure" - destroy_grace_seconds = 10 - max_retry_count = 5 - memory = 512 - memory_swap = 2048 - cpu_shares = 32 - - capabilities { - add= ["ALL"] - drop = ["SYS_ADMIN"] - } - - dns = ["8.8.8.8"] - dns_opts = ["rotate"] - dns_search = ["example.com"] - labels { - env = "prod" - role = "test" - } - log_driver = "json-file" - log_opts = { - max-size = "10m" - max-file = 20 - } - network_mode = "bridge" - - networks = ["${docker_network.test_network.name}"] - network_alias = ["tftest"] - - host { - host = "testhost" - ip = "10.0.1.0" - } - - host { - host = "testhost2" - ip = "10.0.2.0" - } -} - -resource "docker_network" "test_network" { - name = "test" -} -` - -const testAccDockerContainerUploadConfig = ` -resource "docker_image" "foo" { - name = "nginx:latest" -} - -resource "docker_container" "foo" { - name = "tf-test" - image = "${docker_image.foo.latest}" - - upload { - content = "foo" - file = "/terraform/test.txt" - } -} -` diff --git a/builtin/providers/docker/resource_docker_image.go b/builtin/providers/docker/resource_docker_image.go deleted file mode 100644 index eb84a2570..000000000 --- a/builtin/providers/docker/resource_docker_image.go +++ /dev/null @@ -1,47 +0,0 @@ -package docker - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerImage() *schema.Resource { - return &schema.Resource{ - Create: resourceDockerImageCreate, - Read: resourceDockerImageRead, - Update: resourceDockerImageUpdate, - Delete: resourceDockerImageDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - 
}, - - "latest": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "keep_locally": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "pull_trigger": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"pull_triggers"}, - Deprecated: "Use field pull_triggers instead", - }, - - "pull_triggers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} diff --git a/builtin/providers/docker/resource_docker_image_funcs.go b/builtin/providers/docker/resource_docker_image_funcs.go deleted file mode 100644 index 9c27b4259..000000000 --- a/builtin/providers/docker/resource_docker_image_funcs.go +++ /dev/null @@ -1,201 +0,0 @@ -package docker - -import ( - "fmt" - "strings" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerImageCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - apiImage, err := findImage(d, client) - if err != nil { - return fmt.Errorf("Unable to read Docker image into resource: %s", err) - } - - d.SetId(apiImage.ID + d.Get("name").(string)) - d.Set("latest", apiImage.ID) - - return nil -} - -func resourceDockerImageRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - var data Data - if err := fetchLocalImages(&data, client); err != nil { - return fmt.Errorf("Error reading docker image list: %s", err) - } - foundImage := searchLocalImages(data, d.Get("name").(string)) - - if foundImage != nil { - d.Set("latest", foundImage.ID) - } else { - d.SetId("") - } - - return nil -} - -func resourceDockerImageUpdate(d *schema.ResourceData, meta interface{}) error { - // We need to re-read in case switching parameters affects - // the value of "latest" or others - client := meta.(*dc.Client) - apiImage, err := findImage(d, client) - if 
err != nil { - return fmt.Errorf("Unable to read Docker image into resource: %s", err) - } - - d.Set("latest", apiImage.ID) - - return nil -} - -func resourceDockerImageDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - err := removeImage(d, client) - if err != nil { - return fmt.Errorf("Unable to remove Docker image: %s", err) - } - d.SetId("") - return nil -} - -func searchLocalImages(data Data, imageName string) *dc.APIImages { - if apiImage, ok := data.DockerImages[imageName]; ok { - return apiImage - } - if apiImage, ok := data.DockerImages[imageName+":latest"]; ok { - imageName = imageName + ":latest" - return apiImage - } - return nil -} - -func removeImage(d *schema.ResourceData, client *dc.Client) error { - var data Data - - if keepLocally := d.Get("keep_locally").(bool); keepLocally { - return nil - } - - if err := fetchLocalImages(&data, client); err != nil { - return err - } - - imageName := d.Get("name").(string) - if imageName == "" { - return fmt.Errorf("Empty image name is not allowed") - } - - foundImage := searchLocalImages(data, imageName) - - if foundImage != nil { - err := client.RemoveImage(foundImage.ID) - if err != nil { - return err - } - } - - return nil -} - -func fetchLocalImages(data *Data, client *dc.Client) error { - images, err := client.ListImages(dc.ListImagesOptions{All: false}) - if err != nil { - return fmt.Errorf("Unable to list Docker images: %s", err) - } - - if data.DockerImages == nil { - data.DockerImages = make(map[string]*dc.APIImages) - } - - // Docker uses different nomenclatures in different places...sometimes a short - // ID, sometimes long, etc. So we store both in the map so we can always find - // the same image object. We store the tags, too. 
- for i, image := range images { - data.DockerImages[image.ID[:12]] = &images[i] - data.DockerImages[image.ID] = &images[i] - for _, repotag := range image.RepoTags { - data.DockerImages[repotag] = &images[i] - } - } - - return nil -} - -func pullImage(data *Data, client *dc.Client, image string) error { - // TODO: Test local registry handling. It should be working - // based on the code that was ported over - - pullOpts := parseImageOptions(image) - auth := dc.AuthConfiguration{} - - if err := client.PullImage(pullOpts, auth); err != nil { - return fmt.Errorf("Error pulling image %s: %s\n", image, err) - } - - return fetchLocalImages(data, client) -} - -func parseImageOptions(image string) dc.PullImageOptions { - pullOpts := dc.PullImageOptions{} - - splitImageName := strings.Split(image, ":") - switch len(splitImageName) { - - // It's in registry:port/username/repo:tag or registry:port/repo:tag format - case 3: - splitPortRepo := strings.Split(splitImageName[1], "/") - pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] - pullOpts.Tag = splitImageName[2] - pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/") - - // It's either registry:port/username/repo, registry:port/repo, - // or repo:tag with default registry - case 2: - splitPortRepo := strings.Split(splitImageName[1], "/") - switch len(splitPortRepo) { - // repo:tag - case 1: - pullOpts.Repository = splitImageName[0] - pullOpts.Tag = splitImageName[1] - - // registry:port/username/repo or registry:port/repo - default: - pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] - pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/") - pullOpts.Tag = "latest" - } - - // Plain username/repo or repo - default: - pullOpts.Repository = image - } - - return pullOpts -} - -func findImage(d *schema.ResourceData, client *dc.Client) (*dc.APIImages, error) { - var data Data - if err := fetchLocalImages(&data, client); err != nil { - return 
nil, err - } - - imageName := d.Get("name").(string) - if imageName == "" { - return nil, fmt.Errorf("Empty image name is not allowed") - } - - if err := pullImage(&data, client, imageName); err != nil { - return nil, fmt.Errorf("Unable to pull image %s: %s", imageName, err) - } - - foundImage := searchLocalImages(data, imageName) - if foundImage != nil { - return foundImage, nil - } - - return nil, fmt.Errorf("Unable to find or pull image %s", imageName) -} diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go deleted file mode 100644 index 4d75a6177..000000000 --- a/builtin/providers/docker/resource_docker_image_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package docker - -import ( - "fmt" - "regexp" - "testing" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var contentDigestRegexp = regexp.MustCompile(`\A[A-Za-z0-9_\+\.-]+:[A-Fa-f0-9]+\z`) - -func TestAccDockerImage_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDockerImageDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("docker_image.foo", "latest", contentDigestRegexp), - ), - }, - }, - }) -} - -func TestAccDockerImage_private(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDockerImageDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAddDockerPrivateImageConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("docker_image.foobar", "latest", contentDigestRegexp), - ), - }, - }, - }) -} - -func TestAccDockerImage_destroy(t *testing.T) { - resource.Test(t, resource.TestCase{ 
- PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "docker_image" { - continue - } - - client := testAccProvider.Meta().(*dc.Client) - _, err := client.InspectImage(rs.Primary.Attributes["latest"]) - if err != nil { - return err - } - } - return nil - }, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageKeepLocallyConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("docker_image.foobarzoo", "latest", contentDigestRegexp), - ), - }, - }, - }) -} - -func TestAccDockerImage_data(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageFromDataConfig, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("docker_image.foobarbaz", "latest", contentDigestRegexp), - ), - }, - }, - }) -} - -func TestAccDockerImage_data_pull_trigger(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - PreventPostDestroyRefresh: true, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerImageFromDataConfigWithPullTrigger, - Check: resource.ComposeTestCheckFunc( - resource.TestMatchResourceAttr("docker_image.foobarbazoo", "latest", contentDigestRegexp), - ), - }, - }, - }) -} - -func testAccDockerImageDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "docker_image" { - continue - } - - client := testAccProvider.Meta().(*dc.Client) - _, err := client.InspectImage(rs.Primary.Attributes["latest"]) - if err == nil { - return fmt.Errorf("Image still exists") - } else if err != dc.ErrNoSuchImage { - return err - } - } - return nil -} - -const 
testAccDockerImageConfig = ` -resource "docker_image" "foo" { - name = "alpine:3.1" -} -` - -const testAddDockerPrivateImageConfig = ` -resource "docker_image" "foobar" { - name = "gcr.io:443/google_containers/pause:0.8.0" -} -` - -const testAccDockerImageKeepLocallyConfig = ` -resource "docker_image" "foobarzoo" { - name = "crux:3.1" - keep_locally = true -} -` - -const testAccDockerImageFromDataConfig = ` -data "docker_registry_image" "foobarbaz" { - name = "alpine:3.1" -} -resource "docker_image" "foobarbaz" { - name = "${data.docker_registry_image.foobarbaz.name}" - pull_triggers = ["${data.docker_registry_image.foobarbaz.sha256_digest}"] -} -` - -const testAccDockerImageFromDataConfigWithPullTrigger = ` -data "docker_registry_image" "foobarbazoo" { - name = "alpine:3.1" -} -resource "docker_image" "foobarbazoo" { - name = "${data.docker_registry_image.foobarbazoo.name}" - pull_trigger = "${data.docker_registry_image.foobarbazoo.sha256_digest}" -} -` diff --git a/builtin/providers/docker/resource_docker_network.go b/builtin/providers/docker/resource_docker_network.go deleted file mode 100644 index 7279d2eeb..000000000 --- a/builtin/providers/docker/resource_docker_network.go +++ /dev/null @@ -1,142 +0,0 @@ -package docker - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceDockerNetworkCreate, - Read: resourceDockerNetworkRead, - Delete: resourceDockerNetworkDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "check_duplicate": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "driver": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "options": &schema.Schema{ - Type: schema.TypeMap, - 
Optional: true, - ForceNew: true, - Computed: true, - }, - - "internal": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "ipam_driver": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ipam_config": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: getIpamConfigElem(), - Set: resourceDockerIpamConfigHash, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "scope": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func getIpamConfigElem() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnet": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "aux_address": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDockerIpamConfigHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["subnet"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["ip_range"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["gateway"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v.(string))) - } - - if v, ok := m["aux_address"]; ok { - auxAddress := v.(map[string]interface{}) - - keys := make([]string, len(auxAddress)) - i := 0 - for k, _ := range auxAddress { - keys[i] = k - i++ - } - sort.Strings(keys) - - for _, k := range keys { - buf.WriteString(fmt.Sprintf("%v-%v-", k, auxAddress[k].(string))) - } - } - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/docker/resource_docker_network_funcs.go 
b/builtin/providers/docker/resource_docker_network_funcs.go deleted file mode 100644 index f5ff172b8..000000000 --- a/builtin/providers/docker/resource_docker_network_funcs.go +++ /dev/null @@ -1,122 +0,0 @@ -package docker - -import ( - "fmt" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerNetworkCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - createOpts := dc.CreateNetworkOptions{ - Name: d.Get("name").(string), - } - if v, ok := d.GetOk("check_duplicate"); ok { - createOpts.CheckDuplicate = v.(bool) - } - if v, ok := d.GetOk("driver"); ok { - createOpts.Driver = v.(string) - } - if v, ok := d.GetOk("options"); ok { - createOpts.Options = v.(map[string]interface{}) - } - if v, ok := d.GetOk("internal"); ok { - createOpts.Internal = v.(bool) - } - - ipamOpts := dc.IPAMOptions{} - ipamOptsSet := false - if v, ok := d.GetOk("ipam_driver"); ok { - ipamOpts.Driver = v.(string) - ipamOptsSet = true - } - if v, ok := d.GetOk("ipam_config"); ok { - ipamOpts.Config = ipamConfigSetToIpamConfigs(v.(*schema.Set)) - ipamOptsSet = true - } - - if ipamOptsSet { - createOpts.IPAM = ipamOpts - } - - var err error - var retNetwork *dc.Network - if retNetwork, err = client.CreateNetwork(createOpts); err != nil { - return fmt.Errorf("Unable to create network: %s", err) - } - if retNetwork == nil { - return fmt.Errorf("Returned network is nil") - } - - d.SetId(retNetwork.ID) - d.Set("name", retNetwork.Name) - d.Set("scope", retNetwork.Scope) - d.Set("driver", retNetwork.Driver) - d.Set("options", retNetwork.Options) - - // The 'internal' property is not send back when create network - d.Set("internal", createOpts.Internal) - - return nil -} - -func resourceDockerNetworkRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - var err error - var retNetwork *dc.Network - if retNetwork, err = client.NetworkInfo(d.Id()); err != nil { - if _, ok 
:= err.(*dc.NoSuchNetwork); !ok { - return fmt.Errorf("Unable to inspect network: %s", err) - } - } - if retNetwork == nil { - d.SetId("") - return nil - } - - d.Set("scope", retNetwork.Scope) - d.Set("driver", retNetwork.Driver) - d.Set("options", retNetwork.Options) - d.Set("internal", retNetwork.Internal) - - return nil -} - -func resourceDockerNetworkDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - if err := client.RemoveNetwork(d.Id()); err != nil { - if _, ok := err.(*dc.NoSuchNetwork); !ok { - return fmt.Errorf("Error deleting network %s: %s", d.Id(), err) - } - } - - d.SetId("") - return nil -} - -func ipamConfigSetToIpamConfigs(ipamConfigSet *schema.Set) []dc.IPAMConfig { - ipamConfigs := make([]dc.IPAMConfig, ipamConfigSet.Len()) - - for i, ipamConfigInt := range ipamConfigSet.List() { - ipamConfigRaw := ipamConfigInt.(map[string]interface{}) - - ipamConfig := dc.IPAMConfig{} - ipamConfig.Subnet = ipamConfigRaw["subnet"].(string) - ipamConfig.IPRange = ipamConfigRaw["ip_range"].(string) - ipamConfig.Gateway = ipamConfigRaw["gateway"].(string) - - auxAddressRaw := ipamConfigRaw["aux_address"].(map[string]interface{}) - ipamConfig.AuxAddress = make(map[string]string, len(auxAddressRaw)) - for k, v := range auxAddressRaw { - ipamConfig.AuxAddress[k] = v.(string) - } - - ipamConfigs[i] = ipamConfig - } - - return ipamConfigs -} diff --git a/builtin/providers/docker/resource_docker_network_test.go b/builtin/providers/docker/resource_docker_network_test.go deleted file mode 100644 index 5fe7f8b3e..000000000 --- a/builtin/providers/docker/resource_docker_network_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package docker - -import ( - "fmt" - "testing" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDockerNetwork_basic(t *testing.T) { - var n dc.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerNetworkConfig, - Check: resource.ComposeTestCheckFunc( - testAccNetwork("docker_network.foo", &n), - ), - }, - }, - }) -} - -func testAccNetwork(n string, network *dc.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*dc.Client) - networks, err := client.ListNetworks() - if err != nil { - return err - } - - for _, n := range networks { - if n.ID == rs.Primary.ID { - inspected, err := client.NetworkInfo(n.ID) - if err != nil { - return fmt.Errorf("Network could not be obtained: %s", err) - } - *network = *inspected - return nil - } - } - - return fmt.Errorf("Network not found: %s", rs.Primary.ID) - } -} - -const testAccDockerNetworkConfig = ` -resource "docker_network" "foo" { - name = "bar" -} -` - -func TestAccDockerNetwork_internal(t *testing.T) { - var n dc.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerNetworkInternalConfig, - Check: resource.ComposeTestCheckFunc( - testAccNetwork("docker_network.foobar", &n), - testAccNetworkInternal(&n, true), - ), - }, - }, - }) -} - -func testAccNetworkInternal(network *dc.Network, internal bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if network.Internal != internal { - return fmt.Errorf("Bad value for attribute 'internal': %t", network.Internal) - } - return nil - } -} - -const testAccDockerNetworkInternalConfig = ` -resource "docker_network" "foobar" { - name = "foobar" - internal = "true" -} -` diff --git a/builtin/providers/docker/resource_docker_volume.go 
b/builtin/providers/docker/resource_docker_volume.go deleted file mode 100644 index 33c22d581..000000000 --- a/builtin/providers/docker/resource_docker_volume.go +++ /dev/null @@ -1,102 +0,0 @@ -package docker - -import ( - "fmt" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDockerVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceDockerVolumeCreate, - Read: resourceDockerVolumeRead, - Delete: resourceDockerVolumeDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "driver": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "driver_opts": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - "mountpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceDockerVolumeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - createOpts := dc.CreateVolumeOptions{} - if v, ok := d.GetOk("name"); ok { - createOpts.Name = v.(string) - } - if v, ok := d.GetOk("driver"); ok { - createOpts.Driver = v.(string) - } - if v, ok := d.GetOk("driver_opts"); ok { - createOpts.DriverOpts = mapTypeMapValsToString(v.(map[string]interface{})) - } - - var err error - var retVolume *dc.Volume - if retVolume, err = client.CreateVolume(createOpts); err != nil { - return fmt.Errorf("Unable to create volume: %s", err) - } - if retVolume == nil { - return fmt.Errorf("Returned volume is nil") - } - - d.SetId(retVolume.Name) - d.Set("name", retVolume.Name) - d.Set("driver", retVolume.Driver) - d.Set("mountpoint", retVolume.Mountpoint) - - return nil -} - -func resourceDockerVolumeRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - var err error - var retVolume *dc.Volume - if retVolume, err = 
client.InspectVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume { - return fmt.Errorf("Unable to inspect volume: %s", err) - } - if retVolume == nil { - d.SetId("") - return nil - } - - d.Set("name", retVolume.Name) - d.Set("driver", retVolume.Driver) - d.Set("mountpoint", retVolume.Mountpoint) - - return nil -} - -func resourceDockerVolumeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*dc.Client) - - if err := client.RemoveVolume(d.Id()); err != nil && err != dc.ErrNoSuchVolume { - return fmt.Errorf("Error deleting volume %s: %s", d.Id(), err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/docker/resource_docker_volume_test.go b/builtin/providers/docker/resource_docker_volume_test.go deleted file mode 100644 index 38fec3c4e..000000000 --- a/builtin/providers/docker/resource_docker_volume_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package docker - -import ( - "fmt" - "testing" - - dc "github.com/fsouza/go-dockerclient" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDockerVolume_basic(t *testing.T) { - var v dc.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDockerVolumeConfig, - Check: resource.ComposeTestCheckFunc( - checkDockerVolume("docker_volume.foo", &v), - resource.TestCheckResourceAttr("docker_volume.foo", "id", "testAccDockerVolume_basic"), - resource.TestCheckResourceAttr("docker_volume.foo", "name", "testAccDockerVolume_basic"), - ), - }, - }, - }) -} - -func checkDockerVolume(n string, volume *dc.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*dc.Client) - volumes, err := 
client.ListVolumes(dc.ListVolumesOptions{}) - if err != nil { - return err - } - - for _, v := range volumes { - if v.Name == rs.Primary.ID { - inspected, err := client.InspectVolume(v.Name) - if err != nil { - return fmt.Errorf("Volume could not be inspected: %s", err) - } - *volume = *inspected - return nil - } - } - - return fmt.Errorf("Volume not found: %s", rs.Primary.ID) - } -} - -const testAccDockerVolumeConfig = ` -resource "docker_volume" "foo" { - name = "testAccDockerVolume_basic" -} -` diff --git a/builtin/providers/dyn/config.go b/builtin/providers/dyn/config.go deleted file mode 100644 index 6910ef76d..000000000 --- a/builtin/providers/dyn/config.go +++ /dev/null @@ -1,32 +0,0 @@ -package dyn - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/logging" - "github.com/nesv/go-dynect/dynect" -) - -type Config struct { - CustomerName string - Username string - Password string -} - -// Client() returns a new client for accessing dyn. -func (c *Config) Client() (*dynect.ConvenientClient, error) { - client := dynect.NewConvenientClient(c.CustomerName) - if logging.IsDebugOrHigher() { - client.Verbose(true) - } - - err := client.Login(c.Username, c.Password) - if err != nil { - return nil, fmt.Errorf("Error setting up Dyn client: %s", err) - } - - log.Printf("[INFO] Dyn client configured for customer: %s, user: %s", c.CustomerName, c.Username) - - return client, nil -} diff --git a/builtin/providers/dyn/provider.go b/builtin/providers/dyn/provider.go deleted file mode 100644 index c591745ae..000000000 --- a/builtin/providers/dyn/provider.go +++ /dev/null @@ -1,50 +0,0 @@ -package dyn - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "customer_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DYN_CUSTOMER_NAME", nil), - Description: "A Dyn customer name.", - }, - - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DYN_USERNAME", nil), - Description: "A Dyn username.", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("DYN_PASSWORD", nil), - Description: "The Dyn password.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "dyn_record": resourceDynRecord(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - CustomerName: d.Get("customer_name").(string), - Username: d.Get("username").(string), - Password: d.Get("password").(string), - } - - return config.Client() -} diff --git a/builtin/providers/dyn/provider_test.go b/builtin/providers/dyn/provider_test.go deleted file mode 100644 index da148ff2f..000000000 --- a/builtin/providers/dyn/provider_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package dyn - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "dyn": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DYN_CUSTOMER_NAME"); v == "" { 
- t.Fatal("DYN_CUSTOMER_NAME must be set for acceptance tests") - } - - if v := os.Getenv("DYN_USERNAME"); v == "" { - t.Fatal("DYN_USERNAME must be set for acceptance tests") - } - - if v := os.Getenv("DYN_PASSWORD"); v == "" { - t.Fatal("DYN_PASSWORD must be set for acceptance tests.") - } - - if v := os.Getenv("DYN_ZONE"); v == "" { - t.Fatal("DYN_ZONE must be set for acceptance tests. The domain is used to ` and destroy record against.") - } -} diff --git a/builtin/providers/dyn/resource_dyn_record.go b/builtin/providers/dyn/resource_dyn_record.go deleted file mode 100644 index 7f7b66fd5..000000000 --- a/builtin/providers/dyn/resource_dyn_record.go +++ /dev/null @@ -1,198 +0,0 @@ -package dyn - -import ( - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/nesv/go-dynect/dynect" -) - -var mutex = &sync.Mutex{} - -func resourceDynRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceDynRecordCreate, - Read: resourceDynRecordRead, - Update: resourceDynRecordUpdate, - Delete: resourceDynRecordDelete, - - Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "fqdn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "0", // 0 means use zone default - }, - }, - } -} - -func resourceDynRecordCreate(d *schema.ResourceData, meta interface{}) error { - mutex.Lock() - - client := meta.(*dynect.ConvenientClient) - - record := &dynect.Record{ - Name: d.Get("name").(string), - Zone: d.Get("zone").(string), - Type: d.Get("type").(string), - TTL: d.Get("ttl").(string), - Value: 
d.Get("value").(string), - } - log.Printf("[DEBUG] Dyn record create configuration: %#v", record) - - // create the record - err := client.CreateRecord(record) - if err != nil { - mutex.Unlock() - return fmt.Errorf("Failed to create Dyn record: %s", err) - } - - // publish the zone - err = client.PublishZone(record.Zone) - if err != nil { - mutex.Unlock() - return fmt.Errorf("Failed to publish Dyn zone: %s", err) - } - - // get the record ID - err = client.GetRecordID(record) - if err != nil { - mutex.Unlock() - return fmt.Errorf("%s", err) - } - d.SetId(record.ID) - - mutex.Unlock() - return resourceDynRecordRead(d, meta) -} - -func resourceDynRecordRead(d *schema.ResourceData, meta interface{}) error { - mutex.Lock() - defer mutex.Unlock() - - client := meta.(*dynect.ConvenientClient) - - record := &dynect.Record{ - ID: d.Id(), - Name: d.Get("name").(string), - Zone: d.Get("zone").(string), - TTL: d.Get("ttl").(string), - FQDN: d.Get("fqdn").(string), - Type: d.Get("type").(string), - } - - err := client.GetRecord(record) - if err != nil { - return fmt.Errorf("Couldn't find Dyn record: %s", err) - } - - d.Set("zone", record.Zone) - d.Set("fqdn", record.FQDN) - d.Set("name", record.Name) - d.Set("type", record.Type) - d.Set("ttl", record.TTL) - d.Set("value", record.Value) - - return nil -} - -func resourceDynRecordUpdate(d *schema.ResourceData, meta interface{}) error { - mutex.Lock() - - client := meta.(*dynect.ConvenientClient) - - record := &dynect.Record{ - Name: d.Get("name").(string), - Zone: d.Get("zone").(string), - TTL: d.Get("ttl").(string), - Type: d.Get("type").(string), - Value: d.Get("value").(string), - } - log.Printf("[DEBUG] Dyn record update configuration: %#v", record) - - // update the record - err := client.UpdateRecord(record) - if err != nil { - mutex.Unlock() - return fmt.Errorf("Failed to update Dyn record: %s", err) - } - - // publish the zone - err = client.PublishZone(record.Zone) - if err != nil { - mutex.Unlock() - return 
fmt.Errorf("Failed to publish Dyn zone: %s", err) - } - - // get the record ID - err = client.GetRecordID(record) - if err != nil { - mutex.Unlock() - return fmt.Errorf("%s", err) - } - d.SetId(record.ID) - - mutex.Unlock() - return resourceDynRecordRead(d, meta) -} - -func resourceDynRecordDelete(d *schema.ResourceData, meta interface{}) error { - mutex.Lock() - defer mutex.Unlock() - - client := meta.(*dynect.ConvenientClient) - - record := &dynect.Record{ - ID: d.Id(), - Name: d.Get("name").(string), - Zone: d.Get("zone").(string), - FQDN: d.Get("fqdn").(string), - Type: d.Get("type").(string), - } - - log.Printf("[INFO] Deleting Dyn record: %s, %s", record.FQDN, record.ID) - - // delete the record - err := client.DeleteRecord(record) - if err != nil { - return fmt.Errorf("Failed to delete Dyn record: %s", err) - } - - // publish the zone - err = client.PublishZone(record.Zone) - if err != nil { - return fmt.Errorf("Failed to publish Dyn zone: %s", err) - } - - return nil -} diff --git a/builtin/providers/dyn/resource_dyn_record_test.go b/builtin/providers/dyn/resource_dyn_record_test.go deleted file mode 100644 index e23367283..000000000 --- a/builtin/providers/dyn/resource_dyn_record_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package dyn - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/nesv/go-dynect/dynect" -) - -func TestAccDynRecord_Basic(t *testing.T) { - var record dynect.Record - zone := os.Getenv("DYN_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDynRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckDynRecordConfig_basic, zone), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynRecordExists("dyn_record.foobar", &record), - testAccCheckDynRecordAttributes(&record), - resource.TestCheckResourceAttr( - 
"dyn_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "value", "192.168.0.10"), - ), - }, - }, - }) -} - -func TestAccDynRecord_Updated(t *testing.T) { - var record dynect.Record - zone := os.Getenv("DYN_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDynRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckDynRecordConfig_basic, zone), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynRecordExists("dyn_record.foobar", &record), - testAccCheckDynRecordAttributes(&record), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "value", "192.168.0.10"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckDynRecordConfig_new_value, zone), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynRecordExists("dyn_record.foobar", &record), - testAccCheckDynRecordAttributesUpdated(&record), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "name", "terraform"), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar", "value", "192.168.0.11"), - ), - }, - }, - }) -} - -func TestAccDynRecord_Multiple(t *testing.T) { - var record dynect.Record - zone := os.Getenv("DYN_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDynRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckDynRecordConfig_multiple, zone, zone, zone), - Check: resource.ComposeTestCheckFunc( - testAccCheckDynRecordExists("dyn_record.foobar1", &record), - 
testAccCheckDynRecordAttributes(&record), - resource.TestCheckResourceAttr( - "dyn_record.foobar1", "name", "terraform1"), - resource.TestCheckResourceAttr( - "dyn_record.foobar1", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar1", "value", "192.168.0.10"), - resource.TestCheckResourceAttr( - "dyn_record.foobar2", "name", "terraform2"), - resource.TestCheckResourceAttr( - "dyn_record.foobar2", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar2", "value", "192.168.1.10"), - resource.TestCheckResourceAttr( - "dyn_record.foobar3", "name", "terraform3"), - resource.TestCheckResourceAttr( - "dyn_record.foobar3", "zone", zone), - resource.TestCheckResourceAttr( - "dyn_record.foobar3", "value", "192.168.2.10"), - ), - }, - }, - }) -} - -func testAccCheckDynRecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*dynect.ConvenientClient) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "dyn_record" { - continue - } - - foundRecord := &dynect.Record{ - Zone: rs.Primary.Attributes["zone"], - ID: rs.Primary.ID, - FQDN: rs.Primary.Attributes["fqdn"], - Type: rs.Primary.Attributes["type"], - } - - err := client.GetRecord(foundRecord) - - if err != nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckDynRecordAttributes(record *dynect.Record) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Value != "192.168.0.10" { - return fmt.Errorf("Bad value: %s", record.Value) - } - - return nil - } -} - -func testAccCheckDynRecordAttributesUpdated(record *dynect.Record) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if record.Value != "192.168.0.11" { - return fmt.Errorf("Bad value: %s", record.Value) - } - - return nil - } -} - -func testAccCheckDynRecordExists(n string, record *dynect.Record) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { 
- return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*dynect.ConvenientClient) - - foundRecord := &dynect.Record{ - Zone: rs.Primary.Attributes["zone"], - ID: rs.Primary.ID, - FQDN: rs.Primary.Attributes["fqdn"], - Type: rs.Primary.Attributes["type"], - } - - err := client.GetRecord(foundRecord) - - if err != nil { - return err - } - - if foundRecord.ID != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *record = *foundRecord - - return nil - } -} - -const testAccCheckDynRecordConfig_basic = ` -resource "dyn_record" "foobar" { - zone = "%s" - name = "terraform" - value = "192.168.0.10" - type = "A" - ttl = 3600 -}` - -const testAccCheckDynRecordConfig_new_value = ` -resource "dyn_record" "foobar" { - zone = "%s" - name = "terraform" - value = "192.168.0.11" - type = "A" - ttl = 3600 -}` - -const testAccCheckDynRecordConfig_multiple = ` -resource "dyn_record" "foobar1" { - zone = "%s" - name = "terraform1" - value = "192.168.0.10" - type = "A" - ttl = 3600 -} -resource "dyn_record" "foobar2" { - zone = "%s" - name = "terraform2" - value = "192.168.1.10" - type = "A" - ttl = 3600 -} -resource "dyn_record" "foobar3" { - zone = "%s" - name = "terraform3" - value = "192.168.2.10" - type = "A" - ttl = 3600 -}` diff --git a/builtin/providers/external/data_source.go b/builtin/providers/external/data_source.go deleted file mode 100644 index f41e86b67..000000000 --- a/builtin/providers/external/data_source.go +++ /dev/null @@ -1,93 +0,0 @@ -package external - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSource() *schema.Resource { - return &schema.Resource{ - Read: dataSourceRead, - - Schema: map[string]*schema.Schema{ - "program": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "query": &schema.Schema{ 
- Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "result": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func dataSourceRead(d *schema.ResourceData, meta interface{}) error { - - programI := d.Get("program").([]interface{}) - query := d.Get("query").(map[string]interface{}) - - // This would be a ValidateFunc if helper/schema allowed these - // to be applied to lists. - if err := validateProgramAttr(programI); err != nil { - return err - } - - program := make([]string, len(programI)) - for i, vI := range programI { - program[i] = vI.(string) - } - - cmd := exec.Command(program[0], program[1:]...) - - queryJson, err := json.Marshal(query) - if err != nil { - // Should never happen, since we know query will always be a map - // from string to string, as guaranteed by d.Get and our schema. - return err - } - - cmd.Stdin = bytes.NewReader(queryJson) - - resultJson, err := cmd.Output() - if err != nil { - if exitErr, ok := err.(*exec.ExitError); ok { - if exitErr.Stderr != nil && len(exitErr.Stderr) > 0 { - return fmt.Errorf("failed to execute %q: %s", program[0], string(exitErr.Stderr)) - } - return fmt.Errorf("command %q failed with no error message", program[0]) - } else { - return fmt.Errorf("failed to execute %q: %s", program[0], err) - } - } - - result := map[string]string{} - err = json.Unmarshal(resultJson, &result) - if err != nil { - return fmt.Errorf("command %q produced invalid JSON: %s", program[0], err) - } - - d.Set("result", result) - - d.SetId("-") - return nil -} diff --git a/builtin/providers/external/data_source_test.go b/builtin/providers/external/data_source_test.go deleted file mode 100644 index b1ceabddf..000000000 --- a/builtin/providers/external/data_source_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package external - -import ( - "fmt" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "testing" 
- - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -const testDataSourceConfig_basic = ` -data "external" "test" { - program = ["%s", "cheese"] - - query = { - value = "pizza" - } -} - -output "query_value" { - value = "${data.external.test.result["query_value"]}" -} - -output "argument" { - value = "${data.external.test.result["argument"]}" -} -` - -func TestDataSource_basic(t *testing.T) { - programPath, err := buildDataSourceTestProgram() - if err != nil { - t.Fatal(err) - return - } - - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDataSourceConfig_basic, programPath), - Check: func(s *terraform.State) error { - _, ok := s.RootModule().Resources["data.external.test"] - if !ok { - return fmt.Errorf("missing data resource") - } - - outputs := s.RootModule().Outputs - - if outputs["argument"] == nil { - return fmt.Errorf("missing 'argument' output") - } - if outputs["query_value"] == nil { - return fmt.Errorf("missing 'query_value' output") - } - - if outputs["argument"].Value != "cheese" { - return fmt.Errorf( - "'argument' output is %q; want 'cheese'", - outputs["argument"].Value, - ) - } - if outputs["query_value"].Value != "pizza" { - return fmt.Errorf( - "'query_value' output is %q; want 'pizza'", - outputs["query_value"].Value, - ) - } - - return nil - }, - }, - }, - }) -} - -const testDataSourceConfig_error = ` -data "external" "test" { - program = ["%s"] - - query = { - fail = "true" - } -} -` - -func TestDataSource_error(t *testing.T) { - programPath, err := buildDataSourceTestProgram() - if err != nil { - t.Fatal(err) - return - } - - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDataSourceConfig_error, programPath), - ExpectError: regexp.MustCompile("I was asked to fail"), - }, - }, - }) -} - -func 
buildDataSourceTestProgram() (string, error) { - // We have a simple Go program that we use as a stub for testing. - cmd := exec.Command( - "go", "install", - "github.com/hashicorp/terraform/builtin/providers/external/test-programs/tf-acc-external-data-source", - ) - err := cmd.Run() - - if err != nil { - return "", fmt.Errorf("failed to build test stub program: %s", err) - } - - gopath := os.Getenv("GOPATH") - if gopath == "" { - gopath = filepath.Join(os.Getenv("HOME") + "/go") - } - - programPath := path.Join( - filepath.SplitList(gopath)[0], "bin", "tf-acc-external-data-source", - ) - return programPath, nil -} diff --git a/builtin/providers/external/provider.go b/builtin/providers/external/provider.go deleted file mode 100644 index 24a72ad98..000000000 --- a/builtin/providers/external/provider.go +++ /dev/null @@ -1,15 +0,0 @@ -package external - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ - "external": dataSource(), - }, - ResourcesMap: map[string]*schema.Resource{}, - } -} diff --git a/builtin/providers/external/provider_test.go b/builtin/providers/external/provider_test.go deleted file mode 100644 index b5afda93c..000000000 --- a/builtin/providers/external/provider_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package external - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -var testProviders = map[string]terraform.ResourceProvider{ - "external": Provider(), -} diff --git a/builtin/providers/external/test-programs/tf-acc-external-data-source/main.go b/builtin/providers/external/test-programs/tf-acc-external-data-source/main.go deleted file mode 100644 index 
f495cc249..000000000 --- a/builtin/providers/external/test-programs/tf-acc-external-data-source/main.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" -) - -// This is a minimal implementation of the external data source protocol -// intended only for use in the provider acceptance tests. -// -// In practice it's likely not much harder to just write a real Terraform -// plugin if you're going to be writing your data source in Go anyway; -// this example is just in Go because we want to avoid introducing -// additional language runtimes into the test environment. -func main() { - queryBytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - panic(err) - } - - var query map[string]string - err = json.Unmarshal(queryBytes, &query) - if err != nil { - panic(err) - } - - if query["fail"] != "" { - fmt.Fprintf(os.Stderr, "I was asked to fail\n") - os.Exit(1) - } - - var result = map[string]string{ - "result": "yes", - "query_value": query["value"], - } - - if len(os.Args) >= 2 { - result["argument"] = os.Args[1] - } - - resultBytes, err := json.Marshal(result) - if err != nil { - panic(err) - } - - os.Stdout.Write(resultBytes) - os.Exit(0) -} diff --git a/builtin/providers/external/util.go b/builtin/providers/external/util.go deleted file mode 100644 index fd378fb3e..000000000 --- a/builtin/providers/external/util.go +++ /dev/null @@ -1,35 +0,0 @@ -package external - -import ( - "fmt" - "os/exec" -) - -// validateProgramAttr is a validation function for the "program" attribute we -// accept as input on our resources. -// -// The attribute is assumed to be specified in schema as a list of strings. 
-func validateProgramAttr(v interface{}) error { - args := v.([]interface{}) - if len(args) < 1 { - return fmt.Errorf("'program' list must contain at least one element") - } - - for i, vI := range args { - if _, ok := vI.(string); !ok { - return fmt.Errorf( - "'program' element %d is %T; a string is required", - i, vI, - ) - } - } - - // first element is assumed to be an executable command, possibly found - // using the PATH environment variable. - _, err := exec.LookPath(args[0].(string)) - if err != nil { - return fmt.Errorf("can't find external program %q", args[0]) - } - - return nil -} diff --git a/builtin/providers/fastly/config.go b/builtin/providers/fastly/config.go deleted file mode 100644 index a2e194818..000000000 --- a/builtin/providers/fastly/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package fastly - -import ( - "fmt" - - gofastly "github.com/sethvargo/go-fastly" -) - -type Config struct { - ApiKey string -} - -type FastlyClient struct { - conn *gofastly.Client -} - -func (c *Config) Client() (interface{}, error) { - var client FastlyClient - - if c.ApiKey == "" { - return nil, fmt.Errorf("[Err] No API key for Fastly") - } - - fconn, err := gofastly.NewClient(c.ApiKey) - if err != nil { - return nil, err - } - - client.conn = fconn - return &client, nil -} diff --git a/builtin/providers/fastly/data_source_ip_ranges.go b/builtin/providers/fastly/data_source_ip_ranges.go deleted file mode 100644 index cc418465c..000000000 --- a/builtin/providers/fastly/data_source_ip_ranges.go +++ /dev/null @@ -1,70 +0,0 @@ -package fastly - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "sort" - "strconv" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -type dataSourceFastlyIPRangesResult struct { - Addresses []string -} - -func dataSourceFastlyIPRanges() *schema.Resource { - return &schema.Resource{ - Read: dataSourceFastlyIPRangesRead, - - Schema: 
map[string]*schema.Schema{ - "cidr_blocks": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceFastlyIPRangesRead(d *schema.ResourceData, meta interface{}) error { - - conn := cleanhttp.DefaultClient() - - log.Printf("[DEBUG] Reading IP ranges") - - res, err := conn.Get("https://api.fastly.com/public-ip-list") - - if err != nil { - return fmt.Errorf("Error listing IP ranges: %s", err) - } - - defer res.Body.Close() - - data, err := ioutil.ReadAll(res.Body) - - if err != nil { - return fmt.Errorf("Error reading response body: %s", err) - } - - d.SetId(strconv.Itoa(hashcode.String(string(data)))) - - result := new(dataSourceFastlyIPRangesResult) - - if err := json.Unmarshal(data, result); err != nil { - return fmt.Errorf("Error parsing result: %s", err) - } - - sort.Strings(result.Addresses) - - if err := d.Set("cidr_blocks", result.Addresses); err != nil { - return fmt.Errorf("Error setting ip ranges: %s", err) - } - - return nil - -} diff --git a/builtin/providers/fastly/data_source_ip_ranges_test.go b/builtin/providers/fastly/data_source_ip_ranges_test.go deleted file mode 100644 index 26e4d8f56..000000000 --- a/builtin/providers/fastly/data_source_ip_ranges_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package fastly - -import ( - "fmt" - "net" - "sort" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccFastlyIPRanges(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFastlyIPRangesConfig, - Check: resource.ComposeTestCheckFunc( - testAccFastlyIPRanges("data.fastly_ip_ranges.some"), - ), - }, - }, - }) -} - -func testAccFastlyIPRanges(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - r := s.RootModule().Resources[n] - 
a := r.Primary.Attributes - - var ( - cidrBlockSize int - err error - ) - - if cidrBlockSize, err = strconv.Atoi(a["cidr_blocks.#"]); err != nil { - return err - } - - if cidrBlockSize < 10 { - return fmt.Errorf("cidr_blocks seem suspiciously low: %d", cidrBlockSize) - } - - var cidrBlocks sort.StringSlice = make([]string, cidrBlockSize) - - for i := range make([]string, cidrBlockSize) { - - block := a[fmt.Sprintf("cidr_blocks.%d", i)] - - if _, _, err := net.ParseCIDR(block); err != nil { - return fmt.Errorf("malformed CIDR block %s: %s", block, err) - } - - cidrBlocks[i] = block - - } - - if !sort.IsSorted(cidrBlocks) { - return fmt.Errorf("unexpected order of cidr_blocks: %s", cidrBlocks) - } - - return nil - } -} - -const testAccFastlyIPRangesConfig = ` -data "fastly_ip_ranges" "some" { -} -` diff --git a/builtin/providers/fastly/provider.go b/builtin/providers/fastly/provider.go deleted file mode 100644 index eee4be8e8..000000000 --- a/builtin/providers/fastly/provider.go +++ /dev/null @@ -1,37 +0,0 @@ -package fastly - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "FASTLY_API_KEY", - }, nil), - Description: "Fastly API Key from https://app.fastly.com/#account", - }, - }, - DataSourcesMap: map[string]*schema.Resource{ - "fastly_ip_ranges": dataSourceFastlyIPRanges(), - }, - ResourcesMap: map[string]*schema.Resource{ - "fastly_service_v1": resourceServiceV1(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - ApiKey: d.Get("api_key").(string), - } - return config.Client() -} diff --git a/builtin/providers/fastly/provider_test.go b/builtin/providers/fastly/provider_test.go deleted file mode 100644 index e567354b0..000000000 --- a/builtin/providers/fastly/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package fastly - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "fastly": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("FASTLY_API_KEY"); v == "" { - t.Fatal("FASTLY_API_KEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1.go b/builtin/providers/fastly/resource_fastly_service_v1.go deleted file mode 100644 index e2a9ba07f..000000000 --- 
a/builtin/providers/fastly/resource_fastly_service_v1.go +++ /dev/null @@ -1,2717 +0,0 @@ -package fastly - -import ( - "crypto/sha1" - "encoding/hex" - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/schema" - gofastly "github.com/sethvargo/go-fastly" -) - -var fastlyNoServiceFoundErr = errors.New("No matching Fastly Service found") - -func resourceServiceV1() *schema.Resource { - return &schema.Resource{ - Create: resourceServiceV1Create, - Read: resourceServiceV1Read, - Update: resourceServiceV1Update, - Delete: resourceServiceV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name for this Service", - }, - - // Active Version represents the currently activated version in Fastly. In - // Terraform, we abstract this number away from the users and manage - // creating and activating. It's used internally, but also exported for - // users to see. 
- "active_version": { - Type: schema.TypeInt, - Computed: true, - }, - - "domain": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "The domain that this Service will respond to", - }, - - "comment": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "condition": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "statement": { - Type: schema.TypeString, - Required: true, - Description: "The statement used to determine if the condition is met", - StateFunc: func(v interface{}) string { - value := v.(string) - // Trim newlines and spaces, to match Fastly API - return strings.TrimSpace(value) - }, - }, - "priority": { - Type: schema.TypeInt, - Required: true, - Description: "A number used to determine the order in which multiple conditions execute. 
Lower numbers execute first", - }, - "type": { - Type: schema.TypeString, - Required: true, - Description: "Type of the condition, either `REQUEST`, `RESPONSE`, or `CACHE`", - }, - }, - }, - }, - - "default_ttl": { - Type: schema.TypeInt, - Optional: true, - Default: 3600, - Description: "The default Time-to-live (TTL) for the version", - }, - - "default_host": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The default hostname for the version", - }, - - "healthcheck": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name to refer to this healthcheck", - }, - "host": { - Type: schema.TypeString, - Required: true, - Description: "Which host to check", - }, - "path": { - Type: schema.TypeString, - Required: true, - Description: "The path to check", - }, - // optional fields - "check_interval": { - Type: schema.TypeInt, - Optional: true, - Default: 5000, - Description: "How often to run the healthcheck in milliseconds", - }, - "expected_response": { - Type: schema.TypeInt, - Optional: true, - Default: 200, - Description: "The status code expected from the host", - }, - "http_version": { - Type: schema.TypeString, - Optional: true, - Default: "1.1", - Description: "Whether to use version 1.0 or 1.1 HTTP", - }, - "initial": { - Type: schema.TypeInt, - Optional: true, - Default: 2, - Description: "When loading a config, the initial number of probes to be seen as OK", - }, - "method": { - Type: schema.TypeString, - Optional: true, - Default: "HEAD", - Description: "Which HTTP method to use", - }, - "threshold": { - Type: schema.TypeInt, - Optional: true, - Default: 3, - Description: "How many healthchecks must succeed to be considered healthy", - }, - "timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 500, - Description: "Timeout in milliseconds", - }, - "window": { - 
Type: schema.TypeInt, - Optional: true, - Default: 5, - Description: "The number of most recent healthcheck queries to keep for this healthcheck", - }, - }, - }, - }, - - "backend": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name for this Backend", - }, - "address": { - Type: schema.TypeString, - Required: true, - Description: "An IPv4, hostname, or IPv6 address for the Backend", - }, - // Optional fields, defaults where they exist - "auto_loadbalance": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Should this Backend be load balanced", - }, - "between_bytes_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 10000, - Description: "How long to wait between bytes in milliseconds", - }, - "connect_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 1000, - Description: "How long to wait for a timeout in milliseconds", - }, - "error_threshold": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Number of errors to allow before the Backend is marked as down", - }, - "first_byte_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 15000, - Description: "How long to wait for the first bytes in milliseconds", - }, - "healthcheck": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "The healthcheck name that should be used for this Backend", - }, - "max_conn": { - Type: schema.TypeInt, - Optional: true, - Default: 200, - Description: "Maximum number of connections for this Backend", - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Default: 80, - Description: "The port number Backend responds on. 
Default 80", - }, - "request_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition, which if met, will select this backend during a request.", - }, - "shield": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "The POP of the shield designated to reduce inbound load.", - }, - "ssl_check_cert": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Be strict on checking SSL certs", - }, - "ssl_hostname": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "SSL certificate hostname", - Deprecated: "Use ssl_cert_hostname and ssl_sni_hostname instead.", - }, - "ssl_cert_hostname": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "SSL certificate hostname for cert verification", - }, - "ssl_sni_hostname": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "SSL certificate hostname for SNI verification", - }, - // UseSSL is something we want to support in the future, but - // requires SSL setup we don't yet have - // TODO: Provide all SSL fields from https://docs.fastly.com/api/config#backend - // "use_ssl": &schema.Schema{ - // Type: schema.TypeBool, - // Optional: true, - // Default: false, - // Description: "Whether or not to use SSL to reach the Backend", - // }, - "weight": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - Description: "The portion of traffic to send to a specific origins. 
Each origin receives weight/total of the traffic.", - }, - }, - }, - }, - - "force_destroy": { - Type: schema.TypeBool, - Optional: true, - }, - - "cache_setting": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name to refer to this Cache Setting", - }, - "action": { - Type: schema.TypeString, - Optional: true, - Description: "Action to take", - }, - // optional - "cache_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition to check if this Cache Setting applies", - }, - "stale_ttl": { - Type: schema.TypeInt, - Optional: true, - Description: "Max 'Time To Live' for stale (unreachable) objects.", - Default: 300, - }, - "ttl": { - Type: schema.TypeInt, - Optional: true, - Description: "The 'Time To Live' for the object", - }, - }, - }, - }, - - "gzip": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name to refer to this gzip condition", - }, - // optional fields - "content_types": { - Type: schema.TypeSet, - Optional: true, - Description: "Content types to apply automatic gzip to", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "extensions": { - Type: schema.TypeSet, - Optional: true, - Description: "File extensions to apply automatic gzip to. 
Do not include '.'", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "cache_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition controlling when this gzip configuration applies.", - }, - }, - }, - }, - - "header": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name to refer to this Header object", - }, - "action": { - Type: schema.TypeString, - Required: true, - Description: "One of set, append, delete, regex, or regex_repeat", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - var found bool - for _, t := range []string{"set", "append", "delete", "regex", "regex_repeat"} { - if v.(string) == t { - found = true - } - } - if !found { - es = append(es, fmt.Errorf( - "Fastly Header action is case sensitive and must be one of 'set', 'append', 'delete', 'regex', or 'regex_repeat'; found: %s", v.(string))) - } - return - }, - }, - "type": { - Type: schema.TypeString, - Required: true, - Description: "Type to manipulate: request, fetch, cache, response", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - var found bool - for _, t := range []string{"request", "fetch", "cache", "response"} { - if v.(string) == t { - found = true - } - } - if !found { - es = append(es, fmt.Errorf( - "Fastly Header type is case sensitive and must be one of 'request', 'fetch', 'cache', or 'response'; found: %s", v.(string))) - } - return - }, - }, - "destination": { - Type: schema.TypeString, - Required: true, - Description: "Header this affects", - }, - // Optional fields, defaults where they exist - "ignore_if_set": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Don't add the header if it is already. (Only applies to 'set' action.). 
Default `false`", - }, - "source": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Variable to be used as a source for the header content (Does not apply to 'delete' action.)", - }, - "regex": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Regular expression to use (Only applies to 'regex' and 'regex_repeat' actions.)", - }, - "substitution": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Value to substitute in place of regular expression. (Only applies to 'regex' and 'regex_repeat'.)", - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - Description: "Lower priorities execute first. (Default: 100.)", - }, - "request_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Optional name of a request condition to apply.", - }, - "cache_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Optional name of a cache condition to apply.", - }, - "response_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Optional name of a response condition to apply.", - }, - }, - }, - }, - - "s3logging": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this logging setup", - }, - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: "S3 Bucket name to store logs in", - }, - "s3_access_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("FASTLY_S3_ACCESS_KEY", ""), - Description: "AWS Access Key", - Sensitive: true, - }, - "s3_secret_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("FASTLY_S3_SECRET_KEY", ""), - Description: "AWS Secret Key", - Sensitive: true, - }, - // Optional 
fields - "path": { - Type: schema.TypeString, - Optional: true, - Description: "Path to store the files. Must end with a trailing slash", - }, - "domain": { - Type: schema.TypeString, - Optional: true, - Description: "Bucket endpoint", - }, - "gzip_level": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Gzip Compression level", - }, - "period": { - Type: schema.TypeInt, - Optional: true, - Default: 3600, - Description: "How frequently the logs should be transferred, in seconds (Default 3600)", - }, - "format": { - Type: schema.TypeString, - Optional: true, - Default: "%h %l %u %t %r %>s", - Description: "Apache-style string or VCL variables to use for log formatting", - }, - "format_version": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "The version of the custom logging format used for the configured endpoint. Can be either 1 or 2. (Default: 1)", - ValidateFunc: validateLoggingFormatVersion, - }, - "timestamp_format": { - Type: schema.TypeString, - Optional: true, - Default: "%Y-%m-%dT%H:%M:%S.000", - Description: "specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)", - }, - "response_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition to apply this logging.", - }, - }, - }, - }, - - "papertrail": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this logging setup", - }, - "address": { - Type: schema.TypeString, - Required: true, - Description: "The address of the papertrail service", - }, - "port": { - Type: schema.TypeInt, - Required: true, - Description: "The port of the papertrail service", - }, - // Optional fields - "format": { - Type: schema.TypeString, - Optional: true, - Default: "%h %l %u %t %r %>s", - Description: "Apache-style string or VCL variables to use 
for log formatting", - }, - "response_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition to apply this logging", - }, - }, - }, - }, - "sumologic": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this logging setup", - }, - "url": { - Type: schema.TypeString, - Required: true, - Description: "The URL to POST to.", - }, - // Optional fields - "format": { - Type: schema.TypeString, - Optional: true, - Default: "%h %l %u %t %r %>s", - Description: "Apache-style string or VCL variables to use for log formatting", - }, - "format_version": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "The version of the custom logging format used for the configured endpoint. Can be either 1 or 2. (Default: 1)", - ValidateFunc: validateLoggingFormatVersion, - }, - "response_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition to apply this logging.", - }, - "message_type": { - Type: schema.TypeString, - Optional: true, - Default: "classic", - Description: "How the message should be formatted.", - ValidateFunc: validateLoggingMessageType, - }, - }, - }, - }, - - "gcslogging": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this logging setup", - }, - "email": { - Type: schema.TypeString, - Required: true, - Description: "The email address associated with the target GCS bucket on your account.", - }, - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: "The name of the bucket in which to store the logs.", - }, - "secret_key": { - Type: schema.TypeString, - Required: true, - 
Description: "The secret key associated with the target gcs bucket on your account.", - Sensitive: true, - }, - // Optional fields - "path": { - Type: schema.TypeString, - Optional: true, - Description: "Path to store the files. Must end with a trailing slash", - }, - "gzip_level": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Gzip Compression level", - }, - "period": { - Type: schema.TypeInt, - Optional: true, - Default: 3600, - Description: "How frequently the logs should be transferred, in seconds (Default 3600)", - }, - "format": { - Type: schema.TypeString, - Optional: true, - Default: "%h %l %u %t %r %>s", - Description: "Apache-style string or VCL variables to use for log formatting", - }, - "timestamp_format": { - Type: schema.TypeString, - Optional: true, - Default: "%Y-%m-%dT%H:%M:%S.000", - Description: "specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)", - }, - "response_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a condition to apply this logging.", - }, - }, - }, - }, - - "response_object": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this request object", - }, - // Optional fields - "status": { - Type: schema.TypeInt, - Optional: true, - Default: 200, - Description: "The HTTP Status Code of the object", - }, - "response": { - Type: schema.TypeString, - Optional: true, - Default: "OK", - Description: "The HTTP Response of the object", - }, - "content": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "The content to deliver for the response object", - }, - "content_type": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "The MIME type of the content", - }, - "request_condition": { - Type: schema.TypeString, - Optional: true, - 
Default: "", - Description: "Name of the condition to be checked during the request phase to see if the object should be delivered", - }, - "cache_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of the condition checked after we have retrieved an object. If the condition passes then deliver this Request Object instead.", - }, - }, - }, - }, - - "request_setting": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required fields - "name": { - Type: schema.TypeString, - Required: true, - Description: "Unique name to refer to this Request Setting", - }, - // Optional fields - "request_condition": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Name of a request condition to apply. If there is no condition this setting will always be applied.", - }, - "max_stale_age": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - Description: "How old an object is allowed to be, in seconds. 
Default `60`", - }, - "force_miss": { - Type: schema.TypeBool, - Optional: true, - Description: "Force a cache miss for the request", - }, - "force_ssl": { - Type: schema.TypeBool, - Optional: true, - Description: "Forces the request use SSL", - }, - "action": { - Type: schema.TypeString, - Optional: true, - Description: "Allows you to terminate request handling and immediately perform an action", - }, - "bypass_busy_wait": { - Type: schema.TypeBool, - Optional: true, - Description: "Disable collapsed forwarding", - }, - "hash_keys": { - Type: schema.TypeString, - Optional: true, - Description: "Comma separated list of varnish request object fields that should be in the hash key", - }, - "xff": { - Type: schema.TypeString, - Optional: true, - Default: "append", - Description: "X-Forwarded-For options", - }, - "timer_support": { - Type: schema.TypeBool, - Optional: true, - Description: "Injects the X-Timer info into the request", - }, - "geo_headers": { - Type: schema.TypeBool, - Optional: true, - Description: "Inject Fastly-Geo-Country, Fastly-Geo-City, and Fastly-Geo-Region", - }, - "default_host": { - Type: schema.TypeString, - Optional: true, - Description: "the host header", - }, - }, - }, - }, - "vcl": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "A name to refer to this VCL configuration", - }, - "content": { - Type: schema.TypeString, - Required: true, - Description: "The contents of this VCL configuration", - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - "main": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Should this VCL configuration be the main configuration", - }, - }, - }, - }, - }, - } -} - -func resourceServiceV1Create(d *schema.ResourceData, 
meta interface{}) error { - if err := validateVCLs(d); err != nil { - return err - } - - conn := meta.(*FastlyClient).conn - service, err := conn.CreateService(&gofastly.CreateServiceInput{ - Name: d.Get("name").(string), - Comment: "Managed by Terraform", - }) - - if err != nil { - return err - } - - d.SetId(service.ID) - return resourceServiceV1Update(d, meta) -} - -func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error { - if err := validateVCLs(d); err != nil { - return err - } - - conn := meta.(*FastlyClient).conn - - // Update Name. No new verions is required for this - if d.HasChange("name") { - _, err := conn.UpdateService(&gofastly.UpdateServiceInput{ - ID: d.Id(), - Name: d.Get("name").(string), - }) - if err != nil { - return err - } - } - - // Once activated, Versions are locked and become immutable. This is true for - // versions that are no longer active. For Domains, Backends, DefaultHost and - // DefaultTTL, a new Version must be created first, and updates posted to that - // Version. 
Loop these attributes and determine if we need to create a new version first - var needsChange bool - for _, v := range []string{ - "domain", - "backend", - "default_host", - "default_ttl", - "header", - "gzip", - "healthcheck", - "s3logging", - "papertrail", - "response_object", - "condition", - "request_setting", - "cache_setting", - "vcl", - } { - if d.HasChange(v) { - needsChange = true - } - } - - if needsChange { - latestVersion := d.Get("active_version").(int) - if latestVersion == 0 { - // If the service was just created, there is an empty Version 1 available - // that is unlocked and can be updated - latestVersion = 1 - } else { - // Clone the latest version, giving us an unlocked version we can modify - log.Printf("[DEBUG] Creating clone of version (%d) for updates", latestVersion) - newVersion, err := conn.CloneVersion(&gofastly.CloneVersionInput{ - Service: d.Id(), - Version: latestVersion, - }) - if err != nil { - return err - } - - // The new version number is named "Number", but it's actually a string - latestVersion = newVersion.Number - - // New versions are not immediately found in the API, or are not - // immediately mutable, so we need to sleep a few and let Fastly ready - // itself. 
Typically, 7 seconds is enough - log.Print("[DEBUG] Sleeping 7 seconds to allow Fastly Version to be available") - time.Sleep(7 * time.Second) - } - - // update general settings - if d.HasChange("default_host") || d.HasChange("default_ttl") { - opts := gofastly.UpdateSettingsInput{ - Service: d.Id(), - Version: latestVersion, - // default_ttl has the same default value of 3600 that is provided by - // the Fastly API, so it's safe to include here - DefaultTTL: uint(d.Get("default_ttl").(int)), - } - - if attr, ok := d.GetOk("default_host"); ok { - opts.DefaultHost = attr.(string) - } - - log.Printf("[DEBUG] Update Settings opts: %#v", opts) - _, err := conn.UpdateSettings(&opts) - if err != nil { - return err - } - } - - // Conditions need to be updated first, as they can be referenced by other - // configuraiton objects (Backends, Request Headers, etc) - - // Find difference in Conditions - if d.HasChange("condition") { - // Note: we don't utilize the PUT endpoint to update these objects, we simply - // destroy any that have changed, and create new ones with the updated - // values. This is how Terraform works with nested sub resources, we only - // get the full diff not a partial set item diff. 
Because this is done - // on a new version of the Fastly Service configuration, this is considered safe - - oc, nc := d.GetChange("condition") - if oc == nil { - oc = new(schema.Set) - } - if nc == nil { - nc = new(schema.Set) - } - - ocs := oc.(*schema.Set) - ncs := nc.(*schema.Set) - removeConditions := ocs.Difference(ncs).List() - addConditions := ncs.Difference(ocs).List() - - // DELETE old Conditions - for _, cRaw := range removeConditions { - cf := cRaw.(map[string]interface{}) - opts := gofastly.DeleteConditionInput{ - Service: d.Id(), - Version: latestVersion, - Name: cf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Conditions Removal opts: %#v", opts) - err := conn.DeleteCondition(&opts) - if err != nil { - return err - } - } - - // POST new Conditions - for _, cRaw := range addConditions { - cf := cRaw.(map[string]interface{}) - opts := gofastly.CreateConditionInput{ - Service: d.Id(), - Version: latestVersion, - Name: cf["name"].(string), - Type: cf["type"].(string), - // need to trim leading/tailing spaces, incase the config has HEREDOC - // formatting and contains a trailing new line - Statement: strings.TrimSpace(cf["statement"].(string)), - Priority: cf["priority"].(int), - } - - log.Printf("[DEBUG] Create Conditions Opts: %#v", opts) - _, err := conn.CreateCondition(&opts) - if err != nil { - return err - } - } - } - - // Find differences in domains - if d.HasChange("domain") { - od, nd := d.GetChange("domain") - if od == nil { - od = new(schema.Set) - } - if nd == nil { - nd = new(schema.Set) - } - - ods := od.(*schema.Set) - nds := nd.(*schema.Set) - - remove := ods.Difference(nds).List() - add := nds.Difference(ods).List() - - // Delete removed domains - for _, dRaw := range remove { - df := dRaw.(map[string]interface{}) - opts := gofastly.DeleteDomainInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - log.Printf("[DEBUG] Fastly Domain removal opts: %#v", opts) - err := conn.DeleteDomain(&opts) - if err 
!= nil { - return err - } - } - - // POST new Domains - for _, dRaw := range add { - df := dRaw.(map[string]interface{}) - opts := gofastly.CreateDomainInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - if v, ok := df["comment"]; ok { - opts.Comment = v.(string) - } - - log.Printf("[DEBUG] Fastly Domain Addition opts: %#v", opts) - _, err := conn.CreateDomain(&opts) - if err != nil { - return err - } - } - } - - // Healthchecks need to be updated BEFORE backends - if d.HasChange("healthcheck") { - oh, nh := d.GetChange("healthcheck") - if oh == nil { - oh = new(schema.Set) - } - if nh == nil { - nh = new(schema.Set) - } - - ohs := oh.(*schema.Set) - nhs := nh.(*schema.Set) - removeHealthCheck := ohs.Difference(nhs).List() - addHealthCheck := nhs.Difference(ohs).List() - - // DELETE old healthcheck configurations - for _, hRaw := range removeHealthCheck { - hf := hRaw.(map[string]interface{}) - opts := gofastly.DeleteHealthCheckInput{ - Service: d.Id(), - Version: latestVersion, - Name: hf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Healthcheck removal opts: %#v", opts) - err := conn.DeleteHealthCheck(&opts) - if err != nil { - return err - } - } - - // POST new/updated Healthcheck - for _, hRaw := range addHealthCheck { - hf := hRaw.(map[string]interface{}) - - opts := gofastly.CreateHealthCheckInput{ - Service: d.Id(), - Version: latestVersion, - Name: hf["name"].(string), - Host: hf["host"].(string), - Path: hf["path"].(string), - CheckInterval: uint(hf["check_interval"].(int)), - ExpectedResponse: uint(hf["expected_response"].(int)), - HTTPVersion: hf["http_version"].(string), - Initial: uint(hf["initial"].(int)), - Method: hf["method"].(string), - Threshold: uint(hf["threshold"].(int)), - Timeout: uint(hf["timeout"].(int)), - Window: uint(hf["window"].(int)), - } - - log.Printf("[DEBUG] Create Healthcheck Opts: %#v", opts) - _, err := conn.CreateHealthCheck(&opts) - if err != nil { - return err - } - } - } - - // 
find difference in backends - if d.HasChange("backend") { - ob, nb := d.GetChange("backend") - if ob == nil { - ob = new(schema.Set) - } - if nb == nil { - nb = new(schema.Set) - } - - obs := ob.(*schema.Set) - nbs := nb.(*schema.Set) - removeBackends := obs.Difference(nbs).List() - addBackends := nbs.Difference(obs).List() - - // DELETE old Backends - for _, bRaw := range removeBackends { - bf := bRaw.(map[string]interface{}) - opts := gofastly.DeleteBackendInput{ - Service: d.Id(), - Version: latestVersion, - Name: bf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Backend removal opts: %#v", opts) - err := conn.DeleteBackend(&opts) - if err != nil { - return err - } - } - - // Find and post new Backends - for _, dRaw := range addBackends { - df := dRaw.(map[string]interface{}) - opts := gofastly.CreateBackendInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - Address: df["address"].(string), - AutoLoadbalance: gofastly.CBool(df["auto_loadbalance"].(bool)), - SSLCheckCert: gofastly.CBool(df["ssl_check_cert"].(bool)), - SSLHostname: df["ssl_hostname"].(string), - SSLCertHostname: df["ssl_cert_hostname"].(string), - SSLSNIHostname: df["ssl_sni_hostname"].(string), - Shield: df["shield"].(string), - Port: uint(df["port"].(int)), - BetweenBytesTimeout: uint(df["between_bytes_timeout"].(int)), - ConnectTimeout: uint(df["connect_timeout"].(int)), - ErrorThreshold: uint(df["error_threshold"].(int)), - FirstByteTimeout: uint(df["first_byte_timeout"].(int)), - MaxConn: uint(df["max_conn"].(int)), - Weight: uint(df["weight"].(int)), - RequestCondition: df["request_condition"].(string), - HealthCheck: df["healthcheck"].(string), - } - - log.Printf("[DEBUG] Create Backend Opts: %#v", opts) - _, err := conn.CreateBackend(&opts) - if err != nil { - return err - } - } - } - - if d.HasChange("header") { - oh, nh := d.GetChange("header") - if oh == nil { - oh = new(schema.Set) - } - if nh == nil { - nh = new(schema.Set) - } - - ohs := 
oh.(*schema.Set) - nhs := nh.(*schema.Set) - - remove := ohs.Difference(nhs).List() - add := nhs.Difference(ohs).List() - - // Delete removed headers - for _, dRaw := range remove { - df := dRaw.(map[string]interface{}) - opts := gofastly.DeleteHeaderInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - log.Printf("[DEBUG] Fastly Header removal opts: %#v", opts) - err := conn.DeleteHeader(&opts) - if err != nil { - return err - } - } - - // POST new Headers - for _, dRaw := range add { - opts, err := buildHeader(dRaw.(map[string]interface{})) - if err != nil { - log.Printf("[DEBUG] Error building Header: %s", err) - return err - } - opts.Service = d.Id() - opts.Version = latestVersion - - log.Printf("[DEBUG] Fastly Header Addition opts: %#v", opts) - _, err = conn.CreateHeader(opts) - if err != nil { - return err - } - } - } - - // Find differences in Gzips - if d.HasChange("gzip") { - og, ng := d.GetChange("gzip") - if og == nil { - og = new(schema.Set) - } - if ng == nil { - ng = new(schema.Set) - } - - ogs := og.(*schema.Set) - ngs := ng.(*schema.Set) - - remove := ogs.Difference(ngs).List() - add := ngs.Difference(ogs).List() - - // Delete removed gzip rules - for _, dRaw := range remove { - df := dRaw.(map[string]interface{}) - opts := gofastly.DeleteGzipInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - log.Printf("[DEBUG] Fastly Gzip removal opts: %#v", opts) - err := conn.DeleteGzip(&opts) - if err != nil { - return err - } - } - - // POST new Gzips - for _, dRaw := range add { - df := dRaw.(map[string]interface{}) - opts := gofastly.CreateGzipInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - CacheCondition: df["cache_condition"].(string), - } - - if v, ok := df["content_types"]; ok { - if len(v.(*schema.Set).List()) > 0 { - var cl []string - for _, c := range v.(*schema.Set).List() { - cl = append(cl, c.(string)) - } - opts.ContentTypes = 
strings.Join(cl, " ") - } - } - - if v, ok := df["extensions"]; ok { - if len(v.(*schema.Set).List()) > 0 { - var el []string - for _, e := range v.(*schema.Set).List() { - el = append(el, e.(string)) - } - opts.Extensions = strings.Join(el, " ") - } - } - - log.Printf("[DEBUG] Fastly Gzip Addition opts: %#v", opts) - _, err := conn.CreateGzip(&opts) - if err != nil { - return err - } - } - } - - // find difference in s3logging - if d.HasChange("s3logging") { - os, ns := d.GetChange("s3logging") - if os == nil { - os = new(schema.Set) - } - if ns == nil { - ns = new(schema.Set) - } - - oss := os.(*schema.Set) - nss := ns.(*schema.Set) - removeS3Logging := oss.Difference(nss).List() - addS3Logging := nss.Difference(oss).List() - - // DELETE old S3 Log configurations - for _, sRaw := range removeS3Logging { - sf := sRaw.(map[string]interface{}) - opts := gofastly.DeleteS3Input{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - } - - log.Printf("[DEBUG] Fastly S3 Logging removal opts: %#v", opts) - err := conn.DeleteS3(&opts) - if err != nil { - return err - } - } - - // POST new/updated S3 Logging - for _, sRaw := range addS3Logging { - sf := sRaw.(map[string]interface{}) - - // Fastly API will not error if these are omitted, so we throw an error - // if any of these are empty - for _, sk := range []string{"s3_access_key", "s3_secret_key"} { - if sf[sk].(string) == "" { - return fmt.Errorf("[ERR] No %s found for S3 Log stream setup for Service (%s)", sk, d.Id()) - } - } - - opts := gofastly.CreateS3Input{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - BucketName: sf["bucket_name"].(string), - AccessKey: sf["s3_access_key"].(string), - SecretKey: sf["s3_secret_key"].(string), - Period: uint(sf["period"].(int)), - GzipLevel: uint(sf["gzip_level"].(int)), - Domain: sf["domain"].(string), - Path: sf["path"].(string), - Format: sf["format"].(string), - FormatVersion: uint(sf["format_version"].(int)), - 
TimestampFormat: sf["timestamp_format"].(string), - ResponseCondition: sf["response_condition"].(string), - } - - log.Printf("[DEBUG] Create S3 Logging Opts: %#v", opts) - _, err := conn.CreateS3(&opts) - if err != nil { - return err - } - } - } - - // find difference in Papertrail - if d.HasChange("papertrail") { - os, ns := d.GetChange("papertrail") - if os == nil { - os = new(schema.Set) - } - if ns == nil { - ns = new(schema.Set) - } - - oss := os.(*schema.Set) - nss := ns.(*schema.Set) - removePapertrail := oss.Difference(nss).List() - addPapertrail := nss.Difference(oss).List() - - // DELETE old papertrail configurations - for _, pRaw := range removePapertrail { - pf := pRaw.(map[string]interface{}) - opts := gofastly.DeletePapertrailInput{ - Service: d.Id(), - Version: latestVersion, - Name: pf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Papertrail removal opts: %#v", opts) - err := conn.DeletePapertrail(&opts) - if err != nil { - return err - } - } - - // POST new/updated Papertrail - for _, pRaw := range addPapertrail { - pf := pRaw.(map[string]interface{}) - - opts := gofastly.CreatePapertrailInput{ - Service: d.Id(), - Version: latestVersion, - Name: pf["name"].(string), - Address: pf["address"].(string), - Port: uint(pf["port"].(int)), - Format: pf["format"].(string), - ResponseCondition: pf["response_condition"].(string), - } - - log.Printf("[DEBUG] Create Papertrail Opts: %#v", opts) - _, err := conn.CreatePapertrail(&opts) - if err != nil { - return err - } - } - } - - // find difference in Sumologic - if d.HasChange("sumologic") { - os, ns := d.GetChange("sumologic") - if os == nil { - os = new(schema.Set) - } - if ns == nil { - ns = new(schema.Set) - } - - oss := os.(*schema.Set) - nss := ns.(*schema.Set) - removeSumologic := oss.Difference(nss).List() - addSumologic := nss.Difference(oss).List() - - // DELETE old sumologic configurations - for _, pRaw := range removeSumologic { - sf := pRaw.(map[string]interface{}) - opts := 
gofastly.DeleteSumologicInput{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Sumologic removal opts: %#v", opts) - err := conn.DeleteSumologic(&opts) - if err != nil { - return err - } - } - - // POST new/updated Sumologic - for _, pRaw := range addSumologic { - sf := pRaw.(map[string]interface{}) - opts := gofastly.CreateSumologicInput{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - URL: sf["url"].(string), - Format: sf["format"].(string), - FormatVersion: sf["format_version"].(int), - ResponseCondition: sf["response_condition"].(string), - MessageType: sf["message_type"].(string), - } - - log.Printf("[DEBUG] Create Sumologic Opts: %#v", opts) - _, err := conn.CreateSumologic(&opts) - if err != nil { - return err - } - } - } - - // find difference in gcslogging - if d.HasChange("gcslogging") { - os, ns := d.GetChange("gcslogging") - if os == nil { - os = new(schema.Set) - } - if ns == nil { - ns = new(schema.Set) - } - - oss := os.(*schema.Set) - nss := ns.(*schema.Set) - removeGcslogging := oss.Difference(nss).List() - addGcslogging := nss.Difference(oss).List() - - // DELETE old gcslogging configurations - for _, pRaw := range removeGcslogging { - sf := pRaw.(map[string]interface{}) - opts := gofastly.DeleteGCSInput{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - } - - log.Printf("[DEBUG] Fastly gcslogging removal opts: %#v", opts) - err := conn.DeleteGCS(&opts) - if err != nil { - return err - } - } - - // POST new/updated gcslogging - for _, pRaw := range addGcslogging { - sf := pRaw.(map[string]interface{}) - opts := gofastly.CreateGCSInput{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - User: sf["email"].(string), - Bucket: sf["bucket_name"].(string), - SecretKey: sf["secret_key"].(string), - Format: sf["format"].(string), - ResponseCondition: sf["response_condition"].(string), - } - - log.Printf("[DEBUG] 
Create GCS Opts: %#v", opts) - _, err := conn.CreateGCS(&opts) - if err != nil { - return err - } - } - } - - // find difference in Response Object - if d.HasChange("response_object") { - or, nr := d.GetChange("response_object") - if or == nil { - or = new(schema.Set) - } - if nr == nil { - nr = new(schema.Set) - } - - ors := or.(*schema.Set) - nrs := nr.(*schema.Set) - removeResponseObject := ors.Difference(nrs).List() - addResponseObject := nrs.Difference(ors).List() - - // DELETE old response object configurations - for _, rRaw := range removeResponseObject { - rf := rRaw.(map[string]interface{}) - opts := gofastly.DeleteResponseObjectInput{ - Service: d.Id(), - Version: latestVersion, - Name: rf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Response Object removal opts: %#v", opts) - err := conn.DeleteResponseObject(&opts) - if err != nil { - return err - } - } - - // POST new/updated Response Object - for _, rRaw := range addResponseObject { - rf := rRaw.(map[string]interface{}) - - opts := gofastly.CreateResponseObjectInput{ - Service: d.Id(), - Version: latestVersion, - Name: rf["name"].(string), - Status: uint(rf["status"].(int)), - Response: rf["response"].(string), - Content: rf["content"].(string), - ContentType: rf["content_type"].(string), - RequestCondition: rf["request_condition"].(string), - CacheCondition: rf["cache_condition"].(string), - } - - log.Printf("[DEBUG] Create Response Object Opts: %#v", opts) - _, err := conn.CreateResponseObject(&opts) - if err != nil { - return err - } - } - } - - // find difference in request settings - if d.HasChange("request_setting") { - os, ns := d.GetChange("request_setting") - if os == nil { - os = new(schema.Set) - } - if ns == nil { - ns = new(schema.Set) - } - - ors := os.(*schema.Set) - nrs := ns.(*schema.Set) - removeRequestSettings := ors.Difference(nrs).List() - addRequestSettings := nrs.Difference(ors).List() - - // DELETE old Request Settings configurations - for _, sRaw := range 
removeRequestSettings { - sf := sRaw.(map[string]interface{}) - opts := gofastly.DeleteRequestSettingInput{ - Service: d.Id(), - Version: latestVersion, - Name: sf["name"].(string), - } - - log.Printf("[DEBUG] Fastly Request Setting removal opts: %#v", opts) - err := conn.DeleteRequestSetting(&opts) - if err != nil { - return err - } - } - - // POST new/updated Request Setting - for _, sRaw := range addRequestSettings { - opts, err := buildRequestSetting(sRaw.(map[string]interface{})) - if err != nil { - log.Printf("[DEBUG] Error building Requset Setting: %s", err) - return err - } - opts.Service = d.Id() - opts.Version = latestVersion - - log.Printf("[DEBUG] Create Request Setting Opts: %#v", opts) - _, err = conn.CreateRequestSetting(opts) - if err != nil { - return err - } - } - } - - // Find differences in VCLs - if d.HasChange("vcl") { - // Note: as above with Gzip and S3 logging, we don't utilize the PUT - // endpoint to update a VCL, we simply destroy it and create a new one. - oldVCLVal, newVCLVal := d.GetChange("vcl") - if oldVCLVal == nil { - oldVCLVal = new(schema.Set) - } - if newVCLVal == nil { - newVCLVal = new(schema.Set) - } - - oldVCLSet := oldVCLVal.(*schema.Set) - newVCLSet := newVCLVal.(*schema.Set) - - remove := oldVCLSet.Difference(newVCLSet).List() - add := newVCLSet.Difference(oldVCLSet).List() - - // Delete removed VCL configurations - for _, dRaw := range remove { - df := dRaw.(map[string]interface{}) - opts := gofastly.DeleteVCLInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - log.Printf("[DEBUG] Fastly VCL Removal opts: %#v", opts) - err := conn.DeleteVCL(&opts) - if err != nil { - return err - } - } - // POST new VCL configurations - for _, dRaw := range add { - df := dRaw.(map[string]interface{}) - opts := gofastly.CreateVCLInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - Content: df["content"].(string), - } - - log.Printf("[DEBUG] Fastly VCL Addition opts: %#v", 
opts) - _, err := conn.CreateVCL(&opts) - if err != nil { - return err - } - - // if this new VCL is the main - if df["main"].(bool) { - opts := gofastly.ActivateVCLInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - log.Printf("[DEBUG] Fastly VCL activation opts: %#v", opts) - _, err := conn.ActivateVCL(&opts) - if err != nil { - return err - } - - } - } - } - - // Find differences in Cache Settings - if d.HasChange("cache_setting") { - oc, nc := d.GetChange("cache_setting") - if oc == nil { - oc = new(schema.Set) - } - if nc == nil { - nc = new(schema.Set) - } - - ocs := oc.(*schema.Set) - ncs := nc.(*schema.Set) - - remove := ocs.Difference(ncs).List() - add := ncs.Difference(ocs).List() - - // Delete removed Cache Settings - for _, dRaw := range remove { - df := dRaw.(map[string]interface{}) - opts := gofastly.DeleteCacheSettingInput{ - Service: d.Id(), - Version: latestVersion, - Name: df["name"].(string), - } - - log.Printf("[DEBUG] Fastly Cache Settings removal opts: %#v", opts) - err := conn.DeleteCacheSetting(&opts) - if err != nil { - return err - } - } - - // POST new Cache Settings - for _, dRaw := range add { - opts, err := buildCacheSetting(dRaw.(map[string]interface{})) - if err != nil { - log.Printf("[DEBUG] Error building Cache Setting: %s", err) - return err - } - opts.Service = d.Id() - opts.Version = latestVersion - - log.Printf("[DEBUG] Fastly Cache Settings Addition opts: %#v", opts) - _, err = conn.CreateCacheSetting(opts) - if err != nil { - return err - } - } - } - - // validate version - log.Printf("[DEBUG] Validating Fastly Service (%s), Version (%v)", d.Id(), latestVersion) - valid, msg, err := conn.ValidateVersion(&gofastly.ValidateVersionInput{ - Service: d.Id(), - Version: latestVersion, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error checking validation: %s", err) - } - - if !valid { - return fmt.Errorf("[ERR] Invalid configuration for Fastly Service (%s): %s", d.Id(), msg) - } - - 
log.Printf("[DEBUG] Activating Fastly Service (%s), Version (%v)", d.Id(), latestVersion) - _, err = conn.ActivateVersion(&gofastly.ActivateVersionInput{ - Service: d.Id(), - Version: latestVersion, - }) - if err != nil { - return fmt.Errorf("[ERR] Error activating version (%d): %s", latestVersion, err) - } - - // Only if the version is valid and activated do we set the active_version. - // This prevents us from getting stuck in cloning an invalid version - d.Set("active_version", latestVersion) - } - - return resourceServiceV1Read(d, meta) -} - -func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*FastlyClient).conn - - // Find the Service. Discard the service because we need the ServiceDetails, - // not just a Service record - _, err := findService(d.Id(), meta) - if err != nil { - switch err { - case fastlyNoServiceFoundErr: - log.Printf("[WARN] %s for ID (%s)", err, d.Id()) - d.SetId("") - return nil - default: - return err - } - } - - s, err := conn.GetServiceDetails(&gofastly.GetServiceInput{ - ID: d.Id(), - }) - - if err != nil { - return err - } - - d.Set("name", s.Name) - d.Set("active_version", s.ActiveVersion.Number) - - // If CreateService succeeds, but initial updates to the Service fail, we'll - // have an empty ActiveService version (no version is active, so we can't - // query for information on it) - if s.ActiveVersion.Number != 0 { - settingsOpts := gofastly.GetSettingsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - } - if settings, err := conn.GetSettings(&settingsOpts); err == nil { - d.Set("default_host", settings.DefaultHost) - d.Set("default_ttl", settings.DefaultTTL) - } else { - return fmt.Errorf("[ERR] Error looking up Version settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - // TODO: update go-fastly to support an ActiveVersion struct, which contains - // domain and backend info in the response. 
Here we do 2 additional queries - // to find out that info - log.Printf("[DEBUG] Refreshing Domains for (%s)", d.Id()) - domainList, err := conn.ListDomains(&gofastly.ListDomainsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - // Refresh Domains - dl := flattenDomains(domainList) - - if err := d.Set("domain", dl); err != nil { - log.Printf("[WARN] Error setting Domains for (%s): %s", d.Id(), err) - } - - // Refresh Backends - log.Printf("[DEBUG] Refreshing Backends for (%s)", d.Id()) - backendList, err := conn.ListBackends(&gofastly.ListBackendsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - bl := flattenBackends(backendList) - - if err := d.Set("backend", bl); err != nil { - log.Printf("[WARN] Error setting Backends for (%s): %s", d.Id(), err) - } - - // refresh headers - log.Printf("[DEBUG] Refreshing Headers for (%s)", d.Id()) - headerList, err := conn.ListHeaders(&gofastly.ListHeadersInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - hl := flattenHeaders(headerList) - - if err := d.Set("header", hl); err != nil { - log.Printf("[WARN] Error setting Headers for (%s): %s", d.Id(), err) - } - - // refresh gzips - log.Printf("[DEBUG] Refreshing Gzips for (%s)", d.Id()) - gzipsList, err := conn.ListGzips(&gofastly.ListGzipsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - gl := flattenGzips(gzipsList) - - if err := d.Set("gzip", 
gl); err != nil { - log.Printf("[WARN] Error setting Gzips for (%s): %s", d.Id(), err) - } - - // refresh Healthcheck - log.Printf("[DEBUG] Refreshing Healthcheck for (%s)", d.Id()) - healthcheckList, err := conn.ListHealthChecks(&gofastly.ListHealthChecksInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - hcl := flattenHealthchecks(healthcheckList) - - if err := d.Set("healthcheck", hcl); err != nil { - log.Printf("[WARN] Error setting Healthcheck for (%s): %s", d.Id(), err) - } - - // refresh S3 Logging - log.Printf("[DEBUG] Refreshing S3 Logging for (%s)", d.Id()) - s3List, err := conn.ListS3s(&gofastly.ListS3sInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - sl := flattenS3s(s3List) - - if err := d.Set("s3logging", sl); err != nil { - log.Printf("[WARN] Error setting S3 Logging for (%s): %s", d.Id(), err) - } - - // refresh Papertrail Logging - log.Printf("[DEBUG] Refreshing Papertrail for (%s)", d.Id()) - papertrailList, err := conn.ListPapertrails(&gofastly.ListPapertrailsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Papertrail for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - pl := flattenPapertrails(papertrailList) - - if err := d.Set("papertrail", pl); err != nil { - log.Printf("[WARN] Error setting Papertrail for (%s): %s", d.Id(), err) - } - - // refresh Sumologic Logging - log.Printf("[DEBUG] Refreshing Sumologic for (%s)", d.Id()) - sumologicList, err := conn.ListSumologics(&gofastly.ListSumologicsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking 
up Sumologic for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - sul := flattenSumologics(sumologicList) - if err := d.Set("sumologic", sul); err != nil { - log.Printf("[WARN] Error setting Sumologic for (%s): %s", d.Id(), err) - } - - // refresh GCS Logging - log.Printf("[DEBUG] Refreshing GCS for (%s)", d.Id()) - GCSList, err := conn.ListGCSs(&gofastly.ListGCSsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up GCS for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - gcsl := flattenGCS(GCSList) - if err := d.Set("gcs", gcsl); err != nil { - log.Printf("[WARN] Error setting gcs for (%s): %s", d.Id(), err) - } - - // refresh Response Objects - log.Printf("[DEBUG] Refreshing Response Object for (%s)", d.Id()) - responseObjectList, err := conn.ListResponseObjects(&gofastly.ListResponseObjectsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - rol := flattenResponseObjects(responseObjectList) - - if err := d.Set("response_object", rol); err != nil { - log.Printf("[WARN] Error setting Response Object for (%s): %s", d.Id(), err) - } - - // refresh Conditions - log.Printf("[DEBUG] Refreshing Conditions for (%s)", d.Id()) - conditionList, err := conn.ListConditions(&gofastly.ListConditionsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - cl := flattenConditions(conditionList) - - if err := d.Set("condition", cl); err != nil { - log.Printf("[WARN] Error setting Conditions for (%s): %s", d.Id(), err) - } - - // refresh Request Settings - log.Printf("[DEBUG] Refreshing Request Settings for (%s)", d.Id()) - rsList, err := 
conn.ListRequestSettings(&gofastly.ListRequestSettingsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Request Settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - rl := flattenRequestSettings(rsList) - - if err := d.Set("request_setting", rl); err != nil { - log.Printf("[WARN] Error setting Request Settings for (%s): %s", d.Id(), err) - } - - // refresh VCLs - log.Printf("[DEBUG] Refreshing VCLs for (%s)", d.Id()) - vclList, err := conn.ListVCLs(&gofastly.ListVCLsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - if err != nil { - return fmt.Errorf("[ERR] Error looking up VCLs for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - vl := flattenVCLs(vclList) - - if err := d.Set("vcl", vl); err != nil { - log.Printf("[WARN] Error setting VCLs for (%s): %s", d.Id(), err) - } - - // refresh Cache Settings - log.Printf("[DEBUG] Refreshing Cache Settings for (%s)", d.Id()) - cslList, err := conn.ListCacheSettings(&gofastly.ListCacheSettingsInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - if err != nil { - return fmt.Errorf("[ERR] Error looking up Cache Settings for (%s), version (%v): %s", d.Id(), s.ActiveVersion.Number, err) - } - - csl := flattenCacheSettings(cslList) - - if err := d.Set("cache_setting", csl); err != nil { - log.Printf("[WARN] Error setting Cache Settings for (%s): %s", d.Id(), err) - } - - } else { - log.Printf("[DEBUG] Active Version for Service (%s) is empty, no state to refresh", d.Id()) - } - - return nil -} - -func resourceServiceV1Delete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*FastlyClient).conn - - // Fastly will fail to delete any service with an Active Version. 
- // If `force_destroy` is given, we deactivate the active version and then send - // the DELETE call - if d.Get("force_destroy").(bool) { - s, err := conn.GetServiceDetails(&gofastly.GetServiceInput{ - ID: d.Id(), - }) - - if err != nil { - return err - } - - if s.ActiveVersion.Number != 0 { - _, err := conn.DeactivateVersion(&gofastly.DeactivateVersionInput{ - Service: d.Id(), - Version: s.ActiveVersion.Number, - }) - if err != nil { - return err - } - } - } - - err := conn.DeleteService(&gofastly.DeleteServiceInput{ - ID: d.Id(), - }) - - if err != nil { - return err - } - - _, err = findService(d.Id(), meta) - if err != nil { - switch err { - // we expect no records to be found here - case fastlyNoServiceFoundErr: - d.SetId("") - return nil - default: - return err - } - } - - // findService above returned something and nil error, but shouldn't have - return fmt.Errorf("[WARN] Tried deleting Service (%s), but was still found", d.Id()) - -} - -func flattenDomains(list []*gofastly.Domain) []map[string]interface{} { - dl := make([]map[string]interface{}, 0, len(list)) - - for _, d := range list { - dl = append(dl, map[string]interface{}{ - "name": d.Name, - "comment": d.Comment, - }) - } - - return dl -} - -func flattenBackends(backendList []*gofastly.Backend) []map[string]interface{} { - var bl []map[string]interface{} - for _, b := range backendList { - // Convert Backend to a map for saving to state. 
- nb := map[string]interface{}{ - "name": b.Name, - "address": b.Address, - "auto_loadbalance": b.AutoLoadbalance, - "between_bytes_timeout": int(b.BetweenBytesTimeout), - "connect_timeout": int(b.ConnectTimeout), - "error_threshold": int(b.ErrorThreshold), - "first_byte_timeout": int(b.FirstByteTimeout), - "max_conn": int(b.MaxConn), - "port": int(b.Port), - "shield": b.Shield, - "ssl_check_cert": b.SSLCheckCert, - "ssl_hostname": b.SSLHostname, - "ssl_cert_hostname": b.SSLCertHostname, - "ssl_sni_hostname": b.SSLSNIHostname, - "weight": int(b.Weight), - "request_condition": b.RequestCondition, - "healthcheck": b.HealthCheck, - } - - bl = append(bl, nb) - } - return bl -} - -// findService finds a Fastly Service via the ListServices endpoint, returning -// the Service if found. -// -// Fastly API does not include any "deleted_at" type parameter to indicate -// that a Service has been deleted. GET requests to a deleted Service will -// return 200 OK and have the full output of the Service for an unknown time -// (days, in my testing). In order to determine if a Service is deleted, we -// need to hit /service and loop the returned Services, searching for the one -// in question. This endpoint only returns active or "alive" services. If the -// Service is not included, then it's "gone" -// -// Returns a fastlyNoServiceFoundErr error if the Service is not found in the -// ListServices response. 
-func findService(id string, meta interface{}) (*gofastly.Service, error) { - conn := meta.(*FastlyClient).conn - - l, err := conn.ListServices(&gofastly.ListServicesInput{}) - if err != nil { - return nil, fmt.Errorf("[WARN] Error listing services (%s): %s", id, err) - } - - for _, s := range l { - if s.ID == id { - log.Printf("[DEBUG] Found Service (%s)", id) - return s, nil - } - } - - return nil, fastlyNoServiceFoundErr -} - -func flattenHeaders(headerList []*gofastly.Header) []map[string]interface{} { - var hl []map[string]interface{} - for _, h := range headerList { - // Convert Header to a map for saving to state. - nh := map[string]interface{}{ - "name": h.Name, - "action": h.Action, - "ignore_if_set": h.IgnoreIfSet, - "type": h.Type, - "destination": h.Destination, - "source": h.Source, - "regex": h.Regex, - "substitution": h.Substitution, - "priority": int(h.Priority), - "request_condition": h.RequestCondition, - "cache_condition": h.CacheCondition, - "response_condition": h.ResponseCondition, - } - - for k, v := range nh { - if v == "" { - delete(nh, k) - } - } - - hl = append(hl, nh) - } - return hl -} - -func buildHeader(headerMap interface{}) (*gofastly.CreateHeaderInput, error) { - df := headerMap.(map[string]interface{}) - opts := gofastly.CreateHeaderInput{ - Name: df["name"].(string), - IgnoreIfSet: gofastly.CBool(df["ignore_if_set"].(bool)), - Destination: df["destination"].(string), - Priority: uint(df["priority"].(int)), - Source: df["source"].(string), - Regex: df["regex"].(string), - Substitution: df["substitution"].(string), - RequestCondition: df["request_condition"].(string), - CacheCondition: df["cache_condition"].(string), - ResponseCondition: df["response_condition"].(string), - } - - act := strings.ToLower(df["action"].(string)) - switch act { - case "set": - opts.Action = gofastly.HeaderActionSet - case "append": - opts.Action = gofastly.HeaderActionAppend - case "delete": - opts.Action = gofastly.HeaderActionDelete - case "regex": - 
opts.Action = gofastly.HeaderActionRegex - case "regex_repeat": - opts.Action = gofastly.HeaderActionRegexRepeat - } - - ty := strings.ToLower(df["type"].(string)) - switch ty { - case "request": - opts.Type = gofastly.HeaderTypeRequest - case "fetch": - opts.Type = gofastly.HeaderTypeFetch - case "cache": - opts.Type = gofastly.HeaderTypeCache - case "response": - opts.Type = gofastly.HeaderTypeResponse - } - - return &opts, nil -} - -func buildCacheSetting(cacheMap interface{}) (*gofastly.CreateCacheSettingInput, error) { - df := cacheMap.(map[string]interface{}) - opts := gofastly.CreateCacheSettingInput{ - Name: df["name"].(string), - StaleTTL: uint(df["stale_ttl"].(int)), - CacheCondition: df["cache_condition"].(string), - } - - if v, ok := df["ttl"]; ok { - opts.TTL = uint(v.(int)) - } - - act := strings.ToLower(df["action"].(string)) - switch act { - case "cache": - opts.Action = gofastly.CacheSettingActionCache - case "pass": - opts.Action = gofastly.CacheSettingActionPass - case "restart": - opts.Action = gofastly.CacheSettingActionRestart - } - - return &opts, nil -} - -func flattenGzips(gzipsList []*gofastly.Gzip) []map[string]interface{} { - var gl []map[string]interface{} - for _, g := range gzipsList { - // Convert Gzip to a map for saving to state. 
- ng := map[string]interface{}{ - "name": g.Name, - "cache_condition": g.CacheCondition, - } - - if g.Extensions != "" { - e := strings.Split(g.Extensions, " ") - var et []interface{} - for _, ev := range e { - et = append(et, ev) - } - ng["extensions"] = schema.NewSet(schema.HashString, et) - } - - if g.ContentTypes != "" { - c := strings.Split(g.ContentTypes, " ") - var ct []interface{} - for _, cv := range c { - ct = append(ct, cv) - } - ng["content_types"] = schema.NewSet(schema.HashString, ct) - } - - // prune any empty values that come from the default string value in structs - for k, v := range ng { - if v == "" { - delete(ng, k) - } - } - - gl = append(gl, ng) - } - - return gl -} - -func flattenHealthchecks(healthcheckList []*gofastly.HealthCheck) []map[string]interface{} { - var hl []map[string]interface{} - for _, h := range healthcheckList { - // Convert HealthChecks to a map for saving to state. - nh := map[string]interface{}{ - "name": h.Name, - "host": h.Host, - "path": h.Path, - "check_interval": h.CheckInterval, - "expected_response": h.ExpectedResponse, - "http_version": h.HTTPVersion, - "initial": h.Initial, - "method": h.Method, - "threshold": h.Threshold, - "timeout": h.Timeout, - "window": h.Window, - } - - // prune any empty values that come from the default string value in structs - for k, v := range nh { - if v == "" { - delete(nh, k) - } - } - - hl = append(hl, nh) - } - - return hl -} - -func flattenS3s(s3List []*gofastly.S3) []map[string]interface{} { - var sl []map[string]interface{} - for _, s := range s3List { - // Convert S3s to a map for saving to state. 
- ns := map[string]interface{}{ - "name": s.Name, - "bucket_name": s.BucketName, - "s3_access_key": s.AccessKey, - "s3_secret_key": s.SecretKey, - "path": s.Path, - "period": s.Period, - "domain": s.Domain, - "gzip_level": s.GzipLevel, - "format": s.Format, - "format_version": s.FormatVersion, - "timestamp_format": s.TimestampFormat, - "response_condition": s.ResponseCondition, - } - - // prune any empty values that come from the default string value in structs - for k, v := range ns { - if v == "" { - delete(ns, k) - } - } - - sl = append(sl, ns) - } - - return sl -} - -func flattenPapertrails(papertrailList []*gofastly.Papertrail) []map[string]interface{} { - var pl []map[string]interface{} - for _, p := range papertrailList { - // Convert Papertrails to a map for saving to state. - ns := map[string]interface{}{ - "name": p.Name, - "address": p.Address, - "port": p.Port, - "format": p.Format, - "response_condition": p.ResponseCondition, - } - - // prune any empty values that come from the default string value in structs - for k, v := range ns { - if v == "" { - delete(ns, k) - } - } - - pl = append(pl, ns) - } - - return pl -} - -func flattenSumologics(sumologicList []*gofastly.Sumologic) []map[string]interface{} { - var l []map[string]interface{} - for _, p := range sumologicList { - // Convert Sumologic to a map for saving to state. - ns := map[string]interface{}{ - "name": p.Name, - "url": p.URL, - "format": p.Format, - "response_condition": p.ResponseCondition, - "message_type": p.MessageType, - "format_version": int(p.FormatVersion), - } - - // prune any empty values that come from the default string value in structs - for k, v := range ns { - if v == "" { - delete(ns, k) - } - } - - l = append(l, ns) - } - - return l -} - -func flattenGCS(gcsList []*gofastly.GCS) []map[string]interface{} { - var GCSList []map[string]interface{} - for _, currentGCS := range gcsList { - // Convert gcs to a map for saving to state. 
- GCSMapString := map[string]interface{}{ - "name": currentGCS.Name, - "email": currentGCS.User, - "bucket_name": currentGCS.Bucket, - "secret_key": currentGCS.SecretKey, - "path": currentGCS.Path, - "period": int(currentGCS.Period), - "gzip_level": int(currentGCS.GzipLevel), - "response_condition": currentGCS.ResponseCondition, - "format": currentGCS.Format, - } - - // prune any empty values that come from the default string value in structs - for k, v := range GCSMapString { - if v == "" { - delete(GCSMapString, k) - } - } - - GCSList = append(GCSList, GCSMapString) - } - - return GCSList -} - -func flattenResponseObjects(responseObjectList []*gofastly.ResponseObject) []map[string]interface{} { - var rol []map[string]interface{} - for _, ro := range responseObjectList { - // Convert ResponseObjects to a map for saving to state. - nro := map[string]interface{}{ - "name": ro.Name, - "status": ro.Status, - "response": ro.Response, - "content": ro.Content, - "content_type": ro.ContentType, - "request_condition": ro.RequestCondition, - "cache_condition": ro.CacheCondition, - } - - // prune any empty values that come from the default string value in structs - for k, v := range nro { - if v == "" { - delete(nro, k) - } - } - - rol = append(rol, nro) - } - - return rol -} - -func flattenConditions(conditionList []*gofastly.Condition) []map[string]interface{} { - var cl []map[string]interface{} - for _, c := range conditionList { - // Convert Conditions to a map for saving to state. 
- nc := map[string]interface{}{ - "name": c.Name, - "statement": c.Statement, - "type": c.Type, - "priority": c.Priority, - } - - // prune any empty values that come from the default string value in structs - for k, v := range nc { - if v == "" { - delete(nc, k) - } - } - - cl = append(cl, nc) - } - - return cl -} - -func flattenRequestSettings(rsList []*gofastly.RequestSetting) []map[string]interface{} { - var rl []map[string]interface{} - for _, r := range rsList { - // Convert Request Settings to a map for saving to state. - nrs := map[string]interface{}{ - "name": r.Name, - "max_stale_age": r.MaxStaleAge, - "force_miss": r.ForceMiss, - "force_ssl": r.ForceSSL, - "action": r.Action, - "bypass_busy_wait": r.BypassBusyWait, - "hash_keys": r.HashKeys, - "xff": r.XForwardedFor, - "timer_support": r.TimerSupport, - "geo_headers": r.GeoHeaders, - "default_host": r.DefaultHost, - "request_condition": r.RequestCondition, - } - - // prune any empty values that come from the default string value in structs - for k, v := range nrs { - if v == "" { - delete(nrs, k) - } - } - - rl = append(rl, nrs) - } - - return rl -} - -func buildRequestSetting(requestSettingMap interface{}) (*gofastly.CreateRequestSettingInput, error) { - df := requestSettingMap.(map[string]interface{}) - opts := gofastly.CreateRequestSettingInput{ - Name: df["name"].(string), - MaxStaleAge: uint(df["max_stale_age"].(int)), - ForceMiss: gofastly.CBool(df["force_miss"].(bool)), - ForceSSL: gofastly.CBool(df["force_ssl"].(bool)), - BypassBusyWait: gofastly.CBool(df["bypass_busy_wait"].(bool)), - HashKeys: df["hash_keys"].(string), - TimerSupport: gofastly.CBool(df["timer_support"].(bool)), - GeoHeaders: gofastly.CBool(df["geo_headers"].(bool)), - DefaultHost: df["default_host"].(string), - RequestCondition: df["request_condition"].(string), - } - - act := strings.ToLower(df["action"].(string)) - switch act { - case "lookup": - opts.Action = gofastly.RequestSettingActionLookup - case "pass": - opts.Action = 
gofastly.RequestSettingActionPass - } - - xff := strings.ToLower(df["xff"].(string)) - switch xff { - case "clear": - opts.XForwardedFor = gofastly.RequestSettingXFFClear - case "leave": - opts.XForwardedFor = gofastly.RequestSettingXFFLeave - case "append": - opts.XForwardedFor = gofastly.RequestSettingXFFAppend - case "append_all": - opts.XForwardedFor = gofastly.RequestSettingXFFAppendAll - case "overwrite": - opts.XForwardedFor = gofastly.RequestSettingXFFOverwrite - } - - return &opts, nil -} - -func flattenCacheSettings(csList []*gofastly.CacheSetting) []map[string]interface{} { - var csl []map[string]interface{} - for _, cl := range csList { - // Convert Cache Settings to a map for saving to state. - clMap := map[string]interface{}{ - "name": cl.Name, - "action": cl.Action, - "cache_condition": cl.CacheCondition, - "stale_ttl": cl.StaleTTL, - "ttl": cl.TTL, - } - - // prune any empty values that come from the default string value in structs - for k, v := range clMap { - if v == "" { - delete(clMap, k) - } - } - - csl = append(csl, clMap) - } - - return csl -} - -func flattenVCLs(vclList []*gofastly.VCL) []map[string]interface{} { - var vl []map[string]interface{} - for _, vcl := range vclList { - // Convert VCLs to a map for saving to state. 
- vclMap := map[string]interface{}{ - "name": vcl.Name, - "content": vcl.Content, - "main": vcl.Main, - } - - // prune any empty values that come from the default string value in structs - for k, v := range vclMap { - if v == "" { - delete(vclMap, k) - } - } - - vl = append(vl, vclMap) - } - - return vl -} - -func validateVCLs(d *schema.ResourceData) error { - // TODO: this would be nice to move into a resource/collection validation function, once that is available - // (see https://github.com/hashicorp/terraform/pull/4348 and https://github.com/hashicorp/terraform/pull/6508) - vcls, exists := d.GetOk("vcl") - if !exists { - return nil - } - - numberOfMainVCLs, numberOfIncludeVCLs := 0, 0 - for _, vclElem := range vcls.(*schema.Set).List() { - vcl := vclElem.(map[string]interface{}) - if mainVal, hasMain := vcl["main"]; hasMain && mainVal.(bool) { - numberOfMainVCLs++ - } else { - numberOfIncludeVCLs++ - } - } - if numberOfMainVCLs == 0 && numberOfIncludeVCLs > 0 { - return errors.New("if you include VCL configurations, one of them should have main = true") - } - if numberOfMainVCLs > 1 { - return errors.New("you cannot have more than one VCL configuration with main = true") - } - return nil -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_cache_setting_test.go b/builtin/providers/fastly/resource_fastly_service_v1_cache_setting_test.go deleted file mode 100644 index 68b9dcee2..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_cache_setting_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1CacheSetting_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := 
fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - cq1 := gofastly.CacheSetting{ - Name: "alt_backend", - Action: "pass", - StaleTTL: uint(3600), - CacheCondition: "serve_alt_backend", - } - - cq2 := gofastly.CacheSetting{ - Name: "cache_backend", - Action: "restart", - StaleTTL: uint(1600), - CacheCondition: "cache_alt_backend", - TTL: uint(300), - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1CacheSetting(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1CacheSettingsAttributes(&service, []*gofastly.CacheSetting{&cq1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "cache_setting.#", "1"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "condition.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1CacheSetting_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1CacheSettingsAttributes(&service, []*gofastly.CacheSetting{&cq1, &cq2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "cache_setting.#", "2"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "condition.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1CacheSettingsAttributes(service *gofastly.ServiceDetail, rqs []*gofastly.CacheSetting) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - rqList, err := conn.ListCacheSettings(&gofastly.ListCacheSettingsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return 
fmt.Errorf("[ERR] Error looking up Request Setting for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(rqList) != len(rqs) { - return fmt.Errorf("Request Setting List count mismatch, expected (%d), got (%d)", len(rqs), len(rqList)) - } - - var found int - for _, r := range rqs { - for _, lr := range rqList { - if r.Name == lr.Name { - // we don't know these things ahead of time, so populate them now - r.ServiceID = service.ID - r.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(r, lr) { - return fmt.Errorf("Bad match Request Setting match, expected (%#v), got (%#v)", r, lr) - } - found++ - } - } - } - - if found != len(rqs) { - return fmt.Errorf("Error matching Request Setting rules (%d/%d)", found, len(rqs)) - } - - return nil - } -} - -func testAccServiceV1CacheSetting(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "demo" - } - - backend { - address = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "AWS S3 hosting" - port = 80 - } - - backend { - address = "tftestingother.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "OtherAWSS3hosting" - port = 80 - } - - condition { - name = "serve_alt_backend" - type = "CACHE" - priority = 10 - statement = "req.url ~ \"^/alt/\"" - } - - cache_setting { - name = "alt_backend" - stale_ttl = 3600 - cache_condition = "serve_alt_backend" - action = "pass" - } - - default_host = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1CacheSetting_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "demo" - } - - backend { - address = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "AWS S3 hosting" - port = 80 - } - - backend { - address = 
"tftestingother.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "OtherAWSS3hosting" - port = 80 - } - - condition { - name = "serve_alt_backend" - type = "CACHE" - priority = 10 - statement = "req.url ~ \"^/alt/\"" - } - - condition { - name = "cache_alt_backend" - type = "CACHE" - priority = 20 - statement = "req.url ~ \"^/cache/\"" - } - - cache_setting { - name = "alt_backend" - stale_ttl = 3600 - cache_condition = "serve_alt_backend" - action = "pass" - } - - cache_setting { - name = "cache_backend" - stale_ttl = 1600 - cache_condition = "cache_alt_backend" - action = "restart" - ttl = 300 - } - - default_host = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_conditionals_test.go b/builtin/providers/fastly/resource_fastly_service_v1_conditionals_test.go deleted file mode 100644 index 0599b7a9c..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_conditionals_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_conditional_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - con1 := gofastly.Condition{ - Name: "some amz condition", - Priority: 10, - Type: "REQUEST", - Statement: `req.url ~ "^/yolo/"`, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1ConditionConfig(name, domainName1), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1ConditionalAttributes(&service, name, []*gofastly.Condition{&con1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "condition.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1ConditionalAttributes(service *gofastly.ServiceDetail, name string, conditions []*gofastly.Condition) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - conditionList, err := conn.ListConditions(&gofastly.ListConditionsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Conditions for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(conditionList) != len(conditions) { - return fmt.Errorf("Error: mis match count of conditions, expected (%d), got (%d)", len(conditions), len(conditionList)) - } - - var found int - for _, c := range conditions { - for _, lc := range conditionList { - if c.Name == lc.Name { - // we don't know these things ahead of time, so populate them now - c.ServiceID = service.ID - c.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(c, lc) { - return fmt.Errorf("Bad match Conditions match, expected (%#v), got (%#v)", c, lc) - } - found++ - } - } - } - - if found != len(conditions) { - return fmt.Errorf("Error matching Conditions rules") - } - return nil - } -} - -func testAccServiceV1ConditionConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon 
docs" - } - - header { - destination = "http.x-amz-request-id" - type = "cache" - action = "delete" - name = "remove x-amz-request-id" - } - - condition { - name = "some amz condition" - type = "REQUEST" - - statement = "req.url ~ \"^/yolo/\"" - - priority = 10 - } - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_gcslogging_test.go b/builtin/providers/fastly/resource_fastly_service_v1_gcslogging_test.go deleted file mode 100644 index a16240f30..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_gcslogging_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestResourceFastlyFlattenGCS(t *testing.T) { - cases := []struct { - remote []*gofastly.GCS - local []map[string]interface{} - }{ - { - remote: []*gofastly.GCS{ - &gofastly.GCS{ - Name: "GCS collector", - User: "email@example.com", - Bucket: "bucketName", - SecretKey: "secretKey", - Format: "log format", - Period: 3600, - GzipLevel: 0, - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "GCS collector", - "email": "email@example.com", - "bucket_name": "bucketName", - "secret_key": "secretKey", - "format": "log format", - "period": 3600, - "gzip_level": 0, - }, - }, - }, - } - - for _, c := range cases { - out := flattenGCS(c.remote) - if !reflect.DeepEqual(out, c.local) { - t.Fatalf("Error matching:\nexpected: %#v\ngot: %#v", c.local, out) - } - } -} - -func TestAccFastlyServiceV1_gcslogging(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - gcsName := fmt.Sprintf("gcs %s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - { - Config: testAccServiceV1Config_gcs(name, gcsName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_gcs(&service, name, gcsName), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1Attributes_gcs(service *gofastly.ServiceDetail, name, gcsName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - gcsList, err := conn.ListGCSs(&gofastly.ListGCSsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up GCSs for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(gcsList) != 1 { - return fmt.Errorf("GCS missing, expected: 1, got: %d", len(gcsList)) - } - - if gcsList[0].Name != gcsName { - return fmt.Errorf("GCS name mismatch, expected: %s, got: %#v", gcsName, gcsList[0].Name) - } - - return nil - } -} - -func testAccServiceV1Config_gcs(name, gcsName string) string { - backendName := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "test.notadomain.com" - comment = "tf-testing-domain" - } - - backend { - address = "%s" - name = "tf -test backend" - } - - gcslogging { - name = "%s" - email = "email@example.com", - bucket_name = "bucketName", - secret_key = "secretKey", - format = "log format", - response_condition = "", - } - - force_destroy = true -}`, name, backendName, gcsName) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go b/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go deleted file mode 100644 index 
91560d4e5..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_gzip_test.go +++ /dev/null @@ -1,270 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestFastlyServiceV1_FlattenGzips(t *testing.T) { - cases := []struct { - remote []*gofastly.Gzip - local []map[string]interface{} - }{ - { - remote: []*gofastly.Gzip{ - &gofastly.Gzip{ - Name: "somegzip", - Extensions: "css", - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "somegzip", - "extensions": schema.NewSet(schema.HashString, []interface{}{"css"}), - }, - }, - }, - { - remote: []*gofastly.Gzip{ - &gofastly.Gzip{ - Name: "somegzip", - Extensions: "css json js", - ContentTypes: "text/html", - }, - &gofastly.Gzip{ - Name: "someothergzip", - Extensions: "css js", - ContentTypes: "text/html text/xml", - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "somegzip", - "extensions": schema.NewSet(schema.HashString, []interface{}{"css", "json", "js"}), - "content_types": schema.NewSet(schema.HashString, []interface{}{"text/html"}), - }, - map[string]interface{}{ - "name": "someothergzip", - "extensions": schema.NewSet(schema.HashString, []interface{}{"css", "js"}), - "content_types": schema.NewSet(schema.HashString, []interface{}{"text/html", "text/xml"}), - }, - }, - }, - } - - for _, c := range cases { - out := flattenGzips(c.remote) - // loop, because deepequal wont work with our sets - expectedCount := len(c.local) - var found int - for _, o := range out { - for _, l := range c.local { - if o["name"].(string) == l["name"].(string) { - found++ - if o["extensions"] == nil && l["extensions"] != nil { - t.Fatalf("output extensions are nil, local are not") - } - - if 
o["extensions"] != nil { - oex := o["extensions"].(*schema.Set) - lex := l["extensions"].(*schema.Set) - if !oex.Equal(lex) { - t.Fatalf("Extensions don't match, expected: %#v, got: %#v", lex, oex) - } - } - - if o["content_types"] == nil && l["content_types"] != nil { - t.Fatalf("output content types are nil, local are not") - } - - if o["content_types"] != nil { - oct := o["content_types"].(*schema.Set) - lct := l["content_types"].(*schema.Set) - if !oct.Equal(lct) { - t.Fatalf("ContentTypes don't match, expected: %#v, got: %#v", lct, oct) - } - } - - } - } - } - - if found != expectedCount { - t.Fatalf("Found and expected mismatch: %d / %d", found, expectedCount) - } - } -} - -func TestAccFastlyServiceV1_gzips_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.Gzip{ - Version: 1, - Name: "gzip file types", - Extensions: "js css", - CacheCondition: "testing_condition", - } - - log2 := gofastly.Gzip{ - Version: 1, - Name: "gzip extensions", - ContentTypes: "text/css text/html", - } - - log3 := gofastly.Gzip{ - Version: 1, - Name: "all", - Extensions: "js html css", - ContentTypes: "text/javascript application/x-javascript application/javascript text/css text/html", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1GzipsConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1GzipsAttributes(&service, []*gofastly.Gzip{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "gzip.#", "2"), - ), - }, - - 
resource.TestStep{ - Config: testAccServiceV1GzipsConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1GzipsAttributes(&service, []*gofastly.Gzip{&log3}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "gzip.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1GzipsAttributes(service *gofastly.ServiceDetail, gzips []*gofastly.Gzip) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - gzipsList, err := conn.ListGzips(&gofastly.ListGzipsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Gzips for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(gzipsList) != len(gzips) { - return fmt.Errorf("Gzip count mismatch, expected (%d), got (%d)", len(gzips), len(gzipsList)) - } - - var found int - for _, g := range gzips { - for _, lg := range gzipsList { - if g.Name == lg.Name { - // we don't know these things ahead of time, so populate them now - g.ServiceID = service.ID - g.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(g, lg) { - return fmt.Errorf("Bad match Gzip match, expected (%#v), got (%#v)", g, lg) - } - found++ - } - } - } - - if found != len(gzips) { - return fmt.Errorf("Error matching Gzip rules") - } - - return nil - } -} - -func testAccServiceV1GzipsConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "testing_condition" - type = "CACHE" - priority = 10 - statement = "req.url ~ \"^/articles/\"" - } - - gzip { - name = 
"gzip file types" - extensions = ["css", "js"] - cache_condition = "testing_condition" - } - - gzip { - name = "gzip extensions" - content_types = ["text/html", "text/css"] - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1GzipsConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - gzip { - name = "all" - extensions = ["css", "js", "html"] - - content_types = [ - "text/html", - "text/css", - "application/x-javascript", - "text/css", - "application/javascript", - "text/javascript", - ] - } - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go b/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go deleted file mode 100644 index 8428c3fcb..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestFastlyServiceV1_BuildHeaders(t *testing.T) { - cases := []struct { - remote *gofastly.CreateHeaderInput - local map[string]interface{} - }{ - { - remote: &gofastly.CreateHeaderInput{ - Name: "someheadder", - Action: gofastly.HeaderActionDelete, - IgnoreIfSet: gofastly.CBool(true), - Type: gofastly.HeaderTypeCache, - Destination: "http.aws-id", - Priority: uint(100), - }, - local: map[string]interface{}{ - "name": "someheadder", - "action": "delete", - "ignore_if_set": true, - "destination": "http.aws-id", - "priority": 100, - "source": "", - "regex": "", - "substitution": "", - "request_condition": "", - "cache_condition": "", - 
"response_condition": "", - "type": "cache", - }, - }, - { - remote: &gofastly.CreateHeaderInput{ - Name: "someheadder", - Action: gofastly.HeaderActionSet, - IgnoreIfSet: gofastly.CBool(false), - Type: gofastly.HeaderTypeCache, - Destination: "http.aws-id", - Priority: uint(100), - Source: "http.server-name", - }, - local: map[string]interface{}{ - "name": "someheadder", - "action": "set", - "ignore_if_set": false, - "destination": "http.aws-id", - "priority": 100, - "source": "http.server-name", - "regex": "", - "substitution": "", - "request_condition": "", - "cache_condition": "", - "response_condition": "", - "type": "cache", - }, - }, - } - - for _, c := range cases { - out, _ := buildHeader(c.local) - if !reflect.DeepEqual(out, c.remote) { - t.Fatalf("Error matching:\nexpected: %#v\ngot: %#v", c.remote, out) - } - } -} - -func TestAccFastlyServiceV1_headers_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.Header{ - Version: 1, - Name: "remove x-amz-request-id", - Destination: "http.x-amz-request-id", - Type: "cache", - Action: "delete", - Priority: uint(100), - } - - log2 := gofastly.Header{ - Version: 1, - Name: "remove s3 server", - Destination: "http.Server", - Type: "cache", - Action: "delete", - IgnoreIfSet: true, - Priority: uint(100), - } - - log3 := gofastly.Header{ - Version: 1, - Name: "DESTROY S3", - Destination: "http.Server", - Type: "cache", - Action: "delete", - Priority: uint(100), - } - - log4 := gofastly.Header{ - Version: 1, - Name: "Add server name", - Destination: "http.server-name", - Type: "request", - Action: "set", - Source: "server.identity", - Priority: uint(100), - RequestCondition: "test_req_condition", - CacheCondition: "test_cache_condition", - ResponseCondition: "test_res_condition", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1HeadersConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1HeaderAttributes(&service, []*gofastly.Header{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "header.#", "2"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1HeadersConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1HeaderAttributes(&service, []*gofastly.Header{&log1, &log3, &log4}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "header.#", "3"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1HeaderAttributes(service *gofastly.ServiceDetail, headers []*gofastly.Header) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - headersList, err := conn.ListHeaders(&gofastly.ListHeadersInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Headers for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(headersList) != len(headers) { - return fmt.Errorf("Healthcheck List count mismatch, expected (%d), got (%d)", len(headers), len(headersList)) - } - - var found int - for _, h := range headers { - for _, lh := range headersList { - if h.Name == lh.Name { - // we don't know these things ahead of time, so populate them now - h.ServiceID = service.ID - h.Version = service.ActiveVersion.Number - if 
!reflect.DeepEqual(h, lh) { - return fmt.Errorf("Bad match Header match, expected (%#v), got (%#v)", h, lh) - } - found++ - } - } - } - - if found != len(headers) { - return fmt.Errorf("Error matching Header rules") - } - - return nil - } -} - -func testAccServiceV1HeadersConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - header { - destination = "http.x-amz-request-id" - type = "cache" - action = "delete" - name = "remove x-amz-request-id" - } - - header { - destination = "http.Server" - type = "cache" - action = "delete" - name = "remove s3 server" - ignore_if_set = "true" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1HeadersConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - header { - destination = "http.x-amz-request-id" - type = "cache" - action = "delete" - name = "remove x-amz-request-id" - } - - header { - destination = "http.Server" - type = "cache" - action = "delete" - name = "DESTROY S3" - } - - condition { - name = "test_req_condition" - type = "REQUEST" - priority = 5 - statement = "req.url ~ \"^/foo/bar$\"" - } - - condition { - name = "test_cache_condition" - type = "CACHE" - priority = 9 - statement = "req.url ~ \"^/articles/\"" - } - - condition { - name = "test_res_condition" - type = "RESPONSE" - priority = 10 - statement = "resp.status == 404" - } - - header { - destination = "http.server-name" - type = "request" - action = "set" - source = "server.identity" - name = "Add server name" - request_condition = "test_req_condition" - cache_condition = "test_cache_condition" - response_condition = "test_res_condition" - } - - 
force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go b/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go deleted file mode 100644 index c456aa958..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_healthcheck_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_healthcheck_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.HealthCheck{ - Version: 1, - Name: "example-healthcheck1", - Host: "example1.com", - Path: "/test1.txt", - CheckInterval: 4000, - ExpectedResponse: 200, - HTTPVersion: "1.1", - Initial: 2, - Method: "HEAD", - Threshold: 3, - Timeout: 5000, - Window: 5, - } - - log2 := gofastly.HealthCheck{ - Version: 1, - Name: "example-healthcheck2", - Host: "example2.com", - Path: "/test2.txt", - CheckInterval: 4500, - ExpectedResponse: 404, - HTTPVersion: "1.0", - Initial: 1, - Method: "POST", - Threshold: 4, - Timeout: 4000, - Window: 10, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1HealthCheckConfig(name, domainName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1HealthCheckAttributes(&service, []*gofastly.HealthCheck{&log1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - 
"fastly_service_v1.foo", "healthcheck.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1HealthCheckConfig_update(name, domainName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1HealthCheckAttributes(&service, []*gofastly.HealthCheck{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "healthcheck.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1HealthCheckAttributes(service *gofastly.ServiceDetail, healthchecks []*gofastly.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - healthcheckList, err := conn.ListHealthChecks(&gofastly.ListHealthChecksInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Healthcheck for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(healthcheckList) != len(healthchecks) { - return fmt.Errorf("Healthcheck List count mismatch, expected (%d), got (%d)", len(healthchecks), len(healthcheckList)) - } - - var found int - for _, h := range healthchecks { - for _, lh := range healthcheckList { - if h.Name == lh.Name { - // we don't know these things ahead of time, so populate them now - h.ServiceID = service.ID - h.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(h, lh) { - return fmt.Errorf("Bad match Healthcheck match, expected (%#v), got (%#v)", h, lh) - } - found++ - } - } - } - - if found != len(healthchecks) { - return fmt.Errorf("Error matching Healthcheck rules") - } - - return nil - } -} - -func testAccServiceV1HealthCheckConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" 
- } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - healthcheck { - name = "example-healthcheck1" - host = "example1.com" - path = "/test1.txt" - check_interval = 4000 - expected_response = 200 - http_version = "1.1" - initial = 2 - method = "HEAD" - threshold = 3 - timeout = 5000 - window = 5 - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1HealthCheckConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - healthcheck { - name = "example-healthcheck1" - host = "example1.com" - path = "/test1.txt" - check_interval = 4000 - expected_response = 200 - http_version = "1.1" - initial = 2 - method = "HEAD" - threshold = 3 - timeout = 5000 - window = 5 - } - - healthcheck { - name = "example-healthcheck2" - host = "example2.com" - path = "/test2.txt" - check_interval = 4500 - expected_response = 404 - http_version = "1.0" - initial = 1 - method = "POST" - threshold = 4 - timeout = 4000 - window = 10 - } - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go b/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go deleted file mode 100644 index 2e2fcac3d..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_papertrail_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_papertrail_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", 
acctest.RandString(10)) - - log1 := gofastly.Papertrail{ - Version: 1, - Name: "papertrailtesting", - Address: "test1.papertrailapp.com", - Port: uint(3600), - Format: "%h %l %u %t %r %>s", - ResponseCondition: "test_response_condition", - } - - log2 := gofastly.Papertrail{ - Version: 1, - Name: "papertrailtesting2", - Address: "test2.papertrailapp.com", - Port: uint(8080), - Format: "%h %l %u %t %r %>s", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1PapertrailConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1PapertrailAttributes(&service, []*gofastly.Papertrail{&log1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "papertrail.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1PapertrailConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1PapertrailAttributes(&service, []*gofastly.Papertrail{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "papertrail.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1PapertrailAttributes(service *gofastly.ServiceDetail, papertrails []*gofastly.Papertrail) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - papertrailList, err := conn.ListPapertrails(&gofastly.ListPapertrailsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up 
Papertrail for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(papertrailList) != len(papertrails) { - return fmt.Errorf("Papertrail List count mismatch, expected (%d), got (%d)", len(papertrails), len(papertrailList)) - } - - var found int - for _, p := range papertrails { - for _, lp := range papertrailList { - if p.Name == lp.Name { - // we don't know these things ahead of time, so populate them now - p.ServiceID = service.ID - p.Version = service.ActiveVersion.Number - // We don't track these, so clear them out because we also wont know - // these ahead of time - lp.CreatedAt = nil - lp.UpdatedAt = nil - if !reflect.DeepEqual(p, lp) { - return fmt.Errorf("Bad match Papertrail match, expected (%#v), got (%#v)", p, lp) - } - found++ - } - } - } - - if found != len(papertrails) { - return fmt.Errorf("Error matching Papertrail rules") - } - - return nil - } -} - -func testAccServiceV1PapertrailConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "test_response_condition" - type = "RESPONSE" - priority = 5 - statement = "resp.status >= 400 && resp.status < 600" - } - - papertrail { - name = "papertrailtesting" - address = "test1.papertrailapp.com" - port = 3600 - response_condition = "test_response_condition" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1PapertrailConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "test_response_condition" - type = "RESPONSE" - priority = 5 - statement = "resp.status >= 400 && resp.status < 600" - } - - papertrail { - name = 
"papertrailtesting" - address = "test1.papertrailapp.com" - port = 3600 - response_condition = "test_response_condition" - } - - papertrail { - name = "papertrailtesting2" - address = "test2.papertrailapp.com" - port = 8080 - } - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_request_setting_test.go b/builtin/providers/fastly/resource_fastly_service_v1_request_setting_test.go deleted file mode 100644 index 72d1c3e81..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_request_setting_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1RequestSetting_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - rq1 := gofastly.RequestSetting{ - Name: "alt_backend", - RequestCondition: "serve_alt_backend", - DefaultHost: "tftestingother.tftesting.net.s3-website-us-west-2.amazonaws.com", - XForwardedFor: "append", - MaxStaleAge: uint(90), - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1RequestSetting(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1RequestSettingsAttributes(&service, []*gofastly.RequestSetting{&rq1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "request_setting.#", "1"), - 
resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "condition.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1RequestSettingsAttributes(service *gofastly.ServiceDetail, rqs []*gofastly.RequestSetting) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - rqList, err := conn.ListRequestSettings(&gofastly.ListRequestSettingsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Request Setting for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(rqList) != len(rqs) { - return fmt.Errorf("Request Setting List count mismatch, expected (%d), got (%d)", len(rqs), len(rqList)) - } - - var found int - for _, r := range rqs { - for _, lr := range rqList { - if r.Name == lr.Name { - // we don't know these things ahead of time, so populate them now - r.ServiceID = service.ID - r.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(r, lr) { - return fmt.Errorf("Bad match Request Setting match, expected (%#v), got (%#v)", r, lr) - } - found++ - } - } - } - - if found != len(rqs) { - return fmt.Errorf("Error matching Request Setting rules (%d/%d)", found, len(rqs)) - } - - return nil - } -} - -func testAccServiceV1RequestSetting(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "demo" - } - - backend { - address = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "AWS S3 hosting" - port = 80 - } - - backend { - address = "tftestingother.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "OtherAWSS3hosting" - port = 80 - } - - condition { - name = "serve_alt_backend" - type = "REQUEST" - priority = 10 - statement = "req.url ~ \"^/alt/\"" - } - - request_setting { - default_host = 
"tftestingother.tftesting.net.s3-website-us-west-2.amazonaws.com" - name = "alt_backend" - request_condition = "serve_alt_backend" - max_stale_age = 90 - } - - default_host = "tftesting.tftesting.net.s3-website-us-west-2.amazonaws.com" - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go b/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go deleted file mode 100644 index e4a2e02b2..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_response_object_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_response_object_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.ResponseObject{ - Version: 1, - Name: "responseObjecttesting", - Status: 200, - Response: "OK", - Content: "test content", - ContentType: "text/html", - RequestCondition: "test-request-condition", - CacheCondition: "test-cache-condition", - } - - log2 := gofastly.ResponseObject{ - Version: 1, - Name: "responseObjecttesting2", - Status: 404, - Response: "Not Found", - Content: "some, other, content", - ContentType: "text/csv", - RequestCondition: "another-test-request-condition", - CacheCondition: "another-test-cache-condition", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1ResponseObjectConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1ResponseObjectAttributes(&service, []*gofastly.ResponseObject{&log1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "response_object.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1ResponseObjectConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1ResponseObjectAttributes(&service, []*gofastly.ResponseObject{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "response_object.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1ResponseObjectAttributes(service *gofastly.ServiceDetail, responseObjects []*gofastly.ResponseObject) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - responseObjectList, err := conn.ListResponseObjects(&gofastly.ListResponseObjectsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Response Object for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(responseObjectList) != len(responseObjects) { - return fmt.Errorf("Response Object List count mismatch, expected (%d), got (%d)", len(responseObjects), len(responseObjectList)) - } - - var found int - for _, p := range responseObjects { - for _, lp := range responseObjectList { - if p.Name == lp.Name { - // we don't know these things ahead of time, so populate them now - p.ServiceID = service.ID - p.Version = service.ActiveVersion.Number - if !reflect.DeepEqual(p, lp) { - return fmt.Errorf("Bad match Response Object match, expected (%#v), got (%#v)", p, lp) - } - 
found++ - } - } - } - - if found != len(responseObjects) { - return fmt.Errorf("Error matching Response Object rules") - } - - return nil - } -} - -func testAccServiceV1ResponseObjectConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "test-request-condition" - type = "REQUEST" - priority = 5 - statement = "req.url ~ \"^/foo/bar$\"" - } - - condition { - name = "test-cache-condition" - type = "CACHE" - priority = 9 - statement = "req.url ~ \"^/articles/\"" - } - - response_object { - name = "responseObjecttesting" - status = 200 - response = "OK" - content = "test content" - content_type = "text/html" - request_condition = "test-request-condition" - cache_condition = "test-cache-condition" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1ResponseObjectConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "test-cache-condition" - type = "CACHE" - priority = 9 - statement = "req.url ~ \"^/articles/\"" - } - - condition { - name = "another-test-cache-condition" - type = "CACHE" - priority = 7 - statement = "req.url ~ \"^/stories/\"" - } - - condition { - name = "test-request-condition" - type = "REQUEST" - priority = 5 - statement = "req.url ~ \"^/foo/bar$\"" - } - - condition { - name = "another-test-request-condition" - type = "REQUEST" - priority = 10 - statement = "req.url ~ \"^/articles$\"" - } - - response_object { - name = "responseObjecttesting" - status = 200 - response = "OK" - content = "test content" - content_type = "text/html" - request_condition = "test-request-condition" - cache_condition = 
"test-cache-condition" - } - - response_object { - name = "responseObjecttesting2" - status = 404 - response = "Not Found" - content = "some, other, content" - content_type = "text/csv" - request_condition = "another-test-request-condition" - cache_condition = "another-test-cache-condition" - } - - force_destroy = true -}`, name, domain) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go b/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go deleted file mode 100644 index 651eceac3..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_s3logging_test.go +++ /dev/null @@ -1,375 +0,0 @@ -package fastly - -import ( - "fmt" - "os" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_s3logging_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.S3{ - Version: 1, - Name: "somebucketlog", - BucketName: "fastlytestlogging", - Domain: "s3-us-west-2.amazonaws.com", - AccessKey: "somekey", - SecretKey: "somesecret", - Period: uint(3600), - GzipLevel: uint(0), - Format: "%h %l %u %t %r %>s", - FormatVersion: 1, - TimestampFormat: "%Y-%m-%dT%H:%M:%S.000", - ResponseCondition: "response_condition_test", - } - - log2 := gofastly.S3{ - Version: 1, - Name: "someotherbucketlog", - BucketName: "fastlytestlogging2", - Domain: "s3-us-west-2.amazonaws.com", - AccessKey: "someotherkey", - SecretKey: "someothersecret", - GzipLevel: uint(3), - Period: uint(60), - Format: "%h %l %u %t %r %>s", - FormatVersion: 1, - TimestampFormat: "%Y-%m-%dT%H:%M:%S.000", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - { - Config: testAccServiceV1S3LoggingConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "s3logging.#", "1"), - ), - }, - - { - Config: testAccServiceV1S3LoggingConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log1, &log2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "s3logging.#", "2"), - ), - }, - }, - }) -} - -// Tests that s3_access_key and s3_secret_key are read from the env -func TestAccFastlyServiceV1_s3logging_s3_env(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - // set env Vars to something we expect - resetEnv := setEnv("someEnv", t) - defer resetEnv() - - log3 := gofastly.S3{ - Version: 1, - Name: "somebucketlog", - BucketName: "fastlytestlogging", - Domain: "s3-us-west-2.amazonaws.com", - AccessKey: "someEnv", - SecretKey: "someEnv", - Period: uint(3600), - GzipLevel: uint(0), - Format: "%h %l %u %t %r %>s", - FormatVersion: 1, - TimestampFormat: "%Y-%m-%dT%H:%M:%S.000", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - { - Config: testAccServiceV1S3LoggingConfig_env(name, domainName1), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log3}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "s3logging.#", "1"), - ), - }, - }, - }) -} - -func TestAccFastlyServiceV1_s3logging_formatVersion(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - log1 := gofastly.S3{ - Version: 1, - Name: "somebucketlog", - BucketName: "fastlytestlogging", - Domain: "s3-us-west-2.amazonaws.com", - AccessKey: "somekey", - SecretKey: "somesecret", - Period: uint(3600), - GzipLevel: uint(0), - Format: "%a %l %u %t %m %U%q %H %>s %b %T", - FormatVersion: 2, - TimestampFormat: "%Y-%m-%dT%H:%M:%S.000", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - { - Config: testAccServiceV1S3LoggingConfig_formatVersion(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "s3logging.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1S3LoggingAttributes(service *gofastly.ServiceDetail, s3s []*gofastly.S3) resource.TestCheckFunc { - return func(s *terraform.State) error { - - conn := testAccProvider.Meta().(*FastlyClient).conn - s3List, err := conn.ListS3s(&gofastly.ListS3sInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), 
version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(s3List) != len(s3s) { - return fmt.Errorf("S3 List count mismatch, expected (%d), got (%d)", len(s3s), len(s3List)) - } - - var found int - for _, s := range s3s { - for _, ls := range s3List { - if s.Name == ls.Name { - // we don't know these things ahead of time, so populate them now - s.ServiceID = service.ID - s.Version = service.ActiveVersion.Number - // We don't track these, so clear them out because we also wont know - // these ahead of time - ls.CreatedAt = nil - ls.UpdatedAt = nil - if !reflect.DeepEqual(s, ls) { - return fmt.Errorf("Bad match S3 logging match, expected (%#v), got (%#v)", s, ls) - } - found++ - } - } - } - - if found != len(s3s) { - return fmt.Errorf("Error matching S3 Logging rules") - } - - return nil - } -} - -func testAccServiceV1S3LoggingConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "response_condition_test" - type = "RESPONSE" - priority = 8 - statement = "resp.status == 418" - } - - s3logging { - name = "somebucketlog" - bucket_name = "fastlytestlogging" - domain = "s3-us-west-2.amazonaws.com" - s3_access_key = "somekey" - s3_secret_key = "somesecret" - response_condition = "response_condition_test" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1S3LoggingConfig_update(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - condition { - name = "response_condition_test" - type = "RESPONSE" - priority = 8 - statement = "resp.status == 418" - } - - s3logging { - name = "somebucketlog" - bucket_name = "fastlytestlogging" - 
domain = "s3-us-west-2.amazonaws.com" - s3_access_key = "somekey" - s3_secret_key = "somesecret" - response_condition = "response_condition_test" - } - - s3logging { - name = "someotherbucketlog" - bucket_name = "fastlytestlogging2" - domain = "s3-us-west-2.amazonaws.com" - s3_access_key = "someotherkey" - s3_secret_key = "someothersecret" - period = 60 - gzip_level = 3 - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1S3LoggingConfig_env(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - s3logging { - name = "somebucketlog" - bucket_name = "fastlytestlogging" - domain = "s3-us-west-2.amazonaws.com" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1S3LoggingConfig_formatVersion(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - s3logging { - name = "somebucketlog" - bucket_name = "fastlytestlogging" - domain = "s3-us-west-2.amazonaws.com" - s3_access_key = "somekey" - s3_secret_key = "somesecret" - format = "%%a %%l %%u %%t %%m %%U%%q %%H %%>s %%b %%T" - format_version = 2 - } - - force_destroy = true -}`, name, domain) -} - -func setEnv(s string, t *testing.T) func() { - e := getEnv() - // Set all the envs to a dummy value - if err := os.Setenv("FASTLY_S3_ACCESS_KEY", s); err != nil { - t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Setenv("FASTLY_S3_SECRET_KEY", s); err != nil { - t.Fatalf("Error setting env var FASTLY_S3_SECRET_KEY: %s", err) - } - - return func() { - // re-set all the envs we unset above - if err := os.Setenv("FASTLY_S3_ACCESS_KEY", e.Key); err != nil { - t.Fatalf("Error resetting env var 
AWS_ACCESS_KEY_ID: %s", err) - } - if err := os.Setenv("FASTLY_S3_SECRET_KEY", e.Secret); err != nil { - t.Fatalf("Error resetting env var FASTLY_S3_SECRET_KEY: %s", err) - } - } -} - -// struct to preserve the current environment -type currentEnv struct { - Key, Secret string -} - -func getEnv() *currentEnv { - // Grab any existing Fastly AWS S3 keys and preserve, in the off chance - // they're actually set in the enviornment - return ¤tEnv{ - Key: os.Getenv("FASTLY_S3_ACCESS_KEY"), - Secret: os.Getenv("FASTLY_S3_SECRET_KEY"), - } -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_sumologic_test.go b/builtin/providers/fastly/resource_fastly_service_v1_sumologic_test.go deleted file mode 100644 index ca7aabae9..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_sumologic_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestResourceFastlyFlattenSumologic(t *testing.T) { - cases := []struct { - remote []*gofastly.Sumologic - local []map[string]interface{} - }{ - { - remote: []*gofastly.Sumologic{ - &gofastly.Sumologic{ - Name: "sumo collector", - URL: "https://sumologic.com/collector/1", - Format: "log format", - FormatVersion: 2, - MessageType: "classic", - ResponseCondition: "condition 1", - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "sumo collector", - "url": "https://sumologic.com/collector/1", - "format": "log format", - "format_version": 2, - "message_type": "classic", - "response_condition": "condition 1", - }, - }, - }, - } - - for _, c := range cases { - out := flattenSumologics(c.remote) - if !reflect.DeepEqual(out, c.local) { - t.Fatalf("Error matching:\nexpected: %#v\ngot: %#v", c.local, out) - } - } -} - -func 
TestAccFastlyServiceV1_sumologic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - sumologicName := fmt.Sprintf("sumologic %s", acctest.RandString(3)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config_sumologic(name, sumologicName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_sumologic(&service, name, sumologicName), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1Attributes_sumologic(service *gofastly.ServiceDetail, name, sumologic string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - sumologicList, err := conn.ListSumologics(&gofastly.ListSumologicsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Sumologics for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(sumologicList) != 1 { - return fmt.Errorf("Sumologic missing, expected: 1, got: %d", len(sumologicList)) - } - - if sumologicList[0].Name != sumologic { - return fmt.Errorf("Sumologic name mismatch, expected: %s, got: %#v", sumologic, sumologicList[0].Name) - } - - return nil - } -} - -func testAccServiceV1Config_sumologic(name, sumologic string) string { - backendName := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "test.notadomain.com" - comment = "tf-testing-domain" - } - - backend { - address = "%s" - name = "tf -test 
backend" - } - - sumologic { - name = "%s" - url = "https://sumologic.com/collector/1" - format_version = 2 - } - - force_destroy = true -}`, name, backendName, sumologic) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_test.go b/builtin/providers/fastly/resource_fastly_service_v1_test.go deleted file mode 100644 index 90ba12cad..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_test.go +++ /dev/null @@ -1,518 +0,0 @@ -package fastly - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestResourceFastlyFlattenDomains(t *testing.T) { - cases := []struct { - remote []*gofastly.Domain - local []map[string]interface{} - }{ - { - remote: []*gofastly.Domain{ - &gofastly.Domain{ - Name: "test.notexample.com", - Comment: "not comment", - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "test.notexample.com", - "comment": "not comment", - }, - }, - }, - { - remote: []*gofastly.Domain{ - &gofastly.Domain{ - Name: "test.notexample.com", - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "test.notexample.com", - "comment": "", - }, - }, - }, - } - - for _, c := range cases { - out := flattenDomains(c.remote) - if !reflect.DeepEqual(out, c.local) { - t.Fatalf("Error matching:\nexpected: %#v\ngot: %#v", c.local, out) - } - } -} - -func TestResourceFastlyFlattenBackend(t *testing.T) { - cases := []struct { - remote []*gofastly.Backend - local []map[string]interface{} - }{ - { - remote: []*gofastly.Backend{ - &gofastly.Backend{ - Name: "test.notexample.com", - Address: "www.notexample.com", - Port: uint(80), - AutoLoadbalance: true, - BetweenBytesTimeout: uint(10000), - ConnectTimeout: uint(1000), - ErrorThreshold: uint(0), - FirstByteTimeout: uint(15000), - MaxConn: uint(200), - 
RequestCondition: "", - HealthCheck: "", - SSLCheckCert: true, - SSLHostname: "", - SSLCertHostname: "", - SSLSNIHostname: "", - Shield: "New York", - Weight: uint(100), - }, - }, - local: []map[string]interface{}{ - map[string]interface{}{ - "name": "test.notexample.com", - "address": "www.notexample.com", - "port": 80, - "auto_loadbalance": true, - "between_bytes_timeout": 10000, - "connect_timeout": 1000, - "error_threshold": 0, - "first_byte_timeout": 15000, - "max_conn": 200, - "request_condition": "", - "healthcheck": "", - "ssl_check_cert": true, - "ssl_hostname": "", - "ssl_cert_hostname": "", - "ssl_sni_hostname": "", - "shield": "New York", - "weight": 100, - }, - }, - }, - } - - for _, c := range cases { - out := flattenBackends(c.remote) - if !reflect.DeepEqual(out, c.local) { - t.Fatalf("Error matching:\nexpected: %#v\ngot: %#v", c.local, out) - } - } -} - -func TestAccFastlyServiceV1_updateDomain(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - nameUpdate := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - domainName2 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes(&service, name, []string{domainName1}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "active_version", "1"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "domain.#", "1"), - ), - }, - - resource.TestStep{ - Config: 
testAccServiceV1Config_domainUpdate(nameUpdate, domainName1, domainName2), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes(&service, nameUpdate, []string{domainName1, domainName2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", nameUpdate), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "active_version", "2"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "domain.#", "2"), - ), - }, - }, - }) -} - -func TestAccFastlyServiceV1_updateBackend(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domain := fmt.Sprintf("tf-acc-test-%s.com", acctest.RandString(10)) - backendName := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - backendName2 := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config_backend(name, domain, backendName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName}), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1Config_backend_update(name, domain, backendName, backendName2, 3400), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "active_version", "2"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "backend.#", "2"), - ), - }, - }, - }) -} - -func TestAccFastlyServiceV1_basic(t *testing.T) { - var service 
gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName := fmt.Sprintf("tf-acc-test-%s.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config(name, domainName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes(&service, name, []string{domainName}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "active_version", "1"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "domain.#", "1"), - ), - }, - }, - }) -} - -// ServiceV1_disappears – test that a non-empty plan is returned when a Fastly -// Service is destroyed outside of Terraform, and can no longer be found, -// correctly clearing the ID field and generating a new plan -func TestAccFastlyServiceV1_disappears(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName := fmt.Sprintf("tf-acc-test-%s.com", acctest.RandString(10)) - - testDestroy := func(*terraform.State) error { - // reach out and DELETE the service - conn := testAccProvider.Meta().(*FastlyClient).conn - // deactivate active version to destoy - _, err := conn.DeactivateVersion(&gofastly.DeactivateVersionInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - if err != nil { - return err - } - - // delete service - err = conn.DeleteService(&gofastly.DeleteServiceInput{ - ID: service.ID, - }) - - if err != nil { - return err - } - - return nil - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config(name, domainName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testDestroy, - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckServiceV1Exists(n string, service *gofastly.ServiceDetail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Service ID is set") - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - latest, err := conn.GetServiceDetails(&gofastly.GetServiceInput{ - ID: rs.Primary.ID, - }) - - if err != nil { - return err - } - - *service = *latest - - return nil - } -} - -func testAccCheckFastlyServiceV1Attributes(service *gofastly.ServiceDetail, name string, domains []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - domainList, err := conn.ListDomains(&gofastly.ListDomainsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Domains for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - expected := len(domains) - for _, d := range domainList { - for _, e := range domains { - if d.Name == e { - expected-- - } - } - } - - if expected > 0 { - return fmt.Errorf("Domain count mismatch, expected: %#v, got: %#v", domains, domainList) - } - - return nil - } -} - -func testAccCheckFastlyServiceV1Attributes_backends(service *gofastly.ServiceDetail, name string, backends []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", 
name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - backendList, err := conn.ListBackends(&gofastly.ListBackendsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up Backends for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - expected := len(backendList) - for _, b := range backendList { - for _, e := range backends { - if b.Address == e { - expected-- - } - } - } - - if expected > 0 { - return fmt.Errorf("Backend count mismatch, expected: %#v, got: %#v", backends, backendList) - } - - return nil - } -} - -func TestAccFastlyServiceV1_defaultTTL(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domain := fmt.Sprintf("terraform-acc-test-%s.com", acctest.RandString(10)) - backendName := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - backendName2 := fmt.Sprintf("%s.aws.amazon.com", acctest.RandString(3)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1Config_backend(name, domain, backendName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName}), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1Config_backend_update(name, domain, backendName, backendName2, 3400), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "default_ttl", "3400"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", 
"active_version", "2"), - ), - }, - // Now update the default_ttl to 0 and encounter the issue https://github.com/hashicorp/terraform/issues/12910 - resource.TestStep{ - Config: testAccServiceV1Config_backend_update(name, domain, backendName, backendName2, 0), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1Attributes_backends(&service, name, []string{backendName, backendName2}), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "default_ttl", "0"), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "active_version", "3"), - ), - }, - }, - }) -} - -func testAccCheckServiceV1Destroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "fastly_service_v1" { - continue - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - l, err := conn.ListServices(&gofastly.ListServicesInput{}) - if err != nil { - return fmt.Errorf("[WARN] Error listing servcies when deleting Fastly Service (%s): %s", rs.Primary.ID, err) - } - - for _, s := range l { - if s.ID == rs.Primary.ID { - // service still found - return fmt.Errorf("[WARN] Tried deleting Service (%s), but was still found", rs.Primary.ID) - } - } - } - return nil -} - -func testAccServiceV1Config(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - force_destroy = true -}`, name, domain) -} - -func testAccServiceV1Config_domainUpdate(name, domain1, domain2 string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - domain { - name = "%s" - comment = "tf-testing-other-domain" - } - - backend { - address = "aws.amazon.com" - name = "amazon docs" - } - - force_destroy = true -}`, name, domain1, 
domain2) -} - -func testAccServiceV1Config_backend(name, domain, backend string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "%s" - name = "tf -test backend" - } - - force_destroy = true -}`, name, domain, backend) -} - -func testAccServiceV1Config_backend_update(name, domain, backend, backend2 string, ttl uint) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - default_ttl = %d - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - backend { - address = "%s" - name = "tf-test-backend" - } - - backend { - address = "%s" - name = "tf-test-backend-other" - } - - force_destroy = true -}`, name, ttl, domain, backend, backend2) -} diff --git a/builtin/providers/fastly/resource_fastly_service_v1_vcl_test.go b/builtin/providers/fastly/resource_fastly_service_v1_vcl_test.go deleted file mode 100644 index 621e6d1da..000000000 --- a/builtin/providers/fastly/resource_fastly_service_v1_vcl_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package fastly - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - gofastly "github.com/sethvargo/go-fastly" -) - -func TestAccFastlyServiceV1_VCL_basic(t *testing.T) { - var service gofastly.ServiceDetail - name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckServiceV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccServiceV1VCLConfig(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - 
testAccCheckFastlyServiceV1VCLAttributes(&service, name, 1), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "vcl.#", "1"), - ), - }, - - resource.TestStep{ - Config: testAccServiceV1VCLConfig_update(name, domainName1), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceV1Exists("fastly_service_v1.foo", &service), - testAccCheckFastlyServiceV1VCLAttributes(&service, name, 2), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "name", name), - resource.TestCheckResourceAttr( - "fastly_service_v1.foo", "vcl.#", "2"), - ), - }, - }, - }) -} - -func testAccCheckFastlyServiceV1VCLAttributes(service *gofastly.ServiceDetail, name string, vclCount int) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if service.Name != name { - return fmt.Errorf("Bad name, expected (%s), got (%s)", name, service.Name) - } - - conn := testAccProvider.Meta().(*FastlyClient).conn - vclList, err := conn.ListVCLs(&gofastly.ListVCLsInput{ - Service: service.ID, - Version: service.ActiveVersion.Number, - }) - - if err != nil { - return fmt.Errorf("[ERR] Error looking up VCL for (%s), version (%v): %s", service.Name, service.ActiveVersion.Number, err) - } - - if len(vclList) != vclCount { - return fmt.Errorf("VCL count mismatch, expected (%d), got (%d)", vclCount, len(vclList)) - } - - return nil - } -} - -func testAccServiceV1VCLConfig(name, domain string) string { - return fmt.Sprintf(` -resource "fastly_service_v1" "foo" { - name = "%s" - - domain { - name = "%s" - comment = "tf-testing-domain" - } - - vcl { - name = "my_custom_main_vcl" - content = < 1 { - return nil, errors.New("cannot specify required_status_checks more than one time") - } - - for _, v := range vL { - m := v.(map[string]interface{}) - - rsc := new(github.RequiredStatusChecks) - rsc.IncludeAdmins = m["include_admins"].(bool) - rsc.Strict = m["strict"].(bool) - - rsc.Contexts = []string{} - if 
contexts, ok := m["contexts"].([]interface{}); ok { - for _, c := range contexts { - rsc.Contexts = append(rsc.Contexts, c.(string)) - } - } - - protectionRequest.RequiredStatusChecks = rsc - } - } - - if v, ok := d.GetOk("required_pull_request_reviews"); ok { - vL := v.([]interface{}) - if len(vL) > 1 { - return nil, errors.New("cannot specify required_pull_request_reviews more than one time") - } - - for _, v := range vL { - m := v.(map[string]interface{}) - - rprr := new(github.RequiredPullRequestReviews) - rprr.IncludeAdmins = m["include_admins"].(bool) - - protectionRequest.RequiredPullRequestReviews = rprr - } - } - - if v, ok := d.GetOk("restrictions"); ok { - vL := v.([]interface{}) - if len(vL) > 1 { - return nil, errors.New("cannot specify restrictions more than one time") - } - - for _, v := range vL { - m := v.(map[string]interface{}) - - restrictions := new(github.BranchRestrictionsRequest) - - restrictions.Users = []string{} - if users, ok := m["users"].([]interface{}); ok { - for _, u := range users { - restrictions.Users = append(restrictions.Users, u.(string)) - } - } - - restrictions.Teams = []string{} - if teams, ok := m["teams"].([]interface{}); ok { - for _, t := range teams { - restrictions.Teams = append(restrictions.Teams, t.(string)) - } - } - - protectionRequest.Restrictions = restrictions - } - } - - return protectionRequest, nil -} diff --git a/builtin/providers/github/resource_github_branch_protection_test.go b/builtin/providers/github/resource_github_branch_protection_test.go deleted file mode 100644 index 89f4a7bff..000000000 --- a/builtin/providers/github/resource_github_branch_protection_test.go +++ /dev/null @@ -1,242 +0,0 @@ -package github - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubBranchProtection_basic(t 
*testing.T) { - var protection github.Protection - - rString := acctest.RandString(5) - repoName := fmt.Sprintf("tf-acc-test-branch-prot-%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGithubBranchProtectionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubBranchProtectionConfig(repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubProtectedBranchExists("github_branch_protection.master", repoName+":master", &protection), - testAccCheckGithubBranchProtectionRequiredStatusChecks(&protection, true, true, []string{"github/foo"}), - testAccCheckGithubBranchProtectionRestrictions(&protection, []string{testUser}, []string{}), - resource.TestCheckResourceAttr("github_branch_protection.master", "repository", repoName), - resource.TestCheckResourceAttr("github_branch_protection.master", "branch", "master"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.include_admins", "true"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.strict", "true"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.contexts.#", "1"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.contexts.0", "github/foo"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_pull_request_reviews.0.include_admins", "true"), - resource.TestCheckResourceAttr("github_branch_protection.master", "restrictions.0.users.#", "1"), - resource.TestCheckResourceAttr("github_branch_protection.master", "restrictions.0.users.0", testUser), - resource.TestCheckResourceAttr("github_branch_protection.master", "restrictions.0.teams.#", "0"), - ), - }, - { - Config: testAccGithubBranchProtectionUpdateConfig(repoName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckGithubProtectedBranchExists("github_branch_protection.master", repoName+":master", &protection), - testAccCheckGithubBranchProtectionRequiredStatusChecks(&protection, false, false, []string{"github/bar"}), - testAccCheckGithubBranchProtectionNoRestrictionsExist(&protection), - resource.TestCheckResourceAttr("github_branch_protection.master", "repository", repoName), - resource.TestCheckResourceAttr("github_branch_protection.master", "branch", "master"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.include_admins", "false"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.strict", "false"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.contexts.#", "1"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_status_checks.0.contexts.0", "github/bar"), - resource.TestCheckResourceAttr("github_branch_protection.master", "required_pull_request_reviews.#", "0"), - resource.TestCheckResourceAttr("github_branch_protection.master", "restrictions.#", "0"), - ), - }, - }, - }) -} - -func TestAccGithubBranchProtection_importBasic(t *testing.T) { - rString := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGithubBranchProtectionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubBranchProtectionConfig(rString), - }, - { - ResourceName: "github_branch_protection.master", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubProtectedBranchExists(n, id string, protection *github.Protection) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID != id { - return fmt.Errorf("Expected ID to be %v, got %v", id, 
rs.Primary.ID) - } - - conn := testAccProvider.Meta().(*Organization).client - o := testAccProvider.Meta().(*Organization).name - r, b := parseTwoPartID(rs.Primary.ID) - - githubProtection, _, err := conn.Repositories.GetBranchProtection(context.TODO(), o, r, b) - if err != nil { - return err - } - - *protection = *githubProtection - return nil - } -} - -func testAccCheckGithubBranchProtectionRequiredStatusChecks(protection *github.Protection, expectedIncludeAdmins bool, expectedStrict bool, expectedContexts []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rsc := protection.RequiredStatusChecks - if rsc == nil { - return fmt.Errorf("Expected RequiredStatusChecks to be present, but was nil") - } - - if rsc.IncludeAdmins != expectedIncludeAdmins { - return fmt.Errorf("Expected RequiredStatusChecks.IncludeAdmins to be %v, got %v", expectedIncludeAdmins, rsc.IncludeAdmins) - } - if rsc.Strict != expectedStrict { - return fmt.Errorf("Expected RequiredStatusChecks.Strict to be %v, got %v", expectedStrict, rsc.Strict) - } - - if !reflect.DeepEqual(rsc.Contexts, expectedContexts) { - return fmt.Errorf("Expected RequiredStatusChecks.Contexts to be %v, got %v", expectedContexts, rsc.Contexts) - } - - return nil - } -} - -func testAccCheckGithubBranchProtectionRestrictions(protection *github.Protection, expectedUserLogins []string, expectedTeamNames []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - restrictions := protection.Restrictions - if restrictions == nil { - return fmt.Errorf("Expected Restrictions to be present, but was nil") - } - - userLogins := []string{} - for _, u := range restrictions.Users { - userLogins = append(userLogins, *u.Login) - } - if !reflect.DeepEqual(userLogins, expectedUserLogins) { - return fmt.Errorf("Expected Restrictions.Users to be %v, got %v", expectedUserLogins, userLogins) - } - - teamLogins := []string{} - for _, t := range restrictions.Teams { - teamLogins = append(teamLogins, 
*t.Name) - } - if !reflect.DeepEqual(teamLogins, expectedTeamNames) { - return fmt.Errorf("Expected Restrictions.Teams to be %v, got %v", expectedTeamNames, teamLogins) - } - - return nil - } -} - -func testAccCheckGithubBranchProtectionNoRestrictionsExist(protection *github.Protection) resource.TestCheckFunc { - return func(s *terraform.State) error { - if protection.Restrictions != nil { - return fmt.Errorf("Expected Restrictions to be nil, but was %v", protection.Restrictions) - } - - return nil - - } -} - -func testAccGithubBranchProtectionDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_branch_protection" { - continue - } - - o := testAccProvider.Meta().(*Organization).name - r, b := parseTwoPartID(rs.Primary.ID) - protection, res, err := conn.Repositories.GetBranchProtection(context.TODO(), o, r, b) - - if err == nil { - if protection != nil { - return fmt.Errorf("Branch protection still exists") - } - } - if res.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGithubBranchProtectionConfig(repoName string) string { - return fmt.Sprintf(` -resource "github_repository" "test" { - name = "%s" - description = "Terraform Acceptance Test %s" - auto_init = true -} - -resource "github_branch_protection" "master" { - repository = "${github_repository.test.name}" - branch = "master" - - required_status_checks = { - include_admins = true - strict = true - contexts = ["github/foo"] - } - - required_pull_request_reviews { - include_admins = true - } - - restrictions { - users = ["%s"] - } -} -`, repoName, repoName, testUser) -} - -func testAccGithubBranchProtectionUpdateConfig(repoName string) string { - return fmt.Sprintf(` -resource "github_repository" "test" { - name = "%s" - description = "Terraform Acceptance Test %s" - auto_init = true -} - -resource "github_branch_protection" "master" { - repository = 
"${github_repository.test.name}" - branch = "master" - - required_status_checks = { - include_admins = false - strict = false - contexts = ["github/bar"] - } -} -`, repoName, repoName) -} diff --git a/builtin/providers/github/resource_github_issue_label.go b/builtin/providers/github/resource_github_issue_label.go deleted file mode 100644 index 5b92de648..000000000 --- a/builtin/providers/github/resource_github_issue_label.go +++ /dev/null @@ -1,127 +0,0 @@ -package github - -import ( - "context" - "log" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubIssueLabel() *schema.Resource { - return &schema.Resource{ - Create: resourceGithubIssueLabelCreateOrUpdate, - Read: resourceGithubIssueLabelRead, - Update: resourceGithubIssueLabelCreateOrUpdate, - Delete: resourceGithubIssueLabelDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "color": { - Type: schema.TypeString, - Required: true, - }, - "url": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// resourceGithubIssueLabelCreateOrUpdate idempotently creates or updates an -// issue label. Issue labels are keyed off of their "name", so pre-existing -// issue labels result in a 422 HTTP error if they exist outside of Terraform. -// Normally this would not be an issue, except new repositories are created with -// a "default" set of labels, and those labels easily conflict with custom ones. -// -// This function will first check if the label exists, and then issue an update, -// otherwise it will create. This is also advantageous in that we get to use the -// same function for two schema funcs. 
- -func resourceGithubIssueLabelCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - o := meta.(*Organization).name - r := d.Get("repository").(string) - n := d.Get("name").(string) - c := d.Get("color").(string) - - label := &github.Label{ - Name: &n, - Color: &c, - } - - log.Printf("[DEBUG] Querying label existence %s/%s (%s)", o, r, n) - existing, _, _ := client.Issues.GetLabel(context.TODO(), o, r, n) - - if existing != nil { - log.Printf("[DEBUG] Updating label: %s/%s (%s: %s)", o, r, n, c) - - // Pull out the original name. If we already have a resource, this is the - // parsed ID. If not, it's the value given to the resource. - var oname string - if d.Id() == "" { - oname = n - } else { - _, oname = parseTwoPartID(d.Id()) - } - - _, _, err := client.Issues.EditLabel(context.TODO(), o, r, oname, label) - if err != nil { - return err - } - } else { - log.Printf("[DEBUG] Creating label: %s/%s (%s: %s)", o, r, n, c) - _, resp, err := client.Issues.CreateLabel(context.TODO(), o, r, label) - if resp != nil { - log.Printf("[DEBUG] Response from creating label: %s", *resp) - } - if err != nil { - return err - } - } - - d.SetId(buildTwoPartID(&r, &n)) - - return resourceGithubIssueLabelRead(d, meta) -} - -func resourceGithubIssueLabelRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - r, n := parseTwoPartID(d.Id()) - - log.Printf("[DEBUG] Reading label: %s/%s", r, n) - githubLabel, _, err := client.Issues.GetLabel(context.TODO(), meta.(*Organization).name, r, n) - if err != nil { - d.SetId("") - return nil - } - - d.Set("repository", r) - d.Set("name", n) - d.Set("color", githubLabel.Color) - d.Set("url", githubLabel.URL) - - return nil -} - -func resourceGithubIssueLabelDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - r := d.Get("repository").(string) - n := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting label: 
%s/%s", r, n) - _, err := client.Issues.DeleteLabel(context.TODO(), meta.(*Organization).name, r, n) - return err -} diff --git a/builtin/providers/github/resource_github_issue_label_test.go b/builtin/providers/github/resource_github_issue_label_test.go deleted file mode 100644 index a73a209e1..000000000 --- a/builtin/providers/github/resource_github_issue_label_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubIssueLabel_basic(t *testing.T) { - var label github.Label - - rString := acctest.RandString(5) - repoName := fmt.Sprintf("tf-acc-test-branch-issue-label-%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGithubIssueLabelDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubIssueLabelConfig(repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubIssueLabelExists("github_issue_label.test", &label), - testAccCheckGithubIssueLabelAttributes(&label, "foo", "000000"), - ), - }, - { - Config: testAccGithubIssueLabelUpdateConfig(repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubIssueLabelExists("github_issue_label.test", &label), - testAccCheckGithubIssueLabelAttributes(&label, "bar", "FFFFFF"), - ), - }, - }, - }) -} - -func TestAccGithubIssueLabel_existingLabel(t *testing.T) { - var label github.Label - - rString := acctest.RandString(5) - repoName := fmt.Sprintf("tf-acc-test-branch-issue-label-%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGithubIssueLabelDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGitHubIssueLabelExistsConfig(repoName), - 
Check: resource.ComposeTestCheckFunc( - testAccCheckGithubIssueLabelExists("github_issue_label.test", &label), - testAccCheckGithubIssueLabelAttributes(&label, "enhancement", "FF00FF"), - ), - }, - }, - }) -} - -func TestAccGithubIssueLabel_importBasic(t *testing.T) { - rString := acctest.RandString(5) - repoName := fmt.Sprintf("tf-acc-test-branch-issue-label-%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGithubIssueLabelDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubIssueLabelConfig(repoName), - }, - { - ResourceName: "github_issue_label.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubIssueLabelExists(n string, label *github.Label) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No issue label ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - o := testAccProvider.Meta().(*Organization).name - r, n := parseTwoPartID(rs.Primary.ID) - - githubLabel, _, err := conn.Issues.GetLabel(context.TODO(), o, r, n) - if err != nil { - return err - } - - *label = *githubLabel - return nil - } -} - -func testAccCheckGithubIssueLabelAttributes(label *github.Label, name, color string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *label.Name != name { - return fmt.Errorf("Issue label name does not match: %s, %s", *label.Name, name) - } - - if *label.Color != color { - return fmt.Errorf("Issue label color does not match: %s, %s", *label.Color, color) - } - - return nil - } -} - -func testAccGithubIssueLabelDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_issue_label" { - continue - } - 
- o := testAccProvider.Meta().(*Organization).name - r, n := parseTwoPartID(rs.Primary.ID) - label, res, err := conn.Issues.GetLabel(context.TODO(), o, r, n) - - if err == nil { - if label != nil && - buildTwoPartID(label.Name, label.Color) == rs.Primary.ID { - return fmt.Errorf("Issue label still exists") - } - } - if res.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGithubIssueLabelConfig(repoName string) string { - return fmt.Sprintf(` -resource "github_repository" "test" { - name = "%s" -} - -resource "github_issue_label" "test" { - repository = "${github_repository.test.name}" - name = "foo" - color = "000000" -} -`, repoName) -} - -func testAccGithubIssueLabelUpdateConfig(repoName string) string { - return fmt.Sprintf(` -resource "github_repository" "test" { - name = "%s" -} - -resource "github_issue_label" "test" { - repository = "${github_repository.test.name}" - name = "bar" - color = "FFFFFF" -} -`, repoName) -} - -func testAccGitHubIssueLabelExistsConfig(repoName string) string { - return fmt.Sprintf(` -// Create a repository which has the default labels -resource "github_repository" "test" { - name = "%s" -} - -resource "github_issue_label" "test" { - repository = "${github_repository.test.name}" - name = "enhancement" // Important! 
This is a pre-created label - color = "FF00FF" -} -`, repoName) -} diff --git a/builtin/providers/github/resource_github_membership.go b/builtin/providers/github/resource_github_membership.go deleted file mode 100644 index 50bc2f164..000000000 --- a/builtin/providers/github/resource_github_membership.go +++ /dev/null @@ -1,91 +0,0 @@ -package github - -import ( - "context" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubMembership() *schema.Resource { - - return &schema.Resource{ - Create: resourceGithubMembershipCreate, - Read: resourceGithubMembershipRead, - Update: resourceGithubMembershipUpdate, - Delete: resourceGithubMembershipDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "role": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateValueFunc([]string{"member", "admin"}), - Default: "member", - }, - }, - } -} - -func resourceGithubMembershipCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - n := d.Get("username").(string) - r := d.Get("role").(string) - - membership, _, err := client.Organizations.EditOrgMembership(context.TODO(), n, meta.(*Organization).name, - &github.Membership{Role: &r}) - if err != nil { - return err - } - - d.SetId(buildTwoPartID(membership.Organization.Login, membership.User.Login)) - - return resourceGithubMembershipRead(d, meta) -} - -func resourceGithubMembershipRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - _, n := parseTwoPartID(d.Id()) - - membership, _, err := client.Organizations.GetOrgMembership(context.TODO(), n, meta.(*Organization).name) - if err != nil { - d.SetId("") - return nil - } - - d.Set("username", membership.User.Login) - d.Set("role", membership.Role) - return nil -} 
- -func resourceGithubMembershipUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - n := d.Get("username").(string) - r := d.Get("role").(string) - - membership, _, err := client.Organizations.EditOrgMembership(context.TODO(), n, meta.(*Organization).name, &github.Membership{ - Role: &r, - }) - if err != nil { - return err - } - d.SetId(buildTwoPartID(membership.Organization.Login, membership.User.Login)) - - return nil -} - -func resourceGithubMembershipDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - n := d.Get("username").(string) - - _, err := client.Organizations.RemoveOrgMembership(context.TODO(), n, meta.(*Organization).name) - - return err -} diff --git a/builtin/providers/github/resource_github_membership_test.go b/builtin/providers/github/resource_github_membership_test.go deleted file mode 100644 index 0caed0e04..000000000 --- a/builtin/providers/github/resource_github_membership_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubMembership_basic(t *testing.T) { - var membership github.Membership - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubMembershipDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubMembershipConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubMembershipExists("github_membership.test_org_membership", &membership), - testAccCheckGithubMembershipRoleState("github_membership.test_org_membership", &membership), - ), - }, - }, - }) -} - -func TestAccGithubMembership_importBasic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckGithubMembershipDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubMembershipConfig, - }, - { - ResourceName: "github_membership.test_org_membership", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubMembershipDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_membership" { - continue - } - o, u := parseTwoPartID(rs.Primary.ID) - - membership, resp, err := conn.Organizations.GetOrgMembership(context.TODO(), u, o) - - if err == nil { - if membership != nil && - buildTwoPartID(membership.Organization.Login, membership.User.Login) == rs.Primary.ID { - return fmt.Errorf("Organization membership still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccCheckGithubMembershipExists(n string, membership *github.Membership) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - o, u := parseTwoPartID(rs.Primary.ID) - - githubMembership, _, err := conn.Organizations.GetOrgMembership(context.TODO(), u, o) - if err != nil { - return err - } - *membership = *githubMembership - return nil - } -} - -func testAccCheckGithubMembershipRoleState(n string, membership *github.Membership) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - o, u := parseTwoPartID(rs.Primary.ID) - - githubMembership, _, err := 
conn.Organizations.GetOrgMembership(context.TODO(), u, o) - if err != nil { - return err - } - - resourceRole := membership.Role - actualRole := githubMembership.Role - - if *resourceRole != *actualRole { - return fmt.Errorf("Membership role %v in resource does match actual state of %v", *resourceRole, *actualRole) - } - return nil - } -} - -var testAccGithubMembershipConfig string = fmt.Sprintf(` - resource "github_membership" "test_org_membership" { - username = "%s" - role = "member" - } -`, testUser) diff --git a/builtin/providers/github/resource_github_organization_webhook.go b/builtin/providers/github/resource_github_organization_webhook.go deleted file mode 100644 index 9acc6f164..000000000 --- a/builtin/providers/github/resource_github_organization_webhook.go +++ /dev/null @@ -1,137 +0,0 @@ -package github - -import ( - "context" - "fmt" - "strconv" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubOrganizationWebhook() *schema.Resource { - - return &schema.Resource{ - Create: resourceGithubOrganizationWebhookCreate, - Read: resourceGithubOrganizationWebhookRead, - Update: resourceGithubOrganizationWebhookUpdate, - Delete: resourceGithubOrganizationWebhookDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGithubOrganizationWebhookName, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "configuration": { - Type: schema.TypeMap, - Optional: true, - }, - "url": { - Type: schema.TypeString, - Computed: true, - }, - "active": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func validateGithubOrganizationWebhookName(v interface{}, k string) (ws []string, errors []error) { - if v.(string) != "web" { - errors = append(errors, fmt.Errorf("Github: name can only be web")) 
- } - return -} - -func resourceGithubOrganizationWebhookObject(d *schema.ResourceData) *github.Hook { - url := d.Get("url").(string) - active := d.Get("active").(bool) - events := []string{} - eventSet := d.Get("events").(*schema.Set) - for _, v := range eventSet.List() { - events = append(events, v.(string)) - } - name := d.Get("name").(string) - - hook := &github.Hook{ - Name: &name, - URL: &url, - Events: events, - Active: &active, - Config: d.Get("configuration").(map[string]interface{}), - } - - return hook -} - -func resourceGithubOrganizationWebhookCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hk := resourceGithubOrganizationWebhookObject(d) - - hook, _, err := client.Organizations.CreateHook(context.TODO(), meta.(*Organization).name, hk) - if err != nil { - return err - } - d.SetId(strconv.Itoa(*hook.ID)) - - return resourceGithubOrganizationWebhookRead(d, meta) -} - -func resourceGithubOrganizationWebhookRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hookID, _ := strconv.Atoi(d.Id()) - - hook, resp, err := client.Organizations.GetHook(context.TODO(), meta.(*Organization).name, hookID) - if err != nil { - if resp != nil && resp.StatusCode == 404 { - d.SetId("") - return nil - } - return err - } - d.Set("name", hook.Name) - d.Set("url", hook.URL) - d.Set("active", hook.Active) - d.Set("events", hook.Events) - d.Set("configuration", hook.Config) - - return nil -} - -func resourceGithubOrganizationWebhookUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hk := resourceGithubOrganizationWebhookObject(d) - hookID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - _, _, err = client.Organizations.EditHook(context.TODO(), meta.(*Organization).name, hookID, hk) - if err != nil { - return err - } - - return resourceGithubOrganizationWebhookRead(d, meta) -} - -func 
resourceGithubOrganizationWebhookDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hookID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - _, err = client.Organizations.DeleteHook(context.TODO(), meta.(*Organization).name, hookID) - return err -} diff --git a/builtin/providers/github/resource_github_organization_webhook_test.go b/builtin/providers/github/resource_github_organization_webhook_test.go deleted file mode 100644 index 6f29dbc92..000000000 --- a/builtin/providers/github/resource_github_organization_webhook_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package github - -import ( - "context" - "fmt" - "reflect" - "strconv" - "strings" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubOrganizationWebhook_basic(t *testing.T) { - var hook github.Hook - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubOrganizationWebhookDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubOrganizationWebhookConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubOrganizationWebhookExists("github_organization_webhook.foo", &hook), - testAccCheckGithubOrganizationWebhookAttributes(&hook, &testAccGithubOrganizationWebhookExpectedAttributes{ - Name: "web", - Events: []string{"pull_request"}, - Configuration: map[string]interface{}{ - "url": "https://google.de/webhook", - "content_type": "json", - "insecure_ssl": "1", - }, - Active: true, - }), - ), - }, - { - Config: testAccGithubOrganizationWebhookUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubOrganizationWebhookExists("github_organization_webhook.foo", &hook), - testAccCheckGithubOrganizationWebhookAttributes(&hook, &testAccGithubOrganizationWebhookExpectedAttributes{ - Name: "web", - Events: 
[]string{"issues"}, - Configuration: map[string]interface{}{ - "url": "https://google.de/webhooks", - "content_type": "form", - "insecure_ssl": "0", - }, - Active: false, - }), - ), - }, - }, - }) -} - -func testAccCheckGithubOrganizationWebhookExists(n string, hook *github.Hook) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - hookID, _ := strconv.Atoi(rs.Primary.ID) - if hookID == 0 { - return fmt.Errorf("No repository name is set") - } - - org := testAccProvider.Meta().(*Organization) - conn := org.client - getHook, _, err := conn.Organizations.GetHook(context.TODO(), org.name, hookID) - if err != nil { - return err - } - *hook = *getHook - return nil - } -} - -type testAccGithubOrganizationWebhookExpectedAttributes struct { - Name string - Events []string - Configuration map[string]interface{} - Active bool -} - -func testAccCheckGithubOrganizationWebhookAttributes(hook *github.Hook, want *testAccGithubOrganizationWebhookExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *hook.Name != want.Name { - return fmt.Errorf("got hook %q; want %q", *hook.Name, want.Name) - } - if *hook.Active != want.Active { - return fmt.Errorf("got hook %t; want %t", *hook.Active, want.Active) - } - if !strings.HasPrefix(*hook.URL, "https://") { - return fmt.Errorf("got http URL %q; want to start with 'https://'", *hook.URL) - } - if !reflect.DeepEqual(hook.Events, want.Events) { - return fmt.Errorf("got hook events %q; want %q", hook.Events, want.Events) - } - if !reflect.DeepEqual(hook.Config, want.Configuration) { - return fmt.Errorf("got hook configuration %q; want %q", hook.Config, want.Configuration) - } - - return nil - } -} - -func testAccCheckGithubOrganizationWebhookDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - orgName := testAccProvider.Meta().(*Organization).name - - 
for _, rs := range s.RootModule().Resources { - if rs.Type != "github_organization_webhook" { - continue - } - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - gotHook, resp, err := conn.Organizations.GetHook(context.TODO(), orgName, id) - if err == nil { - if gotHook != nil && *gotHook.ID == id { - return fmt.Errorf("Webhook still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -const testAccGithubOrganizationWebhookConfig = ` -resource "github_organization_webhook" "foo" { - name = "web" - configuration { - url = "https://google.de/webhook" - content_type = "json" - insecure_ssl = true - } - - events = ["pull_request"] -} -` - -const testAccGithubOrganizationWebhookUpdateConfig = ` -resource "github_organization_webhook" "foo" { - name = "web" - configuration { - url = "https://google.de/webhooks" - content_type = "form" - insecure_ssl = false - } - active = false - - events = ["issues"] -} -` diff --git a/builtin/providers/github/resource_github_repository.go b/builtin/providers/github/resource_github_repository.go deleted file mode 100644 index b97bb949e..000000000 --- a/builtin/providers/github/resource_github_repository.go +++ /dev/null @@ -1,177 +0,0 @@ -package github - -import ( - "context" - "log" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubRepository() *schema.Resource { - - return &schema.Resource{ - Create: resourceGithubRepositoryCreate, - Read: resourceGithubRepositoryRead, - Update: resourceGithubRepositoryUpdate, - Delete: resourceGithubRepositoryDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "homepage_url": { - Type: schema.TypeString, - Optional: true, - }, - 
"private": { - Type: schema.TypeBool, - Optional: true, - }, - "has_issues": { - Type: schema.TypeBool, - Optional: true, - }, - "has_wiki": { - Type: schema.TypeBool, - Optional: true, - }, - "has_downloads": { - Type: schema.TypeBool, - Optional: true, - }, - "auto_init": { - Type: schema.TypeBool, - Optional: true, - }, - - "full_name": { - Type: schema.TypeString, - Computed: true, - }, - "default_branch": { - Type: schema.TypeString, - Computed: true, - }, - "ssh_clone_url": { - Type: schema.TypeString, - Computed: true, - }, - "svn_url": { - Type: schema.TypeString, - Computed: true, - }, - "git_clone_url": { - Type: schema.TypeString, - Computed: true, - }, - "http_clone_url": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceGithubRepositoryObject(d *schema.ResourceData) *github.Repository { - name := d.Get("name").(string) - description := d.Get("description").(string) - homepageUrl := d.Get("homepage_url").(string) - private := d.Get("private").(bool) - hasIssues := d.Get("has_issues").(bool) - hasWiki := d.Get("has_wiki").(bool) - hasDownloads := d.Get("has_downloads").(bool) - autoInit := d.Get("auto_init").(bool) - - repo := &github.Repository{ - Name: &name, - Description: &description, - Homepage: &homepageUrl, - Private: &private, - HasIssues: &hasIssues, - HasWiki: &hasWiki, - HasDownloads: &hasDownloads, - AutoInit: &autoInit, - } - - return repo -} - -func resourceGithubRepositoryCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - - repoReq := resourceGithubRepositoryObject(d) - log.Printf("[DEBUG] create github repository %s/%s", meta.(*Organization).name, *repoReq.Name) - repo, _, err := client.Repositories.Create(context.TODO(), meta.(*Organization).name, repoReq) - if err != nil { - return err - } - d.SetId(*repo.Name) - - return resourceGithubRepositoryRead(d, meta) -} - -func resourceGithubRepositoryRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*Organization).client - repoName := d.Id() - - log.Printf("[DEBUG] read github repository %s/%s", meta.(*Organization).name, repoName) - repo, resp, err := client.Repositories.Get(context.TODO(), meta.(*Organization).name, repoName) - if err != nil { - if resp != nil && resp.StatusCode == 404 { - log.Printf( - "[WARN] removing %s/%s from state because it no longer exists in github", - meta.(*Organization).name, - repoName, - ) - d.SetId("") - return nil - } - return err - } - d.Set("name", repoName) - d.Set("description", repo.Description) - d.Set("homepage_url", repo.Homepage) - d.Set("private", repo.Private) - d.Set("has_issues", repo.HasIssues) - d.Set("has_wiki", repo.HasWiki) - d.Set("has_downloads", repo.HasDownloads) - d.Set("full_name", repo.FullName) - d.Set("default_branch", repo.DefaultBranch) - d.Set("ssh_clone_url", repo.SSHURL) - d.Set("svn_url", repo.SVNURL) - d.Set("git_clone_url", repo.GitURL) - d.Set("http_clone_url", repo.CloneURL) - return nil -} - -func resourceGithubRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - repoReq := resourceGithubRepositoryObject(d) - repoName := d.Id() - log.Printf("[DEBUG] update github repository %s/%s", meta.(*Organization).name, repoName) - repo, _, err := client.Repositories.Edit(context.TODO(), meta.(*Organization).name, repoName, repoReq) - if err != nil { - return err - } - d.SetId(*repo.Name) - - return resourceGithubRepositoryRead(d, meta) -} - -func resourceGithubRepositoryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - repoName := d.Id() - log.Printf("[DEBUG] delete github repository %s/%s", meta.(*Organization).name, repoName) - _, err := client.Repositories.Delete(context.TODO(), meta.(*Organization).name, repoName) - return err -} diff --git a/builtin/providers/github/resource_github_repository_collaborator.go b/builtin/providers/github/resource_github_repository_collaborator.go deleted 
file mode 100644 index 84667c35b..000000000 --- a/builtin/providers/github/resource_github_repository_collaborator.go +++ /dev/null @@ -1,151 +0,0 @@ -package github - -import ( - "context" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubRepositoryCollaborator() *schema.Resource { - return &schema.Resource{ - Create: resourceGithubRepositoryCollaboratorCreate, - Read: resourceGithubRepositoryCollaboratorRead, - // editing repository collaborators are not supported by github api so forcing new on any changes - Delete: resourceGithubRepositoryCollaboratorDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "permission": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "push", - ValidateFunc: validateValueFunc([]string{"pull", "push", "admin"}), - }, - }, - } -} - -func resourceGithubRepositoryCollaboratorCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - u := d.Get("username").(string) - r := d.Get("repository").(string) - p := d.Get("permission").(string) - - _, err := client.Repositories.AddCollaborator(context.TODO(), meta.(*Organization).name, r, u, - &github.RepositoryAddCollaboratorOptions{Permission: p}) - - if err != nil { - return err - } - - d.SetId(buildTwoPartID(&r, &u)) - - return resourceGithubRepositoryCollaboratorRead(d, meta) -} - -func resourceGithubRepositoryCollaboratorRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - r, u := parseTwoPartID(d.Id()) - - // First, check if the user has been invited but has not yet accepted - invitation, err := findRepoInvitation(client, meta.(*Organization).name, r, u) - if err != nil { 
- return err - } else if invitation != nil { - permName, err := getInvitationPermission(invitation) - if err != nil { - return err - } - - d.Set("repository", r) - d.Set("username", u) - d.Set("permission", permName) - return nil - } - - // Next, check if the user has accepted the invite and is a full collaborator - opt := &github.ListOptions{PerPage: maxPerPage} - for { - collaborators, resp, err := client.Repositories.ListCollaborators(context.TODO(), meta.(*Organization).name, r, opt) - if err != nil { - return err - } - - for _, c := range collaborators { - if *c.Login == u { - permName, err := getRepoPermission(c.Permissions) - if err != nil { - return err - } - - d.Set("repository", r) - d.Set("username", u) - d.Set("permission", permName) - return nil - } - } - - if resp.NextPage == 0 { - break - } - opt.Page = resp.NextPage - } - - // The user is neither invited nor a collaborator - d.SetId("") - return nil -} - -func resourceGithubRepositoryCollaboratorDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - u := d.Get("username").(string) - r := d.Get("repository").(string) - - // Delete any pending invitations - invitation, err := findRepoInvitation(client, meta.(*Organization).name, r, u) - if err != nil { - return err - } else if invitation != nil { - _, err = client.Repositories.DeleteInvitation(context.TODO(), meta.(*Organization).name, r, *invitation.ID) - return err - } - - _, err = client.Repositories.RemoveCollaborator(context.TODO(), meta.(*Organization).name, r, u) - return err -} - -func findRepoInvitation(client *github.Client, owner string, repo string, collaborator string) (*github.RepositoryInvitation, error) { - opt := &github.ListOptions{PerPage: maxPerPage} - for { - invitations, resp, err := client.Repositories.ListInvitations(context.TODO(), owner, repo, opt) - if err != nil { - return nil, err - } - - for _, i := range invitations { - if *i.Invitee.Login == collaborator { - return i, nil - } 
- } - - if resp.NextPage == 0 { - break - } - opt.Page = resp.NextPage - } - return nil, nil -} diff --git a/builtin/providers/github/resource_github_repository_collaborator_test.go b/builtin/providers/github/resource_github_repository_collaborator_test.go deleted file mode 100644 index c842bfe44..000000000 --- a/builtin/providers/github/resource_github_repository_collaborator_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -const expectedPermission string = "admin" - -func TestAccGithubRepositoryCollaborator_basic(t *testing.T) { - repoName := fmt.Sprintf("tf-acc-test-collab-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubRepositoryCollaboratorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubRepositoryCollaboratorConfig(repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubRepositoryCollaboratorExists("github_repository_collaborator.test_repo_collaborator"), - testAccCheckGithubRepositoryCollaboratorPermission("github_repository_collaborator.test_repo_collaborator"), - ), - }, - }, - }) -} - -func TestAccGithubRepositoryCollaborator_importBasic(t *testing.T) { - repoName := fmt.Sprintf("tf-acc-test-collab-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubRepositoryCollaboratorDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubRepositoryCollaboratorConfig(repoName), - }, - { - ResourceName: "github_repository_collaborator.test_repo_collaborator", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func 
testAccCheckGithubRepositoryCollaboratorDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_repository_collaborator" { - continue - } - - o := testAccProvider.Meta().(*Organization).name - r, u := parseTwoPartID(rs.Primary.ID) - isCollaborator, _, err := conn.Repositories.IsCollaborator(context.TODO(), o, r, u) - - if err != nil { - return err - } - - if isCollaborator { - return fmt.Errorf("Repository collaborator still exists") - } - - return nil - } - - return nil -} - -func testAccCheckGithubRepositoryCollaboratorExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - o := testAccProvider.Meta().(*Organization).name - r, u := parseTwoPartID(rs.Primary.ID) - - invitations, _, err := conn.Repositories.ListInvitations(context.TODO(), o, r, nil) - if err != nil { - return err - } - - hasInvitation := false - for _, i := range invitations { - if *i.Invitee.Login == u { - hasInvitation = true - break - } - } - - if !hasInvitation { - return fmt.Errorf("Repository collaboration invitation does not exist") - } - - return nil - } -} - -func testAccCheckGithubRepositoryCollaboratorPermission(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - o := testAccProvider.Meta().(*Organization).name - r, u := parseTwoPartID(rs.Primary.ID) - - invitations, _, err := conn.Repositories.ListInvitations(context.TODO(), o, r, nil) - if err != nil { - return err 
- } - - for _, i := range invitations { - if *i.Invitee.Login == u { - permName, err := getInvitationPermission(i) - - if err != nil { - return err - } - - if permName != expectedPermission { - return fmt.Errorf("Expected permission %s on repository collaborator, actual permission %s", expectedPermission, permName) - } - - return nil - } - } - - return fmt.Errorf("Repository collaborator did not appear in list of collaborators on repository") - } -} - -func testAccGithubRepositoryCollaboratorConfig(repoName string) string { - return fmt.Sprintf(` -resource "github_repository" "test" { - name = "%s" -} - - resource "github_repository_collaborator" "test_repo_collaborator" { - repository = "${github_repository.test.name}" - username = "%s" - permission = "%s" - } -`, repoName, testCollaborator, expectedPermission) -} diff --git a/builtin/providers/github/resource_github_repository_test.go b/builtin/providers/github/resource_github_repository_test.go deleted file mode 100644 index 03101c89f..000000000 --- a/builtin/providers/github/resource_github_repository_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package github - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubRepository_basic(t *testing.T) { - var repo github.Repository - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-%s", randString) - description := fmt.Sprintf("Terraform acceptance tests %s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubRepositoryConfig(randString), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckGithubRepositoryExists("github_repository.foo", &repo), - testAccCheckGithubRepositoryAttributes(&repo, &testAccGithubRepositoryExpectedAttributes{ - Name: name, - Description: description, - Homepage: "http://example.com/", - HasIssues: true, - HasWiki: true, - HasDownloads: true, - DefaultBranch: "master", - }), - ), - }, - { - Config: testAccGithubRepositoryUpdateConfig(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubRepositoryExists("github_repository.foo", &repo), - testAccCheckGithubRepositoryAttributes(&repo, &testAccGithubRepositoryExpectedAttributes{ - Name: name, - Description: "Updated " + description, - Homepage: "http://example.com/", - DefaultBranch: "master", - }), - ), - }, - }, - }) -} - -func TestAccGithubRepository_importBasic(t *testing.T) { - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubRepositoryConfig(randString), - }, - { - ResourceName: "github_repository.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubRepositoryExists(n string, repo *github.Repository) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - repoName := rs.Primary.ID - if repoName == "" { - return fmt.Errorf("No repository name is set") - } - - org := testAccProvider.Meta().(*Organization) - conn := org.client - gotRepo, _, err := conn.Repositories.Get(context.TODO(), org.name, repoName) - if err != nil { - return err - } - *repo = *gotRepo - return nil - } -} - -type testAccGithubRepositoryExpectedAttributes struct { - Name string - Description string - Homepage string - Private bool - HasIssues bool - HasWiki bool - HasDownloads 
bool - - DefaultBranch string -} - -func testAccCheckGithubRepositoryAttributes(repo *github.Repository, want *testAccGithubRepositoryExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *repo.Name != want.Name { - return fmt.Errorf("got repo %q; want %q", *repo.Name, want.Name) - } - if *repo.Description != want.Description { - return fmt.Errorf("got description %q; want %q", *repo.Description, want.Description) - } - if *repo.Homepage != want.Homepage { - return fmt.Errorf("got homepage URL %q; want %q", *repo.Homepage, want.Homepage) - } - if *repo.Private != want.Private { - return fmt.Errorf("got private %#v; want %#v", *repo.Private, want.Private) - } - if *repo.HasIssues != want.HasIssues { - return fmt.Errorf("got has issues %#v; want %#v", *repo.HasIssues, want.HasIssues) - } - if *repo.HasWiki != want.HasWiki { - return fmt.Errorf("got has wiki %#v; want %#v", *repo.HasWiki, want.HasWiki) - } - if *repo.HasDownloads != want.HasDownloads { - return fmt.Errorf("got has downloads %#v; want %#v", *repo.HasDownloads, want.HasDownloads) - } - - if *repo.DefaultBranch != want.DefaultBranch { - return fmt.Errorf("got default branch %q; want %q", *repo.DefaultBranch, want.DefaultBranch) - } - - // For the rest of these, we just want to make sure they've been - // populated with something that seems somewhat reasonable. 
- if !strings.HasSuffix(*repo.FullName, "/"+want.Name) { - return fmt.Errorf("got full name %q; want to end with '/%s'", *repo.FullName, want.Name) - } - if !strings.HasSuffix(*repo.CloneURL, "/"+want.Name+".git") { - return fmt.Errorf("got Clone URL %q; want to end with '/%s.git'", *repo.CloneURL, want.Name) - } - if !strings.HasPrefix(*repo.CloneURL, "https://") { - return fmt.Errorf("got Clone URL %q; want to start with 'https://'", *repo.CloneURL) - } - if !strings.HasSuffix(*repo.SSHURL, "/"+want.Name+".git") { - return fmt.Errorf("got SSH URL %q; want to end with '/%s.git'", *repo.SSHURL, want.Name) - } - if !strings.HasPrefix(*repo.SSHURL, "git@github.com:") { - return fmt.Errorf("got SSH URL %q; want to start with 'git@github.com:'", *repo.SSHURL) - } - if !strings.HasSuffix(*repo.GitURL, "/"+want.Name+".git") { - return fmt.Errorf("got git URL %q; want to end with '/%s.git'", *repo.GitURL, want.Name) - } - if !strings.HasPrefix(*repo.GitURL, "git://") { - return fmt.Errorf("got git URL %q; want to start with 'git://'", *repo.GitURL) - } - if !strings.HasSuffix(*repo.SVNURL, "/"+want.Name) { - return fmt.Errorf("got svn URL %q; want to end with '/%s'", *repo.SVNURL, want.Name) - } - if !strings.HasPrefix(*repo.SVNURL, "https://") { - return fmt.Errorf("got svn URL %q; want to start with 'https://'", *repo.SVNURL) - } - - return nil - } -} - -func testAccCheckGithubRepositoryDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - orgName := testAccProvider.Meta().(*Organization).name - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_repository" { - continue - } - - gotRepo, resp, err := conn.Repositories.Get(context.TODO(), orgName, rs.Primary.ID) - if err == nil { - if gotRepo != nil && *gotRepo.Name == rs.Primary.ID { - return fmt.Errorf("Repository %s/%s still exists", orgName, *gotRepo.Name) - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func 
testAccGithubRepositoryConfig(randString string) string { - return fmt.Sprintf(` -resource "github_repository" "foo" { - name = "tf-acc-test-%s" - description = "Terraform acceptance tests %s" - homepage_url = "http://example.com/" - - # So that acceptance tests can be run in a github organization - # with no billing - private = false - - has_issues = true - has_wiki = true - has_downloads = true -} -`, randString, randString) -} - -func testAccGithubRepositoryUpdateConfig(randString string) string { - return fmt.Sprintf(` -resource "github_repository" "foo" { - name = "tf-acc-test-%s" - description = "Updated Terraform acceptance tests %s" - homepage_url = "http://example.com/" - - # So that acceptance tests can be run in a github organization - # with no billing - private = false - - has_issues = false - has_wiki = false - has_downloads = false -} -`, randString, randString) -} diff --git a/builtin/providers/github/resource_github_repository_webhook.go b/builtin/providers/github/resource_github_repository_webhook.go deleted file mode 100644 index 3c77e1032..000000000 --- a/builtin/providers/github/resource_github_repository_webhook.go +++ /dev/null @@ -1,132 +0,0 @@ -package github - -import ( - "context" - "strconv" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubRepositoryWebhook() *schema.Resource { - return &schema.Resource{ - Create: resourceGithubRepositoryWebhookCreate, - Read: resourceGithubRepositoryWebhookRead, - Update: resourceGithubRepositoryWebhookUpdate, - Delete: resourceGithubRepositoryWebhookDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "events": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "configuration": { - Type: 
schema.TypeMap, - Optional: true, - }, - "url": { - Type: schema.TypeString, - Computed: true, - }, - "active": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func resourceGithubRepositoryWebhookObject(d *schema.ResourceData) *github.Hook { - url := d.Get("url").(string) - active := d.Get("active").(bool) - events := []string{} - eventSet := d.Get("events").(*schema.Set) - for _, v := range eventSet.List() { - events = append(events, v.(string)) - } - name := d.Get("name").(string) - - hook := &github.Hook{ - Name: &name, - URL: &url, - Events: events, - Active: &active, - Config: d.Get("configuration").(map[string]interface{}), - } - - return hook -} - -func resourceGithubRepositoryWebhookCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hk := resourceGithubRepositoryWebhookObject(d) - - hook, _, err := client.Repositories.CreateHook(context.TODO(), meta.(*Organization).name, d.Get("repository").(string), hk) - if err != nil { - return err - } - d.SetId(strconv.Itoa(*hook.ID)) - - return resourceGithubRepositoryWebhookRead(d, meta) -} - -func resourceGithubRepositoryWebhookRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hookID, _ := strconv.Atoi(d.Id()) - - hook, resp, err := client.Repositories.GetHook(context.TODO(), meta.(*Organization).name, d.Get("repository").(string), hookID) - if err != nil { - if resp != nil && resp.StatusCode == 404 { - d.SetId("") - return nil - } - return err - } - d.Set("name", hook.Name) - d.Set("url", hook.URL) - d.Set("active", hook.Active) - d.Set("events", hook.Events) - d.Set("configuration", hook.Config) - - return nil -} - -func resourceGithubRepositoryWebhookUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hk := resourceGithubRepositoryWebhookObject(d) - hookID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - _, _, err = 
client.Repositories.EditHook(context.TODO(), meta.(*Organization).name, d.Get("repository").(string), hookID, hk) - if err != nil { - return err - } - - return resourceGithubRepositoryWebhookRead(d, meta) -} - -func resourceGithubRepositoryWebhookDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - hookID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - - _, err = client.Repositories.DeleteHook(context.TODO(), meta.(*Organization).name, d.Get("repository").(string), hookID) - return err -} diff --git a/builtin/providers/github/resource_github_repository_webhook_test.go b/builtin/providers/github/resource_github_repository_webhook_test.go deleted file mode 100644 index 189cae5c3..000000000 --- a/builtin/providers/github/resource_github_repository_webhook_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package github - -import ( - "context" - "fmt" - "reflect" - "strconv" - "strings" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubRepositoryWebhook_basic(t *testing.T) { - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - var hook github.Hook - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubRepositoryWebhookDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubRepositoryWebhookConfig(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubRepositoryWebhookExists("github_repository_webhook.foo", fmt.Sprintf("foo-%s", randString), &hook), - testAccCheckGithubRepositoryWebhookAttributes(&hook, &testAccGithubRepositoryWebhookExpectedAttributes{ - Name: "web", - Events: []string{"pull_request"}, - Configuration: map[string]interface{}{ - "url": "https://google.de/webhook", - "content_type": 
"json", - "insecure_ssl": "1", - }, - Active: true, - }), - ), - }, - { - Config: testAccGithubRepositoryWebhookUpdateConfig(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubRepositoryWebhookExists("github_repository_webhook.foo", fmt.Sprintf("foo-%s", randString), &hook), - testAccCheckGithubRepositoryWebhookAttributes(&hook, &testAccGithubRepositoryWebhookExpectedAttributes{ - Name: "web", - Events: []string{"issues"}, - Configuration: map[string]interface{}{ - "url": "https://google.de/webhooks", - "content_type": "form", - "insecure_ssl": "0", - }, - Active: false, - }), - ), - }, - }, - }) -} - -func testAccCheckGithubRepositoryWebhookExists(n string, repoName string, hook *github.Hook) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - hookID, _ := strconv.Atoi(rs.Primary.ID) - if hookID == 0 { - return fmt.Errorf("No repository name is set") - } - - org := testAccProvider.Meta().(*Organization) - conn := org.client - getHook, _, err := conn.Repositories.GetHook(context.TODO(), org.name, repoName, hookID) - if err != nil { - return err - } - *hook = *getHook - return nil - } -} - -type testAccGithubRepositoryWebhookExpectedAttributes struct { - Name string - Events []string - Configuration map[string]interface{} - Active bool -} - -func testAccCheckGithubRepositoryWebhookAttributes(hook *github.Hook, want *testAccGithubRepositoryWebhookExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if *hook.Name != want.Name { - return fmt.Errorf("got hook %q; want %q", *hook.Name, want.Name) - } - if *hook.Active != want.Active { - return fmt.Errorf("got hook %t; want %t", *hook.Active, want.Active) - } - if !strings.HasPrefix(*hook.URL, "https://") { - return fmt.Errorf("got http URL %q; want to start with 'https://'", *hook.URL) - } - if !reflect.DeepEqual(hook.Events, want.Events) { - return 
fmt.Errorf("got hook events %q; want %q", hook.Events, want.Events) - } - if !reflect.DeepEqual(hook.Config, want.Configuration) { - return fmt.Errorf("got hook configuration %q; want %q", hook.Config, want.Configuration) - } - - return nil - } -} - -func testAccCheckGithubRepositoryWebhookDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - orgName := testAccProvider.Meta().(*Organization).name - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_repository_webhook" { - continue - } - - id, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - - gotHook, resp, err := conn.Repositories.GetHook(context.TODO(), orgName, rs.Primary.Attributes["repository"], id) - if err == nil { - if gotHook != nil && *gotHook.ID == id { - return fmt.Errorf("Webhook still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGithubRepositoryWebhookConfig(randString string) string { - return fmt.Sprintf(` - resource "github_repository" "foo" { - name = "foo-%s" - description = "Terraform acceptance tests" - homepage_url = "http://example.com/" - - # So that acceptance tests can be run in a github organization - # with no billing - private = false - - has_issues = true - has_wiki = true - has_downloads = true - } - - resource "github_repository_webhook" "foo" { - depends_on = ["github_repository.foo"] - repository = "foo-%s" - - name = "web" - configuration { - url = "https://google.de/webhook" - content_type = "json" - insecure_ssl = true - } - - events = ["pull_request"] - } - `, randString, randString) -} - -func testAccGithubRepositoryWebhookUpdateConfig(randString string) string { - return fmt.Sprintf(` -resource "github_repository" "foo" { - name = "foo-%s" - description = "Terraform acceptance tests" - homepage_url = "http://example.com/" - - # So that acceptance tests can be run in a github organization - # with no billing - private = false - 
- has_issues = true - has_wiki = true - has_downloads = true -} - -resource "github_repository_webhook" "foo" { - depends_on = ["github_repository.foo"] - repository = "foo-%s" - - name = "web" - configuration { - url = "https://google.de/webhooks" - content_type = "form" - insecure_ssl = false - } - active = false - - events = ["issues"] -} -`, randString, randString) -} diff --git a/builtin/providers/github/resource_github_team.go b/builtin/providers/github/resource_github_team.go deleted file mode 100644 index 71c01d266..000000000 --- a/builtin/providers/github/resource_github_team.go +++ /dev/null @@ -1,106 +0,0 @@ -package github - -import ( - "context" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubTeam() *schema.Resource { - - return &schema.Resource{ - Create: resourceGithubTeamCreate, - Read: resourceGithubTeamRead, - Update: resourceGithubTeamUpdate, - Delete: resourceGithubTeamDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "privacy": { - Type: schema.TypeString, - Optional: true, - Default: "secret", - ValidateFunc: validateValueFunc([]string{"secret", "closed"}), - }, - }, - } -} - -func resourceGithubTeamCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - n := d.Get("name").(string) - desc := d.Get("description").(string) - p := d.Get("privacy").(string) - githubTeam, _, err := client.Organizations.CreateTeam(context.TODO(), meta.(*Organization).name, &github.Team{ - Name: &n, - Description: &desc, - Privacy: &p, - }) - if err != nil { - return err - } - d.SetId(fromGithubID(githubTeam.ID)) - return resourceGithubTeamRead(d, meta) -} - -func resourceGithubTeamRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*Organization).client - - team, err := getGithubTeam(d, client) - if err != nil { - d.SetId("") - return nil - } - d.Set("description", team.Description) - d.Set("name", team.Name) - d.Set("privacy", team.Privacy) - return nil -} - -func resourceGithubTeamUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - team, err := getGithubTeam(d, client) - - if err != nil { - d.SetId("") - return nil - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - privacy := d.Get("privacy").(string) - team.Description = &description - team.Name = &name - team.Privacy = &privacy - - team, _, err = client.Organizations.EditTeam(context.TODO(), *team.ID, team) - if err != nil { - return err - } - d.SetId(fromGithubID(team.ID)) - return resourceGithubTeamRead(d, meta) -} - -func resourceGithubTeamDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - id := toGithubID(d.Id()) - _, err := client.Organizations.DeleteTeam(context.TODO(), id) - return err -} - -func getGithubTeam(d *schema.ResourceData, github *github.Client) (*github.Team, error) { - id := toGithubID(d.Id()) - team, _, err := github.Organizations.GetTeam(context.TODO(), id) - return team, err -} diff --git a/builtin/providers/github/resource_github_team_membership.go b/builtin/providers/github/resource_github_team_membership.go deleted file mode 100644 index ca54f1e95..000000000 --- a/builtin/providers/github/resource_github_team_membership.go +++ /dev/null @@ -1,103 +0,0 @@ -package github - -import ( - "context" - "strings" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubTeamMembership() *schema.Resource { - - return &schema.Resource{ - Create: resourceGithubTeamMembershipCreate, - Read: resourceGithubTeamMembershipRead, - // editing team memberships are not supported by github api so forcing new on any changes - Delete: 
resourceGithubTeamMembershipDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "team_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "username": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "role": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "member", - ValidateFunc: validateValueFunc([]string{"member", "maintainer"}), - }, - }, - } -} - -func resourceGithubTeamMembershipCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t := d.Get("team_id").(string) - n := d.Get("username").(string) - r := d.Get("role").(string) - - _, _, err := client.Organizations.AddTeamMembership(context.TODO(), toGithubID(t), n, - &github.OrganizationAddTeamMembershipOptions{Role: r}) - - if err != nil { - return err - } - - d.SetId(buildTwoPartID(&t, &n)) - - return resourceGithubTeamMembershipRead(d, meta) -} - -func resourceGithubTeamMembershipRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t, n := parseTwoPartID(d.Id()) - - membership, _, err := client.Organizations.GetTeamMembership(context.TODO(), toGithubID(t), n) - - if err != nil { - d.SetId("") - return nil - } - team, user := getTeamAndUserFromURL(membership.URL) - - d.Set("username", user) - d.Set("role", membership.Role) - d.Set("team_id", team) - return nil -} - -func resourceGithubTeamMembershipDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t := d.Get("team_id").(string) - n := d.Get("username").(string) - - _, err := client.Organizations.RemoveTeamMembership(context.TODO(), toGithubID(t), n) - - return err -} - -func getTeamAndUserFromURL(url *string) (string, string) { - var team, user string - - urlSlice := strings.Split(*url, "/") - for v := range urlSlice { - if urlSlice[v] == "teams" { - team = urlSlice[v+1] 
- } - if urlSlice[v] == "memberships" { - user = urlSlice[v+1] - } - } - return team, user -} diff --git a/builtin/providers/github/resource_github_team_membership_test.go b/builtin/providers/github/resource_github_team_membership_test.go deleted file mode 100644 index d344b0598..000000000 --- a/builtin/providers/github/resource_github_team_membership_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubTeamMembership_basic(t *testing.T) { - var membership github.Membership - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - testAccGithubTeamMembershipUpdateConfig := fmt.Sprintf(` - resource "github_membership" "test_org_membership" { - username = "%s" - role = "member" - } - - resource "github_team" "test_team" { - name = "tf-acc-test-team-membership-%s" - description = "Terraform acc test group" - } - - resource "github_team_membership" "test_team_membership" { - team_id = "${github_team.test_team.id}" - username = "%s" - role = "maintainer" - } - `, testUser, randString, testUser) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamMembershipDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamMembershipConfig(randString, testUser), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubTeamMembershipExists("github_team_membership.test_team_membership", &membership), - testAccCheckGithubTeamMembershipRoleState("github_team_membership.test_team_membership", "member", &membership), - ), - }, - { - Config: testAccGithubTeamMembershipUpdateConfig, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckGithubTeamMembershipExists("github_team_membership.test_team_membership", &membership), - testAccCheckGithubTeamMembershipRoleState("github_team_membership.test_team_membership", "maintainer", &membership), - ), - }, - }, - }) -} - -func TestAccGithubTeamMembership_importBasic(t *testing.T) { - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamMembershipDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamMembershipConfig(randString, testUser), - }, - { - ResourceName: "github_team_membership.test_team_membership", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubTeamMembershipDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_team_membership" { - continue - } - - t, u := parseTwoPartID(rs.Primary.ID) - membership, resp, err := conn.Organizations.GetTeamMembership(context.TODO(), toGithubID(t), u) - if err == nil { - if membership != nil { - return fmt.Errorf("Team membership still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccCheckGithubTeamMembershipExists(n string, membership *github.Membership) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No team membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - t, u := parseTwoPartID(rs.Primary.ID) - - teamMembership, _, err := conn.Organizations.GetTeamMembership(context.TODO(), toGithubID(t), u) - - if err != nil { - return err - } - *membership = *teamMembership - return nil - } -} - -func 
testAccCheckGithubTeamMembershipRoleState(n, expected string, membership *github.Membership) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No team membership ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - t, u := parseTwoPartID(rs.Primary.ID) - - teamMembership, _, err := conn.Organizations.GetTeamMembership(context.TODO(), toGithubID(t), u) - if err != nil { - return err - } - - resourceRole := membership.Role - actualRole := teamMembership.Role - - if *resourceRole != expected { - return fmt.Errorf("Team membership role %v in resource does match expected state of %v", *resourceRole, expected) - } - - if *resourceRole != *actualRole { - return fmt.Errorf("Team membership role %v in resource does match actual state of %v", *resourceRole, *actualRole) - } - return nil - } -} - -func testAccGithubTeamMembershipConfig(randString, username string) string { - return fmt.Sprintf(` - resource "github_membership" "test_org_membership" { - username = "%s" - role = "member" - } - - resource "github_team" "test_team" { - name = "tf-acc-test-team-membership-%s" - description = "Terraform acc test group" - } - - resource "github_team_membership" "test_team_membership" { - team_id = "${github_team.test_team.id}" - username = "%s" - role = "member" - } -`, username, randString, username) -} diff --git a/builtin/providers/github/resource_github_team_repository.go b/builtin/providers/github/resource_github_team_repository.go deleted file mode 100644 index 7a13cef1b..000000000 --- a/builtin/providers/github/resource_github_team_repository.go +++ /dev/null @@ -1,112 +0,0 @@ -package github - -import ( - "context" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGithubTeamRepository() *schema.Resource { - return &schema.Resource{ - 
Create: resourceGithubTeamRepositoryCreate, - Read: resourceGithubTeamRepositoryRead, - Update: resourceGithubTeamRepositoryUpdate, - Delete: resourceGithubTeamRepositoryDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "team_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "permission": { - Type: schema.TypeString, - Optional: true, - Default: "pull", - ValidateFunc: validateValueFunc([]string{"pull", "push", "admin"}), - }, - }, - } -} - -func resourceGithubTeamRepositoryCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t := d.Get("team_id").(string) - r := d.Get("repository").(string) - p := d.Get("permission").(string) - - _, err := client.Organizations.AddTeamRepo(context.TODO(), toGithubID(t), meta.(*Organization).name, r, - &github.OrganizationAddTeamRepoOptions{Permission: p}) - - if err != nil { - return err - } - - d.SetId(buildTwoPartID(&t, &r)) - - return resourceGithubTeamRepositoryRead(d, meta) -} - -func resourceGithubTeamRepositoryRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t, r := parseTwoPartID(d.Id()) - - repo, _, repoErr := client.Organizations.IsTeamRepo(context.TODO(), toGithubID(t), meta.(*Organization).name, r) - - if repoErr != nil { - d.SetId("") - return nil - } - - repositoryName := repo.Name - - d.Set("team_id", t) - d.Set("repository", repositoryName) - - permName, permErr := getRepoPermission(repo.Permissions) - - if permErr != nil { - return permErr - } - - d.Set("permission", permName) - - return nil -} - -func resourceGithubTeamRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t := d.Get("team_id").(string) - r := d.Get("repository").(string) - p := d.Get("permission").(string) - - 
// the go-github library's AddTeamRepo method uses the add/update endpoint from Github API - _, err := client.Organizations.AddTeamRepo(context.TODO(), toGithubID(t), meta.(*Organization).name, r, - &github.OrganizationAddTeamRepoOptions{Permission: p}) - - if err != nil { - return err - } - d.SetId(buildTwoPartID(&t, &r)) - - return resourceGithubTeamRepositoryRead(d, meta) -} - -func resourceGithubTeamRepositoryDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Organization).client - t := d.Get("team_id").(string) - r := d.Get("repository").(string) - - _, err := client.Organizations.RemoveTeamRepo(context.TODO(), toGithubID(t), meta.(*Organization).name, r) - - return err -} diff --git a/builtin/providers/github/resource_github_team_repository_test.go b/builtin/providers/github/resource_github_team_repository_test.go deleted file mode 100644 index a3721c819..000000000 --- a/builtin/providers/github/resource_github_team_repository_test.go +++ /dev/null @@ -1,194 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubTeamRepository_basic(t *testing.T) { - var repository github.Repository - - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - repoName := fmt.Sprintf("tf-acc-test-team-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamRepositoryConfig(randString, repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubTeamRepositoryExists("github_team_repository.test_team_test_repo", &repository), - testAccCheckGithubTeamRepositoryRoleState("pull", &repository), - ), - }, - { - 
Config: testAccGithubTeamRepositoryUpdateConfig(randString, repoName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubTeamRepositoryExists("github_team_repository.test_team_test_repo", &repository), - testAccCheckGithubTeamRepositoryRoleState("push", &repository), - ), - }, - }, - }) -} - -func TestAccGithubTeamRepository_importBasic(t *testing.T) { - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - repoName := fmt.Sprintf("tf-acc-test-team-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamRepositoryConfig(randString, repoName), - }, - { - ResourceName: "github_team_repository.test_team_test_repo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccCheckGetPermissions(t *testing.T) { - pullMap := map[string]bool{"pull": true, "push": false, "admin": false} - pushMap := map[string]bool{"pull": true, "push": true, "admin": false} - adminMap := map[string]bool{"pull": true, "push": true, "admin": true} - errorMap := map[string]bool{"pull": false, "push": false, "admin": false} - - pull, _ := getRepoPermission(&pullMap) - if pull != "pull" { - t.Fatalf("Expected pull permission, actual: %s", pull) - } - - push, _ := getRepoPermission(&pushMap) - if push != "push" { - t.Fatalf("Expected push permission, actual: %s", push) - } - - admin, _ := getRepoPermission(&adminMap) - if admin != "admin" { - t.Fatalf("Expected admin permission, actual: %s", admin) - } - - errPerm, err := getRepoPermission(&errorMap) - if err == nil { - t.Fatalf("Expected an error getting permissions, actual: %v", errPerm) - } -} - -func testAccCheckGithubTeamRepositoryRoleState(role string, repository *github.Repository) resource.TestCheckFunc { - return func(s *terraform.State) error { - resourceRole, err := 
getRepoPermission(repository.Permissions) - if err != nil { - return err - } - - if resourceRole != role { - return fmt.Errorf("Team repository role %v in resource does match expected state of %v", resourceRole, role) - } - return nil - } -} - -func testAccCheckGithubTeamRepositoryExists(n string, repository *github.Repository) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No team repository ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - t, r := parseTwoPartID(rs.Primary.ID) - - repo, _, err := conn.Organizations.IsTeamRepo(context.TODO(), - toGithubID(t), - testAccProvider.Meta().(*Organization).name, r) - - if err != nil { - return err - } - *repository = *repo - return nil - } -} - -func testAccCheckGithubTeamRepositoryDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_team_repository" { - continue - } - t, r := parseTwoPartID(rs.Primary.ID) - - repo, resp, err := conn.Organizations.IsTeamRepo(context.TODO(), - toGithubID(t), - testAccProvider.Meta().(*Organization).name, r) - - if err == nil { - if repo != nil && - buildTwoPartID(&t, repo.Name) == rs.Primary.ID { - return fmt.Errorf("Team repository still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGithubTeamRepositoryConfig(randString, repoName string) string { - return fmt.Sprintf(` -resource "github_team" "test_team" { - name = "tf-acc-test-team-repo-%s" - description = "Terraform acc test group" -} - -resource "github_repository" "test" { - name = "%s" -} - -resource "github_team_repository" "test_team_test_repo" { - team_id = "${github_team.test_team.id}" - repository = "${github_repository.test.name}" - permission = "pull" -} -`, 
randString, repoName) -} - -func testAccGithubTeamRepositoryUpdateConfig(randString, repoName string) string { - return fmt.Sprintf(` -resource "github_team" "test_team" { - name = "tf-acc-test-team-repo-%s" - description = "Terraform acc test group" -} - -resource "github_repository" "test" { - name = "%s" -} - -resource "github_team_repository" "test_team_test_repo" { - team_id = "${github_team.test_team.id}" - repository = "${github_repository.test.name}" - permission = "push" -} -`, randString, repoName) -} diff --git a/builtin/providers/github/resource_github_team_test.go b/builtin/providers/github/resource_github_team_test.go deleted file mode 100644 index b59703189..000000000 --- a/builtin/providers/github/resource_github_team_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package github - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-github/github" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGithubTeam_basic(t *testing.T) { - var team github.Team - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-%s", randString) - updatedName := fmt.Sprintf("tf-acc-test-updated-%s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamConfig(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubTeamExists("github_team.foo", &team), - testAccCheckGithubTeamAttributes(&team, name, "Terraform acc test group"), - ), - }, - { - Config: testAccGithubTeamUpdateConfig(randString), - Check: resource.ComposeTestCheckFunc( - testAccCheckGithubTeamExists("github_team.foo", &team), - testAccCheckGithubTeamAttributes(&team, updatedName, "Terraform acc test group - updated"), - ), - }, - }, - }) -} - 
-func TestAccGithubTeam_importBasic(t *testing.T) { - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGithubTeamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccGithubTeamConfig(randString), - }, - { - ResourceName: "github_team.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckGithubTeamExists(n string, team *github.Team) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Team ID is set") - } - - conn := testAccProvider.Meta().(*Organization).client - githubTeam, _, err := conn.Organizations.GetTeam(context.TODO(), toGithubID(rs.Primary.ID)) - if err != nil { - return err - } - *team = *githubTeam - return nil - } -} - -func testAccCheckGithubTeamAttributes(team *github.Team, name, description string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *team.Name != name { - return fmt.Errorf("Team name does not match: %s, %s", *team.Name, name) - } - - if *team.Description != description { - return fmt.Errorf("Team description does not match: %s, %s", *team.Description, description) - } - - return nil - } -} - -func testAccCheckGithubTeamDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*Organization).client - - for _, rs := range s.RootModule().Resources { - if rs.Type != "github_team" { - continue - } - - team, resp, err := conn.Organizations.GetTeam(context.TODO(), toGithubID(rs.Primary.ID)) - if err == nil { - if team != nil && - fromGithubID(team.ID) == rs.Primary.ID { - return fmt.Errorf("Team still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGithubTeamConfig(randString 
string) string { - return fmt.Sprintf(` -resource "github_team" "foo" { - name = "tf-acc-test-%s" - description = "Terraform acc test group" - privacy = "secret" -} -`, randString) -} - -func testAccGithubTeamUpdateConfig(randString string) string { - return fmt.Sprintf(` -resource "github_team" "foo" { - name = "tf-acc-test-updated-%s" - description = "Terraform acc test group - updated" - privacy = "closed" -} -`, randString) -} diff --git a/builtin/providers/github/util.go b/builtin/providers/github/util.go deleted file mode 100644 index d8f07df5a..000000000 --- a/builtin/providers/github/util.go +++ /dev/null @@ -1,52 +0,0 @@ -package github - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -const ( - // https://developer.github.com/guides/traversing-with-pagination/#basics-of-pagination - maxPerPage = 100 -) - -func toGithubID(id string) int { - githubID, _ := strconv.Atoi(id) - return githubID -} - -func fromGithubID(id *int) string { - return strconv.Itoa(*id) -} - -func validateValueFunc(values []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (we []string, errors []error) { - value := v.(string) - valid := false - for _, role := range values { - if value == role { - valid = true - break - } - } - - if !valid { - errors = append(errors, fmt.Errorf("%s is an invalid value for argument %s", value, k)) - } - return - } -} - -// return the pieces of id `a:b` as a, b -func parseTwoPartID(id string) (string, string) { - parts := strings.SplitN(id, ":", 2) - return parts[0], parts[1] -} - -// format the strings into an id `a:b` -func buildTwoPartID(a, b *string) string { - return fmt.Sprintf("%s:%s", *a, *b) -} diff --git a/builtin/providers/github/util_permissions.go b/builtin/providers/github/util_permissions.go deleted file mode 100644 index edd8b164a..000000000 --- a/builtin/providers/github/util_permissions.go +++ /dev/null @@ -1,49 +0,0 @@ -package github - -import ( - "errors" - 
"fmt" - - "github.com/google/go-github/github" -) - -const ( - pullPermission string = "pull" - pushPermission string = "push" - adminPermission string = "admin" - - writePermission string = "write" - readPermission string = "read" -) - -func getRepoPermission(p *map[string]bool) (string, error) { - - // Permissions are returned in this map format such that if you have a certain level - // of permission, all levels below are also true. For example, if a team has push - // permission, the map will be: {"pull": true, "push": true, "admin": false} - if (*p)[adminPermission] { - return adminPermission, nil - } else if (*p)[pushPermission] { - return pushPermission, nil - } else { - if (*p)[pullPermission] { - return pullPermission, nil - } - return "", errors.New("At least one permission expected from permissions map.") - } -} - -func getInvitationPermission(i *github.RepositoryInvitation) (string, error) { - // Permissions for some GitHub API routes are expressed as "read", - // "write", and "admin"; in other places, they are expressed as "pull", - // "push", and "admin". 
- if *i.Permissions == readPermission { - return pullPermission, nil - } else if *i.Permissions == writePermission { - return pushPermission, nil - } else if *i.Permissions == adminPermission { - return adminPermission, nil - } - - return "", fmt.Errorf("unexpected permission value: %v", *i.Permissions) -} diff --git a/builtin/providers/github/util_test.go b/builtin/providers/github/util_test.go deleted file mode 100644 index 5d58407ca..000000000 --- a/builtin/providers/github/util_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package github - -import ( - "testing" -) - -func TestAccGithubUtilRole_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "invalid", - ErrCount: 1, - }, - { - Value: "valid_one", - ErrCount: 0, - }, - { - Value: "valid_two", - ErrCount: 0, - }, - } - - validationFunc := validateValueFunc([]string{"valid_one", "valid_two"}) - - for _, tc := range cases { - _, errors := validationFunc(tc.Value, "test_arg") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected 1 validation error") - } - } -} - -func TestAccGithubUtilTwoPartID(t *testing.T) { - partOne, partTwo := "foo", "bar" - - id := buildTwoPartID(&partOne, &partTwo) - - if id != "foo:bar" { - t.Fatalf("Expected two part id to be foo:bar, actual: %s", id) - } - - parsedPartOne, parsedPartTwo := parseTwoPartID(id) - - if parsedPartOne != "foo" { - t.Fatalf("Expected parsed part one foo, actual: %s", parsedPartOne) - } - - if parsedPartTwo != "bar" { - t.Fatalf("Expected parsed part two bar, actual: %s", parsedPartTwo) - } -} diff --git a/builtin/providers/gitlab/config.go b/builtin/providers/gitlab/config.go deleted file mode 100644 index 288f7ba6a..000000000 --- a/builtin/providers/gitlab/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package gitlab - -import ( - "github.com/xanzy/go-gitlab" -) - -// Config is per-provider, specifies where to connect to gitlab -type Config struct { - Token string - BaseURL string -} - -// Client returns a *gitlab.Client 
to interact with the configured gitlab instance -func (c *Config) Client() (interface{}, error) { - client := gitlab.NewClient(nil, c.Token) - if c.BaseURL != "" { - err := client.SetBaseURL(c.BaseURL) - if err != nil { - // The BaseURL supplied wasn't valid, bail. - return nil, err - } - } - - // Test the credentials by checking we can get information about the authenticated user. - _, _, err := client.Users.CurrentUser() - if err != nil { - return nil, err - } - - return client, nil -} diff --git a/builtin/providers/gitlab/provider.go b/builtin/providers/gitlab/provider.go deleted file mode 100644 index 1af6c658b..000000000 --- a/builtin/providers/gitlab/provider.go +++ /dev/null @@ -1,55 +0,0 @@ -package gitlab - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - - // The actual provider - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("GITLAB_TOKEN", nil), - Description: descriptions["token"], - }, - "base_url": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GITLAB_BASE_URL", ""), - Description: descriptions["base_url"], - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "gitlab_group": resourceGitlabGroup(), - "gitlab_project": resourceGitlabProject(), - "gitlab_project_hook": resourceGitlabProjectHook(), - "gitlab_deploy_key": resourceGitlabDeployKey(), - }, - - ConfigureFunc: providerConfigure, - } -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "token": "The OAuth token used to connect to GitLab.", - - "base_url": "The GitLab Base API URL", - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Token: d.Get("token").(string), - BaseURL: 
d.Get("base_url").(string), - } - - return config.Client() -} diff --git a/builtin/providers/gitlab/provider_test.go b/builtin/providers/gitlab/provider_test.go deleted file mode 100644 index a28eddb8d..000000000 --- a/builtin/providers/gitlab/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package gitlab - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "gitlab": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("GITLAB_TOKEN"); v == "" { - t.Fatal("GITLAB_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/gitlab/resource_gitlab_deploy_key.go b/builtin/providers/gitlab/resource_gitlab_deploy_key.go deleted file mode 100644 index 8da54fb8e..000000000 --- a/builtin/providers/gitlab/resource_gitlab_deploy_key.go +++ /dev/null @@ -1,107 +0,0 @@ -package gitlab - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - gitlab "github.com/xanzy/go-gitlab" -) - -func resourceGitlabDeployKey() *schema.Resource { - return &schema.Resource{ - Create: resourceGitlabDeployKeyCreate, - Read: resourceGitlabDeployKeyRead, - Delete: resourceGitlabDeployKeyDelete, - - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "title": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - 
"can_push": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - }, - } -} - -func resourceGitlabDeployKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - options := &gitlab.AddDeployKeyOptions{ - Title: gitlab.String(d.Get("title").(string)), - Key: gitlab.String(d.Get("key").(string)), - CanPush: gitlab.Bool(d.Get("can_push").(bool)), - } - - log.Printf("[DEBUG] create gitlab deployment key %s", *options.Title) - - deployKey, _, err := client.DeployKeys.AddDeployKey(project, options) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("%d", deployKey.ID)) - - return resourceGitlabDeployKeyRead(d, meta) -} - -func resourceGitlabDeployKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - deployKeyID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] read gitlab deploy key %s/%d", project, deployKeyID) - - deployKey, response, err := client.DeployKeys.GetDeployKey(project, deployKeyID) - if err != nil { - if response.StatusCode == 404 { - log.Printf("[WARN] removing deploy key %d from state because it no longer exists in gitlab", deployKeyID) - d.SetId("") - return nil - } - - return err - } - - d.Set("title", deployKey.Title) - d.Set("key", deployKey.Key) - d.Set("can_push", deployKey.CanPush) - return nil -} - -func resourceGitlabDeployKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - deployKeyID, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] Delete gitlab deploy key %s", d.Id()) - - response, err := client.DeployKeys.DeleteDeployKey(project, deployKeyID) - - // HTTP 204 is success with no body - if response.StatusCode == 204 { - return nil - } - return err -} diff --git 
a/builtin/providers/gitlab/resource_gitlab_deploy_key_test.go b/builtin/providers/gitlab/resource_gitlab_deploy_key_test.go deleted file mode 100644 index 44a1841c0..000000000 --- a/builtin/providers/gitlab/resource_gitlab_deploy_key_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package gitlab - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-gitlab" -) - -func TestAccGitlabDeployKey_basic(t *testing.T) { - var deployKey gitlab.DeployKey - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGitlabDeployKeyDestroy, - Steps: []resource.TestStep{ - // Create a project and deployKey with default options - { - Config: testAccGitlabDeployKeyConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabDeployKeyExists("gitlab_deploy_key.foo", &deployKey), - testAccCheckGitlabDeployKeyAttributes(&deployKey, &testAccGitlabDeployKeyExpectedAttributes{ - Title: fmt.Sprintf("deployKey-%d", rInt), - Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCj13ozEBZ0s4el4k6mYqoyIKKKMh9hHY0sAYqSPXs2zGuVFZss1P8TPuwmdXVjHR7TiRXwC49zDrkyWJgiufggYJ1VilOohcMOODwZEJz+E5q4GCfHuh90UEh0nl8B2R0Uoy0LPeg93uZzy0hlHApsxRf/XZJz/1ytkZvCtxdllxfImCVxJReMeRVEqFCTCvy3YuJn0bce7ulcTFRvtgWOpQsr6GDK8YkcCCv2eZthVlrEwy6DEpAKTRiRLGgUj4dPO0MmO4cE2qD4ualY01PhNORJ8Q++I+EtkGt/VALkecwFuBkl18/gy+yxNJHpKc/8WVVinDeFrd/HhiY9yU0d richardc@tamborine.example.1", - }), - ), - }, - // Update the project deployKey to toggle all the values to their inverse - { - Config: testAccGitlabDeployKeyUpdateConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabDeployKeyExists("gitlab_deploy_key.foo", &deployKey), - testAccCheckGitlabDeployKeyAttributes(&deployKey, &testAccGitlabDeployKeyExpectedAttributes{ - Title: 
fmt.Sprintf("modifiedDeployKey-%d", rInt), - Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6pSke2kb7YBjo65xDKegbOQsAtnMupRcFxXji7L1iXivGwORq0qpC2xzbhez5jk1WgPckEaNv2/Bz0uEW6oSIXw1KT1VN2WzEUfQCbpNyZPtn4iV3nyl6VQW/Nd1SrxiFJtH1H4vu+eCo4McMXTjuBBD06fiJNrHaSw734LjQgqtXWJuVym9qS5MqraZB7wDwTQwSM6kslL7KTgmo3ONsTLdb2zZhv6CS+dcFKinQo7/ttTmeMuXGbPOVuNfT/bePVIN1MF1TislHa2L2dZdGeoynNJT4fVPjA2Xl6eHWh4ySbvnfPznASsjBhP0n/QKprYJ/5fQShdBYBcuQiIMd richardc@tamborine.example.2", - }), - ), - }, - // Update the project deployKey to toggle the options back - { - Config: testAccGitlabDeployKeyConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabDeployKeyExists("gitlab_deploy_key.foo", &deployKey), - testAccCheckGitlabDeployKeyAttributes(&deployKey, &testAccGitlabDeployKeyExpectedAttributes{ - Title: fmt.Sprintf("deployKey-%d", rInt), - Key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCj13ozEBZ0s4el4k6mYqoyIKKKMh9hHY0sAYqSPXs2zGuVFZss1P8TPuwmdXVjHR7TiRXwC49zDrkyWJgiufggYJ1VilOohcMOODwZEJz+E5q4GCfHuh90UEh0nl8B2R0Uoy0LPeg93uZzy0hlHApsxRf/XZJz/1ytkZvCtxdllxfImCVxJReMeRVEqFCTCvy3YuJn0bce7ulcTFRvtgWOpQsr6GDK8YkcCCv2eZthVlrEwy6DEpAKTRiRLGgUj4dPO0MmO4cE2qD4ualY01PhNORJ8Q++I+EtkGt/VALkecwFuBkl18/gy+yxNJHpKc/8WVVinDeFrd/HhiY9yU0d richardc@tamborine.example.1", - }), - ), - }, - }, - }) -} - -func testAccCheckGitlabDeployKeyExists(n string, deployKey *gitlab.DeployKey) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - deployKeyID, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - repoName := rs.Primary.Attributes["project"] - if repoName == "" { - return fmt.Errorf("No project ID is set") - } - conn := testAccProvider.Meta().(*gitlab.Client) - - gotDeployKey, _, err := conn.DeployKeys.GetDeployKey(repoName, deployKeyID) - if err != nil { - return err - } - *deployKey = *gotDeployKey - return nil - } -} - -type 
testAccGitlabDeployKeyExpectedAttributes struct { - Title string - Key string - CanPush bool -} - -func testAccCheckGitlabDeployKeyAttributes(deployKey *gitlab.DeployKey, want *testAccGitlabDeployKeyExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - if deployKey.Title != want.Title { - return fmt.Errorf("got title %q; want %q", deployKey.Title, want.Title) - } - - if deployKey.Key != want.Key { - return fmt.Errorf("got key %q; want %q", deployKey.Key, want.Key) - } - - if deployKey.CanPush != nil && *deployKey.CanPush != want.CanPush { - return fmt.Errorf("got can_push %t; want %t", *deployKey.CanPush, want.CanPush) - } - - return nil - } -} - -func testAccCheckGitlabDeployKeyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*gitlab.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "gitlab_project" { - continue - } - deployKeyID, err := strconv.Atoi(rs.Primary.ID) - project := rs.Primary.Attributes["project"] - - gotDeployKey, resp, err := conn.DeployKeys.GetDeployKey(project, deployKeyID) - if err == nil { - if gotDeployKey != nil && fmt.Sprintf("%d", gotDeployKey.ID) == rs.Primary.ID { - return fmt.Errorf("Deploy key still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGitlabDeployKeyConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - -resource "gitlab_deploy_key" "foo" { - project = "${gitlab_project.foo.id}" - title = "deployKey-%d" - key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCj13ozEBZ0s4el4k6mYqoyIKKKMh9hHY0sAYqSPXs2zGuVFZss1P8TPuwmdXVjHR7TiRXwC49zDrkyWJgiufggYJ1VilOohcMOODwZEJz+E5q4GCfHuh90UEh0nl8B2R0Uoy0LPeg93uZzy0hlHApsxRf/XZJz/1ytkZvCtxdllxfImCVxJReMeRVEqFCTCvy3YuJn0bce7ulcTFRvtgWOpQsr6GDK8YkcCCv2eZthVlrEwy6DEpAKTRiRLGgUj4dPO0MmO4cE2qD4ualY01PhNORJ8Q++I+EtkGt/VALkecwFuBkl18/gy+yxNJHpKc/8WVVinDeFrd/HhiY9yU0d richardc@tamborine.example.1" -} - `, rInt, rInt) -} - -func testAccGitlabDeployKeyUpdateConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - -resource "gitlab_deploy_key" "foo" { - project = "${gitlab_project.foo.id}" - title = "modifiedDeployKey-%d" - key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6pSke2kb7YBjo65xDKegbOQsAtnMupRcFxXji7L1iXivGwORq0qpC2xzbhez5jk1WgPckEaNv2/Bz0uEW6oSIXw1KT1VN2WzEUfQCbpNyZPtn4iV3nyl6VQW/Nd1SrxiFJtH1H4vu+eCo4McMXTjuBBD06fiJNrHaSw734LjQgqtXWJuVym9qS5MqraZB7wDwTQwSM6kslL7KTgmo3ONsTLdb2zZhv6CS+dcFKinQo7/ttTmeMuXGbPOVuNfT/bePVIN1MF1TislHa2L2dZdGeoynNJT4fVPjA2Xl6eHWh4ySbvnfPznASsjBhP0n/QKprYJ/5fQShdBYBcuQiIMd richardc@tamborine.example.2" -} - `, rInt, rInt) -} diff --git a/builtin/providers/gitlab/resource_gitlab_group.go b/builtin/providers/gitlab/resource_gitlab_group.go deleted file mode 100644 index fcc5ef5da..000000000 --- a/builtin/providers/gitlab/resource_gitlab_group.go +++ /dev/null @@ -1,153 +0,0 @@ -package gitlab - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - gitlab "github.com/xanzy/go-gitlab" -) - -func resourceGitlabGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceGitlabGroupCreate, - Read: resourceGitlabGroupRead, - Update: resourceGitlabGroupUpdate, - Delete: resourceGitlabGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": { 
- Type: schema.TypeString, - Required: true, - }, - "path": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "lfs_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "request_access_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "visibility_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"private", "internal", "public"}, true), - Default: "private", - }, - }, - } -} - -func resourceGitlabGroupCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - options := &gitlab.CreateGroupOptions{ - Name: gitlab.String(d.Get("name").(string)), - LFSEnabled: gitlab.Bool(d.Get("lfs_enabled").(bool)), - RequestAccessEnabled: gitlab.Bool(d.Get("request_access_enabled").(bool)), - } - - if v, ok := d.GetOk("path"); ok { - options.Path = gitlab.String(v.(string)) - } - - if v, ok := d.GetOk("description"); ok { - options.Description = gitlab.String(v.(string)) - } - - if v, ok := d.GetOk("visibility_level"); ok { - options.VisibilityLevel = stringToVisibilityLevel(v.(string)) - } - - log.Printf("[DEBUG] create gitlab group %q", options.Name) - - group, _, err := client.Groups.CreateGroup(options) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("%d", group.ID)) - - return resourceGitlabGroupRead(d, meta) -} - -func resourceGitlabGroupRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - log.Printf("[DEBUG] read gitlab group %s", d.Id()) - - group, response, err := client.Groups.GetGroup(d.Id()) - if err != nil { - if response.StatusCode == 404 { - log.Printf("[WARN] removing group %s from state because it no longer exists in gitlab", d.Id()) - d.SetId("") - return nil - } - - return err - } - - d.Set("name", group.Name) - d.Set("path", group.Path) - d.Set("description", group.Description) - d.Set("lfs_enabled", 
group.LFSEnabled) - d.Set("request_access_enabled", group.RequestAccessEnabled) - - return nil -} - -func resourceGitlabGroupUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - - options := &gitlab.UpdateGroupOptions{} - - if d.HasChange("name") { - options.Name = gitlab.String(d.Get("name").(string)) - } - - if d.HasChange("path") { - options.Path = gitlab.String(d.Get("path").(string)) - } - - if d.HasChange("description") { - options.Description = gitlab.String(d.Get("description").(string)) - } - - if d.HasChange("lfs_enabled") { - options.LFSEnabled = gitlab.Bool(d.Get("lfs_enabled").(bool)) - } - - if d.HasChange("request_access_enabled") { - options.RequestAccessEnabled = gitlab.Bool(d.Get("request_access_enabled").(bool)) - } - - if d.HasChange("visibility_level") { - options.VisibilityLevel = stringToVisibilityLevel(d.Get("visibility_level").(string)) - } - - log.Printf("[DEBUG] update gitlab group %s", d.Id()) - - _, _, err := client.Groups.UpdateGroup(d.Id(), options) - if err != nil { - return err - } - - return resourceGitlabGroupRead(d, meta) -} - -func resourceGitlabGroupDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - log.Printf("[DEBUG] Delete gitlab group %s", d.Id()) - - _, err := client.Groups.DeleteGroup(d.Id()) - return err -} diff --git a/builtin/providers/gitlab/resource_gitlab_group_test.go b/builtin/providers/gitlab/resource_gitlab_group_test.go deleted file mode 100644 index 6fdc07dee..000000000 --- a/builtin/providers/gitlab/resource_gitlab_group_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package gitlab - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-gitlab" -) - -func TestAccGitlabGroup_basic(t *testing.T) { - var group gitlab.Group - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGitlabGroupDestroy, - Steps: []resource.TestStep{ - // Create a group - { - Config: testAccGitlabGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabGroupExists("gitlab_group.foo", &group), - testAccCheckGitlabGroupAttributes(&group, &testAccGitlabGroupExpectedAttributes{ - Name: fmt.Sprintf("foo-name-%d", rInt), - Path: fmt.Sprintf("foo-path-%d", rInt), - Description: "Terraform acceptance tests", - LFSEnabled: true, - }), - ), - }, - // Update the group to change the description - { - Config: testAccGitlabGroupUpdateConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabGroupExists("gitlab_group.foo", &group), - testAccCheckGitlabGroupAttributes(&group, &testAccGitlabGroupExpectedAttributes{ - Name: fmt.Sprintf("bar-name-%d", rInt), - Path: fmt.Sprintf("bar-path-%d", rInt), - Description: "Terraform acceptance tests! Updated description", - RequestAccessEnabled: true, - }), - ), - }, - // Update the group to put the anem and description back - { - Config: testAccGitlabGroupConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabGroupExists("gitlab_group.foo", &group), - testAccCheckGitlabGroupAttributes(&group, &testAccGitlabGroupExpectedAttributes{ - Name: fmt.Sprintf("foo-name-%d", rInt), - Path: fmt.Sprintf("foo-path-%d", rInt), - Description: "Terraform acceptance tests", - LFSEnabled: true, - }), - ), - }, - }, - }) -} - -func testAccCheckGitlabGroupExists(n string, group *gitlab.Group) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - groupID := rs.Primary.ID - if groupID == "" { - return fmt.Errorf("No group ID is set") - } - conn := testAccProvider.Meta().(*gitlab.Client) - - gotGroup, _, err := conn.Groups.GetGroup(groupID) - if err != nil { - return err - } - *group = *gotGroup 
- return nil - } -} - -type testAccGitlabGroupExpectedAttributes struct { - Name string - Path string - Description string - LFSEnabled bool - RequestAccessEnabled bool -} - -func testAccCheckGitlabGroupAttributes(group *gitlab.Group, want *testAccGitlabGroupExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - if group.Name != want.Name { - return fmt.Errorf("got repo %q; want %q", group.Name, want.Name) - } - - if group.Path != want.Path { - return fmt.Errorf("got path %q; want %q", group.Path, want.Path) - } - - if group.Description != want.Description { - return fmt.Errorf("got description %q; want %q", group.Description, want.Description) - } - - if group.LFSEnabled != want.LFSEnabled { - return fmt.Errorf("got lfs_enabled %t; want %t", group.LFSEnabled, want.LFSEnabled) - } - - if group.RequestAccessEnabled != want.RequestAccessEnabled { - return fmt.Errorf("got request_access_enabled %t; want %t", group.RequestAccessEnabled, want.RequestAccessEnabled) - } - - return nil - } -} - -func testAccCheckGitlabGroupDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*gitlab.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "gitlab_group" { - continue - } - - group, resp, err := conn.Groups.GetGroup(rs.Primary.ID) - if err == nil { - if group != nil && fmt.Sprintf("%d", group.ID) == rs.Primary.ID { - return fmt.Errorf("Group still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGitlabGroupConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_group" "foo" { - name = "foo-name-%d" - path = "foo-path-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - `, rInt, rInt) -} - -func testAccGitlabGroupUpdateConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_group" "foo" { - name = 
"bar-name-%d" - path = "bar-path-%d" - description = "Terraform acceptance tests! Updated description" - lfs_enabled = false - request_access_enabled = true - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - `, rInt, rInt) -} diff --git a/builtin/providers/gitlab/resource_gitlab_project.go b/builtin/providers/gitlab/resource_gitlab_project.go deleted file mode 100644 index 91989a3cf..000000000 --- a/builtin/providers/gitlab/resource_gitlab_project.go +++ /dev/null @@ -1,200 +0,0 @@ -package gitlab - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - gitlab "github.com/xanzy/go-gitlab" -) - -func resourceGitlabProject() *schema.Resource { - return &schema.Resource{ - Create: resourceGitlabProjectCreate, - Read: resourceGitlabProjectRead, - Update: resourceGitlabProjectUpdate, - Delete: resourceGitlabProjectDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace_id": { - Type: schema.TypeInt, - Optional: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "default_branch": { - Type: schema.TypeString, - Optional: true, - }, - "issues_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "merge_requests_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "wiki_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "snippets_enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "visibility_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"private", "internal", "public"}, true), - Default: "private", - }, - - "ssh_url_to_repo": { - Type: schema.TypeString, - Computed: true, - }, - "http_url_to_repo": { - Type: schema.TypeString, - Computed: true, - }, - "web_url": { - 
Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceGitlabProjectSetToState(d *schema.ResourceData, project *gitlab.Project) { - d.Set("name", project.Name) - d.Set("description", project.Description) - d.Set("default_branch", project.DefaultBranch) - d.Set("issues_enabled", project.IssuesEnabled) - d.Set("merge_requests_enabled", project.MergeRequestsEnabled) - d.Set("wiki_enabled", project.WikiEnabled) - d.Set("snippets_enabled", project.SnippetsEnabled) - d.Set("visibility_level", visibilityLevelToString(project.VisibilityLevel)) - - d.Set("ssh_url_to_repo", project.SSHURLToRepo) - d.Set("http_url_to_repo", project.HTTPURLToRepo) - d.Set("web_url", project.WebURL) -} - -func resourceGitlabProjectCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - options := &gitlab.CreateProjectOptions{ - Name: gitlab.String(d.Get("name").(string)), - IssuesEnabled: gitlab.Bool(d.Get("issues_enabled").(bool)), - MergeRequestsEnabled: gitlab.Bool(d.Get("merge_requests_enabled").(bool)), - WikiEnabled: gitlab.Bool(d.Get("wiki_enabled").(bool)), - SnippetsEnabled: gitlab.Bool(d.Get("snippets_enabled").(bool)), - } - - if v, ok := d.GetOk("namespace_id"); ok { - options.NamespaceID = gitlab.Int(v.(int)) - } - - if v, ok := d.GetOk("description"); ok { - options.Description = gitlab.String(v.(string)) - } - - if v, ok := d.GetOk("visibility_level"); ok { - options.VisibilityLevel = stringToVisibilityLevel(v.(string)) - } - - log.Printf("[DEBUG] create gitlab project %q", options.Name) - - project, _, err := client.Projects.CreateProject(options) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("%d", project.ID)) - - return resourceGitlabProjectRead(d, meta) -} - -func resourceGitlabProjectRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - log.Printf("[DEBUG] read gitlab project %s", d.Id()) - - project, response, err := client.Projects.GetProject(d.Id()) - if err != nil 
{ - if response.StatusCode == 404 { - log.Printf("[WARN] removing project %s from state because it no longer exists in gitlab", d.Id()) - d.SetId("") - return nil - } - - return err - } - - resourceGitlabProjectSetToState(d, project) - return nil -} - -func resourceGitlabProjectUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - - options := &gitlab.EditProjectOptions{} - - if d.HasChange("name") { - options.Name = gitlab.String(d.Get("name").(string)) - } - - if d.HasChange("description") { - options.Description = gitlab.String(d.Get("description").(string)) - } - - if d.HasChange("default_branch") { - options.DefaultBranch = gitlab.String(d.Get("description").(string)) - } - - if d.HasChange("visibility_level") { - options.VisibilityLevel = stringToVisibilityLevel(d.Get("visibility_level").(string)) - } - - if d.HasChange("issues_enabled") { - options.IssuesEnabled = gitlab.Bool(d.Get("issues_enabled").(bool)) - } - - if d.HasChange("merge_requests_enabled") { - options.MergeRequestsEnabled = gitlab.Bool(d.Get("merge_requests_enabled").(bool)) - } - - if d.HasChange("wiki_enabled") { - options.WikiEnabled = gitlab.Bool(d.Get("wiki_enabled").(bool)) - } - - if d.HasChange("snippets_enabled") { - options.SnippetsEnabled = gitlab.Bool(d.Get("snippets_enabled").(bool)) - } - - log.Printf("[DEBUG] update gitlab project %s", d.Id()) - - _, _, err := client.Projects.EditProject(d.Id(), options) - if err != nil { - return err - } - - return resourceGitlabProjectRead(d, meta) -} - -func resourceGitlabProjectDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - log.Printf("[DEBUG] Delete gitlab project %s", d.Id()) - - _, err := client.Projects.DeleteProject(d.Id()) - return err -} diff --git a/builtin/providers/gitlab/resource_gitlab_project_hook.go b/builtin/providers/gitlab/resource_gitlab_project_hook.go deleted file mode 100644 index aa3d9a9ce..000000000 --- 
a/builtin/providers/gitlab/resource_gitlab_project_hook.go +++ /dev/null @@ -1,192 +0,0 @@ -package gitlab - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - gitlab "github.com/xanzy/go-gitlab" -) - -func resourceGitlabProjectHook() *schema.Resource { - return &schema.Resource{ - Create: resourceGitlabProjectHookCreate, - Read: resourceGitlabProjectHookRead, - Update: resourceGitlabProjectHookUpdate, - Delete: resourceGitlabProjectHookDelete, - - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - }, - "url": { - Type: schema.TypeString, - Required: true, - }, - "token": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - "push_events": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "issues_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "merge_requests_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "tag_push_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "note_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "build_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "pipeline_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "wiki_page_events": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "enable_ssl_verification": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func resourceGitlabProjectHookCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - options := &gitlab.AddProjectHookOptions{ - URL: gitlab.String(d.Get("url").(string)), - PushEvents: gitlab.Bool(d.Get("push_events").(bool)), - IssuesEvents: gitlab.Bool(d.Get("issues_events").(bool)), - MergeRequestsEvents: gitlab.Bool(d.Get("merge_requests_events").(bool)), - 
TagPushEvents: gitlab.Bool(d.Get("tag_push_events").(bool)), - NoteEvents: gitlab.Bool(d.Get("note_events").(bool)), - BuildEvents: gitlab.Bool(d.Get("build_events").(bool)), - PipelineEvents: gitlab.Bool(d.Get("pipeline_events").(bool)), - WikiPageEvents: gitlab.Bool(d.Get("wiki_page_events").(bool)), - EnableSSLVerification: gitlab.Bool(d.Get("enable_ssl_verification").(bool)), - } - - if v, ok := d.GetOk("token"); ok { - options.Token = gitlab.String(v.(string)) - } - - log.Printf("[DEBUG] create gitlab project hook %q", options.URL) - - hook, _, err := client.Projects.AddProjectHook(project, options) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("%d", hook.ID)) - - return resourceGitlabProjectHookRead(d, meta) -} - -func resourceGitlabProjectHookRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - hookId, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] read gitlab project hook %s/%d", project, hookId) - - hook, response, err := client.Projects.GetProjectHook(project, hookId) - if err != nil { - if response.StatusCode == 404 { - log.Printf("[WARN] removing project hook %d from state because it no longer exists in gitlab", hookId) - d.SetId("") - return nil - } - - return err - } - - d.Set("url", hook.URL) - d.Set("push_events", hook.PushEvents) - d.Set("issues_events", hook.IssuesEvents) - d.Set("merge_requests_events", hook.MergeRequestsEvents) - d.Set("tag_push_events", hook.TagPushEvents) - d.Set("note_events", hook.NoteEvents) - d.Set("build_events", hook.BuildEvents) - d.Set("pipeline_events", hook.PipelineEvents) - d.Set("wiki_page_events", hook.WikiPageEvents) - d.Set("enable_ssl_verification", hook.EnableSSLVerification) - return nil -} - -func resourceGitlabProjectHookUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - hookId, err := 
strconv.Atoi(d.Id()) - if err != nil { - return err - } - options := &gitlab.EditProjectHookOptions{ - URL: gitlab.String(d.Get("url").(string)), - PushEvents: gitlab.Bool(d.Get("push_events").(bool)), - IssuesEvents: gitlab.Bool(d.Get("issues_events").(bool)), - MergeRequestsEvents: gitlab.Bool(d.Get("merge_requests_events").(bool)), - TagPushEvents: gitlab.Bool(d.Get("tag_push_events").(bool)), - NoteEvents: gitlab.Bool(d.Get("note_events").(bool)), - BuildEvents: gitlab.Bool(d.Get("build_events").(bool)), - PipelineEvents: gitlab.Bool(d.Get("pipeline_events").(bool)), - WikiPageEvents: gitlab.Bool(d.Get("wiki_page_events").(bool)), - EnableSSLVerification: gitlab.Bool(d.Get("enable_ssl_verification").(bool)), - } - - if d.HasChange("token") { - options.Token = gitlab.String(d.Get("token").(string)) - } - - log.Printf("[DEBUG] update gitlab project hook %s", d.Id()) - - _, _, err = client.Projects.EditProjectHook(project, hookId, options) - if err != nil { - return err - } - - return resourceGitlabProjectHookRead(d, meta) -} - -func resourceGitlabProjectHookDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gitlab.Client) - project := d.Get("project").(string) - hookId, err := strconv.Atoi(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] Delete gitlab project hook %s", d.Id()) - - _, err = client.Projects.DeleteProjectHook(project, hookId) - return err -} diff --git a/builtin/providers/gitlab/resource_gitlab_project_hook_test.go b/builtin/providers/gitlab/resource_gitlab_project_hook_test.go deleted file mode 100644 index cd0c317cc..000000000 --- a/builtin/providers/gitlab/resource_gitlab_project_hook_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gitlab - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-gitlab" -) - -func TestAccGitlabProjectHook_basic(t 
*testing.T) { - var hook gitlab.ProjectHook - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGitlabProjectHookDestroy, - Steps: []resource.TestStep{ - // Create a project and hook with default options - { - Config: testAccGitlabProjectHookConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectHookExists("gitlab_project_hook.foo", &hook), - testAccCheckGitlabProjectHookAttributes(&hook, &testAccGitlabProjectHookExpectedAttributes{ - URL: fmt.Sprintf("https://example.com/hook-%d", rInt), - PushEvents: true, - EnableSSLVerification: true, - }), - ), - }, - // Update the project hook to toggle all the values to their inverse - { - Config: testAccGitlabProjectHookUpdateConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectHookExists("gitlab_project_hook.foo", &hook), - testAccCheckGitlabProjectHookAttributes(&hook, &testAccGitlabProjectHookExpectedAttributes{ - URL: fmt.Sprintf("https://example.com/hook-%d", rInt), - PushEvents: false, - IssuesEvents: true, - MergeRequestsEvents: true, - TagPushEvents: true, - NoteEvents: true, - BuildEvents: true, - PipelineEvents: true, - WikiPageEvents: true, - EnableSSLVerification: false, - }), - ), - }, - // Update the project hook to toggle the options back - { - Config: testAccGitlabProjectHookConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectHookExists("gitlab_project_hook.foo", &hook), - testAccCheckGitlabProjectHookAttributes(&hook, &testAccGitlabProjectHookExpectedAttributes{ - URL: fmt.Sprintf("https://example.com/hook-%d", rInt), - PushEvents: true, - EnableSSLVerification: true, - }), - ), - }, - }, - }) -} - -func testAccCheckGitlabProjectHookExists(n string, hook *gitlab.ProjectHook) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return 
fmt.Errorf("Not Found: %s", n) - } - - hookID, err := strconv.Atoi(rs.Primary.ID) - if err != nil { - return err - } - repoName := rs.Primary.Attributes["project"] - if repoName == "" { - return fmt.Errorf("No project ID is set") - } - conn := testAccProvider.Meta().(*gitlab.Client) - - gotHook, _, err := conn.Projects.GetProjectHook(repoName, hookID) - if err != nil { - return err - } - *hook = *gotHook - return nil - } -} - -type testAccGitlabProjectHookExpectedAttributes struct { - URL string - PushEvents bool - IssuesEvents bool - MergeRequestsEvents bool - TagPushEvents bool - NoteEvents bool - BuildEvents bool - PipelineEvents bool - WikiPageEvents bool - EnableSSLVerification bool -} - -func testAccCheckGitlabProjectHookAttributes(hook *gitlab.ProjectHook, want *testAccGitlabProjectHookExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - if hook.URL != want.URL { - return fmt.Errorf("got url %q; want %q", hook.URL, want.URL) - } - - if hook.EnableSSLVerification != want.EnableSSLVerification { - return fmt.Errorf("got enable_ssl_verification %t; want %t", hook.EnableSSLVerification, want.EnableSSLVerification) - } - - if hook.PushEvents != want.PushEvents { - return fmt.Errorf("got push_events %t; want %t", hook.PushEvents, want.PushEvents) - } - - if hook.IssuesEvents != want.IssuesEvents { - return fmt.Errorf("got issues_events %t; want %t", hook.IssuesEvents, want.IssuesEvents) - } - - if hook.MergeRequestsEvents != want.MergeRequestsEvents { - return fmt.Errorf("got merge_requests_events %t; want %t", hook.MergeRequestsEvents, want.MergeRequestsEvents) - } - - if hook.TagPushEvents != want.TagPushEvents { - return fmt.Errorf("got tag_push_events %t; want %t", hook.TagPushEvents, want.TagPushEvents) - } - - if hook.NoteEvents != want.NoteEvents { - return fmt.Errorf("got note_events %t; want %t", hook.NoteEvents, want.NoteEvents) - } - - if hook.BuildEvents != want.BuildEvents { - return fmt.Errorf("got build_events %t; 
want %t", hook.BuildEvents, want.BuildEvents) - } - - if hook.PipelineEvents != want.PipelineEvents { - return fmt.Errorf("got pipeline_events %t; want %t", hook.PipelineEvents, want.PipelineEvents) - } - - if hook.WikiPageEvents != want.WikiPageEvents { - return fmt.Errorf("got wiki_page_events %t; want %t", hook.WikiPageEvents, want.WikiPageEvents) - } - - return nil - } -} - -func testAccCheckGitlabProjectHookDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*gitlab.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "gitlab_project" { - continue - } - - gotRepo, resp, err := conn.Projects.GetProject(rs.Primary.ID) - if err == nil { - if gotRepo != nil && fmt.Sprintf("%d", gotRepo.ID) == rs.Primary.ID { - return fmt.Errorf("Repository still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGitlabProjectHookConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - -resource "gitlab_project_hook" "foo" { - project = "${gitlab_project.foo.id}" - url = "https://example.com/hook-%d" -} - `, rInt, rInt) -} - -func testAccGitlabProjectHookUpdateConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - -resource "gitlab_project_hook" "foo" { - project = "${gitlab_project.foo.id}" - url = "https://example.com/hook-%d" - enable_ssl_verification = false - push_events = false - issues_events = true - merge_requests_events = true - tag_push_events = true - note_events = true - build_events = true - pipeline_events = true - wiki_page_events = true -} - `, 
rInt, rInt) -} diff --git a/builtin/providers/gitlab/resource_gitlab_project_test.go b/builtin/providers/gitlab/resource_gitlab_project_test.go deleted file mode 100644 index 7ddc0b642..000000000 --- a/builtin/providers/gitlab/resource_gitlab_project_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package gitlab - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/xanzy/go-gitlab" -) - -func TestAccGitlabProject_basic(t *testing.T) { - var project gitlab.Project - rInt := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGitlabProjectDestroy, - Steps: []resource.TestStep{ - // Create a project with all the features on - { - Config: testAccGitlabProjectConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectExists("gitlab_project.foo", &project), - testAccCheckGitlabProjectAttributes(&project, &testAccGitlabProjectExpectedAttributes{ - Name: fmt.Sprintf("foo-%d", rInt), - Description: "Terraform acceptance tests", - IssuesEnabled: true, - MergeRequestsEnabled: true, - WikiEnabled: true, - SnippetsEnabled: true, - VisibilityLevel: 20, - }), - ), - }, - // Update the project to turn the features off - { - Config: testAccGitlabProjectUpdateConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectExists("gitlab_project.foo", &project), - testAccCheckGitlabProjectAttributes(&project, &testAccGitlabProjectExpectedAttributes{ - Name: fmt.Sprintf("foo-%d", rInt), - Description: "Terraform acceptance tests!", - VisibilityLevel: 20, - }), - ), - }, - //Update the project to turn the features on again - { - Config: testAccGitlabProjectConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckGitlabProjectExists("gitlab_project.foo", &project), - 
testAccCheckGitlabProjectAttributes(&project, &testAccGitlabProjectExpectedAttributes{ - Name: fmt.Sprintf("foo-%d", rInt), - Description: "Terraform acceptance tests", - IssuesEnabled: true, - MergeRequestsEnabled: true, - WikiEnabled: true, - SnippetsEnabled: true, - VisibilityLevel: 20, - }), - ), - }, - }, - }) -} - -func testAccCheckGitlabProjectExists(n string, project *gitlab.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not Found: %s", n) - } - - repoName := rs.Primary.ID - if repoName == "" { - return fmt.Errorf("No project ID is set") - } - conn := testAccProvider.Meta().(*gitlab.Client) - - gotProject, _, err := conn.Projects.GetProject(repoName) - if err != nil { - return err - } - *project = *gotProject - return nil - } -} - -type testAccGitlabProjectExpectedAttributes struct { - Name string - Description string - DefaultBranch string - IssuesEnabled bool - MergeRequestsEnabled bool - WikiEnabled bool - SnippetsEnabled bool - VisibilityLevel gitlab.VisibilityLevelValue -} - -func testAccCheckGitlabProjectAttributes(project *gitlab.Project, want *testAccGitlabProjectExpectedAttributes) resource.TestCheckFunc { - return func(s *terraform.State) error { - if project.Name != want.Name { - return fmt.Errorf("got repo %q; want %q", project.Name, want.Name) - } - if project.Description != want.Description { - return fmt.Errorf("got description %q; want %q", project.Description, want.Description) - } - - if project.DefaultBranch != want.DefaultBranch { - return fmt.Errorf("got default_branch %q; want %q", project.DefaultBranch, want.DefaultBranch) - } - - if project.IssuesEnabled != want.IssuesEnabled { - return fmt.Errorf("got issues_enabled %t; want %t", project.IssuesEnabled, want.IssuesEnabled) - } - - if project.MergeRequestsEnabled != want.MergeRequestsEnabled { - return fmt.Errorf("got merge_requests_enabled %t; want %t", project.MergeRequestsEnabled, 
want.MergeRequestsEnabled) - } - - if project.WikiEnabled != want.WikiEnabled { - return fmt.Errorf("got wiki_enabled %t; want %t", project.WikiEnabled, want.WikiEnabled) - } - - if project.SnippetsEnabled != want.SnippetsEnabled { - return fmt.Errorf("got snippets_enabled %t; want %t", project.SnippetsEnabled, want.SnippetsEnabled) - } - - if project.VisibilityLevel != want.VisibilityLevel { - return fmt.Errorf("got default branch %q; want %q", project.VisibilityLevel, want.VisibilityLevel) - } - - return nil - } -} - -func testAccCheckGitlabProjectDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*gitlab.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "gitlab_project" { - continue - } - - gotRepo, resp, err := conn.Projects.GetProject(rs.Primary.ID) - if err == nil { - if gotRepo != nil && fmt.Sprintf("%d", gotRepo.ID) == rs.Primary.ID { - return fmt.Errorf("Repository still exists") - } - } - if resp.StatusCode != 404 { - return err - } - return nil - } - return nil -} - -func testAccGitlabProjectConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests" - - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" -} - `, rInt) -} - -func testAccGitlabProjectUpdateConfig(rInt int) string { - return fmt.Sprintf(` -resource "gitlab_project" "foo" { - name = "foo-%d" - description = "Terraform acceptance tests!" 
- - # So that acceptance tests can be run in a gitlab organization - # with no billing - visibility_level = "public" - - issues_enabled = false - merge_requests_enabled = false - wiki_enabled = false - snippets_enabled = false -} - `, rInt) -} diff --git a/builtin/providers/gitlab/util.go b/builtin/providers/gitlab/util.go deleted file mode 100644 index 942e30852..000000000 --- a/builtin/providers/gitlab/util.go +++ /dev/null @@ -1,54 +0,0 @@ -package gitlab - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - gitlab "github.com/xanzy/go-gitlab" -) - -// copied from ../github/util.go -func validateValueFunc(values []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (we []string, errors []error) { - value := v.(string) - valid := false - for _, role := range values { - if value == role { - valid = true - break - } - } - - if !valid { - errors = append(errors, fmt.Errorf("%s is an invalid value for argument %s", value, k)) - } - return - } -} - -func stringToVisibilityLevel(s string) *gitlab.VisibilityLevelValue { - lookup := map[string]gitlab.VisibilityLevelValue{ - "private": gitlab.PrivateVisibility, - "internal": gitlab.InternalVisibility, - "public": gitlab.PublicVisibility, - } - - value, ok := lookup[s] - if !ok { - return nil - } - return &value -} - -func visibilityLevelToString(v gitlab.VisibilityLevelValue) *string { - lookup := map[gitlab.VisibilityLevelValue]string{ - gitlab.PrivateVisibility: "private", - gitlab.InternalVisibility: "internal", - gitlab.PublicVisibility: "public", - } - value, ok := lookup[v] - if !ok { - return nil - } - return &value -} diff --git a/builtin/providers/gitlab/util_test.go b/builtin/providers/gitlab/util_test.go deleted file mode 100644 index 465eec73c..000000000 --- a/builtin/providers/gitlab/util_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package gitlab - -import ( - "testing" - - "github.com/xanzy/go-gitlab" -) - -func TestGitlab_validation(t *testing.T) { - cases := []struct { 
- Value string - ErrCount int - }{ - { - Value: "invalid", - ErrCount: 1, - }, - { - Value: "valid_one", - ErrCount: 0, - }, - { - Value: "valid_two", - ErrCount: 0, - }, - } - - validationFunc := validateValueFunc([]string{"valid_one", "valid_two"}) - - for _, tc := range cases { - _, errors := validationFunc(tc.Value, "test_arg") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected 1 validation error") - } - } -} - -func TestGitlab_visbilityHelpers(t *testing.T) { - cases := []struct { - String string - Level gitlab.VisibilityLevelValue - }{ - { - String: "private", - Level: gitlab.PrivateVisibility, - }, - { - String: "public", - Level: gitlab.PublicVisibility, - }, - } - - for _, tc := range cases { - level := stringToVisibilityLevel(tc.String) - if level == nil || *level != tc.Level { - t.Fatalf("got %v expected %v", level, tc.Level) - } - - sv := visibilityLevelToString(tc.Level) - if sv == nil || *sv != tc.String { - t.Fatalf("got %v expected %v", sv, tc.String) - } - } -} diff --git a/builtin/providers/google/compute_operation.go b/builtin/providers/google/compute_operation.go deleted file mode 100644 index 188deefd4..000000000 --- a/builtin/providers/google/compute_operation.go +++ /dev/null @@ -1,166 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/compute/v1" -) - -// OperationWaitType is an enum specifying what type of operation -// we're waiting on. 
-type ComputeOperationWaitType byte - -const ( - ComputeOperationWaitInvalid ComputeOperationWaitType = iota - ComputeOperationWaitGlobal - ComputeOperationWaitRegion - ComputeOperationWaitZone -) - -type ComputeOperationWaiter struct { - Service *compute.Service - Op *compute.Operation - Project string - Region string - Type ComputeOperationWaitType - Zone string -} - -func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *compute.Operation - var err error - - switch w.Type { - case ComputeOperationWaitGlobal: - op, err = w.Service.GlobalOperations.Get( - w.Project, w.Op.Name).Do() - case ComputeOperationWaitRegion: - op, err = w.Service.RegionOperations.Get( - w.Project, w.Region, w.Op.Name).Do() - case ComputeOperationWaitZone: - op, err = w.Service.ZoneOperations.Get( - w.Project, w.Zone, w.Op.Name).Do() - default: - return nil, "bad-type", fmt.Errorf( - "Invalid wait type: %#v", w.Type) - } - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) - - return op, op.Status, nil - } -} - -func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Refresh: w.RefreshFunc(), - } -} - -// ComputeOperationError wraps compute.OperationError and implements the -// error interface so it can be returned. 
-type ComputeOperationError compute.OperationError - -func (e ComputeOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - -func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { - return computeOperationWaitGlobalTime(config, op, project, activity, 4) -} - -func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, project string, activity string, timeoutMin int) error { - w := &ComputeOperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: project, - Type: ComputeOperationWaitGlobal, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = time.Duration(timeoutMin) * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*compute.Operation) - if op.Error != nil { - return ComputeOperationError(*op.Error) - } - - return nil -} - -func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error { - w := &ComputeOperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: project, - Type: ComputeOperationWaitRegion, - Region: region, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 4 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*compute.Operation) - if op.Error != nil { - return ComputeOperationError(*op.Error) - } - - return nil -} - -func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error { - return computeOperationWaitZoneTime(config, op, project, zone, 4, activity) -} - -func computeOperationWaitZoneTime(config *Config, op 
*compute.Operation, project string, zone string, minutes int, activity string) error { - w := &ComputeOperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: project, - Zone: zone, - Type: ComputeOperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = time.Duration(minutes) * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return ComputeOperationError(*op.Error) - } - return nil -} diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go deleted file mode 100644 index 716296442..000000000 --- a/builtin/providers/google/config.go +++ /dev/null @@ -1,200 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "runtime" - "strings" - - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/terraform" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/api/bigquery/v2" - "google.golang.org/api/cloudbilling/v1" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/compute/v1" - "google.golang.org/api/container/v1" - "google.golang.org/api/dns/v1" - "google.golang.org/api/iam/v1" - "google.golang.org/api/pubsub/v1" - "google.golang.org/api/servicemanagement/v1" - "google.golang.org/api/sqladmin/v1beta4" - "google.golang.org/api/storage/v1" -) - -// Config is the configuration structure used to instantiate the Google -// provider. 
-type Config struct { - Credentials string - Project string - Region string - - clientBilling *cloudbilling.Service - clientCompute *compute.Service - clientContainer *container.Service - clientDns *dns.Service - clientPubsub *pubsub.Service - clientResourceManager *cloudresourcemanager.Service - clientStorage *storage.Service - clientSqlAdmin *sqladmin.Service - clientIAM *iam.Service - clientServiceMan *servicemanagement.APIService - clientBigQuery *bigquery.Service -} - -func (c *Config) loadAndValidate() error { - var account accountFile - clientScopes := []string{ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - } - - var client *http.Client - - if c.Credentials != "" { - contents, _, err := pathorcontents.Read(c.Credentials) - if err != nil { - return fmt.Errorf("Error loading credentials: %s", err) - } - - // Assume account_file is a JSON string - if err := parseJSON(&account, contents); err != nil { - return fmt.Errorf("Error parsing credentials '%s': %s", contents, err) - } - - // Get the token for use in our requests - log.Printf("[INFO] Requesting Google token...") - log.Printf("[INFO] -- Email: %s", account.ClientEmail) - log.Printf("[INFO] -- Scopes: %s", clientScopes) - log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) - - conf := jwt.Config{ - Email: account.ClientEmail, - PrivateKey: []byte(account.PrivateKey), - Scopes: clientScopes, - TokenURL: "https://accounts.google.com/o/oauth2/token", - } - - // Initiate an http.Client. The following GET request will be - // authorized and authenticated on the behalf of - // your service account. - client = conf.Client(oauth2.NoContext) - - } else { - log.Printf("[INFO] Authenticating using DefaultClient") - err := error(nil) - client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) 
- if err != nil { - return err - } - } - - client.Transport = logging.NewTransport("Google", client.Transport) - - versionString := terraform.VersionString() - userAgent := fmt.Sprintf( - "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) - - var err error - - log.Printf("[INFO] Instantiating GCE client...") - c.clientCompute, err = compute.New(client) - if err != nil { - return err - } - c.clientCompute.UserAgent = userAgent - - log.Printf("[INFO] Instantiating GKE client...") - c.clientContainer, err = container.New(client) - if err != nil { - return err - } - c.clientContainer.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Cloud DNS client...") - c.clientDns, err = dns.New(client) - if err != nil { - return err - } - c.clientDns.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Storage Client...") - c.clientStorage, err = storage.New(client) - if err != nil { - return err - } - c.clientStorage.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google SqlAdmin Client...") - c.clientSqlAdmin, err = sqladmin.New(client) - if err != nil { - return err - } - c.clientSqlAdmin.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Pubsub Client...") - c.clientPubsub, err = pubsub.New(client) - if err != nil { - return err - } - c.clientPubsub.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Cloud ResourceManager Client...") - c.clientResourceManager, err = cloudresourcemanager.New(client) - if err != nil { - return err - } - c.clientResourceManager.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Cloud IAM Client...") - c.clientIAM, err = iam.New(client) - if err != nil { - return err - } - c.clientIAM.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Cloud Service Management Client...") - c.clientServiceMan, err = servicemanagement.New(client) - if err != nil { - return err - } - c.clientServiceMan.UserAgent = userAgent - - log.Printf("[INFO] 
Instantiating Google Cloud Billing Client...") - c.clientBilling, err = cloudbilling.New(client) - if err != nil { - return err - } - c.clientBilling.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...") - c.clientBigQuery, err = bigquery.New(client) - if err != nil { - return err - } - c.clientBigQuery.UserAgent = userAgent - - return nil -} - -// accountFile represents the structure of the account file JSON file. -type accountFile struct { - PrivateKeyId string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - ClientEmail string `json:"client_email"` - ClientId string `json:"client_id"` -} - -func parseJSON(result interface{}, contents string) error { - r := strings.NewReader(contents) - dec := json.NewDecoder(r) - - return dec.Decode(result) -} diff --git a/builtin/providers/google/config_test.go b/builtin/providers/google/config_test.go deleted file mode 100644 index 648f93a68..000000000 --- a/builtin/providers/google/config_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package google - -import ( - "io/ioutil" - "testing" -) - -const testFakeCredentialsPath = "./test-fixtures/fake_account.json" - -func TestConfigLoadAndValidate_accountFilePath(t *testing.T) { - config := Config{ - Credentials: testFakeCredentialsPath, - Project: "my-gce-project", - Region: "us-central1", - } - - err := config.loadAndValidate() - if err != nil { - t.Fatalf("error: %v", err) - } -} - -func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) { - contents, err := ioutil.ReadFile(testFakeCredentialsPath) - if err != nil { - t.Fatalf("error: %v", err) - } - config := Config{ - Credentials: string(contents), - Project: "my-gce-project", - Region: "us-central1", - } - - err = config.loadAndValidate() - if err != nil { - t.Fatalf("error: %v", err) - } -} - -func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) { - config := Config{ - Credentials: "{this is not json}", - Project: "my-gce-project", - Region: 
"us-central1", - } - - if config.loadAndValidate() == nil { - t.Fatalf("expected error, but got nil") - } -} diff --git a/builtin/providers/google/container_operation.go b/builtin/providers/google/container_operation.go deleted file mode 100644 index fb1b9cab8..000000000 --- a/builtin/providers/google/container_operation.go +++ /dev/null @@ -1,59 +0,0 @@ -package google - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/container/v1" -) - -type ContainerOperationWaiter struct { - Service *container.Service - Op *container.Operation - Project string - Zone string -} - -func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Refresh: w.RefreshFunc(), - } -} - -func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := w.Service.Projects.Zones.Operations.Get( - w.Project, w.Zone, w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status) - - return resp, resp.Status, err - } -} - -func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error { - w := &ContainerOperationWaiter{ - Service: config.clientContainer, - Op: op, - Project: project, - Zone: zone, - } - - state := w.Conf() - state.Timeout = time.Duration(timeoutMinutes) * time.Minute - state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second - _, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - return nil -} diff --git a/builtin/providers/google/data_source_google_compute_network.go b/builtin/providers/google/data_source_google_compute_network.go deleted file mode 100644 index b22d2b257..000000000 --- 
a/builtin/providers/google/data_source_google_compute_network.go +++ /dev/null @@ -1,73 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" -) - -func dataSourceGoogleComputeNetwork() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeNetworkRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "gateway_ipv4": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "subnetworks_self_links": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - network, err := config.clientCompute.Networks.Get( - project, d.Get("name").(string)).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - - return fmt.Errorf("Network Not Found : %s", d.Get("name")) - } - - return fmt.Errorf("Error reading network: %s", err) - } - d.Set("gateway_ipv4", network.GatewayIPv4) - d.Set("self_link", network.SelfLink) - d.Set("description", network.Description) - d.Set("subnetworks_self_links", network.Subnetworks) - d.SetId(network.Name) - return nil -} diff --git a/builtin/providers/google/data_source_google_compute_network_test.go b/builtin/providers/google/data_source_google_compute_network_test.go deleted file mode 100644 index fe0aac8fa..000000000 --- a/builtin/providers/google/data_source_google_compute_network_test.go +++ /dev/null 
@@ -1,73 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceGoogleNetwork(t *testing.T) { - networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceGoogleNetworkConfig(networkName), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"), - ), - }, - }, - }) -} - -func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - ds, ok := s.RootModule().Resources[data_source_name] - if !ok { - return fmt.Errorf("root module has no resource called %s", data_source_name) - } - - rs, ok := s.RootModule().Resources[resource_name] - if !ok { - return fmt.Errorf("can't find %s in state", resource_name) - } - - ds_attr := ds.Primary.Attributes - rs_attr := rs.Primary.Attributes - network_attrs_to_test := []string{ - "id", - "self_link", - "name", - "description", - } - - for _, attr_to_check := range network_attrs_to_test { - if ds_attr[attr_to_check] != rs_attr[attr_to_check] { - return fmt.Errorf( - "%s is %s; want %s", - attr_to_check, - ds_attr[attr_to_check], - rs_attr[attr_to_check], - ) - } - } - return nil - } -} - -func testAccDataSourceGoogleNetworkConfig(name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "%s" - description = "my-description" -} - -data "google_compute_network" "my_network" { - name = "${google_compute_network.foobar.name}" -}`, name) -} diff --git a/builtin/providers/google/data_source_google_compute_subnetwork.go 
b/builtin/providers/google/data_source_google_compute_subnetwork.go deleted file mode 100644 index 03a368bcd..000000000 --- a/builtin/providers/google/data_source_google_compute_subnetwork.go +++ /dev/null @@ -1,92 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" -) - -func dataSourceGoogleComputeSubnetwork() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeSubnetworkRead, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "ip_cidr_range": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "private_ip_google_access": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - "network": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "gateway_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - subnetwork, err := config.clientCompute.Subnetworks.Get( - project, region, d.Get("name").(string)).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - - return fmt.Errorf("Subnetwork Not Found") - } - - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - - d.Set("ip_cidr_range", subnetwork.IpCidrRange) - d.Set("private_ip_google_access", 
subnetwork.PrivateIpGoogleAccess) - d.Set("self_link", subnetwork.SelfLink) - d.Set("description", subnetwork.Description) - d.Set("gateway_address", subnetwork.GatewayAddress) - d.Set("network", subnetwork.Network) - - //Subnet id creation is defined in resource_compute_subnetwork.go - subnetwork.Region = region - d.SetId(createSubnetID(subnetwork)) - return nil -} diff --git a/builtin/providers/google/data_source_google_compute_subnetwork_test.go b/builtin/providers/google/data_source_google_compute_subnetwork_test.go deleted file mode 100644 index 835bd6ea3..000000000 --- a/builtin/providers/google/data_source_google_compute_subnetwork_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceGoogleSubnetwork(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccDataSourceGoogleSubnetworkConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"), - ), - }, - }, - }) -} - -func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - ds, ok := s.RootModule().Resources[data_source_name] - if !ok { - return fmt.Errorf("root module has no resource called %s", data_source_name) - } - - rs, ok := s.RootModule().Resources[resource_name] - if !ok { - return fmt.Errorf("can't find %s in state", resource_name) - } - - ds_attr := ds.Primary.Attributes - rs_attr := rs.Primary.Attributes - - subnetwork_attrs_to_test := []string{ - "id", - "self_link", - "name", - "description", - "ip_cidr_range", - "network", - "private_ip_google_access", - } - - for _, attr_to_check := range 
subnetwork_attrs_to_test { - if ds_attr[attr_to_check] != rs_attr[attr_to_check] { - return fmt.Errorf( - "%s is %s; want %s", - attr_to_check, - ds_attr[attr_to_check], - rs_attr[attr_to_check], - ) - } - } - - return nil - } -} - -var TestAccDataSourceGoogleSubnetworkConfig = ` - -resource "google_compute_network" "foobar" { - name = "network-test" - description = "my-description" -} -resource "google_compute_subnetwork" "foobar" { - name = "subnetwork-test" - description = "my-description" - ip_cidr_range = "10.0.0.0/24" - network = "${google_compute_network.foobar.self_link}" - private_ip_google_access = true -} - -data "google_compute_subnetwork" "my_subnetwork" { - name = "${google_compute_subnetwork.foobar.name}" -} -` diff --git a/builtin/providers/google/data_source_google_compute_zones.go b/builtin/providers/google/data_source_google_compute_zones.go deleted file mode 100644 index a200aba5c..000000000 --- a/builtin/providers/google/data_source_google_compute_zones.go +++ /dev/null @@ -1,80 +0,0 @@ -package google - -import ( - "fmt" - "log" - "sort" - "time" - - "github.com/hashicorp/terraform/helper/schema" - compute "google.golang.org/api/compute/v1" -) - -func dataSourceGoogleComputeZones() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeZonesRead, - Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Optional: true, - }, - "names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "status": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if value != "UP" && value != "DOWN" { - es = append(es, fmt.Errorf("%q can only be 'UP' or 'DOWN' (%q given)", k, value)) - } - return - }, - }, - }, - } -} - -func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region := config.Region - if r, ok := 
d.GetOk("region"); ok { - region = r.(string) - } - - regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s", - config.Project, region) - filter := fmt.Sprintf("(region eq %s)", regionUrl) - - if s, ok := d.GetOk("status"); ok { - filter += fmt.Sprintf(" (status eq %s)", s) - } - - call := config.clientCompute.Zones.List(config.Project).Filter(filter) - - resp, err := call.Do() - if err != nil { - return err - } - - zones := flattenZones(resp.Items) - log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) - - d.Set("names", zones) - d.SetId(time.Now().UTC().String()) - - return nil -} - -func flattenZones(zones []*compute.Zone) []string { - result := make([]string, len(zones), len(zones)) - for i, zone := range zones { - result[i] = zone.Name - } - sort.Strings(result) - return result -} diff --git a/builtin/providers/google/data_source_google_compute_zones_test.go b/builtin/providers/google/data_source_google_compute_zones_test.go deleted file mode 100644 index 80dabf220..000000000 --- a/builtin/providers/google/data_source_google_compute_zones_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package google - -import ( - "errors" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGoogleComputeZones_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleComputeZonesConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleComputeZonesMeta("data.google_compute_zones.available"), - ), - }, - }, - }) -} - -func testAccCheckGoogleComputeZonesMeta(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find zones data source: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("zones data 
source ID not set.") - } - - count, ok := rs.Primary.Attributes["names.#"] - if !ok { - return errors.New("can't find 'names' attribute") - } - - noOfNames, err := strconv.Atoi(count) - if err != nil { - return errors.New("failed to read number of zones") - } - if noOfNames < 2 { - return fmt.Errorf("expected at least 2 zones, received %d, this is most likely a bug", - noOfNames) - } - - for i := 0; i < noOfNames; i++ { - idx := "names." + strconv.Itoa(i) - v, ok := rs.Primary.Attributes[idx] - if !ok { - return fmt.Errorf("zone list is corrupt (%q not found), this is definitely a bug", idx) - } - if len(v) < 1 { - return fmt.Errorf("Empty zone name (%q), this is definitely a bug", idx) - } - } - - return nil - } -} - -var testAccCheckGoogleComputeZonesConfig = ` -data "google_compute_zones" "available" {} -` diff --git a/builtin/providers/google/data_source_google_container_engine_versions.go b/builtin/providers/google/data_source_google_container_engine_versions.go deleted file mode 100644 index 3eaf8043a..000000000 --- a/builtin/providers/google/data_source_google_container_engine_versions.go +++ /dev/null @@ -1,67 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceGoogleContainerEngineVersions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleContainerEngineVersionsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - }, - "zone": { - Type: schema.TypeString, - Required: true, - }, - "latest_master_version": { - Type: schema.TypeString, - Computed: true, - }, - "latest_node_version": { - Type: schema.TypeString, - Computed: true, - }, - "valid_master_versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "valid_node_versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func 
dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - - resp, err := config.clientContainer.Projects.Zones.GetServerconfig(project, zone).Do() - if err != nil { - return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error()) - } - - d.Set("valid_master_versions", resp.ValidMasterVersions) - d.Set("valid_node_versions", resp.ValidNodeVersions) - d.Set("latest_master_version", resp.ValidMasterVersions[0]) - d.Set("latest_node_version", resp.ValidNodeVersions[0]) - - d.SetId(time.Now().UTC().String()) - - return nil -} diff --git a/builtin/providers/google/data_source_google_container_engine_versions_test.go b/builtin/providers/google/data_source_google_container_engine_versions_test.go deleted file mode 100644 index baf880943..000000000 --- a/builtin/providers/google/data_source_google_container_engine_versions_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package google - -import ( - "errors" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGoogleContainerEngineVersions_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleContainerEngineVersionsConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), - ), - }, - }, - }) -} - -func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find versions data source: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("versions data source ID 
not set.") - } - - nodeCount, ok := rs.Primary.Attributes["valid_node_versions.#"] - if !ok { - return errors.New("can't find 'valid_node_versions' attribute") - } - - noOfNodes, err := strconv.Atoi(nodeCount) - if err != nil { - return errors.New("failed to read number of valid node versions") - } - if noOfNodes < 2 { - return fmt.Errorf("expected at least 2 valid node versions, received %d, this is most likely a bug", - noOfNodes) - } - - for i := 0; i < noOfNodes; i++ { - idx := "valid_node_versions." + strconv.Itoa(i) - v, ok := rs.Primary.Attributes[idx] - if !ok { - return fmt.Errorf("valid node versions list is corrupt (%q not found), this is definitely a bug", idx) - } - if len(v) < 1 { - return fmt.Errorf("Empty node version (%q), this is definitely a bug", idx) - } - } - - masterCount, ok := rs.Primary.Attributes["valid_master_versions.#"] - if !ok { - return errors.New("can't find 'valid_master_versions' attribute") - } - - noOfMasters, err := strconv.Atoi(masterCount) - if err != nil { - return errors.New("failed to read number of valid master versions") - } - if noOfMasters < 2 { - return fmt.Errorf("expected at least 2 valid master versions, received %d, this is most likely a bug", - noOfMasters) - } - - for i := 0; i < noOfMasters; i++ { - idx := "valid_master_versions." 
+ strconv.Itoa(i) - v, ok := rs.Primary.Attributes[idx] - if !ok { - return fmt.Errorf("valid master versions list is corrupt (%q not found), this is definitely a bug", idx) - } - if len(v) < 1 { - return fmt.Errorf("Empty master version (%q), this is definitely a bug", idx) - } - } - - return nil - } -} - -var testAccCheckGoogleContainerEngineVersionsConfig = ` -data "google_container_engine_versions" "versions" { - zone = "us-central1-b" -} -` diff --git a/builtin/providers/google/data_source_google_iam_policy.go b/builtin/providers/google/data_source_google_iam_policy.go deleted file mode 100644 index e47b0f009..000000000 --- a/builtin/providers/google/data_source_google_iam_policy.go +++ /dev/null @@ -1,103 +0,0 @@ -package google - -import ( - "encoding/json" - "strconv" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var iamBinding *schema.Schema = &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role": { - Type: schema.TypeString, - Required: true, - }, - "members": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, -} - -// dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer -// to express a Google Cloud IAM policy in a data resource. 
This is an example -// of how the schema would be used in a config: -// -// data "google_iam_policy" "admin" { -// binding { -// role = "roles/storage.objectViewer" -// members = [ -// "user:evanbrown@google.com", -// ] -// } -// } -func dataSourceGoogleIamPolicy() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleIamPolicyRead, - Schema: map[string]*schema.Schema{ - "binding": iamBinding, - "policy_data": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// dataSourceGoogleIamPolicyRead reads a data source from config and writes it -// to state. -func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { - var policy cloudresourcemanager.Policy - var bindings []*cloudresourcemanager.Binding - - // The schema supports multiple binding{} blocks - bset := d.Get("binding").(*schema.Set) - - // All binding{} blocks will be converted and stored in an array - bindings = make([]*cloudresourcemanager.Binding, bset.Len()) - policy.Bindings = bindings - - // Convert each config binding into a cloudresourcemanager.Binding - for i, v := range bset.List() { - binding := v.(map[string]interface{}) - policy.Bindings[i] = &cloudresourcemanager.Binding{ - Role: binding["role"].(string), - Members: dataSourceGoogleIamPolicyMembers(binding["members"].(*schema.Set)), - } - } - - // Marshal cloudresourcemanager.Policy to JSON suitable for storing in state - pjson, err := json.Marshal(&policy) - if err != nil { - // should never happen if the above code is correct - return err - } - pstring := string(pjson) - - d.Set("policy_data", pstring) - d.SetId(strconv.Itoa(hashcode.String(pstring))) - - return nil -} - -// dataSourceGoogleIamPolicyMembers converts a set of members in a binding -// (a member is a principal, usually an e-mail address) into an array of -// string. 
-func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string { - var members []string - members = make([]string, d.Len()) - - for i, v := range d.List() { - members[i] = v.(string) - } - return members -} diff --git a/builtin/providers/google/data_source_storage_object_signed_url.go b/builtin/providers/google/data_source_storage_object_signed_url.go deleted file mode 100644 index fced990cf..000000000 --- a/builtin/providers/google/data_source_storage_object_signed_url.go +++ /dev/null @@ -1,368 +0,0 @@ -package google - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "log" - "net/url" - "os" - "strconv" - "strings" - "time" - - "sort" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" -) - -const gcsBaseUrl = "https://storage.googleapis.com" -const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" - -func dataSourceGoogleSignedUrl() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleSignedUrlRead, - - Schema: map[string]*schema.Schema{ - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "content_md5": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - "content_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", - }, - "credentials": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "duration": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "1h", - }, - "extension_headers": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - ValidateFunc: validateExtensionHeaders, - }, - "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "GET", - ValidateFunc: validateHttpMethod, - }, - "path": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "signed_url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) { - hdrMap := v.(map[string]interface{}) - for k, _ := range hdrMap { - if !strings.HasPrefix(strings.ToLower(k), "x-goog-") { - errors = append(errors, fmt.Errorf( - "extension_header (%s) not valid, header name must begin with 'x-goog-'", k)) - } - } - return -} - -func validateHttpMethod(v interface{}, k string) (ws []string, errs []error) { - value := v.(string) - value = strings.ToUpper(value) - if value != "GET" && value != "HEAD" && value != "PUT" && value != "DELETE" { - errs = append(errs, errors.New("http_method must be one of [GET|HEAD|PUT|DELETE]")) - } - return -} - -func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Build UrlData object from data source attributes - urlData := &UrlData{} - - // HTTP Method - if method, ok := d.GetOk("http_method"); ok { - urlData.HttpMethod = method.(string) - } - - // convert duration to an expiration datetime (unix time in seconds) - durationString := "1h" - if v, ok := d.GetOk("duration"); ok { - durationString = v.(string) - } - duration, err := time.ParseDuration(durationString) - if err != nil { - return errwrap.Wrapf("could not parse duration: {{err}}", err) - } - expires := time.Now().Unix() + int64(duration.Seconds()) - urlData.Expires = int(expires) - - // content_md5 is optional - if v, ok := d.GetOk("content_md5"); ok { - urlData.ContentMd5 = v.(string) - } - - // content_type is optional - if v, ok := d.GetOk("content_type"); ok { - urlData.ContentType = v.(string) - } - - // extension_headers (x-goog-* HTTP headers) are optional - if v, ok := d.GetOk("extension_headers"); ok { - hdrMap := v.(map[string]interface{}) - - if len(hdrMap) > 0 { - urlData.HttpHeaders = make(map[string]string, 
len(hdrMap)) - for k, v := range hdrMap { - urlData.HttpHeaders[k] = v.(string) - } - } - } - - urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string)) - - // Load JWT Config from Google Credentials - jwtConfig, err := loadJwtConfig(d, config) - if err != nil { - return err - } - urlData.JwtConfig = jwtConfig - - // Construct URL - signedUrl, err := urlData.SignedUrl() - if err != nil { - return err - } - - // Success - d.Set("signed_url", signedUrl) - - encodedSig, err := urlData.EncodedSignature() - if err != nil { - return err - } - d.SetId(encodedSig) - - return nil -} - -// loadJwtConfig looks for credentials json in the following places, -// in order of preference: -// 1. `credentials` attribute of the datasource -// 2. `credentials` attribute in the provider definition. -// 3. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { - config := meta.(*Config) - - credentials := "" - if v, ok := d.GetOk("credentials"); ok { - log.Println("[DEBUG] using data source credentials to sign URL") - credentials = v.(string) - - } else if config.Credentials != "" { - log.Println("[DEBUG] using provider credentials to sign URL") - credentials = config.Credentials - - } else if filename := os.Getenv(googleCredentialsEnvVar); filename != "" { - log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL") - credentials = filename - - } - - if strings.TrimSpace(credentials) != "" { - contents, _, err := pathorcontents.Read(credentials) - if err != nil { - return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err) - } - - cfg, err := google.JWTConfigFromJSON([]byte(contents), "") - if err != nil { - return nil, errwrap.Wrapf("Error parsing credentials: {{err}}", err) - } - return cfg, nil - } - - return nil, errors.New("Credentials not found in datasource, provider configuration or 
GOOGLE_APPLICATION_CREDENTIALS environment variable.") -} - -// parsePrivateKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -// copied from golang.org/x/oauth2/internal -func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, errwrap.Wrapf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: {{err}}", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -// UrlData stores the values required to create a Signed Url -type UrlData struct { - JwtConfig *jwt.Config - ContentMd5 string - ContentType string - HttpMethod string - Expires int - HttpHeaders map[string]string - Path string -} - -// SigningString creates a string representation of the UrlData in a form ready for signing: -// see https://cloud.google.com/storage/docs/access-control/create-signed-urls-program -// Example output: -// ------------------- -// GET -// -// -// 1388534400 -// bucket/objectname -// ------------------- -func (u *UrlData) SigningString() []byte { - var buf bytes.Buffer - - // HTTP Verb - buf.WriteString(u.HttpMethod) - buf.WriteString("\n") - - // Content MD5 (optional, always add new line) - buf.WriteString(u.ContentMd5) - buf.WriteString("\n") - - // Content Type (optional, always add new line) - buf.WriteString(u.ContentType) - buf.WriteString("\n") - - // Expiration - buf.WriteString(strconv.Itoa(u.Expires)) - buf.WriteString("\n") - - // Extra HTTP headers (optional) - // Must be sorted in lexigraphical order - var 
keys []string - for k := range u.HttpHeaders { - keys = append(keys, strings.ToLower(k)) - } - sort.Strings(keys) - // Write sorted headers to signing string buffer - for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k])) - } - - // Storate Object path (includes bucketname) - buf.WriteString(u.Path) - - return buf.Bytes() -} - -func (u *UrlData) Signature() ([]byte, error) { - // Sign url data - signature, err := SignString(u.SigningString(), u.JwtConfig) - if err != nil { - return nil, err - - } - - return signature, nil -} - -// EncodedSignature returns the Signature() after base64 encoding and url escaping -func (u *UrlData) EncodedSignature() (string, error) { - signature, err := u.Signature() - if err != nil { - return "", err - } - - // base64 encode signature - encoded := base64.StdEncoding.EncodeToString(signature) - // encoded signature may include /, = characters that need escaping - encoded = url.QueryEscape(encoded) - - return encoded, nil -} - -// SignedUrl constructs the final signed URL a client can use to retrieve storage object -func (u *UrlData) SignedUrl() (string, error) { - - encodedSig, err := u.EncodedSignature() - if err != nil { - return "", err - } - - // build url - // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program - var urlBuffer bytes.Buffer - urlBuffer.WriteString(gcsBaseUrl) - urlBuffer.WriteString(u.Path) - urlBuffer.WriteString("?GoogleAccessId=") - urlBuffer.WriteString(u.JwtConfig.Email) - urlBuffer.WriteString("&Expires=") - urlBuffer.WriteString(strconv.Itoa(u.Expires)) - urlBuffer.WriteString("&Signature=") - urlBuffer.WriteString(encodedSig) - - return urlBuffer.String(), nil -} - -// SignString calculates the SHA256 signature of the input string -func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { - // Parse private key - pk, err := parsePrivateKey(cfg.PrivateKey) - if err != nil { - return nil, errwrap.Wrapf("failed to sign string, could not 
parse key: {{err}}", err) - } - - // Hash string - hasher := sha256.New() - hasher.Write(toSign) - - // Sign string - signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) - if err != nil { - return nil, errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err) - } - - return signed, nil -} diff --git a/builtin/providers/google/data_source_storage_object_signed_url_test.go b/builtin/providers/google/data_source_storage_object_signed_url_test.go deleted file mode 100644 index 03912216c..000000000 --- a/builtin/providers/google/data_source_storage_object_signed_url_test.go +++ /dev/null @@ -1,263 +0,0 @@ -package google - -import ( - "testing" - - "bytes" - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "net/url" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "golang.org/x/oauth2/google" -) - -const fakeCredentials = `{ - "type": "service_account", - "project_id": "gcp-project", - "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9", - "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n", - "client_email": "user@gcp-project.iam.gserviceaccount.com", - "client_id": "103198861025845558729", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - 
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com" -}` - -// The following values are derived from the output of the `gsutil signurl` command. -// i.e. -// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file -// URL HTTP Method Expiration Signed URL -// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D - -const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" -const testUrlExpires = 1470967410 -const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" -const testUrlExpectedUrl = 
"https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" - -func TestUrlData_Signing(t *testing.T) { - urlData := &UrlData{ - HttpMethod: "GET", - Expires: testUrlExpires, - Path: testUrlPath, - } - // unescape and decode the expected signature - expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) - if err != nil { - t.Error(err) - } - expected, err := base64.StdEncoding.DecodeString(expectedSig) - if err != nil { - t.Error(err) - } - - // load fake service account credentials - cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") - if err != nil { - t.Error(err) - } - - // create url data signature - toSign := urlData.SigningString() - result, err := SignString(toSign, cfg) - if err != nil { - t.Error(err) - } - - // compare to expected value - if !bytes.Equal(result, expected) { - t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result) - } - -} - -func TestUrlData_SignedUrl(t *testing.T) { - // load fake service account credentials - cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") - if err != nil { - t.Error(err) - } - - urlData := &UrlData{ - HttpMethod: "GET", - Expires: testUrlExpires, - Path: testUrlPath, - JwtConfig: cfg, - } - result, err := urlData.SignedUrl() - if err != nil { - t.Errorf("Could not generated signed url: %+v", err) - } - if result != testUrlExpectedUrl { - t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result) - } -} - -func TestAccStorageSignedUrl_basic(t *testing.T) { - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleSignedUrlConfig, - Check: resource.ComposeTestCheckFunc( - testAccGoogleSignedUrlExists("data.google_storage_object_signed_url.blerg"), - ), - }, - }, - }) -} - -func TestAccStorageSignedUrl_accTest(t *testing.T) { - bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) - - headers := map[string]string{ - "x-goog-test": "foo", - "x-goog-if-generation-match": "1", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTestGoogleStorageObjectSignedURL(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil), - testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers), - testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_content_type", nil), - testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_md5", nil), - ), - }, - }, - }) -} - -func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["signed_url"] == "" { - return fmt.Errorf("signed_url is empty: %v", a) - } - - return nil - } -} - -func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r := s.RootModule().Resources[n] - if r == nil { - return fmt.Errorf("Datasource not found") - } - a := r.Primary.Attributes - - if a["signed_url"] == "" { - return fmt.Errorf("signed_url is empty: %v", a) - } - - // create HTTP request - url := a["signed_url"] - method := a["http_method"] - req, err := 
http.NewRequest(method, url, nil) - if err != nil { - return err - } - - // Add extension headers to request, if provided - for k, v := range headers { - req.Header.Set(k, v) - } - - // content_type is optional, add to test query if provided in datasource config - contentType := a["content_type"] - if contentType != "" { - req.Header.Add("Content-Type", contentType) - } - - // content_md5 is optional, add to test query if provided in datasource config - contentMd5 := a["content_md5"] - if contentMd5 != "" { - req.Header.Add("Content-MD5", contentMd5) - } - - // send request using signed url - client := cleanhttp.DefaultClient() - response, err := client.Do(req) - if err != nil { - return err - } - defer response.Body.Close() - - // check content in response, should be our test string or XML with error - body, err := ioutil.ReadAll(response.Body) - if err != nil { - return err - } - if string(body) != "once upon a time..." { - return fmt.Errorf("Got unexpected object contents: %s\n\tURL: %s", string(body), url) - } - - return nil - } -} - -const testGoogleSignedUrlConfig = ` -data "google_storage_object_signed_url" "blerg" { - bucket = "friedchicken" - path = "path/to/file" - -} -` - -func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "story" { - name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" - - content = "once upon a time..." 
-} - -data "google_storage_object_signed_url" "story_url" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - -} - -data "google_storage_object_signed_url" "story_url_w_headers" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - extension_headers { - x-goog-test = "foo" - x-goog-if-generation-match = 1 - } -} - -data "google_storage_object_signed_url" "story_url_w_content_type" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - - content_type = "text/plain" -} - -data "google_storage_object_signed_url" "story_url_w_md5" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - - content_md5 = "${google_storage_bucket_object.story.md5hash}" -}`, bucketName) -} diff --git a/builtin/providers/google/disk_type.go b/builtin/providers/google/disk_type.go deleted file mode 100644 index 1653337be..000000000 --- a/builtin/providers/google/disk_type.go +++ /dev/null @@ -1,15 +0,0 @@ -package google - -import ( - "google.golang.org/api/compute/v1" -) - -// readDiskType finds the disk type with the given name. 
-func readDiskType(c *Config, zone *compute.Zone, name string) (*compute.DiskType, error) { - diskType, err := c.clientCompute.DiskTypes.Get(c.Project, zone.Name, name).Do() - if err == nil && diskType != nil && diskType.SelfLink != "" { - return diskType, nil - } else { - return nil, err - } -} diff --git a/builtin/providers/google/dns_change.go b/builtin/providers/google/dns_change.go deleted file mode 100644 index f2f827a3b..000000000 --- a/builtin/providers/google/dns_change.go +++ /dev/null @@ -1,45 +0,0 @@ -package google - -import ( - "time" - - "google.golang.org/api/dns/v1" - - "github.com/hashicorp/terraform/helper/resource" -) - -type DnsChangeWaiter struct { - Service *dns.Service - Change *dns.Change - Project string - ManagedZone string -} - -func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var chg *dns.Change - var err error - - chg, err = w.Service.Changes.Get( - w.Project, w.ManagedZone, w.Change.Id).Do() - - if err != nil { - return nil, "", err - } - - return chg, chg.Status, nil - } -} - -func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { - state := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"done"}, - Refresh: w.RefreshFunc(), - } - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - return state - -} diff --git a/builtin/providers/google/gcp_sweeper_test.go b/builtin/providers/google/gcp_sweeper_test.go deleted file mode 100644 index 54661f050..000000000 --- a/builtin/providers/google/gcp_sweeper_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package google - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestMain(m *testing.M) { - resource.TestMain(m) -} - -// sharedConfigForRegion returns a common config setup needed for the sweeper -// functions for a given region -func sharedConfigForRegion(region string) (*Config, error) { - 
project := os.Getenv("GOOGLE_PROJECT") - if project == "" { - return nil, fmt.Errorf("empty GOOGLE_PROJECT") - } - - creds := os.Getenv("GOOGLE_CREDENTIALS") - if creds == "" { - return nil, fmt.Errorf("empty GOOGLE_CREDENTIALS") - } - - conf := &Config{ - Credentials: creds, - Region: region, - Project: project, - } - - return conf, nil -} diff --git a/builtin/providers/google/image.go b/builtin/providers/google/image.go deleted file mode 100644 index d21210d99..000000000 --- a/builtin/providers/google/image.go +++ /dev/null @@ -1,194 +0,0 @@ -package google - -import ( - "fmt" - "regexp" - "strings" - - "google.golang.org/api/googleapi" -) - -const ( - resolveImageProjectRegex = "[-_a-zA-Z0-9]*" - resolveImageFamilyRegex = "[-_a-zA-Z0-9]*" - resolveImageImageRegex = "[-_a-zA-Z0-9]*" -) - -var ( - resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/family/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) - resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex)) - resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex)) - resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex)) - resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) - resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex)) - resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex)) - resolveImageLink = 
regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)", resolveImageProjectRegex, resolveImageImageRegex)) -) - -func resolveImageImageExists(c *Config, project, name string) (bool, error) { - if _, err := c.clientCompute.Images.Get(project, name).Do(); err == nil { - return true, nil - } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - return false, nil - } else { - return false, fmt.Errorf("Error checking if image %s exists: %s", name, err) - } -} - -func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { - if _, err := c.clientCompute.Images.GetFromFamily(project, name).Do(); err == nil { - return true, nil - } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - return false, nil - } else { - return false, fmt.Errorf("Error checking if family %s exists: %s", name, err) - } -} - -func sanityTestRegexMatches(expected int, got []string, regexType, name string) error { - if len(got)-1 != expected { // subtract one, index zero is the entire matched expression - return fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name) - } - return nil -} - -// If the given name is a URL, return it. -// If it's in the form projects/{project}/global/images/{image}, return it -// If it's in the form projects/{project}/global/images/family/{family}, return it -// If it's in the form global/images/{image}, return it -// If it's in the form global/images/family/{family}, return it -// If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}. -// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. -// If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}. 
-// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. -// If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}. -// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. -// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. -// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} -func resolveImage(c *Config, name string) (string, error) { - // built-in projects to look for images/families containing the string - // on the left in - imageMap := map[string]string{ - "centos": "centos-cloud", - "coreos": "coreos-cloud", - "debian": "debian-cloud", - "opensuse": "opensuse-cloud", - "rhel": "rhel-cloud", - "sles": "suse-cloud", - "ubuntu": "ubuntu-os-cloud", - "windows": "windows-cloud", - } - var builtInProject string - for k, v := range imageMap { - if strings.Contains(name, k) { - builtInProject = v - break - } - } - switch { - case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz - return name, nil - case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz - res := resolveImageProjectImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project image", name); err != nil { - return "", err - } - return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil - case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz - res := resolveImageProjectFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project family", name); err != nil { - return "", err - } - return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], 
res[2]), nil - case resolveImageGlobalImage.MatchString(name): // global/images/xyz - res := resolveImageGlobalImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global image", name); err != nil { - return "", err - } - return fmt.Sprintf("global/images/%s", res[1]), nil - case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz - res := resolveImageGlobalFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global family", name); err != nil { - return "", err - } - return fmt.Sprintf("global/images/family/%s", res[1]), nil - case resolveImageFamilyFamily.MatchString(name): // family/xyz - res := resolveImageFamilyFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "family family", name); err != nil { - return "", err - } - if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("global/images/family/%s", res[1]), nil - } - if builtInProject != "" { - if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil - } - } - case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz - res := resolveImageProjectImageShorthand.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil { - return "", err - } - if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil - } - fallthrough // check if it's a family - case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz - res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil { - return "", err - } - if ok, err := 
resolveImageFamilyExists(c, res[1], res[2]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil - } - case resolveImageImage.MatchString(name): // xyz - res := resolveImageImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "image", name); err != nil { - return "", err - } - if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("global/images/%s", res[1]), nil - } - if builtInProject != "" { - // check the images GCP provides - if ok, err := resolveImageImageExists(c, builtInProject, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil - } - } - fallthrough // check if the name is a family, instead of an image - case resolveImageFamily.MatchString(name): // xyz - res := resolveImageFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "family", name); err != nil { - return "", err - } - if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("global/images/family/%s", res[1]), nil - } - if builtInProject != "" { - // check the families GCP provides - if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { - return "", err - } else if ok { - return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil - } - } - } - return "", fmt.Errorf("Could not find image or family %s", name) -} diff --git a/builtin/providers/google/image_test.go b/builtin/providers/google/image_test.go deleted file mode 100644 index e0f56518a..000000000 --- a/builtin/providers/google/image_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - compute "google.golang.org/api/compute/v1" - - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeImage_resolveImage(t *testing.T) { - var image compute.Image - rand := acctest.RandString(10) - name := fmt.Sprintf("test-image-%s", rand) - fam := fmt.Sprintf("test-image-family-%s", rand) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeImageDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeImage_resolving(name, fam), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeImageExists( - "google_compute_image.foobar", &image), - testAccCheckComputeImageResolution("google_compute_image.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeImageResolution(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - project := config.Project - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - if rs.Primary.Attributes["name"] == "" { - return fmt.Errorf("No image name is set") - } - if rs.Primary.Attributes["family"] == "" { - return fmt.Errorf("No image family is set") - } - if rs.Primary.Attributes["self_link"] == "" { - return fmt.Errorf("No self_link is set") - } - - name := rs.Primary.Attributes["name"] - family := rs.Primary.Attributes["family"] - link := rs.Primary.Attributes["self_link"] - - images := map[string]string{ - "family/debian-8": "projects/debian-cloud/global/images/family/debian-8", - "projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", - "debian-8": "projects/debian-cloud/global/images/family/debian-8", - "debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", - 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110", - - "global/images/" + name: "global/images/" + name, - "global/images/family/" + family: "global/images/family/" + family, - name: "global/images/" + name, - family: "global/images/family/" + family, - "family/" + family: "global/images/family/" + family, - project + "/" + name: "projects/" + project + "/global/images/" + name, - project + "/" + family: "projects/" + project + "/global/images/family/" + family, - link: link, - } - - for input, expectation := range images { - result, err := resolveImage(config, input) - if err != nil { - return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err) - } - if result != expectation { - return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) - } - } - return nil - } -} - -func testAccComputeImage_resolving(name, family string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - zone = "us-central1-a" - image = "debian-8-jessie-v20160803" -} -resource "google_compute_image" "foobar" { - name = "%s" - family = "%s" - source_disk = "${google_compute_disk.foobar.self_link}" -} -`, name, name, family) -} diff --git a/builtin/providers/google/import_bigquery_dataset_test.go b/builtin/providers/google/import_bigquery_dataset_test.go deleted file mode 100644 index 32f2682d4..000000000 --- a/builtin/providers/google/import_bigquery_dataset_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccBigQueryDataset_importBasic(t *testing.T) { - resourceName := "google_bigquery_dataset.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBigQueryDatasetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_bigquery_table_test.go b/builtin/providers/google/import_bigquery_table_test.go deleted file mode 100644 index 7fa359a4c..000000000 --- a/builtin/providers/google/import_bigquery_table_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccBigQueryTable_importBasic(t *testing.T) { - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBigQueryTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable(datasetID, tableID), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_address_test.go b/builtin/providers/google/import_compute_address_test.go deleted file mode 100644 index db579f4c0..000000000 --- a/builtin/providers/google/import_compute_address_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeAddress_importBasic(t *testing.T) { - resourceName := "google_compute_address.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeAddress_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_autoscaler_test.go b/builtin/providers/google/import_compute_autoscaler_test.go deleted file mode 100644 index e358438ac..000000000 --- a/builtin/providers/google/import_compute_autoscaler_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package google - -import ( - "testing" - - "fmt" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeAutoscaler_importBasic(t *testing.T) { - resourceName := "google_compute_autoscaler.foobar" - - var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) - var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) - var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) - var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeAutoscalerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_disk_test.go b/builtin/providers/google/import_compute_disk_test.go deleted file mode 100644 index 0eba27637..000000000 --- a/builtin/providers/google/import_compute_disk_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeDisk_importBasic(t 
*testing.T) { - resourceName := "google_compute_disk.foobar" - diskName := fmt.Sprintf("disk-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeDiskDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeDisk_basic(diskName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_firewall_test.go b/builtin/providers/google/import_compute_firewall_test.go deleted file mode 100644 index 362391e1e..000000000 --- a/builtin/providers/google/import_compute_firewall_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeFirewall_importBasic(t *testing.T) { - resourceName := "google_compute_firewall.foobar" - networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeFirewall_basic(networkName, firewallName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_forwarding_rule_test.go b/builtin/providers/google/import_compute_forwarding_rule_test.go deleted file mode 100644 index cc6c0214e..000000000 --- a/builtin/providers/google/import_compute_forwarding_rule_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" 
- "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeForwardingRule_importBasic(t *testing.T) { - resourceName := "google_compute_forwarding_rule.foobar" - poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeForwardingRule_basic(poolName, ruleName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_global_address_test.go b/builtin/providers/google/import_compute_global_address_test.go deleted file mode 100644 index 73e495644..000000000 --- a/builtin/providers/google/import_compute_global_address_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeGlobalAddress_importBasic(t *testing.T) { - resourceName := "google_compute_global_address.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeGlobalAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeGlobalAddress_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_http_health_check_test.go b/builtin/providers/google/import_compute_http_health_check_test.go deleted file mode 100644 index 9e29dd787..000000000 --- a/builtin/providers/google/import_compute_http_health_check_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "testing" - - "fmt" - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) { - resourceName := "google_compute_http_health_check.foobar" - - hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHttpHealthCheck_basic(hhckName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_instance_group_manager_test.go b/builtin/providers/google/import_compute_instance_group_manager_test.go deleted file mode 100644 index 6fc3d8e8c..000000000 --- a/builtin/providers/google/import_compute_instance_group_manager_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccInstanceGroupManager_importBasic(t *testing.T) { - resourceName1 := "google_compute_instance_group_manager.igm-basic" - resourceName2 := "google_compute_instance_group_manager.igm-no-tp" - template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), - }, - - resource.TestStep{ - ResourceName: resourceName1, - ImportState: true, 
- ImportStateVerify: true, - }, - - resource.TestStep{ - ResourceName: resourceName2, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccInstanceGroupManager_importUpdate(t *testing.T) { - resourceName := "google_compute_instance_group_manager.igm-update" - template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_update(template, target, igm), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_instance_template_test.go b/builtin/providers/google/import_compute_instance_template_test.go deleted file mode 100644 index fc414cd53..000000000 --- a/builtin/providers/google/import_compute_instance_template_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeInstanceTemplate_importBasic(t *testing.T) { - resourceName := "google_compute_instance_template.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_importIp(t *testing.T) { - resourceName := "google_compute_instance_template.foobar" - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_ip, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_importDisks(t *testing.T) { - resourceName := "google_compute_instance_template.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_disks, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_importSubnetAuto(t *testing.T) { - resourceName := "google_compute_instance_template.foobar" - network := "network-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_subnet_auto(network), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_importSubnetCustom(t *testing.T) { - resourceName := "google_compute_instance_template.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_subnet_custom, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: 
true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_network_test.go b/builtin/providers/google/import_compute_network_test.go deleted file mode 100644 index 8e6ab769b..000000000 --- a/builtin/providers/google/import_compute_network_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeNetwork_importBasic(t *testing.T) { - resourceName := "google_compute_network.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeNetwork_basic, - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - //ImportStateVerifyIgnore: []string{"ipv4_range", "name"}, - }, - }, - }) -} - -func TestAccComputeNetwork_importAuto_subnet(t *testing.T) { - resourceName := "google_compute_network.bar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeNetwork_auto_subnet, - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeNetwork_importCustom_subnet(t *testing.T) { - resourceName := "google_compute_network.baz" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeNetwork_custom_subnet, - }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_route_test.go b/builtin/providers/google/import_compute_route_test.go deleted file mode 100644 
index a4bfb9893..000000000 --- a/builtin/providers/google/import_compute_route_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeRoute_importBasic(t *testing.T) { - resourceName := "google_compute_network.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeRoute_basic, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) { - resourceName := "google_compute_network.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouteDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeRoute_defaultInternetGateway, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_router_interface_test.go b/builtin/providers/google/import_compute_router_interface_test.go deleted file mode 100644 index 29355ae1e..000000000 --- a/builtin/providers/google/import_compute_router_interface_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeRouterInterface_import(t *testing.T) { - resourceName := "google_compute_router_interface.foobar" - testId := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterInterfaceBasic(testId), - }, - - resource.TestStep{ - ResourceName: 
resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_router_peer_test.go b/builtin/providers/google/import_compute_router_peer_test.go deleted file mode 100644 index 71c2ed86c..000000000 --- a/builtin/providers/google/import_compute_router_peer_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeRouterPeer_import(t *testing.T) { - resourceName := "google_compute_router_peer.foobar" - testId := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterPeerBasic(testId), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_router_test.go b/builtin/providers/google/import_compute_router_test.go deleted file mode 100644 index e149fa836..000000000 --- a/builtin/providers/google/import_compute_router_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeRouter_import(t *testing.T) { - resourceName := "google_compute_router.foobar" - resourceRegion := "europe-west1" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterBasic(resourceRegion), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_compute_target_pool_test.go 
b/builtin/providers/google/import_compute_target_pool_test.go deleted file mode 100644 index 9d3e70322..000000000 --- a/builtin/providers/google/import_compute_target_pool_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeTargetPool_importBasic(t *testing.T) { - resourceName := "google_compute_target_pool.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetPoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetPool_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_dns_managed_zone_test.go b/builtin/providers/google/import_dns_managed_zone_test.go deleted file mode 100644 index 751663516..000000000 --- a/builtin/providers/google/import_dns_managed_zone_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDnsManagedZone_importBasic(t *testing.T) { - resourceName := "google_dns_managed_zone.foobar" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsManagedZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsManagedZone_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/google/import_google_project_test.go b/builtin/providers/google/import_google_project_test.go deleted file mode 100644 index 2bec9461a..000000000 --- a/builtin/providers/google/import_google_project_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccGoogleProject_importBasic(t *testing.T) { - resourceName := "google_project.acceptance" - projectId := "terraform-" + acctest.RandString(10) - conf := testAccGoogleProject_import(projectId, org, pname) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: conf, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccGoogleProject_import(pid, orgId, projectName string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - org_id = "%s" - name = "%s" -}`, pid, orgId, projectName) -} diff --git a/builtin/providers/google/import_sql_user_test.go b/builtin/providers/google/import_sql_user_test.go deleted file mode 100644 index ea58f1aa0..000000000 --- a/builtin/providers/google/import_sql_user_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccGoogleSqlUser_importBasic(t *testing.T) { - resourceName := "google_sql_user.user" - user := acctest.RandString(10) - instance := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleSqlUser_basic(instance, user), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"password"}, - }, - }, - }) -} diff --git a/builtin/providers/google/import_storage_bucket_test.go b/builtin/providers/google/import_storage_bucket_test.go deleted file mode 100644 
index 138b454be..000000000 --- a/builtin/providers/google/import_storage_bucket_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccStorageBucket_import(t *testing.T) { - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageBucket_basic(bucketName), - }, - resource.TestStep{ - ResourceName: "google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy"}, - }, - }, - }) -} diff --git a/builtin/providers/google/metadata.go b/builtin/providers/google/metadata.go deleted file mode 100644 index e2ebd18a3..000000000 --- a/builtin/providers/google/metadata.go +++ /dev/null @@ -1,73 +0,0 @@ -package google - -import ( - "fmt" - - "google.golang.org/api/compute/v1" -) - -const FINGERPRINT_RETRIES = 10 -const FINGERPRINT_FAIL = "Invalid fingerprint." - -// Since the google compute API uses optimistic locking, there is a chance -// we need to resubmit our updated metadata. To do this, you need to provide -// an update function that attempts to submit your metadata -func MetadataRetryWrapper(update func() error) error { - attempt := 0 - for attempt < FINGERPRINT_RETRIES { - err := update() - if err != nil && err.Error() == FINGERPRINT_FAIL { - attempt++ - } else { - return err - } - } - - return fmt.Errorf("Failed to update metadata after %d retries", attempt) -} - -// Update the metadata (serverMD) according to the provided diff (oldMDMap v -// newMDMap). 
-func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { - curMDMap := make(map[string]string) - // Load metadata on server into map - for _, kv := range serverMD.Items { - // If the server state has a key that we had in our old - // state, but not in our new state, we should delete it - _, okOld := oldMDMap[kv.Key] - _, okNew := newMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - curMDMap[kv.Key] = *kv.Value - } - } - - // Insert new metadata into existing metadata (overwriting when needed) - for key, val := range newMDMap { - curMDMap[key] = val.(string) - } - - // Reformat old metadata into a list - serverMD.Items = nil - for key, val := range curMDMap { - v := val - serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } -} - -// Format metadata from the server data format -> schema data format -func MetadataFormatSchema(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} { - newMD := make(map[string]interface{}) - - for _, kv := range md.Items { - if _, ok := curMDMap[kv.Key]; ok { - newMD[kv.Key] = *kv.Value - } - } - - return newMD -} diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go deleted file mode 100644 index 6c08fd11c..000000000 --- a/builtin/providers/google/provider.go +++ /dev/null @@ -1,288 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -// Global MutexKV -var mutexKV = mutexkv.NewMutexKV() - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "credentials": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CREDENTIALS", - "GOOGLE_CLOUD_KEYFILE_JSON", - "GCLOUD_KEYFILE_JSON", - }, nil), - ValidateFunc: validateCredentials, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", - }, nil), - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_REGION", - "GCLOUD_REGION", - "CLOUDSDK_COMPUTE_REGION", - }, nil), - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "google_compute_network": dataSourceGoogleComputeNetwork(), - "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), - "google_compute_zones": dataSourceGoogleComputeZones(), - "google_container_engine_versions": dataSourceGoogleContainerEngineVersions(), - "google_iam_policy": dataSourceGoogleIamPolicy(), - "google_storage_object_signed_url": dataSourceGoogleSignedUrl(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "google_bigquery_dataset": resourceBigQueryDataset(), - "google_bigquery_table": resourceBigQueryTable(), - "google_compute_autoscaler": resourceComputeAutoscaler(), - "google_compute_address": resourceComputeAddress(), - "google_compute_backend_bucket": resourceComputeBackendBucket(), - "google_compute_backend_service": resourceComputeBackendService(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_snapshot": resourceComputeSnapshot(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_forwarding_rule": resourceComputeForwardingRule(), - "google_compute_global_address": resourceComputeGlobalAddress(), - "google_compute_global_forwarding_rule": 
resourceComputeGlobalForwardingRule(), - "google_compute_health_check": resourceComputeHealthCheck(), - "google_compute_http_health_check": resourceComputeHttpHealthCheck(), - "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), - "google_compute_image": resourceComputeImage(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_instance_group": resourceComputeInstanceGroup(), - "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), - "google_compute_instance_template": resourceComputeInstanceTemplate(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_project_metadata": resourceComputeProjectMetadata(), - "google_compute_region_backend_service": resourceComputeRegionBackendService(), - "google_compute_route": resourceComputeRoute(), - "google_compute_router": resourceComputeRouter(), - "google_compute_router_interface": resourceComputeRouterInterface(), - "google_compute_router_peer": resourceComputeRouterPeer(), - "google_compute_ssl_certificate": resourceComputeSslCertificate(), - "google_compute_subnetwork": resourceComputeSubnetwork(), - "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), - "google_compute_target_https_proxy": resourceComputeTargetHttpsProxy(), - "google_compute_target_pool": resourceComputeTargetPool(), - "google_compute_url_map": resourceComputeUrlMap(), - "google_compute_vpn_gateway": resourceComputeVpnGateway(), - "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), - "google_container_cluster": resourceContainerCluster(), - "google_container_node_pool": resourceContainerNodePool(), - "google_dns_managed_zone": resourceDnsManagedZone(), - "google_dns_record_set": resourceDnsRecordSet(), - "google_sql_database": resourceSqlDatabase(), - "google_sql_database_instance": resourceSqlDatabaseInstance(), - "google_sql_user": resourceSqlUser(), - "google_project": resourceGoogleProject(), - "google_project_iam_policy": 
resourceGoogleProjectIamPolicy(), - "google_project_services": resourceGoogleProjectServices(), - "google_pubsub_topic": resourcePubsubTopic(), - "google_pubsub_subscription": resourcePubsubSubscription(), - "google_service_account": resourceGoogleServiceAccount(), - "google_storage_bucket": resourceStorageBucket(), - "google_storage_bucket_acl": resourceStorageBucketAcl(), - "google_storage_bucket_object": resourceStorageBucketObject(), - "google_storage_object_acl": resourceStorageObjectAcl(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - credentials := d.Get("credentials").(string) - config := Config{ - Credentials: credentials, - Project: d.Get("project").(string), - Region: d.Get("region").(string), - } - - if err := config.loadAndValidate(); err != nil { - return nil, err - } - - return &config, nil -} - -func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { - if v == nil || v.(string) == "" { - return - } - creds := v.(string) - var account accountFile - if err := json.Unmarshal([]byte(creds), &account); err != nil { - errors = append(errors, - fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err)) - } - - return -} - -// getRegionFromZone returns the region from a zone for Google cloud. -func getRegionFromZone(zone string) string { - if zone != "" && len(zone) > 2 { - region := zone[:len(zone)-2] - return region - } - return "" -} - -// getRegion reads the "region" field from the given resource data and falls -// back to the provider's value if not given. If the provider's value is not -// given, an error is returned. 
-func getRegion(d *schema.ResourceData, config *Config) (string, error) { - res, ok := d.GetOk("region") - if !ok { - if config.Region != "" { - return config.Region, nil - } - return "", fmt.Errorf("%q: required field is not set", "region") - } - return res.(string), nil -} - -// getProject reads the "project" field from the given resource data and falls -// back to the provider's value if not given. If the provider's value is not -// given, an error is returned. -func getProject(d *schema.ResourceData, config *Config) (string, error) { - res, ok := d.GetOk("project") - if !ok { - if config.Project != "" { - return config.Project, nil - } - return "", fmt.Errorf("%q: required field is not set", "project") - } - return res.(string), nil -} - -func getZonalResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *compute.Service, project string) (interface{}, error) { - zoneList, err := compute.Zones.List(project).Do() - if err != nil { - return nil, err - } - var resource interface{} - for _, zone := range zoneList.Items { - if strings.Contains(zone.Name, region) { - resource, err = getResource(zone.Name) - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // Resource was not found in this zone - continue - } - return nil, fmt.Errorf("Error reading Resource: %s", err) - } - // Resource was found - return resource, nil - } - } - // Resource does not exist in this region - return nil, nil -} - -// getNetworkLink reads the "network" field from the given resource data and if the value: -// - is a resource URL, returns the string unchanged -// - is the network name only, then looks up the resource URL using the google client -func getNetworkLink(d *schema.ResourceData, config *Config, field string) (string, error) { - if v, ok := d.GetOk(field); ok { - network := v.(string) - - project, err := getProject(d, config) - if err != nil { - return "", err - } - - if !strings.HasPrefix(network, 
"https://www.googleapis.com/compute/") { - // Network value provided is just the name, lookup the network SelfLink - networkData, err := config.clientCompute.Networks.Get( - project, network).Do() - if err != nil { - return "", fmt.Errorf("Error reading network: %s", err) - } - network = networkData.SelfLink - } - - return network, nil - - } else { - return "", nil - } -} - -// getNetworkName reads the "network" field from the given resource data and if the value: -// - is a resource URL, extracts the network name from the URL and returns it -// - is the network name only (i.e not prefixed with http://www.googleapis.com/compute/...), is returned unchanged -func getNetworkName(d *schema.ResourceData, field string) (string, error) { - if v, ok := d.GetOk(field); ok { - network := v.(string) - return getNetworkNameFromSelfLink(network) - } - return "", nil -} - -func getNetworkNameFromSelfLink(network string) (string, error) { - if strings.HasPrefix(network, "https://www.googleapis.com/compute/") { - // extract the network name from SelfLink URL - networkName := network[strings.LastIndex(network, "/")+1:] - if networkName == "" { - return "", fmt.Errorf("network url not valid") - } - return networkName, nil - } - - return network, nil -} - -func getRouterLockName(region string, router string) string { - return fmt.Sprintf("router/%s/%s", region, router) -} - -func handleNotFoundError(err error, d *schema.ResourceData, resource string) error { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing %s because it's gone", resource) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading %s: %s", resource, err) -} - -func linkDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - parts := strings.Split(old, "/") - if parts[len(parts)-1] == new { - return true - } - return false -} diff --git a/builtin/providers/google/provider_test.go 
b/builtin/providers/google/provider_test.go deleted file mode 100644 index b69ee814b..000000000 --- a/builtin/providers/google/provider_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package google - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "google": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("GOOGLE_CREDENTIALS_FILE"); v != "" { - creds, err := ioutil.ReadFile(v) - if err != nil { - t.Fatalf("Error reading GOOGLE_CREDENTIALS_FILE path: %s", err) - } - os.Setenv("GOOGLE_CREDENTIALS", string(creds)) - } - - multiEnvSearch := func(ks []string) string { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v - } - } - return "" - } - - creds := []string{ - "GOOGLE_CREDENTIALS", - "GOOGLE_CLOUD_KEYFILE_JSON", - "GCLOUD_KEYFILE_JSON", - } - if v := multiEnvSearch(creds); v == "" { - t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", ")) - } - - projs := []string{ - "GOOGLE_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", - } - if v := multiEnvSearch(projs); v == "" { - t.Fatalf("One of %s must be set for acceptance tests", strings.Join(projs, ", ")) - } - - regs := []string{ - "GOOGLE_REGION", - "GCLOUD_REGION", - "CLOUDSDK_COMPUTE_REGION", - } - if v := multiEnvSearch(regs); v != "us-central1" { - t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", ")) - } - - if v := 
os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" { - t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests") - } -} - -func TestProvider_getRegionFromZone(t *testing.T) { - expected := "us-central1" - actual := getRegionFromZone("us-central1-f") - if expected != actual { - t.Fatalf("Region (%s) did not match expected value: %s", actual, expected) - } -} - -// getTestRegion has the same logic as the provider's getRegion, to be used in tests. -func getTestRegion(is *terraform.InstanceState, config *Config) (string, error) { - if res, ok := is.Attributes["region"]; ok { - return res, nil - } - if config.Region != "" { - return config.Region, nil - } - return "", fmt.Errorf("%q: required field is not set", "region") -} - -// getTestProject has the same logic as the provider's getProject, to be used in tests. -func getTestProject(is *terraform.InstanceState, config *Config) (string, error) { - if res, ok := is.Attributes["project"]; ok { - return res, nil - } - if config.Project != "" { - return config.Project, nil - } - return "", fmt.Errorf("%q: required field is not set", "project") -} diff --git a/builtin/providers/google/resource_bigquery_dataset.go b/builtin/providers/google/resource_bigquery_dataset.go deleted file mode 100644 index 8080b8dbc..000000000 --- a/builtin/providers/google/resource_bigquery_dataset.go +++ /dev/null @@ -1,276 +0,0 @@ -package google - -import ( - "fmt" - "log" - "regexp" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "google.golang.org/api/bigquery/v2" -) - -func resourceBigQueryDataset() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryDatasetCreate, - Read: resourceBigQueryDatasetRead, - Update: resourceBigQueryDatasetUpdate, - Delete: resourceBigQueryDatasetDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - // DatasetId: [Required] A unique ID for this 
dataset, without the - // project name. The ID must contain only letters (a-z, A-Z), numbers - // (0-9), or underscores (_). The maximum length is 1,024 characters. - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) - } - - if len(value) > 1024 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 1,024 characters", k)) - } - - return - }, - }, - - // ProjectId: [Optional] The ID of the project containing this dataset. - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // FriendlyName: [Optional] A descriptive name for the dataset. - "friendly_name": { - Type: schema.TypeString, - Optional: true, - }, - - // Description: [Optional] A user-friendly description of the dataset. - "description": { - Type: schema.TypeString, - Optional: true, - }, - - // Location: [Experimental] The geographic location where the dataset - // should reside. Possible values include EU and US. The default value - // is US. - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "US", - ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false), - }, - - // DefaultTableExpirationMs: [Optional] The default lifetime of all - // tables in the dataset, in milliseconds. The minimum value is 3600000 - // milliseconds (one hour). Once this property is set, all newly-created - // tables in the dataset will have an expirationTime property set to the - // creation time plus the value in this property, and changing the value - // will only affect new tables, not existing ones. When the - // expirationTime for a given table is reached, that table will be - // deleted automatically. 
If a table's expirationTime is modified or - // removed before the table expires, or if you provide an explicit - // expirationTime when creating a table, that value takes precedence - // over the default expiration time indicated by this property. - "default_table_expiration_ms": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 3600000 { - errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) - } - - return - }, - }, - - // Labels: [Experimental] The labels associated with this dataset. You - // can use these to organize and group your datasets. You can set this - // property when inserting or updating a dataset. - "labels": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - - // SelfLink: [Output-only] A URL that can be used to access the resource - // again. You can use this URL in Get or Update requests to the - // resource. - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - // Etag: [Output-only] A hash of the resource. - "etag": { - Type: schema.TypeString, - Computed: true, - }, - - // CreationTime: [Output-only] The time when this dataset was created, - // in milliseconds since the epoch. - "creation_time": { - Type: schema.TypeInt, - Computed: true, - }, - - // LastModifiedTime: [Output-only] The date when this dataset or any of - // its tables was last modified, in milliseconds since the epoch. 
- "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - }, - }, - } -} - -func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - dataset := &bigquery.Dataset{ - DatasetReference: &bigquery.DatasetReference{ - DatasetId: d.Get("dataset_id").(string), - ProjectId: project, - }, - } - - if v, ok := d.GetOk("friendly_name"); ok { - dataset.FriendlyName = v.(string) - } - - if v, ok := d.GetOk("description"); ok { - dataset.Description = v.(string) - } - - if v, ok := d.GetOk("location"); ok { - dataset.Location = v.(string) - } - - if v, ok := d.GetOk("default_table_expiration_ms"); ok { - dataset.DefaultTableExpirationMs = int64(v.(int)) - } - - if v, ok := d.GetOk("labels"); ok { - labels := map[string]string{} - - for k, v := range v.(map[string]interface{}) { - labels[k] = v.(string) - } - - dataset.Labels = labels - } - - return dataset, nil -} - -func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - dataset, err := resourceDataset(d, meta) - if err != nil { - return err - } - - log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId) - - res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery dataset %s has been created", res.Id) - - d.SetId(res.Id) - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetParseID(id string) (string, string) { - // projectID, datasetID - parts := strings.Split(id, ":") - return parts[0], parts[1] -} - -func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id()) - - projectID, datasetID 
:= resourceBigQueryDatasetParseID(d.Id()) - - res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", datasetID)) - } - - d.Set("etag", res.Etag) - d.Set("labels", res.Labels) - d.Set("location", res.Location) - d.Set("self_link", res.SelfLink) - d.Set("description", res.Description) - d.Set("friendly_name", res.FriendlyName) - d.Set("creation_time", res.CreationTime) - d.Set("last_modified_time", res.LastModifiedTime) - d.Set("dataset_id", res.DatasetReference.DatasetId) - d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs) - - return nil -} - -func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - dataset, err := resourceDataset(d, meta) - if err != nil { - return err - } - - log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id()) - - projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) - - if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil { - return err - } - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id()) - - projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) - - if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_bigquery_dataset_test.go b/builtin/providers/google/resource_bigquery_dataset_test.go deleted file mode 100644 index e1032ce91..000000000 --- a/builtin/providers/google/resource_bigquery_dataset_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccBigQueryDataset_basic(t *testing.T) { - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBigQueryDatasetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - Check: resource.ComposeTestCheckFunc( - testAccCheckBigQueryDatasetExists( - "google_bigquery_dataset.test"), - ), - }, - - { - Config: testAccBigQueryDatasetUpdated(datasetID), - Check: resource.ComposeTestCheckFunc( - testAccCheckBigQueryDatasetExists( - "google_bigquery_dataset.test"), - ), - }, - }, - }) -} - -func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_dataset" { - continue - } - - _, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() - if err == nil { - return fmt.Errorf("Dataset still exists") - } - } - - return nil -} - -func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() - if err != nil { - return err - } - - if found.Id != rs.Primary.ID { - return fmt.Errorf("Dataset not found") - } - - return nil - } -} - -func testAccBigQueryDataset(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_table_expiration_ms = 3600000 - - 
labels { - env = "foo" - default_table_expiration_ms = 3600000 - } -}`, datasetID) -} - -func testAccBigQueryDatasetUpdated(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_table_expiration_ms = 7200000 - - labels { - env = "bar" - default_table_expiration_ms = 7200000 - } -}`, datasetID) -} diff --git a/builtin/providers/google/resource_bigquery_table.go b/builtin/providers/google/resource_bigquery_table.go deleted file mode 100644 index 298152a86..000000000 --- a/builtin/providers/google/resource_bigquery_table.go +++ /dev/null @@ -1,396 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/structure" - "github.com/hashicorp/terraform/helper/validation" - "google.golang.org/api/bigquery/v2" -) - -func resourceBigQueryTable() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryTableCreate, - Read: resourceBigQueryTableRead, - Delete: resourceBigQueryTableDelete, - Update: resourceBigQueryTableUpdate, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - // TableId: [Required] The ID of the table. The ID must contain only - // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - // length is 1,024 characters. - "table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - // DatasetId: [Required] The ID of the dataset containing this table. - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - // ProjectId: [Required] The ID of the project containing this table. - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // Description: [Optional] A user-friendly description of this table. 
- "description": { - Type: schema.TypeString, - Optional: true, - }, - - // ExpirationTime: [Optional] The time when this table expires, in - // milliseconds since the epoch. If not present, the table will persist - // indefinitely. Expired tables will be deleted and their storage - // reclaimed. - "expiration_time": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - // FriendlyName: [Optional] A descriptive name for this table. - "friendly_name": { - Type: schema.TypeString, - Optional: true, - }, - - // Labels: [Experimental] The labels associated with this table. You can - // use these to organize and group your tables. Label keys and values - // can be no longer than 63 characters, can only contain lowercase - // letters, numeric characters, underscores and dashes. International - // characters are allowed. Label values are optional. Label keys must - // start with a letter and each label in the list must have a different - // key. - "labels": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - - // Schema: [Optional] Describes the schema of this table. - "schema": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.ValidateJsonString, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - }, - - // TimePartitioning: [Experimental] If specified, configures time-based - // partitioning for this table. - "time_partitioning": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ExpirationMs: [Optional] Number of milliseconds for which to keep the - // storage for a partition. - "expiration_ms": { - Type: schema.TypeInt, - Optional: true, - }, - - // Type: [Required] The only type supported is DAY, which will generate - // one partition per day based on data loading time. 
- "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"DAY"}, false), - }, - }, - }, - }, - - // CreationTime: [Output-only] The time when this table was created, in - // milliseconds since the epoch. - "creation_time": { - Type: schema.TypeInt, - Computed: true, - }, - - // Etag: [Output-only] A hash of this resource. - "etag": { - Type: schema.TypeString, - Computed: true, - }, - - // LastModifiedTime: [Output-only] The time when this table was last - // modified, in milliseconds since the epoch. - "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - }, - - // Location: [Output-only] The geographic location where the table - // resides. This value is inherited from the dataset. - "location": { - Type: schema.TypeString, - Computed: true, - }, - - // NumBytes: [Output-only] The size of this table in bytes, excluding - // any data in the streaming buffer. - "num_bytes": { - Type: schema.TypeInt, - Computed: true, - }, - - // NumLongTermBytes: [Output-only] The number of bytes in the table that - // are considered "long-term storage". - "num_long_term_bytes": { - Type: schema.TypeInt, - Computed: true, - }, - - // NumRows: [Output-only] The number of rows of data in this table, - // excluding any data in the streaming buffer. - "num_rows": { - Type: schema.TypeInt, - Computed: true, - }, - - // SelfLink: [Output-only] A URL that can be used to access this - // resource again. - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - // Type: [Output-only] Describes the table type. The following values - // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table - // defined by a SQL query. EXTERNAL: A table that references data stored - // in an external storage system, such as Google Cloud Storage. The - // default value is TABLE. 
- "type": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - table := &bigquery.Table{ - TableReference: &bigquery.TableReference{ - DatasetId: d.Get("dataset_id").(string), - TableId: d.Get("table_id").(string), - ProjectId: project, - }, - } - - if v, ok := d.GetOk("description"); ok { - table.Description = v.(string) - } - - if v, ok := d.GetOk("expiration_time"); ok { - table.ExpirationTime = v.(int64) - } - - if v, ok := d.GetOk("friendly_name"); ok { - table.FriendlyName = v.(string) - } - - if v, ok := d.GetOk("labels"); ok { - labels := map[string]string{} - - for k, v := range v.(map[string]interface{}) { - labels[k] = v.(string) - } - - table.Labels = labels - } - - if v, ok := d.GetOk("schema"); ok { - schema, err := expandSchema(v) - if err != nil { - return nil, err - } - - table.Schema = schema - } - - if v, ok := d.GetOk("time_partitioning"); ok { - table.TimePartitioning = expandTimePartitioning(v) - } - - return table, nil -} - -func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - - log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) - - res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", res.Id) - - d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - - return resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableParseID(id string) (string, string, 
string) { - parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' }) - return parts[0], parts[1], parts[2] // projectID, datasetID, tableID -} - -func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) - - projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) - - res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) - } - - d.Set("description", res.Description) - d.Set("expiration_time", res.ExpirationTime) - d.Set("friendly_name", res.FriendlyName) - d.Set("labels", res.Labels) - d.Set("creation_time", res.CreationTime) - d.Set("etag", res.Etag) - d.Set("last_modified_time", res.LastModifiedTime) - d.Set("location", res.Location) - d.Set("num_bytes", res.NumBytes) - d.Set("table_id", res.TableReference.TableId) - d.Set("dataset_id", res.TableReference.DatasetId) - d.Set("num_long_term_bytes", res.NumLongTermBytes) - d.Set("num_rows", res.NumRows) - d.Set("self_link", res.SelfLink) - d.Set("type", res.Type) - - if res.TimePartitioning != nil { - if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil { - return err - } - } - - if res.Schema != nil { - schema, err := flattenSchema(res.Schema) - if err != nil { - return err - } - - d.Set("schema", schema) - } - - return nil -} - -func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) - - projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) - - if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil { - return err - } - - return 
resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) - - projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) - - if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandSchema(raw interface{}) (*bigquery.TableSchema, error) { - var fields []*bigquery.TableFieldSchema - - if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { - return nil, err - } - - return &bigquery.TableSchema{Fields: fields}, nil -} - -func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { - schema, err := json.Marshal(tableSchema.Fields) - if err != nil { - return "", err - } - - return string(schema), nil -} - -func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { - raw := configured.([]interface{})[0].(map[string]interface{}) - tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} - - if v, ok := raw["expiration_ms"]; ok { - tp.ExpirationMs = int64(v.(int)) - } - - return tp -} - -func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} { - result := map[string]interface{}{"type": tp.Type} - - if tp.ExpirationMs != 0 { - result["expiration_ms"] = tp.ExpirationMs - } - - return []map[string]interface{}{result} -} diff --git a/builtin/providers/google/resource_bigquery_table_test.go b/builtin/providers/google/resource_bigquery_table_test.go deleted file mode 100644 index f01b7e0f4..000000000 --- a/builtin/providers/google/resource_bigquery_table_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccBigQueryTable_Basic(t 
*testing.T) { - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBigQueryTableDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable(datasetID, tableID), - Check: resource.ComposeTestCheckFunc( - testAccBigQueryTableExists( - "google_bigquery_table.test"), - ), - }, - - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - Check: resource.ComposeTestCheckFunc( - testAccBigQueryTableExists( - "google_bigquery_table.test"), - ), - }, - }, - }) -} - -func testAccCheckBigQueryTableDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_table" { - continue - } - - config := testAccProvider.Meta().(*Config) - _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do() - if err == nil { - return fmt.Errorf("Table still present") - } - } - - return nil -} - -func testAccBigQueryTableExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - config := testAccProvider.Meta().(*Config) - _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do() - if err != nil { - return fmt.Errorf("BigQuery Table not present") - } - - return nil - } -} - -func testAccBigQueryTable(datasetID, tableID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" -} - -resource "google_bigquery_table" "test" { - table_id = "%s" - dataset_id = "${google_bigquery_dataset.test.dataset_id}" - - time_partitioning { - type = 
"DAY" - } - - schema = <", n, attr) - } - - if attr != disk.DiskEncryptionKey.Sha256 { - return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, disk.DiskEncryptionKey.Sha256) - } - return nil - } -} - -func testAccCheckComputeDiskInstances(n string, disk *compute.Disk) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - attr := rs.Primary.Attributes["users.#"] - if strconv.Itoa(len(disk.Users)) != attr { - return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users) - } - - for pos, user := range disk.Users { - if rs.Primary.Attributes["users."+strconv.Itoa(pos)] != user { - return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v", - n, rs.Primary.Attributes["users"], disk.Users) - } - } - return nil - } -} - -func testAccComputeDisk_basic(diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160803" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" -}`, diskName) -} - -func testAccComputeDisk_resized(diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160803" - size = 100 - type = "pd-ssd" - zone = "us-central1-a" -}`, diskName) -} - -func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string { - return fmt.Sprintf(` - resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160803" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" - project = "%s" - } - -resource "google_compute_snapshot" "snapdisk" { - name = "%s" - source_disk = "${google_compute_disk.foobar.name}" - zone = "us-central1-a" - project = "%s" -} -resource "google_compute_disk" "seconddisk" { - name = 
"%s" - snapshot = "${google_compute_snapshot.snapdisk.self_link}" - type = "pd-ssd" - zone = "us-central1-a" -}`, firstDiskName, xpn_host, snapshotName, xpn_host, diskName) -} - -func testAccComputeDisk_encryption(diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160803" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" - disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" -}`, diskName) -} - -func testAccComputeDisk_deleteDetach(instanceName, diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foo" { - name = "%s" - image = "debian-8" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" -} - -resource "google_compute_instance" "bar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20170523" - } - - disk { - disk = "${google_compute_disk.foo.name}" - auto_delete = false - } - - network_interface { - network = "default" - } -}`, diskName, instanceName) -} diff --git a/builtin/providers/google/resource_compute_firewall.go b/builtin/providers/google/resource_compute_firewall.go deleted file mode 100644 index c276d86c1..000000000 --- a/builtin/providers/google/resource_compute_firewall.go +++ /dev/null @@ -1,313 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeFirewall() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFirewallCreate, - Read: resourceComputeFirewallRead, - Update: resourceComputeFirewallUpdate, - Delete: resourceComputeFirewallDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - SchemaVersion: 1, - MigrateState: resourceComputeFirewallMigrateState, - - Schema: map[string]*schema.Schema{ - "name": { - 
Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "allow": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - }, - - "ports": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - Set: resourceComputeFirewallAllowHash, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "source_ranges": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "source_tags": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "target_tags": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceComputeFirewallAllowHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) - - // We need to make sure to sort the strings below so that we always - // generate the same hash code no matter what is in the set. 
- if v, ok := m["ports"]; ok { - s := convertStringArr(v.([]interface{})) - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - return hashcode.String(buf.String()) -} - -func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - firewall, err := resourceFirewall(d, meta) - if err != nil { - return err - } - - op, err := config.clientCompute.Firewalls.Insert( - project, firewall).Do() - if err != nil { - return fmt.Errorf("Error creating firewall: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(firewall.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Firewall") - if err != nil { - return err - } - - return resourceComputeFirewallRead(d, meta) -} - -func flattenAllowed(allowed []*compute.FirewallAllowed) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(allowed)) - for _, allow := range allowed { - allowMap := make(map[string]interface{}) - allowMap["protocol"] = allow.IPProtocol - allowMap["ports"] = allow.Ports - - result = append(result, allowMap) - } - return result -} - -func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - firewall, err := config.clientCompute.Firewalls.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Firewall %q", d.Get("name").(string))) - } - - networkUrl := strings.Split(firewall.Network, "/") - d.Set("self_link", firewall.SelfLink) - d.Set("name", firewall.Name) - d.Set("network", networkUrl[len(networkUrl)-1]) - d.Set("description", firewall.Description) - d.Set("project", project) - d.Set("source_ranges", firewall.SourceRanges) - d.Set("source_tags", firewall.SourceTags) - d.Set("target_tags", 
firewall.TargetTags) - d.Set("allow", flattenAllowed(firewall.Allowed)) - return nil -} - -func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - firewall, err := resourceFirewall(d, meta) - if err != nil { - return err - } - - op, err := config.clientCompute.Firewalls.Update( - project, d.Id(), firewall).Do() - if err != nil { - return fmt.Errorf("Error updating firewall: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Updating Firewall") - if err != nil { - return err - } - - d.Partial(false) - - return resourceComputeFirewallRead(d, meta) -} - -func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the firewall - op, err := config.clientCompute.Firewalls.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting firewall: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Firewall") - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceFirewall( - d *schema.ResourceData, - meta interface{}) (*compute.Firewall, error) { - config := meta.(*Config) - - project, _ := getProject(d, config) - - // Look up the network to attach the firewall to - network, err := config.clientCompute.Networks.Get( - project, d.Get("network").(string)).Do() - if err != nil { - return nil, fmt.Errorf("Error reading network: %s", err) - } - - // Build up the list of allowed entries - var allowed []*compute.FirewallAllowed - if v := d.Get("allow").(*schema.Set); v.Len() > 0 { - allowed = make([]*compute.FirewallAllowed, 0, v.Len()) - for _, v := range v.List() { - m := v.(map[string]interface{}) - - var ports []string - if v := convertStringArr(m["ports"].([]interface{})); len(v) > 
0 { - ports = make([]string, len(v)) - for i, v := range v { - ports[i] = v - } - } - - allowed = append(allowed, &compute.FirewallAllowed{ - IPProtocol: m["protocol"].(string), - Ports: ports, - }) - } - } - - // Build up the list of sources - var sourceRanges, sourceTags []string - if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 { - sourceRanges = make([]string, v.Len()) - for i, v := range v.List() { - sourceRanges[i] = v.(string) - } - } - if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 { - sourceTags = make([]string, v.Len()) - for i, v := range v.List() { - sourceTags[i] = v.(string) - } - } - - // Build up the list of targets - var targetTags []string - if v := d.Get("target_tags").(*schema.Set); v.Len() > 0 { - targetTags = make([]string, v.Len()) - for i, v := range v.List() { - targetTags[i] = v.(string) - } - } - - // Build the firewall parameter - return &compute.Firewall{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Network: network.SelfLink, - Allowed: allowed, - SourceRanges: sourceRanges, - SourceTags: sourceTags, - TargetTags: targetTags, - }, nil -} diff --git a/builtin/providers/google/resource_compute_firewall_migrate.go b/builtin/providers/google/resource_compute_firewall_migrate.go deleted file mode 100644 index 8509075f4..000000000 --- a/builtin/providers/google/resource_compute_firewall_migrate.go +++ /dev/null @@ -1,93 +0,0 @@ -package google - -import ( - "fmt" - "log" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceComputeFirewallMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Compute Firewall State v0; migrating to v1") - is, err := migrateFirewallStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - 
default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateFirewallStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - idx := 0 - portCount := 0 - newPorts := make(map[string]string) - keys := make([]string, len(is.Attributes)) - for k, _ := range is.Attributes { - keys[idx] = k - idx++ - - } - sort.Strings(keys) - for _, k := range keys { - if !strings.HasPrefix(k, "allow.") { - continue - } - - if k == "allow.#" { - continue - } - - if strings.HasSuffix(k, ".ports.#") { - continue - } - - if strings.HasSuffix(k, ".protocol") { - continue - } - - // We have a key that looks like "allow..ports.*" and we know it's not - // allow..ports.# because we deleted it above, so it must be allow..ports. - // from the Set of Ports. Just need to convert it to a list by - // replacing second hash with sequential numbers. - kParts := strings.Split(k, ".") - - // Sanity check: all four parts should be there and should be a number - badFormat := false - if len(kParts) != 4 { - badFormat = true - } else if _, err := strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, fmt.Errorf( - "migration error: found port key in unexpected format: %s", k) - } - allowHash, _ := strconv.Atoi(kParts[1]) - newK := fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount) - portCount++ - newPorts[newK] = is.Attributes[k] - delete(is.Attributes, k) - } - - for k, v := range newPorts { - is.Attributes[k] = v - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/google/resource_compute_firewall_migrate_test.go b/builtin/providers/google/resource_compute_firewall_migrate_test.go deleted file mode 100644 index e28d607f3..000000000 --- a/builtin/providers/google/resource_compute_firewall_migrate_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - 
"testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestComputeFirewallMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "change scope from list to set": { - StateVersion: 0, - Attributes: map[string]string{ - "allow.#": "1", - "allow.0.protocol": "udp", - "allow.0.ports.#": "4", - "allow.0.ports.1693978638": "8080", - "allow.0.ports.172152165": "8081", - "allow.0.ports.299962681": "7072", - "allow.0.ports.3435931483": "4044", - }, - Expected: map[string]string{ - "allow.#": "1", - "allow.0.protocol": "udp", - "allow.0.ports.#": "4", - "allow.0.ports.0": "8080", - "allow.0.ports.1": "8081", - "allow.0.ports.2": "7072", - "allow.0.ports.3": "4044", - }, - }, - } - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceComputeFirewallMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestComputeFirewallMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceComputeFirewallMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceComputeFirewallMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/google/resource_compute_firewall_test.go b/builtin/providers/google/resource_compute_firewall_test.go deleted file mode 100644 index 8b077314b..000000000 --- 
a/builtin/providers/google/resource_compute_firewall_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeFirewall_basic(t *testing.T) { - var firewall compute.Firewall - networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeFirewall_basic(networkName, firewallName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeFirewallExists( - "google_compute_firewall.foobar", &firewall), - ), - }, - }, - }) -} - -func TestAccComputeFirewall_update(t *testing.T) { - var firewall compute.Firewall - networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeFirewallDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeFirewall_basic(networkName, firewallName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeFirewallExists( - "google_compute_firewall.foobar", &firewall), - ), - }, - resource.TestStep{ - Config: testAccComputeFirewall_update(networkName, firewallName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeFirewallExists( - "google_compute_firewall.foobar", &firewall), - testAccCheckComputeFirewallPorts( - &firewall, "80-255"), - ), - }, - }, - }) -} - -func testAccCheckComputeFirewallDestroy(s 
*terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_firewall" { - continue - } - - _, err := config.clientCompute.Firewalls.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Firewall still exists") - } - } - - return nil -} - -func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Firewalls.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Firewall not found") - } - - *firewall = *found - - return nil - } -} - -func testAccCheckComputeFirewallPorts( - firewall *compute.Firewall, ports string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(firewall.Allowed) == 0 { - return fmt.Errorf("no allowed rules") - } - - if firewall.Allowed[0].Ports[0] != ports { - return fmt.Errorf("bad: %#v", firewall.Allowed[0].Ports) - } - - return nil - } -} - -func testAccComputeFirewall_basic(network, firewall string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "%s" - ipv4_range = "10.0.0.0/16" - } - - resource "google_compute_firewall" "foobar" { - name = "firewall-test-%s" - description = "Resource created for Terraform acceptance testing" - network = "${google_compute_network.foobar.name}" - source_tags = ["foo"] - - allow { - protocol = "icmp" - } - }`, network, firewall) -} - -func testAccComputeFirewall_update(network, firewall string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "%s" - ipv4_range = "10.0.0.0/16" - } - 
- resource "google_compute_firewall" "foobar" { - name = "firewall-test-%s" - description = "Resource created for Terraform acceptance testing" - network = "${google_compute_network.foobar.name}" - source_tags = ["foo"] - - allow { - protocol = "tcp" - ports = ["80-255"] - } - }`, network, firewall) -} diff --git a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go deleted file mode 100644 index 696bd62a3..000000000 --- a/builtin/providers/google/resource_compute_forwarding_rule.go +++ /dev/null @@ -1,276 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeForwardingRule() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeForwardingRuleCreate, - Read: resourceComputeForwardingRuleRead, - Delete: resourceComputeForwardingRuleDelete, - Update: resourceComputeForwardingRuleUpdate, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "target": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - - "backend_service": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "load_balancing_scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "EXTERNAL", - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - 
Computed: true, - }, - - "port_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old == new+"-"+new { - return true - } - return false - }, - }, - - "ports": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ForceNew: true, - Set: schema.HashString, - MaxItems: 5, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "subnetwork": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - } -} - -func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - ps := d.Get("ports").(*schema.Set).List() - ports := make([]string, 0, len(ps)) - for _, v := range ps { - ports = append(ports, v.(string)) - } - - frule := &compute.ForwardingRule{ - BackendService: d.Get("backend_service").(string), - IPAddress: d.Get("ip_address").(string), - IPProtocol: d.Get("ip_protocol").(string), - Description: d.Get("description").(string), - LoadBalancingScheme: d.Get("load_balancing_scheme").(string), - Name: d.Get("name").(string), - Network: d.Get("network").(string), - PortRange: d.Get("port_range").(string), - Ports: ports, - Subnetwork: d.Get("subnetwork").(string), - Target: d.Get("target").(string), - } - - log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) - op, err := config.clientCompute.ForwardingRules.Insert( - project, region, frule).Do() - if err != nil { - 
return fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(frule.Name) - - err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") - if err != nil { - return err - } - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("target") { - target_name := d.Get("target").(string) - target_ref := &compute.TargetReference{Target: target_name} - op, err := config.clientCompute.ForwardingRules.SetTarget( - project, region, d.Id(), target_ref).Do() - if err != nil { - return fmt.Errorf("Error updating target: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") - if err != nil { - return err - } - - d.SetPartial("target") - } - - d.Partial(false) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - frule, err := config.clientCompute.ForwardingRules.Get( - project, region, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule %q", d.Get("name").(string))) - } - - d.Set("name", frule.Name) - d.Set("target", frule.Target) - d.Set("backend_service", frule.BackendService) - d.Set("description", frule.Description) - d.Set("load_balancing_scheme", frule.LoadBalancingScheme) - d.Set("network", frule.Network) - d.Set("port_range", frule.PortRange) - d.Set("ports", frule.Ports) - d.Set("project", project) - 
d.Set("region", region) - d.Set("subnetwork", frule.Subnetwork) - d.Set("ip_address", frule.IPAddress) - d.Set("ip_protocol", frule.IPProtocol) - d.Set("self_link", frule.SelfLink) - return nil -} - -func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the ForwardingRule - log.Printf("[DEBUG] ForwardingRule delete request") - op, err := config.clientCompute.ForwardingRules.Delete( - project, region, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting ForwardingRule: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_forwarding_rule_test.go b/builtin/providers/google/resource_compute_forwarding_rule_test.go deleted file mode 100644 index 349ebd82c..000000000 --- a/builtin/providers/google/resource_compute_forwarding_rule_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeForwardingRule_basic(t *testing.T) { - poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeForwardingRule_basic(poolName, ruleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeForwardingRuleExists( - "google_compute_forwarding_rule.foobar"), - ), - }, - 
}, - }) -} - -func TestAccComputeForwardingRule_singlePort(t *testing.T) { - poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeForwardingRule_singlePort(poolName, ruleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeForwardingRuleExists( - "google_compute_forwarding_rule.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeForwardingRule_ip(t *testing.T) { - addrName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeForwardingRuleExists( - "google_compute_forwarding_rule.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) { - serviceName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeForwardingRuleExists( - 
"google_compute_forwarding_rule.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_forwarding_rule" { - continue - } - - _, err := config.clientCompute.ForwardingRules.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("ForwardingRule still exists") - } - } - - return nil -} - -func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.ForwardingRules.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("ForwardingRule not found") - } - - return nil - } -} - -func testAccComputeForwardingRule_basic(poolName, ruleName string) string { - return fmt.Sprintf(` -resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "%s" -} -resource "google_compute_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "UDP" - name = "%s" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" -} -`, poolName, ruleName) -} - -func testAccComputeForwardingRule_singlePort(poolName, ruleName string) string { - return fmt.Sprintf(` -resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "%s" -} -resource 
"google_compute_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "UDP" - name = "%s" - port_range = "80" - target = "${google_compute_target_pool.foobar-tp.self_link}" -} -`, poolName, ruleName) -} - -func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string { - return fmt.Sprintf(` -resource "google_compute_address" "foo" { - name = "%s" -} -resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "%s" -} -resource "google_compute_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_address = "${google_compute_address.foo.address}" - ip_protocol = "TCP" - name = "%s" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" -} -`, addrName, poolName, ruleName) -} - -func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string { - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar-bs" { - name = "%s" - description = "Resource created for Terraform acceptance testing" - health_checks = ["${google_compute_health_check.zero.self_link}"] - region = "us-central1" -} -resource "google_compute_health_check" "zero" { - name = "%s" - description = "Resource created for Terraform acceptance testing" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - port = "80" - } -} -resource "google_compute_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}" - ports = ["80"] -} -`, serviceName, checkName, ruleName) -} diff --git a/builtin/providers/google/resource_compute_global_address.go 
b/builtin/providers/google/resource_compute_global_address.go deleted file mode 100644 index db3a1798e..000000000 --- a/builtin/providers/google/resource_compute_global_address.go +++ /dev/null @@ -1,116 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeGlobalAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalAddressCreate, - Read: resourceComputeGlobalAddressRead, - Delete: resourceComputeGlobalAddressDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the address parameter - addr := &compute.Address{Name: d.Get("name").(string)} - op, err := config.clientCompute.GlobalAddresses.Insert( - project, addr).Do() - if err != nil { - return fmt.Errorf("Error creating address: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(addr.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Global Address") - if err != nil { - return err - } - - return resourceComputeGlobalAddressRead(d, meta) -} - -func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - addr, err := 
config.clientCompute.GlobalAddresses.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Global Address %q", d.Get("name").(string))) - } - - d.Set("address", addr.Address) - d.Set("self_link", addr.SelfLink) - d.Set("name", addr.Name) - - return nil -} - -func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the address - log.Printf("[DEBUG] address delete request") - op, err := config.clientCompute.GlobalAddresses.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting address: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Global Address") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go deleted file mode 100644 index 9ed49d836..000000000 --- a/builtin/providers/google/resource_compute_global_address_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeGlobalAddress_basic(t *testing.T) { - var addr compute.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeGlobalAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeGlobalAddress_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeGlobalAddressExists( - "google_compute_global_address.foobar", &addr), - ), - }, - }, - }) -} - -func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error { - 
config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_global_address" { - continue - } - - _, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Address still exists") - } - } - - return nil -} - -func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Addr not found") - } - - *addr = *found - - return nil - } -} - -var testAccComputeGlobalAddress_basic = fmt.Sprintf(` -resource "google_compute_global_address" "foobar" { - name = "address-test-%s" -}`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule.go b/builtin/providers/google/resource_compute_global_forwarding_rule.go deleted file mode 100644 index 7f86adbb1..000000000 --- a/builtin/providers/google/resource_compute_global_forwarding_rule.go +++ /dev/null @@ -1,187 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeGlobalForwardingRule() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalForwardingRuleCreate, - Read: resourceComputeGlobalForwardingRuleRead, - Update: resourceComputeGlobalForwardingRuleUpdate, - Delete: resourceComputeGlobalForwardingRuleDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"target": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "port_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "Please remove this attribute (it was never used)", - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - frule := &compute.ForwardingRule{ - IPAddress: d.Get("ip_address").(string), - IPProtocol: d.Get("ip_protocol").(string), - Description: d.Get("description").(string), - Name: d.Get("name").(string), - PortRange: d.Get("port_range").(string), - Target: d.Get("target").(string), - } - - op, err := config.clientCompute.GlobalForwardingRules.Insert( - project, frule).Do() - if err != nil { - return fmt.Errorf("Error creating Global Forwarding Rule: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(frule.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Global Fowarding Rule") - if err != nil { - return err - } - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != 
nil { - return err - } - - d.Partial(true) - - if d.HasChange("target") { - target_name := d.Get("target").(string) - target_ref := &compute.TargetReference{Target: target_name} - op, err := config.clientCompute.GlobalForwardingRules.SetTarget( - project, d.Id(), target_ref).Do() - if err != nil { - return fmt.Errorf("Error updating target: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Updating Global Forwarding Rule") - if err != nil { - return err - } - - d.SetPartial("target") - } - - d.Partial(false) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - frule, err := config.clientCompute.GlobalForwardingRules.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string))) - } - - d.Set("ip_address", frule.IPAddress) - d.Set("ip_protocol", frule.IPProtocol) - d.Set("self_link", frule.SelfLink) - - return nil -} - -func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the GlobalForwardingRule - log.Printf("[DEBUG] GlobalForwardingRule delete request") - op, err := config.clientCompute.GlobalForwardingRules.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting GlobalForwarding Rule") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go b/builtin/providers/google/resource_compute_global_forwarding_rule_test.go deleted file mode 100644 index 
f81361c7b..000000000 --- a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeGlobalForwardingRule_basic(t *testing.T) { - fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeGlobalForwardingRuleExists( - "google_compute_global_forwarding_rule.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeGlobalForwardingRule_update(t *testing.T) { - fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeGlobalForwardingRuleExists( - "google_compute_global_forwarding_rule.foobar"), - ), - }, - - resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeGlobalForwardingRuleExists( - "google_compute_global_forwarding_rule.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeGlobalForwardingRuleDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_global_forwarding_rule" { - continue - } - - _, err := config.clientCompute.GlobalForwardingRules.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Global Forwarding Rule still exists") - } - } - - return nil -} - -func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.GlobalForwardingRules.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Global Forwarding Rule not found") - } - - return nil - } -} - -func testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap string) string { - return fmt.Sprintf(` - resource "google_compute_global_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "TCP" - name = "%s" - port_range = "80" - target = "${google_compute_target_http_proxy.foobar1.self_link}" - } - - resource 
"google_compute_target_http_proxy" "foobar1" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - url_map = "${google_compute_url_map.foobar.self_link}" - } - - resource "google_compute_target_http_proxy" "foobar2" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - url_map = "${google_compute_url_map.foobar.self_link}" - } - - resource "google_compute_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - } - - resource "google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 - } - - resource "google_compute_url_map" "foobar" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } - }`, fr, proxy1, proxy2, backend, hc, urlmap) -} - -func testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap string) string { - return fmt.Sprintf(` - resource "google_compute_global_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "TCP" - name = "%s" - port_range = "80" - target = "${google_compute_target_http_proxy.foobar2.self_link}" - } - - resource "google_compute_target_http_proxy" "foobar1" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - url_map = "${google_compute_url_map.foobar.self_link}" - } - - resource "google_compute_target_http_proxy" "foobar2" { - description = "Resource created for Terraform acceptance testing" - 
name = "%s" - url_map = "${google_compute_url_map.foobar.self_link}" - } - - resource "google_compute_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - } - - resource "google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 - } - - resource "google_compute_url_map" "foobar" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } - }`, fr, proxy1, proxy2, backend, hc, urlmap) -} diff --git a/builtin/providers/google/resource_compute_health_check.go b/builtin/providers/google/resource_compute_health_check.go deleted file mode 100644 index 286ebc195..000000000 --- a/builtin/providers/google/resource_compute_health_check.go +++ /dev/null @@ -1,485 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHealthCheckCreate, - Read: resourceComputeHealthCheckRead, - Delete: resourceComputeHealthCheckDelete, - Update: resourceComputeHealthCheckUpdate, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "check_interval_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "description": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - }, - - "healthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - - "tcp_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"ssl_health_check", "http_health_check", "https_health_check"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 80, - }, - "proxy_header": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "NONE", - }, - "request": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "response": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "ssl_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"tcp_health_check", "http_health_check", "https_health_check"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 443, - }, - "proxy_header": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "NONE", - }, - "request": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "response": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "http_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "https_health_check"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 80, - }, - "proxy_header": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "NONE", - }, - "request_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - }, - }, 
- }, - }, - - "https_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "http_health_check"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 443, - }, - "proxy_header": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "NONE", - }, - "request_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "timeout_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "unhealthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - - if v, ok := d.GetOk("tcp_health_check"); ok { - hchk.Type = "TCP" - tcpcheck := v.([]interface{})[0].(map[string]interface{}) - tcpHealthCheck := &compute.TCPHealthCheck{} - if 
val, ok := tcpcheck["port"]; ok { - tcpHealthCheck.Port = int64(val.(int)) - } - if val, ok := tcpcheck["proxy_header"]; ok { - tcpHealthCheck.ProxyHeader = val.(string) - } - if val, ok := tcpcheck["request"]; ok { - tcpHealthCheck.Request = val.(string) - } - if val, ok := tcpcheck["response"]; ok { - tcpHealthCheck.Response = val.(string) - } - hchk.TcpHealthCheck = tcpHealthCheck - } - - if v, ok := d.GetOk("ssl_health_check"); ok { - hchk.Type = "SSL" - sslcheck := v.([]interface{})[0].(map[string]interface{}) - sslHealthCheck := &compute.SSLHealthCheck{} - if val, ok := sslcheck["port"]; ok { - sslHealthCheck.Port = int64(val.(int)) - } - if val, ok := sslcheck["proxy_header"]; ok { - sslHealthCheck.ProxyHeader = val.(string) - } - if val, ok := sslcheck["request"]; ok { - sslHealthCheck.Request = val.(string) - } - if val, ok := sslcheck["response"]; ok { - sslHealthCheck.Response = val.(string) - } - hchk.SslHealthCheck = sslHealthCheck - } - - if v, ok := d.GetOk("http_health_check"); ok { - hchk.Type = "HTTP" - httpcheck := v.([]interface{})[0].(map[string]interface{}) - httpHealthCheck := &compute.HTTPHealthCheck{} - if val, ok := httpcheck["host"]; ok { - httpHealthCheck.Host = val.(string) - } - if val, ok := httpcheck["port"]; ok { - httpHealthCheck.Port = int64(val.(int)) - } - if val, ok := httpcheck["proxy_header"]; ok { - httpHealthCheck.ProxyHeader = val.(string) - } - if val, ok := httpcheck["request_path"]; ok { - httpHealthCheck.RequestPath = val.(string) - } - hchk.HttpHealthCheck = httpHealthCheck - } - - if v, ok := d.GetOk("https_health_check"); ok { - hchk.Type = "HTTPS" - httpscheck := v.([]interface{})[0].(map[string]interface{}) - httpsHealthCheck := &compute.HTTPSHealthCheck{} - if val, ok := httpscheck["host"]; ok { - httpsHealthCheck.Host = val.(string) - } - if val, ok := httpscheck["port"]; ok { - httpsHealthCheck.Port = int64(val.(int)) - } - if val, ok := httpscheck["proxy_header"]; ok { - httpsHealthCheck.ProxyHeader = 
val.(string) - } - if val, ok := httpscheck["request_path"]; ok { - httpsHealthCheck.RequestPath = val.(string) - } - hchk.HttpsHealthCheck = httpsHealthCheck - } - - log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk) - op, err := config.clientCompute.HealthChecks.Insert( - project, hchk).Do() - if err != nil { - return fmt.Errorf("Error creating HealthCheck: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Health Check") - if err != nil { - return err - } - - return resourceComputeHealthCheckRead(d, meta) -} - -func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("tcp_health_check"); ok { - hchk.Type = "TCP" - tcpcheck := v.([]interface{})[0].(map[string]interface{}) - tcpHealthCheck := &compute.TCPHealthCheck{} - if val, ok := tcpcheck["port"]; ok { - tcpHealthCheck.Port = int64(val.(int)) - } - if val, ok := tcpcheck["proxy_header"]; ok { - tcpHealthCheck.ProxyHeader = val.(string) - } - if val, ok := tcpcheck["request"]; ok { - tcpHealthCheck.Request = val.(string) - } - if val, ok := tcpcheck["response"]; ok { - tcpHealthCheck.Response = val.(string) - } - hchk.TcpHealthCheck = tcpHealthCheck - } - if v, ok := 
d.GetOk("ssl_health_check"); ok { - hchk.Type = "SSL" - sslcheck := v.([]interface{})[0].(map[string]interface{}) - sslHealthCheck := &compute.SSLHealthCheck{} - if val, ok := sslcheck["port"]; ok { - sslHealthCheck.Port = int64(val.(int)) - } - if val, ok := sslcheck["proxy_header"]; ok { - sslHealthCheck.ProxyHeader = val.(string) - } - if val, ok := sslcheck["request"]; ok { - sslHealthCheck.Request = val.(string) - } - if val, ok := sslcheck["response"]; ok { - sslHealthCheck.Response = val.(string) - } - hchk.SslHealthCheck = sslHealthCheck - } - if v, ok := d.GetOk("http_health_check"); ok { - hchk.Type = "HTTP" - httpcheck := v.([]interface{})[0].(map[string]interface{}) - httpHealthCheck := &compute.HTTPHealthCheck{} - if val, ok := httpcheck["host"]; ok { - httpHealthCheck.Host = val.(string) - } - if val, ok := httpcheck["port"]; ok { - httpHealthCheck.Port = int64(val.(int)) - } - if val, ok := httpcheck["proxy_header"]; ok { - httpHealthCheck.ProxyHeader = val.(string) - } - if val, ok := httpcheck["request_path"]; ok { - httpHealthCheck.RequestPath = val.(string) - } - hchk.HttpHealthCheck = httpHealthCheck - } - - if v, ok := d.GetOk("https_health_check"); ok { - hchk.Type = "HTTPS" - httpscheck := v.([]interface{})[0].(map[string]interface{}) - httpsHealthCheck := &compute.HTTPSHealthCheck{} - if val, ok := httpscheck["host"]; ok { - httpsHealthCheck.Host = val.(string) - } - if val, ok := httpscheck["port"]; ok { - httpsHealthCheck.Port = int64(val.(int)) - } - if val, ok := httpscheck["proxy_header"]; ok { - httpsHealthCheck.ProxyHeader = val.(string) - } - if val, ok := httpscheck["request_path"]; ok { - httpsHealthCheck.RequestPath = val.(string) - } - hchk.HttpsHealthCheck = httpsHealthCheck - } - - log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk) - op, err := config.clientCompute.HealthChecks.Patch( - project, hchk.Name, hchk).Do() - if err != nil { - return fmt.Errorf("Error patching HealthCheck: %s", err) - } - - // It probably 
maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Updating Health Check") - if err != nil { - return err - } - - return resourceComputeHealthCheckRead(d, meta) -} - -func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - hchk, err := config.clientCompute.HealthChecks.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Health Check %q", d.Get("name").(string))) - } - - d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("healthy_threshold", hchk.HealthyThreshold) - d.Set("timeout_sec", hchk.TimeoutSec) - d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) - d.Set("tcp_health_check", hchk.TcpHealthCheck) - d.Set("ssl_health_check", hchk.SslHealthCheck) - d.Set("http_health_check", hchk.HttpHealthCheck) - d.Set("https_health_check", hchk.HttpsHealthCheck) - d.Set("self_link", hchk.SelfLink) - d.Set("name", hchk.Name) - d.Set("description", hchk.Description) - d.Set("project", project) - - return nil -} - -func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the HealthCheck - op, err := config.clientCompute.HealthChecks.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting HealthCheck: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Health Check") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_health_check_test.go b/builtin/providers/google/resource_compute_health_check_test.go deleted file mode 100644 index bde1d731a..000000000 --- a/builtin/providers/google/resource_compute_health_check_test.go +++ /dev/null @@ -1,332 +0,0 @@ 
-package google - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeHealthCheck_tcp(t *testing.T) { - var healthCheck compute.HealthCheck - - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_tcp(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 3, 3, &healthCheck), - testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHealthCheck_tcp_update(t *testing.T) { - var healthCheck compute.HealthCheck - - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_tcp(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 3, 3, &healthCheck), - testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), - ), - }, - resource.TestStep{ - Config: testAccComputeHealthCheck_tcp_update(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 10, 10, &healthCheck), - testAccCheckComputeHealthCheckTcpPort(8080, 
&healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHealthCheck_ssl(t *testing.T) { - var healthCheck compute.HealthCheck - - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_ssl(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 3, 3, &healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHealthCheck_http(t *testing.T) { - var healthCheck compute.HealthCheck - - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_http(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 3, 3, &healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHealthCheck_https(t *testing.T) { - var healthCheck compute.HealthCheck - - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_https(hckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 3, 3, &healthCheck), - ), - }, - }, - }) -} 
- -func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { - hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName), - ExpectError: regexp.MustCompile("conflicts with tcp_health_check"), - }, - }, - }) -} - -func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_health_check" { - continue - } - - _, err := config.clientCompute.HealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.HealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("HealthCheck not found") - } - - *healthCheck = *found - - return nil - } -} - -func testAccCheckErrorCreating(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[n] - if ok { - return fmt.Errorf("HealthCheck %s created successfully with bad config", n) - } - return nil - } -} - -func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - if healthCheck.HealthyThreshold != healthy { - return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) - } - - if healthCheck.UnhealthyThreshold != unhealthy { - return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) - } - - return nil - } -} - -func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.TcpHealthCheck.Port != port { - return fmt.Errorf("Port doesn't match: expected %v, got %v", port, healthCheck.TcpHealthCheck.Port) - } - return nil - } -} - -func testAccComputeHealthCheck_tcp(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - tcp_health_check { - } -} -`, hckName) -} - -func testAccComputeHealthCheck_tcp_update(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource updated for Terraform acceptance testing" - healthy_threshold = 10 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 10 - tcp_health_check { - port = "8080" - } -} -`, hckName) -} - -func testAccComputeHealthCheck_ssl(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - ssl_health_check { - port = "443" - } -} -`, hckName) -} - -func testAccComputeHealthCheck_http(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" 
"foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - http_health_check { - port = "80" - } -} -`, hckName) -} - -func testAccComputeHealthCheck_https(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - https_health_check { - port = "443" - } -} -`, hckName) -} - -func testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - - tcp_health_check { - } - ssl_health_check { - } -} -`, hckName) -} diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go deleted file mode 100644 index e3e8235a4..000000000 --- a/builtin/providers/google/resource_compute_http_health_check.go +++ /dev/null @@ -1,252 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeHttpHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHttpHealthCheckCreate, - Read: resourceComputeHttpHealthCheckRead, - Delete: resourceComputeHttpHealthCheckDelete, - Update: resourceComputeHttpHealthCheckUpdate, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"check_interval_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "healthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 80, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "request_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "timeout_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "unhealthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HttpHealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("host"); ok { - hchk.Host = v.(string) - } - if v, ok := d.GetOk("request_path"); ok { - hchk.RequestPath = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("port"); ok { - hchk.Port = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - - log.Printf("[DEBUG] HttpHealthCheck insert 
request: %#v", hchk) - op, err := config.clientCompute.HttpHealthChecks.Insert( - project, hchk).Do() - if err != nil { - return fmt.Errorf("Error creating HttpHealthCheck: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Http Health Check") - if err != nil { - return err - } - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HttpHealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("host"); ok { - hchk.Host = v.(string) - } - if v, ok := d.GetOk("request_path"); ok { - hchk.RequestPath = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("port"); ok { - hchk.Port = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - - log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) - op, err := config.clientCompute.HttpHealthChecks.Patch( - project, hchk.Name, hchk).Do() - if err != nil { - return fmt.Errorf("Error patching HttpHealthCheck: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Updating Http Health Check") - if err != nil { - return err - } - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - hchk, err := config.clientCompute.HttpHealthChecks.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HTTP Health Check %q", d.Get("name").(string))) - } - - d.Set("host", hchk.Host) - d.Set("request_path", hchk.RequestPath) - d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("healthy_threshold", hchk.HealthyThreshold) - d.Set("port", hchk.Port) - d.Set("timeout_sec", hchk.TimeoutSec) - d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) - d.Set("self_link", hchk.SelfLink) - d.Set("name", hchk.Name) - d.Set("description", hchk.Description) - d.Set("project", project) - - return nil -} - -func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the HttpHealthCheck - op, err := config.clientCompute.HttpHealthChecks.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Http Health Check") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_http_health_check_test.go b/builtin/providers/google/resource_compute_http_health_check_test.go deleted file mode 100644 index efc9911de..000000000 --- a/builtin/providers/google/resource_compute_http_health_check_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeHttpHealthCheck_basic(t *testing.T) { - var healthCheck compute.HttpHealthCheck - - hhckName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHttpHealthCheck_basic(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpHealthCheckExists( - "google_compute_http_health_check.foobar", &healthCheck), - testAccCheckComputeHttpHealthCheckRequestPath( - "/health_check", &healthCheck), - testAccCheckComputeHttpHealthCheckThresholds( - 3, 3, &healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHttpHealthCheck_update(t *testing.T) { - var healthCheck compute.HttpHealthCheck - - hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHttpHealthCheck_update1(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpHealthCheckExists( - "google_compute_http_health_check.foobar", &healthCheck), - testAccCheckComputeHttpHealthCheckRequestPath( - "/not_default", &healthCheck), - testAccCheckComputeHttpHealthCheckThresholds( - 2, 2, &healthCheck), - ), - }, - resource.TestStep{ - Config: testAccComputeHttpHealthCheck_update2(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpHealthCheckExists( - "google_compute_http_health_check.foobar", &healthCheck), - testAccCheckComputeHttpHealthCheckRequestPath( - "/", &healthCheck), - testAccCheckComputeHttpHealthCheckThresholds( - 10, 10, &healthCheck), - ), - }, - }, - }) -} - -func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != 
"google_compute_http_health_check" { - continue - } - - _, err := config.clientCompute.HttpHealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("HttpHealthCheck still exists") - } - } - - return nil -} - -func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.HttpHealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("HttpHealthCheck not found") - } - - *healthCheck = *found - - return nil - } -} - -func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.RequestPath != path { - return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) - } - - return nil - } -} - -func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.HealthyThreshold != healthy { - return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) - } - - if healthCheck.UnhealthyThreshold != unhealthy { - return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) - } - - return nil - } -} - -func testAccComputeHttpHealthCheck_basic(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_http_health_check" "foobar" { - name = "%s" - check_interval_sec = 3 - description = "Resource created 
for Terraform acceptance testing" - healthy_threshold = 3 - host = "foobar" - port = "80" - request_path = "/health_check" - timeout_sec = 2 - unhealthy_threshold = 3 -} -`, hhckName) -} - -func testAccComputeHttpHealthCheck_update1(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_http_health_check" "foobar" { - name = "%s" - description = "Resource created for Terraform acceptance testing" - request_path = "/not_default" -} -`, hhckName) -} - -func testAccComputeHttpHealthCheck_update2(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_http_health_check" "foobar" { - name = "%s" - description = "Resource updated for Terraform acceptance testing" - healthy_threshold = 10 - unhealthy_threshold = 10 -} -`, hhckName) -} diff --git a/builtin/providers/google/resource_compute_https_health_check.go b/builtin/providers/google/resource_compute_https_health_check.go deleted file mode 100644 index 769606268..000000000 --- a/builtin/providers/google/resource_compute_https_health_check.go +++ /dev/null @@ -1,245 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeHttpsHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHttpsHealthCheckCreate, - Read: resourceComputeHttpsHealthCheckRead, - Delete: resourceComputeHttpsHealthCheckDelete, - Update: resourceComputeHttpsHealthCheckUpdate, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "check_interval_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "healthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - 
"port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 443, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "request_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "timeout_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - - "unhealthy_threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - }, - }, - } -} - -func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HttpsHealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("host"); ok { - hchk.Host = v.(string) - } - if v, ok := d.GetOk("request_path"); ok { - hchk.RequestPath = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("port"); ok { - hchk.Port = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - - log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk) - op, err := config.clientCompute.HttpsHealthChecks.Insert( - project, hchk).Do() - if err != nil { - return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Https Health Check") - if err != nil { - return err - 
} - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - hchk := &compute.HttpsHealthCheck{ - Name: d.Get("name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - hchk.Description = v.(string) - } - if v, ok := d.GetOk("host"); ok { - hchk.Host = v.(string) - } - if v, ok := d.GetOk("request_path"); ok { - hchk.RequestPath = v.(string) - } - if v, ok := d.GetOk("check_interval_sec"); ok { - hchk.CheckIntervalSec = int64(v.(int)) - } - if v, ok := d.GetOk("healthy_threshold"); ok { - hchk.HealthyThreshold = int64(v.(int)) - } - if v, ok := d.GetOk("port"); ok { - hchk.Port = int64(v.(int)) - } - if v, ok := d.GetOk("timeout_sec"); ok { - hchk.TimeoutSec = int64(v.(int)) - } - if v, ok := d.GetOk("unhealthy_threshold"); ok { - hchk.UnhealthyThreshold = int64(v.(int)) - } - - log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk) - op, err := config.clientCompute.HttpsHealthChecks.Patch( - project, hchk.Name, hchk).Do() - if err != nil { - return fmt.Errorf("Error patching HttpsHealthCheck: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(hchk.Name) - - err = computeOperationWaitGlobal(config, op, project, "Updating Https Health Check") - if err != nil { - return err - } - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - hchk, err := config.clientCompute.HttpsHealthChecks.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HTTPS Health Check %q", d.Get("name").(string))) - } - - d.Set("host", hchk.Host) - 
d.Set("request_path", hchk.RequestPath) - d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("health_threshold", hchk.HealthyThreshold) - d.Set("port", hchk.Port) - d.Set("timeout_sec", hchk.TimeoutSec) - d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) - d.Set("self_link", hchk.SelfLink) - - return nil -} - -func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the HttpsHealthCheck - op, err := config.clientCompute.HttpsHealthChecks.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Https Health Check") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_https_health_check_test.go b/builtin/providers/google/resource_compute_https_health_check_test.go deleted file mode 100644 index 98a5083dd..000000000 --- a/builtin/providers/google/resource_compute_https_health_check_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeHttpsHealthCheck_basic(t *testing.T) { - var healthCheck compute.HttpsHealthCheck - - hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_basic(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpsHealthCheckExists( - 
"google_compute_https_health_check.foobar", &healthCheck), - testAccCheckComputeHttpsHealthCheckRequestPath( - "/health_check", &healthCheck), - testAccCheckComputeHttpsHealthCheckThresholds( - 3, 3, &healthCheck), - ), - }, - }, - }) -} - -func TestAccComputeHttpsHealthCheck_update(t *testing.T) { - var healthCheck compute.HttpsHealthCheck - - hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_update1(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpsHealthCheckExists( - "google_compute_https_health_check.foobar", &healthCheck), - testAccCheckComputeHttpsHealthCheckRequestPath( - "/not_default", &healthCheck), - testAccCheckComputeHttpsHealthCheckThresholds( - 2, 2, &healthCheck), - ), - }, - resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_update2(hhckName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHttpsHealthCheckExists( - "google_compute_https_health_check.foobar", &healthCheck), - testAccCheckComputeHttpsHealthCheckRequestPath( - "/", &healthCheck), - testAccCheckComputeHttpsHealthCheckThresholds( - 10, 10, &healthCheck), - ), - }, - }, - }) -} - -func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_https_health_check" { - continue - } - - _, err := config.clientCompute.HttpsHealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("HttpsHealthCheck still exists") - } - } - - return nil -} - -func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) 
error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.HttpsHealthChecks.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("HttpsHealthCheck not found") - } - - *healthCheck = *found - - return nil - } -} - -func testAccCheckComputeHttpsHealthCheckRequestPath(path string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.RequestPath != path { - return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) - } - - return nil - } -} - -func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.HealthyThreshold != healthy { - return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) - } - - if healthCheck.UnhealthyThreshold != unhealthy { - return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) - } - - return nil - } -} - -func testAccComputeHttpsHealthCheck_basic(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_https_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - host = "foobar" - name = "%s" - port = "80" - request_path = "/health_check" - timeout_sec = 2 - unhealthy_threshold = 3 -} -`, hhckName) -} - -func testAccComputeHttpsHealthCheck_update1(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_https_health_check" "foobar" { - name = "%s" - description = "Resource created for 
Terraform acceptance testing" - request_path = "/not_default" -} -`, hhckName) -} - -func testAccComputeHttpsHealthCheck_update2(hhckName string) string { - return fmt.Sprintf(` -resource "google_compute_https_health_check" "foobar" { - name = "%s" - description = "Resource updated for Terraform acceptance testing" - healthy_threshold = 10 - unhealthy_threshold = 10 -} -`, hhckName) -} diff --git a/builtin/providers/google/resource_compute_image.go b/builtin/providers/google/resource_compute_image.go deleted file mode 100644 index 9e5b14197..000000000 --- a/builtin/providers/google/resource_compute_image.go +++ /dev/null @@ -1,197 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeImage() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeImageCreate, - Read: resourceComputeImageRead, - Delete: resourceComputeImageDelete, - - Schema: map[string]*schema.Schema{ - // TODO(cblecker): one of source_disk or raw_disk is required - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "family": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "source_disk": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "raw_disk": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "sha1": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "container_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
Default: "TAR", - ForceNew: true, - }, - }, - }, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "create_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - ForceNew: true, - }, - }, - } -} - -func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the image - image := &compute.Image{ - Name: d.Get("name").(string), - } - - if v, ok := d.GetOk("description"); ok { - image.Description = v.(string) - } - - if v, ok := d.GetOk("family"); ok { - image.Family = v.(string) - } - - // Load up the source_disk for this image if specified - if v, ok := d.GetOk("source_disk"); ok { - image.SourceDisk = v.(string) - } - - // Load up the raw_disk for this image if specified - if v, ok := d.GetOk("raw_disk"); ok { - rawDiskEle := v.([]interface{})[0].(map[string]interface{}) - imageRawDisk := &compute.ImageRawDisk{ - Source: rawDiskEle["source"].(string), - ContainerType: rawDiskEle["container_type"].(string), - } - if val, ok := rawDiskEle["sha1"]; ok { - imageRawDisk.Sha1Checksum = val.(string) - } - - image.RawDisk = imageRawDisk - } - - // Read create timeout - var createTimeout int - if v, ok := d.GetOk("create_timeout"); ok { - createTimeout = v.(int) - } - - // Insert the image - op, err := config.clientCompute.Images.Insert( - project, image).Do() - if err != nil { - return fmt.Errorf("Error creating image: %s", err) - } - - // Store the ID - d.SetId(image.Name) - - err = computeOperationWaitGlobalTime(config, op, project, "Creating Image", createTimeout) - if err != nil { - return err - } - - return resourceComputeImageRead(d, meta) -} - -func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - image, err := 
config.clientCompute.Images.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string))) - } - - d.Set("self_link", image.SelfLink) - - return nil -} - -func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the image - log.Printf("[DEBUG] image delete request") - op, err := config.clientCompute.Images.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting image: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting image") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_image_test.go b/builtin/providers/google/resource_compute_image_test.go deleted file mode 100644 index 25ffd144b..000000000 --- a/builtin/providers/google/resource_compute_image_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeImage_basic(t *testing.T) { - var image compute.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeImageDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeImage_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeImageExists( - "google_compute_image.foobar", &image), - ), - }, - }, - }) -} - -func TestAccComputeImage_basedondisk(t *testing.T) { - var image compute.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeImageDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccComputeImage_basedondisk, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeImageExists( - "google_compute_image.foobar", &image), - ), - }, - }, - }) -} - -func testAccCheckComputeImageDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_image" { - continue - } - - _, err := config.clientCompute.Images.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Image still exists") - } - } - - return nil -} - -func testAccCheckComputeImageExists(n string, image *compute.Image) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Images.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Image not found") - } - - *image = *found - - return nil - } -} - -var testAccComputeImage_basic = fmt.Sprintf(` -resource "google_compute_image" "foobar" { - name = "image-test-%s" - raw_disk { - source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" - } - create_timeout = 5 -}`, acctest.RandString(10)) - -var testAccComputeImage_basedondisk = fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "disk-test-%s" - zone = "us-central1-a" - image = "debian-8-jessie-v20160803" -} -resource "google_compute_image" "foobar" { - name = "image-test-%s" - source_disk = "${google_compute_disk.foobar.self_link}" -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_instance.go 
b/builtin/providers/google/resource_compute_instance.go deleted file mode 100644 index 8b647255c..000000000 --- a/builtin/providers/google/resource_compute_instance.go +++ /dev/null @@ -1,1144 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func stringScopeHashcode(v interface{}) int { - v = canonicalizeServiceScope(v.(string)) - return schema.HashString(v) -} - -func resourceComputeInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceCreate, - Read: resourceComputeInstanceRead, - Update: resourceComputeInstanceUpdate, - Delete: resourceComputeInstanceDelete, - - SchemaVersion: 2, - MigrateState: resourceComputeInstanceMigrateState, - - Schema: map[string]*schema.Schema{ - "disk": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // TODO(mitchellh): one of image or disk is required - - "disk": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "image": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "scratch": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "auto_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "device_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - // Preferred way of adding 
persistent disks to an instance. - // Use this instead of `disk` when possible. - "attached_disk": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "device_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - ForceNew: true, - }, - - "disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "machine_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "can_ip_forward": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - - "metadata_startup_script": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "network_interface": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "subnetwork": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "subnetwork_project": &schema.Schema{ - Type: schema.TypeString, 
- Optional: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "access_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "nat_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "assigned_nat_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - - "network": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Deprecated: "Please use network_interface", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "internal_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "external_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "scheduling": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "on_host_maintenance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "preemptible": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - - "service_account": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - }, - - "scopes": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - Set: stringScopeHashcode, - }, - }, - }, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "create_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - }, - }, - } -} - -func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - instance, err := config.clientCompute.Instances.Get( - project, d.Get("zone").(string), d.Id()).Do() - if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) - } - - return instance, nil -} - -func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Get the zone - log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) - zone, err := config.clientCompute.Zones.Get( - project, d.Get("zone").(string)).Do() - if err != nil { - return fmt.Errorf( - "Error loading zone '%s': %s", d.Get("zone").(string), err) - } - - // Get the machine type - log.Printf("[DEBUG] Loading machine type: %s", d.Get("machine_type").(string)) - machineType, err := config.clientCompute.MachineTypes.Get( - project, zone.Name, d.Get("machine_type").(string)).Do() - if err != nil { - return fmt.Errorf( - "Error loading machine type: %s", - err) - } - - // Build up the 
list of disks - disksCount := d.Get("disk.#").(int) - attachedDisksCount := d.Get("attached_disk.#").(int) - if disksCount+attachedDisksCount == 0 { - return fmt.Errorf("At least one disk or attached_disk must be set") - } - disks := make([]*compute.AttachedDisk, 0, disksCount+attachedDisksCount) - for i := 0; i < disksCount; i++ { - prefix := fmt.Sprintf("disk.%d", i) - - // var sourceLink string - - // Build the disk - var disk compute.AttachedDisk - disk.Type = "PERSISTENT" - disk.Mode = "READ_WRITE" - disk.Boot = i == 0 - disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) - - if _, ok := d.GetOk(prefix + ".disk"); ok { - if _, ok := d.GetOk(prefix + ".type"); ok { - return fmt.Errorf( - "Error: cannot define both disk and type.") - } - } - - hasSource := false - // Load up the disk for this disk if specified - if v, ok := d.GetOk(prefix + ".disk"); ok { - diskName := v.(string) - diskData, err := config.clientCompute.Disks.Get( - project, zone.Name, diskName).Do() - if err != nil { - return fmt.Errorf( - "Error loading disk '%s': %s", - diskName, err) - } - - disk.Source = diskData.SelfLink - hasSource = true - } else { - // Create a new disk - disk.InitializeParams = &compute.AttachedDiskInitializeParams{} - } - - if v, ok := d.GetOk(prefix + ".scratch"); ok { - if v.(bool) { - disk.Type = "SCRATCH" - } - } - - // Load up the image for this disk if specified - if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource { - imageName := v.(string) - - imageUrl, err := resolveImage(config, imageName) - if err != nil { - return fmt.Errorf( - "Error resolving image name '%s': %s", - imageName, err) - } - - disk.InitializeParams.SourceImage = imageUrl - } else if ok && hasSource { - return fmt.Errorf("Cannot specify disk image when referencing an existing disk") - } - - if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource { - diskTypeName := v.(string) - diskType, err := readDiskType(config, zone, diskTypeName) - if err != nil { - return fmt.Errorf( - "Error 
loading disk type '%s': %s", - diskTypeName, err) - } - - disk.InitializeParams.DiskType = diskType.SelfLink - } else if ok && hasSource { - return fmt.Errorf("Cannot specify disk type when referencing an existing disk") - } - - if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource { - diskSizeGb := v.(int) - disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) - } else if ok && hasSource { - return fmt.Errorf("Cannot specify disk size when referencing an existing disk") - } - - if v, ok := d.GetOk(prefix + ".device_name"); ok { - disk.DeviceName = v.(string) - } - - if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { - disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} - disk.DiskEncryptionKey.RawKey = v.(string) - } - - disks = append(disks, &disk) - } - - for i := 0; i < attachedDisksCount; i++ { - prefix := fmt.Sprintf("attached_disk.%d", i) - disk := compute.AttachedDisk{ - Source: d.Get(prefix + ".source").(string), - AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion - } - - disk.Boot = i == 0 && disksCount == 0 // TODO(danawillow): This is super hacky, let's just add a boot field. 
- - if v, ok := d.GetOk(prefix + ".device_name"); ok { - disk.DeviceName = v.(string) - } - - if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { - disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ - RawKey: v.(string), - } - } - - disks = append(disks, &disk) - } - - networksCount := d.Get("network.#").(int) - networkInterfacesCount := d.Get("network_interface.#").(int) - - if networksCount > 0 && networkInterfacesCount > 0 { - return fmt.Errorf("Error: cannot define both networks and network_interfaces.") - } - if networksCount == 0 && networkInterfacesCount == 0 { - return fmt.Errorf("Error: Must define at least one network_interface.") - } - - var networkInterfaces []*compute.NetworkInterface - - if networksCount > 0 { - // TODO: Delete this block when removing network { } - // Build up the list of networkInterfaces - networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) - // Load up the name of this network - networkName := d.Get(prefix + ".source").(string) - network, err := config.clientCompute.Networks.Get( - project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error loading network '%s': %s", - networkName, err) - } - - // Build the networkInterface - var iface compute.NetworkInterface - iface.AccessConfigs = []*compute.AccessConfig{ - &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } - iface.Network = network.SelfLink - - networkInterfaces = append(networkInterfaces, &iface) - } - } - - if networkInterfacesCount > 0 { - // Build up the list of networkInterfaces - networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) - for i := 0; i < networkInterfacesCount; i++ { - prefix := fmt.Sprintf("network_interface.%d", i) - // Load up the name of this network_interface - networkName := d.Get(prefix + ".network").(string) - subnetworkName := d.Get(prefix + 
".subnetwork").(string) - subnetworkProject := d.Get(prefix + ".subnetwork_project").(string) - address := d.Get(prefix + ".address").(string) - var networkLink, subnetworkLink string - - if networkName != "" && subnetworkName != "" { - return fmt.Errorf("Cannot specify both network and subnetwork values.") - } else if networkName != "" { - networkLink, err = getNetworkLink(d, config, prefix+".network") - if err != nil { - return fmt.Errorf( - "Error referencing network '%s': %s", - networkName, err) - } - - } else { - region := getRegionFromZone(d.Get("zone").(string)) - if subnetworkProject == "" { - subnetworkProject = project - } - subnetwork, err := config.clientCompute.Subnetworks.Get( - subnetworkProject, region, subnetworkName).Do() - if err != nil { - return fmt.Errorf( - "Error referencing subnetwork '%s' in region '%s': %s", - subnetworkName, region, err) - } - subnetworkLink = subnetwork.SelfLink - } - - // Build the networkInterface - var iface compute.NetworkInterface - iface.Network = networkLink - iface.Subnetwork = subnetworkLink - iface.NetworkIP = address - - // Handle access_config structs - accessConfigsCount := d.Get(prefix + ".access_config.#").(int) - iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) - for j := 0; j < accessConfigsCount; j++ { - acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) - iface.AccessConfigs[j] = &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(acPrefix + ".nat_ip").(string), - } - } - - networkInterfaces = append(networkInterfaces, &iface) - } - } - - serviceAccountsCount := d.Get("service_account.#").(int) - serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) - for i := 0; i < serviceAccountsCount; i++ { - prefix := fmt.Sprintf("service_account.%d", i) - - scopesSet := d.Get(prefix + ".scopes").(*schema.Set) - scopes := make([]string, scopesSet.Len()) - for i, v := range scopesSet.List() { - scopes[i] = canonicalizeServiceScope(v.(string)) - } - - 
email := "default" - if v := d.Get(prefix + ".email"); v != nil { - email = v.(string) - } - - serviceAccount := &compute.ServiceAccount{ - Email: email, - Scopes: scopes, - } - - serviceAccounts = append(serviceAccounts, serviceAccount) - } - - prefix := "scheduling.0" - scheduling := &compute.Scheduling{} - - if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { - scheduling.AutomaticRestart = val.(bool) - } - - if val, ok := d.GetOk(prefix + ".preemptible"); ok { - scheduling.Preemptible = val.(bool) - } - - if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { - scheduling.OnHostMaintenance = val.(string) - } - - // Read create timeout - var createTimeout int - if v, ok := d.GetOk("create_timeout"); ok { - createTimeout = v.(int) - } - - metadata, err := resourceInstanceMetadata(d) - if err != nil { - return fmt.Errorf("Error creating metadata: %s", err) - } - - // Create the instance information - instance := compute.Instance{ - CanIpForward: d.Get("can_ip_forward").(bool), - Description: d.Get("description").(string), - Disks: disks, - MachineType: machineType.SelfLink, - Metadata: metadata, - Name: d.Get("name").(string), - NetworkInterfaces: networkInterfaces, - Tags: resourceInstanceTags(d), - ServiceAccounts: serviceAccounts, - Scheduling: scheduling, - } - - log.Printf("[INFO] Requesting instance creation") - op, err := config.clientCompute.Instances.Insert( - project, zone.Name, &instance).Do() - if err != nil { - return fmt.Errorf("Error creating instance: %s", err) - } - - // Store the ID now - d.SetId(instance.Name) - - // Wait for the operation to complete - waitErr := computeOperationWaitZoneTime(config, op, project, zone.Name, createTimeout, "instance to create") - if waitErr != nil { - // The resource didn't actually create - d.SetId("") - return waitErr - } - - return resourceComputeInstanceRead(d, meta) -} - -func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - instance, 
err := getInstance(config, d) - if err != nil || instance == nil { - return err - } - - // Synch metadata - md := instance.Metadata - - _md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md) - - if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { - d.Set("metadata_startup_script", script) - delete(_md, "startup-script") - } - - if err = d.Set("metadata", _md); err != nil { - return fmt.Errorf("Error setting metadata: %s", err) - } - - d.Set("can_ip_forward", instance.CanIpForward) - - machineTypeResource := strings.Split(instance.MachineType, "/") - machineType := machineTypeResource[len(machineTypeResource)-1] - d.Set("machine_type", machineType) - - // Set the service accounts - serviceAccounts := make([]map[string]interface{}, 0, 1) - for _, serviceAccount := range instance.ServiceAccounts { - scopes := make([]interface{}, len(serviceAccount.Scopes)) - for i, scope := range serviceAccount.Scopes { - scopes[i] = scope - } - serviceAccounts = append(serviceAccounts, map[string]interface{}{ - "email": serviceAccount.Email, - "scopes": schema.NewSet(stringScopeHashcode, scopes), - }) - } - d.Set("service_account", serviceAccounts) - - networksCount := d.Get("network.#").(int) - networkInterfacesCount := d.Get("network_interface.#").(int) - - if networksCount > 0 && networkInterfacesCount > 0 { - return fmt.Errorf("Error: cannot define both networks and network_interfaces.") - } - if networksCount == 0 && networkInterfacesCount == 0 { - return fmt.Errorf("Error: Must define at least one network_interface.") - } - - // Set the networks - // Use the first external IP found for the default connection info. 
- externalIP := "" - internalIP := "" - networks := make([]map[string]interface{}, 0, 1) - if networksCount > 0 { - // TODO: Remove this when realizing deprecation of .network - for i, iface := range instance.NetworkInterfaces { - var natIP string - for _, config := range iface.AccessConfigs { - if config.Type == "ONE_TO_ONE_NAT" { - natIP = config.NatIP - break - } - } - - if externalIP == "" && natIP != "" { - externalIP = natIP - } - - network := make(map[string]interface{}) - network["name"] = iface.Name - network["external_address"] = natIP - network["internal_address"] = iface.NetworkIP - network["source"] = d.Get(fmt.Sprintf("network.%d.source", i)) - networks = append(networks, network) - } - } - d.Set("network", networks) - - networkInterfaces := make([]map[string]interface{}, 0, 1) - if networkInterfacesCount > 0 { - for i, iface := range instance.NetworkInterfaces { - // The first non-empty ip is left in natIP - var natIP string - accessConfigs := make( - []map[string]interface{}, 0, len(iface.AccessConfigs)) - for j, config := range iface.AccessConfigs { - accessConfigs = append(accessConfigs, map[string]interface{}{ - "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)), - "assigned_nat_ip": config.NatIP, - }) - - if natIP == "" { - natIP = config.NatIP - } - } - - if externalIP == "" { - externalIP = natIP - } - - if internalIP == "" { - internalIP = iface.NetworkIP - } - - networkInterfaces = append(networkInterfaces, map[string]interface{}{ - "name": iface.Name, - "address": iface.NetworkIP, - "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), - "subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)), - "subnetwork_project": d.Get(fmt.Sprintf("network_interface.%d.subnetwork_project", i)), - "access_config": accessConfigs, - }) - } - } - d.Set("network_interface", networkInterfaces) - - // Fall back on internal ip if there is no external ip. 
This makes sense in the situation where - // terraform is being used on a cloud instance and can therefore access the instances it creates - // via their internal ips. - sshIP := externalIP - if sshIP == "" { - sshIP = internalIP - } - - // Initialize the connection info - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": sshIP, - }) - - // Set the metadata fingerprint if there is one. - if instance.Metadata != nil { - d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) - } - - // Set the tags fingerprint if there is one. - if instance.Tags != nil { - d.Set("tags_fingerprint", instance.Tags.Fingerprint) - } - - disksCount := d.Get("disk.#").(int) - attachedDisksCount := d.Get("attached_disk.#").(int) - disks := make([]map[string]interface{}, 0, disksCount) - attachedDisks := make([]map[string]interface{}, 0, attachedDisksCount) - - if expectedDisks := disksCount + attachedDisksCount; len(instance.Disks) != expectedDisks { - return fmt.Errorf("Expected %d disks, API returned %d", expectedDisks, len(instance.Disks)) - } - - attachedDiskSources := make(map[string]struct{}, attachedDisksCount) - for i := 0; i < attachedDisksCount; i++ { - attachedDiskSources[d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string)] = struct{}{} - } - - dIndex := 0 - adIndex := 0 - for _, disk := range instance.Disks { - if _, ok := attachedDiskSources[disk.Source]; !ok { - di := map[string]interface{}{ - "disk": d.Get(fmt.Sprintf("disk.%d.disk", dIndex)), - "image": d.Get(fmt.Sprintf("disk.%d.image", dIndex)), - "type": d.Get(fmt.Sprintf("disk.%d.type", dIndex)), - "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", dIndex)), - "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", dIndex)), - "size": d.Get(fmt.Sprintf("disk.%d.size", dIndex)), - "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", dIndex)), - "disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", dIndex)), - } - if disk.DiskEncryptionKey != nil && 
disk.DiskEncryptionKey.Sha256 != "" { - di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 - } - disks = append(disks, di) - dIndex++ - } else { - di := map[string]interface{}{ - "source": disk.Source, - "device_name": disk.DeviceName, - "disk_encryption_key_raw": d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)), - } - if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { - di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 - } - attachedDisks = append(attachedDisks, di) - adIndex++ - } - } - d.Set("disk", disks) - d.Set("attached_disk", attachedDisks) - - d.Set("self_link", instance.SelfLink) - d.SetId(instance.Name) - - return nil -} - -func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - - instance, err := getInstance(config, d) - if err != nil { - return err - } - - // Enable partial mode for the resource since it is possible - d.Partial(true) - - // If the Metadata has changed, then update that. 
- if d.HasChange("metadata") { - o, n := d.GetChange("metadata") - if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { - if _, ok := n.(map[string]interface{})["startup-script"]; ok { - return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined") - } - - n.(map[string]interface{})["startup-script"] = script - } - - updateMD := func() error { - // Reload the instance in the case of a fingerprint mismatch - instance, err = getInstance(config, d) - if err != nil { - return err - } - - md := instance.Metadata - - MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) - - if err != nil { - return fmt.Errorf("Error updating metadata: %s", err) - } - op, err := config.clientCompute.Instances.SetMetadata( - project, zone, d.Id(), md).Do() - if err != nil { - return fmt.Errorf("Error updating metadata: %s", err) - } - - opErr := computeOperationWaitZone(config, op, project, zone, "metadata to update") - if opErr != nil { - return opErr - } - - d.SetPartial("metadata") - return nil - } - - MetadataRetryWrapper(updateMD) - } - - if d.HasChange("tags") { - tags := resourceInstanceTags(d) - op, err := config.clientCompute.Instances.SetTags( - project, zone, d.Id(), tags).Do() - if err != nil { - return fmt.Errorf("Error updating tags: %s", err) - } - - opErr := computeOperationWaitZone(config, op, project, zone, "tags to update") - if opErr != nil { - return opErr - } - - d.SetPartial("tags") - } - - if d.HasChange("scheduling") { - prefix := "scheduling.0" - scheduling := &compute.Scheduling{} - - if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { - scheduling.AutomaticRestart = val.(bool) - } - - if val, ok := d.GetOk(prefix + ".preemptible"); ok { - scheduling.Preemptible = val.(bool) - } - - if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { - scheduling.OnHostMaintenance = val.(string) - } - - op, err := config.clientCompute.Instances.SetScheduling(project, - zone, 
d.Id(), scheduling).Do() - - if err != nil { - return fmt.Errorf("Error updating scheduling policy: %s", err) - } - - opErr := computeOperationWaitZone(config, op, project, zone, - "scheduling policy update") - if opErr != nil { - return opErr - } - - d.SetPartial("scheduling") - } - - networkInterfacesCount := d.Get("network_interface.#").(int) - if networkInterfacesCount > 0 { - // Sanity check - if networkInterfacesCount != len(instance.NetworkInterfaces) { - return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) - } - for i := 0; i < networkInterfacesCount; i++ { - prefix := fmt.Sprintf("network_interface.%d", i) - instNetworkInterface := instance.NetworkInterfaces[i] - networkName := d.Get(prefix + ".name").(string) - - // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) - networkName = instNetworkInterface.Name - // Sanity check - if networkName != instNetworkInterface.Name { - return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) - } - - if d.HasChange(prefix + ".access_config") { - - // TODO: This code deletes then recreates accessConfigs. This is bad because it may - // leave the machine inaccessible from either ip if the creation part fails (network - // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is - // the only way to do it. In future this should be revised to only change what is - // necessary, and also add before removing. 
- - // Delete any accessConfig that currently exists in instNetworkInterface - for _, ac := range instNetworkInterface.AccessConfigs { - op, err := config.clientCompute.Instances.DeleteAccessConfig( - project, zone, d.Id(), ac.Name, networkName).Do() - if err != nil { - return fmt.Errorf("Error deleting old access_config: %s", err) - } - opErr := computeOperationWaitZone(config, op, project, zone, - "old access_config to delete") - if opErr != nil { - return opErr - } - } - - // Create new ones - accessConfigsCount := d.Get(prefix + ".access_config.#").(int) - for j := 0; j < accessConfigsCount; j++ { - acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) - ac := &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(acPrefix + ".nat_ip").(string), - } - op, err := config.clientCompute.Instances.AddAccessConfig( - project, zone, d.Id(), networkName, ac).Do() - if err != nil { - return fmt.Errorf("Error adding new access_config: %s", err) - } - opErr := computeOperationWaitZone(config, op, project, zone, - "new access_config to add") - if opErr != nil { - return opErr - } - } - } - } - } - - // We made it, disable partial mode - d.Partial(false) - - return resourceComputeInstanceRead(d, meta) -} - -func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) - op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting instance: %s", err) - } - - // Wait for the operation to complete - opErr := computeOperationWaitZone(config, op, project, zone, "instance to delete") - if opErr != nil { - return opErr - } - - d.SetId("") - return nil -} - -func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) { - m := &compute.Metadata{} - mdMap := 
d.Get("metadata").(map[string]interface{}) - if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { - mdMap["startup-script"] = v - } - if len(mdMap) > 0 { - m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) - for key, val := range mdMap { - v := val.(string) - m.Items = append(m.Items, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } - - // Set the fingerprint. If the metadata has never been set before - // then this will just be blank. - m.Fingerprint = d.Get("metadata_fingerprint").(string) - } - - return m, nil -} - -func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { - // Calculate the tags - var tags *compute.Tags - if v := d.Get("tags"); v != nil { - vs := v.(*schema.Set) - tags = new(compute.Tags) - tags.Items = make([]string, vs.Len()) - for i, v := range vs.List() { - tags.Items[i] = v.(string) - } - - tags.Fingerprint = d.Get("tags_fingerprint").(string) - } - - return tags -} diff --git a/builtin/providers/google/resource_compute_instance_group.go b/builtin/providers/google/resource_compute_instance_group.go deleted file mode 100644 index 0a7c49547..000000000 --- a/builtin/providers/google/resource_compute_instance_group.go +++ /dev/null @@ -1,346 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeInstanceGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceGroupCreate, - Read: resourceComputeInstanceGroupRead, - Update: resourceComputeInstanceGroupUpdate, - Delete: resourceComputeInstanceGroupDelete, - - SchemaVersion: 1, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - 
"instances": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "named_port": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "port": { - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - - "network": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "size": { - Type: schema.TypeInt, - Computed: true, - }, - }, - } -} - -func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) { - for _, v := range instanceUrls { - refs = append(refs, &compute.InstanceReference{ - Instance: v, - }) - } - return refs -} - -func validInstanceURLs(instanceUrls []string) bool { - for _, v := range instanceUrls { - if !strings.HasPrefix(v, "https://www.googleapis.com/compute/v1/") { - return false - } - } - return true -} - -func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - instanceGroup := &compute.InstanceGroup{ - Name: d.Get("name").(string), - } - - // Set optional fields - if v, ok := d.GetOk("description"); ok { - instanceGroup.Description = v.(string) - } - - if v, ok := d.GetOk("named_port"); ok { - instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) - } - - if v, ok := d.GetOk("network"); ok { - instanceGroup.Network = v.(string) - } - - log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) - op, err := config.clientCompute.InstanceGroups.Insert( - project, d.Get("zone").(string), instanceGroup).Do() - if err != nil { - return 
fmt.Errorf("Error creating InstanceGroup: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(instanceGroup.Name) - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroup") - if err != nil { - return err - } - - if v, ok := d.GetOk("instances"); ok { - instanceUrls := convertStringArr(v.(*schema.Set).List()) - if !validInstanceURLs(instanceUrls) { - return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls) - } - - addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{ - Instances: getInstanceReferences(instanceUrls), - } - - log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) - op, err := config.clientCompute.InstanceGroups.AddInstances( - project, d.Get("zone").(string), d.Id(), addInstanceReq).Do() - if err != nil { - return fmt.Errorf("Error adding instances to InstanceGroup: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Adding instances to InstanceGroup") - if err != nil { - return err - } - } - - return resourceComputeInstanceGroupRead(d, meta) -} - -func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // retreive instance group - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - project, d.Get("zone").(string), d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", d.Get("name").(string))) - } - - // retreive instance group members - var memberUrls []string - members, err := config.clientCompute.InstanceGroups.ListInstances( - project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{ - InstanceState: "ALL", - }).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code 
== 404 { - // The resource doesn't have any instances - d.Set("instances", nil) - } else { - // any other errors return them - return fmt.Errorf("Error reading InstanceGroup Members: %s", err) - } - } else { - for _, member := range members.Items { - memberUrls = append(memberUrls, member.Instance) - } - log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls) - d.Set("instances", memberUrls) - } - - // Set computed fields - d.Set("network", instanceGroup.Network) - d.Set("size", instanceGroup.Size) - d.Set("self_link", instanceGroup.SelfLink) - - return nil -} -func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // refresh the state incase referenced instances have been removed earlier in the run - err = resourceComputeInstanceGroupRead(d, meta) - if err != nil { - return fmt.Errorf("Error reading InstanceGroup: %s", err) - } - - d.Partial(true) - - if d.HasChange("instances") { - // to-do check for no instances - from_, to_ := d.GetChange("instances") - - from := convertStringArr(from_.(*schema.Set).List()) - to := convertStringArr(to_.(*schema.Set).List()) - - if !validInstanceURLs(from) { - return fmt.Errorf("Error invalid instance URLs: %v", from) - } - if !validInstanceURLs(to) { - return fmt.Errorf("Error invalid instance URLs: %v", to) - } - - add, remove := calcAddRemove(from, to) - - if len(remove) > 0 { - removeReq := &compute.InstanceGroupsRemoveInstancesRequest{ - Instances: getInstanceReferences(remove), - } - - log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) - removeOp, err := config.clientCompute.InstanceGroups.RemoveInstances( - project, d.Get("zone").(string), d.Id(), removeReq).Do() - if err != nil { - return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, removeOp, 
project, d.Get("zone").(string), "Updating InstanceGroup") - if err != nil { - return err - } - } - - if len(add) > 0 { - - addReq := &compute.InstanceGroupsAddInstancesRequest{ - Instances: getInstanceReferences(add), - } - - log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) - addOp, err := config.clientCompute.InstanceGroups.AddInstances( - project, d.Get("zone").(string), d.Id(), addReq).Do() - if err != nil { - return fmt.Errorf("Error adding instances from InstanceGroup: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, addOp, project, d.Get("zone").(string), "Updating InstanceGroup") - if err != nil { - return err - } - } - - d.SetPartial("instances") - } - - if d.HasChange("named_port") { - namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) - - namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{ - NamedPorts: namedPorts, - } - - log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) - op, err := config.clientCompute.InstanceGroups.SetNamedPorts( - project, d.Get("zone").(string), d.Id(), namedPortsReq).Do() - if err != nil { - return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) - } - - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroup") - if err != nil { - return err - } - d.SetPartial("named_port") - } - - d.Partial(false) - - return resourceComputeInstanceGroupRead(d, meta) -} - -func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - op, err := config.clientCompute.InstanceGroups.Delete(project, zone, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting InstanceGroup: %s", err) - } - - err = computeOperationWaitZone(config, op, project, zone, "Deleting InstanceGroup") - if 
err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go deleted file mode 100644 index 58d435a74..000000000 --- a/builtin/providers/google/resource_compute_instance_group_manager.go +++ /dev/null @@ -1,462 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeInstanceGroupManager() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceGroupManagerCreate, - Read: resourceComputeInstanceGroupManagerRead, - Update: resourceComputeInstanceGroupManagerUpdate, - Delete: resourceComputeInstanceGroupManagerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "base_instance_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_template": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "instance_group": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "named_port": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
ForceNew: true, - Computed: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "update_strategy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "RESTART", - }, - - "target_pools": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "target_size": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - } -} - -func getNamedPorts(nps []interface{}) []*compute.NamedPort { - namedPorts := make([]*compute.NamedPort, 0, len(nps)) - for _, v := range nps { - np := v.(map[string]interface{}) - namedPorts = append(namedPorts, &compute.NamedPort{ - Name: np["name"].(string), - Port: int64(np["port"].(int)), - }) - } - return namedPorts -} - -func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Get group size, default to 1 if not given - var target_size int64 = 1 - if v, ok := d.GetOk("target_size"); ok { - target_size = int64(v.(int)) - } - - // Build the parameter - manager := &compute.InstanceGroupManager{ - Name: d.Get("name").(string), - BaseInstanceName: d.Get("base_instance_name").(string), - InstanceTemplate: d.Get("instance_template").(string), - TargetSize: target_size, - } - - // Set optional fields - if v, ok := d.GetOk("description"); ok { - manager.Description = v.(string) - } - - if v, ok := d.GetOk("named_port"); ok { - manager.NamedPorts = getNamedPorts(v.([]interface{})) - } - - if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { - var s []string - for _, v := range attr.List() { - s = append(s, v.(string)) - } - manager.TargetPools = s - } - - updateStrategy := d.Get("update_strategy").(string) - if !(updateStrategy == "NONE" || updateStrategy == "RESTART") { - return fmt.Errorf("Update strategy must 
be \"NONE\" or \"RESTART\"") - } - - log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) - op, err := config.clientCompute.InstanceGroupManagers.Insert( - project, d.Get("zone").(string), manager).Do() - if err != nil { - return fmt.Errorf("Error creating InstanceGroupManager: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(manager.Name) - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") - if err != nil { - return err - } - - return resourceComputeInstanceGroupManagerRead(d, meta) -} - -func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(namedPorts)) - for _, namedPort := range namedPorts { - namedPortMap := make(map[string]interface{}) - namedPortMap["name"] = namedPort.Name - namedPortMap["port"] = namedPort.Port - result = append(result, namedPortMap) - } - return result - -} - -func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - getInstanceGroupManager := func(zone string) (interface{}, error) { - return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() - } - - var manager *compute.InstanceGroupManager - var e error - if zone, ok := d.GetOk("zone"); ok { - manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() - - if e != nil { - return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) - } - } else { - // If the resource was imported, the only info we have is the ID. Try to find the resource - // by searching in the region of the project. 
- var resource interface{} - resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) - - if e != nil { - return e - } - - manager = resource.(*compute.InstanceGroupManager) - } - - if manager == nil { - log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - - zoneUrl := strings.Split(manager.Zone, "/") - d.Set("base_instance_name", manager.BaseInstanceName) - d.Set("instance_template", manager.InstanceTemplate) - d.Set("name", manager.Name) - d.Set("zone", zoneUrl[len(zoneUrl)-1]) - d.Set("description", manager.Description) - d.Set("project", project) - d.Set("target_size", manager.TargetSize) - d.Set("target_pools", manager.TargetPools) - d.Set("named_port", flattenNamedPorts(manager.NamedPorts)) - d.Set("fingerprint", manager.Fingerprint) - d.Set("instance_group", manager.InstanceGroup) - d.Set("target_size", manager.TargetSize) - d.Set("self_link", manager.SelfLink) - update_strategy, ok := d.GetOk("update_strategy") - if !ok { - update_strategy = "RESTART" - } - d.Set("update_strategy", update_strategy.(string)) - - return nil -} -func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - // If target_pools changes then update - if d.HasChange("target_pools") { - var targetPools []string - if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { - for _, v := range attr.List() { - targetPools = append(targetPools, v.(string)) - } - } - - // Build the parameter - setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{ - Fingerprint: d.Get("fingerprint").(string), - TargetPools: targetPools, - } - - op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools( - project, d.Get("zone").(string), d.Id(), 
setTargetPools).Do() - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") - if err != nil { - return err - } - - d.SetPartial("target_pools") - } - - // If instance_template changes then update - if d.HasChange("instance_template") { - // Build the parameter - setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{ - InstanceTemplate: d.Get("instance_template").(string), - } - - op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( - project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") - if err != nil { - return err - } - - if d.Get("update_strategy").(string) == "RESTART" { - managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances( - project, d.Get("zone").(string), d.Id()).Do() - - managedInstanceCount := len(managedInstances.ManagedInstances) - instances := make([]string, managedInstanceCount) - for i, v := range managedInstances.ManagedInstances { - instances[i] = v.Instance - } - - recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{ - Instances: instances, - } - - op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances( - project, d.Get("zone").(string), d.Id(), recreateInstances).Do() - - if err != nil { - return fmt.Errorf("Error restarting instance group managers instances: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string), - managedInstanceCount*4, "Restarting InstanceGroupManagers instances") - if err != nil { - 
return err - } - } - - d.SetPartial("instance_template") - } - - // If named_port changes then update: - if d.HasChange("named_port") { - - // Build the parameters for a "SetNamedPorts" request: - namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) - setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{ - NamedPorts: namedPorts, - } - - // Make the request: - op, err := config.clientCompute.InstanceGroups.SetNamedPorts( - project, d.Get("zone").(string), d.Id(), setNamedPorts).Do() - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete: - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") - if err != nil { - return err - } - - d.SetPartial("named_port") - } - - // If size changes trigger a resize - if d.HasChange("target_size") { - if v, ok := d.GetOk("target_size"); ok { - // Only do anything if the new size is set - target_size := int64(v.(int)) - - op, err := config.clientCompute.InstanceGroupManagers.Resize( - project, d.Get("zone").(string), d.Id(), target_size).Do() - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") - if err != nil { - return err - } - } - - d.SetPartial("target_size") - } - - d.Partial(false) - - return resourceComputeInstanceGroupManagerRead(d, meta) -} - -func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() - attempt := 0 - for err != nil && attempt < 20 { - attempt++ - time.Sleep(2000 * time.Millisecond) - op, err = 
config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() - } - if err != nil { - return fmt.Errorf("Error deleting instance group manager: %s", err) - } - - currentSize := int64(d.Get("target_size").(int)) - - // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") - - for err != nil && currentSize > 0 { - if !strings.Contains(err.Error(), "timeout") { - return err - } - - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - project, d.Get("zone").(string), d.Id()).Do() - - if err != nil { - return fmt.Errorf("Error getting instance group size: %s", err) - } - - if instanceGroup.Size >= currentSize { - return fmt.Errorf("Error, instance group isn't shrinking during delete") - } - - log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize) - - currentSize = instanceGroup.Size - - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_instance_group_manager_test.go b/builtin/providers/google/resource_compute_instance_group_manager_test.go deleted file mode 100644 index 22e35d163..000000000 --- a/builtin/providers/google/resource_compute_instance_group_manager_test.go +++ /dev/null @@ -1,648 +0,0 @@ -package google - -import ( - "fmt" - "reflect" - "strings" - "testing" - - "google.golang.org/api/compute/v1" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccInstanceGroupManager_basic(t *testing.T) { - var manager compute.InstanceGroupManager - - template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - 
igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-basic", &manager), - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-no-tp", &manager), - ), - }, - }, - }) -} - -func TestAccInstanceGroupManager_update(t *testing.T) { - var manager compute.InstanceGroupManager - - template1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - template2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_update(template1, target, igm), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-update", &manager), - testAccCheckInstanceGroupManagerNamedPorts( - "google_compute_instance_group_manager.igm-update", - map[string]int64{"customhttp": 8080}, - &manager), - ), - }, - resource.TestStep{ - Config: testAccInstanceGroupManager_update2(template1, target, template2, igm), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-update", &manager), - testAccCheckInstanceGroupManagerUpdated( - "google_compute_instance_group_manager.igm-update", 3, - "google_compute_target_pool.igm-update", template2), - 
testAccCheckInstanceGroupManagerNamedPorts( - "google_compute_instance_group_manager.igm-update", - map[string]int64{"customhttp": 8080, "customhttps": 8443}, - &manager), - ), - }, - }, - }) -} - -func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { - var manager compute.InstanceGroupManager - - tag1 := "tag1" - tag2 := "tag2" - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_updateLifecycle(tag1, igm), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-update", &manager), - ), - }, - resource.TestStep{ - Config: testAccInstanceGroupManager_updateLifecycle(tag2, igm), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-update", &manager), - testAccCheckInstanceGroupManagerTemplateTags( - "google_compute_instance_group_manager.igm-update", []string{tag2}), - ), - }, - }, - }) -} - -func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { - var manager compute.InstanceGroupManager - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_updateStrategy(igm), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-update-strategy", &manager), - testAccCheckInstanceGroupManagerUpdateStrategy( - "google_compute_instance_group_manager.igm-update-strategy", "NONE"), - ), - }, - }, - }) -} - -func 
TestAccInstanceGroupManager_separateRegions(t *testing.T) { - var manager compute.InstanceGroupManager - - igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_separateRegions(igm1, igm2), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-basic", &manager), - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.igm-basic-2", &manager), - ), - }, - }, - }) -} - -func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_instance_group_manager" { - continue - } - _, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("InstanceGroupManager still exists") - } - } - - return nil -} - -func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("InstanceGroupManager not found") - } - - *manager = *found - - return nil - } -} - -func testAccCheckInstanceGroupManagerUpdated(n 
string, size int64, targetPool string, template string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - manager, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - // Cannot check the target pool as the instance creation is asynchronous. However, can - // check the target_size. - if manager.TargetSize != size { - return fmt.Errorf("instance count incorrect") - } - - // check that the instance template updated - instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( - config.Project, template).Do() - if err != nil { - return fmt.Errorf("Error reading instance template: %s", err) - } - - if instanceTemplate.Name != template { - return fmt.Errorf("instance template not updated") - } - - return nil - } -} - -func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, instanceGroupManager *compute.InstanceGroupManager) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - manager, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - var found bool - for _, namedPort := range manager.NamedPorts { - found = false - for name, port := range np { - if namedPort.Name == name && namedPort.Port == port { - found = true - } - } - if !found { - return fmt.Errorf("named port incorrect") - } - } - - return nil - } -} - -func testAccCheckInstanceGroupManagerTemplateTags(n string, 
tags []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - manager, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - // check that the instance template updated - instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( - config.Project, resourceSplitter(manager.InstanceTemplate)).Do() - if err != nil { - return fmt.Errorf("Error reading instance template: %s", err) - } - - if !reflect.DeepEqual(instanceTemplate.Properties.Tags.Items, tags) { - return fmt.Errorf("instance template not updated") - } - - return nil - } -} - -func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.Attributes["update_strategy"] != strategy { - return fmt.Errorf("Expected strategy to be %s, got %s", - strategy, rs.Primary.Attributes["update_strategy"]) - } - return nil - } -} - -func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-basic" { - name = "%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - } - - resource "google_compute_target_pool" "igm-basic" { - 
description = "Resource created for Terraform acceptance testing" - name = "%s" - session_affinity = "CLIENT_IP_PROTO" - } - - resource "google_compute_instance_group_manager" "igm-basic" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] - base_instance_name = "igm-basic" - zone = "us-central1-c" - target_size = 2 - } - - resource "google_compute_instance_group_manager" "igm-no-tp" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - base_instance_name = "igm-no-tp" - zone = "us-central1-c" - target_size = 2 - } - `, template, target, igm1, igm2) -} - -func testAccInstanceGroupManager_update(template, target, igm string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-update" { - name = "%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - } - - resource "google_compute_target_pool" "igm-update" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - session_affinity = "CLIENT_IP_PROTO" - } - - resource "google_compute_instance_group_manager" "igm-update" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update.self_link}" - target_pools = ["${google_compute_target_pool.igm-update.self_link}"] - base_instance_name = "igm-update" - zone = "us-central1-c" - target_size = 2 - named_port { - name = "customhttp" - port = 8080 - } - }`, template, 
target, igm) -} - -// Change IGM's instance template and target size -func testAccInstanceGroupManager_update2(template1, target, template2, igm string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-update" { - name = "%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - } - - resource "google_compute_target_pool" "igm-update" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - session_affinity = "CLIENT_IP_PROTO" - } - - resource "google_compute_instance_template" "igm-update2" { - name = "%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - } - - resource "google_compute_instance_group_manager" "igm-update" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update2.self_link}" - target_pools = ["${google_compute_target_pool.igm-update.self_link}"] - base_instance_name = "igm-update" - zone = "us-central1-c" - target_size = 3 - named_port { - name = "customhttp" - port = 8080 - } - named_port { - name = "customhttps" - port = 8443 - } - }`, template1, target, template2, igm) -} - -func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-update" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = 
["%s"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } - } - - resource "google_compute_instance_group_manager" "igm-update" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update.self_link}" - base_instance_name = "igm-update" - zone = "us-central1-c" - target_size = 2 - named_port { - name = "customhttp" - port = 8080 - } - }`, tag, igm) -} - -func testAccInstanceGroupManager_updateStrategy(igm string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-update-strategy" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } - } - - resource "google_compute_instance_group_manager" "igm-update-strategy" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" - base_instance_name = "igm-update-strategy" - zone = "us-central1-c" - target_size = 2 - update_strategy = "NONE" - named_port { - name = "customhttp" - port = 8080 - } - }`, igm) -} - -func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string { - return fmt.Sprintf(` - resource "google_compute_instance_template" "igm-basic" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-8-jessie-v20160803" - auto_delete = true - boot = true - 
} - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - } - - resource "google_compute_instance_group_manager" "igm-basic" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - base_instance_name = "igm-basic" - zone = "us-central1-c" - target_size = 2 - } - - resource "google_compute_instance_group_manager" "igm-basic-2" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - base_instance_name = "igm-basic-2" - zone = "us-west1-b" - target_size = 2 - } - `, igm1, igm2) -} - -func resourceSplitter(resource string) string { - splits := strings.Split(resource, "/") - - return splits[len(splits)-1] -} diff --git a/builtin/providers/google/resource_compute_instance_group_migrate.go b/builtin/providers/google/resource_compute_instance_group_migrate.go deleted file mode 100644 index 1db04c22a..000000000 --- a/builtin/providers/google/resource_compute_instance_group_migrate.go +++ /dev/null @@ -1,74 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func resourceComputeInstanceGroupMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1") - is, err := migrateInstanceGroupStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) 
(*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - newInstances := []string{} - - for k, v := range is.Attributes { - if !strings.HasPrefix(k, "instances.") { - continue - } - - if k == "instances.#" { - continue - } - - // Key is now of the form instances.%d - kParts := strings.Split(k, ".") - - // Sanity check: two parts should be there and should be a number - badFormat := false - if len(kParts) != 2 { - badFormat = true - } else if _, err := strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k) - } - - newInstances = append(newInstances, v) - delete(is.Attributes, k) - } - - for _, v := range newInstances { - hash := schema.HashString(v) - newKey := fmt.Sprintf("instances.%d", hash) - is.Attributes[newKey] = v - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/google/resource_compute_instance_group_migrate_test.go b/builtin/providers/google/resource_compute_instance_group_migrate_test.go deleted file mode 100644 index 88057d99e..000000000 --- a/builtin/providers/google/resource_compute_instance_group_migrate_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestComputeInstanceGroupMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "change instances from list to set": { - StateVersion: 0, - Attributes: map[string]string{ - "instances.#": "1", - "instances.0": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", - "instances.1": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", - }, - Expected: 
map[string]string{ - "instances.#": "1", - "instances.764135222": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", - "instances.1519187872": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", - }, - Meta: &Config{}, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceComputeInstanceGroupMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestComputeInstanceGroupMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta *Config - - // should handle nil - is, err := resourceComputeInstanceGroupMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceComputeInstanceGroupMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/google/resource_compute_instance_group_test.go b/builtin/providers/google/resource_compute_instance_group_test.go deleted file mode 100644 index 50d956fcd..000000000 --- a/builtin/providers/google/resource_compute_instance_group_test.go +++ /dev/null @@ -1,468 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "google.golang.org/api/compute/v1" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeInstanceGroup_basic(t *testing.T) { - var instanceGroup compute.InstanceGroup - var 
instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccComputeInstanceGroup_destroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeInstanceGroup_basic(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.basic", &instanceGroup), - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.empty", &instanceGroup), - ), - }, - }, - }) -} - -func TestAccComputeInstanceGroup_update(t *testing.T) { - var instanceGroup compute.InstanceGroup - var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccComputeInstanceGroup_destroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeInstanceGroup_update(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.update", &instanceGroup), - testAccComputeInstanceGroup_named_ports( - "google_compute_instance_group.update", - map[string]int64{"http": 8080, "https": 8443}, - &instanceGroup), - ), - }, - { - Config: testAccComputeInstanceGroup_update2(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.update", &instanceGroup), - testAccComputeInstanceGroup_updated( - "google_compute_instance_group.update", 3, &instanceGroup), - testAccComputeInstanceGroup_named_ports( - "google_compute_instance_group.update", - map[string]int64{"http": 8081, "test": 8444}, - &instanceGroup), - ), - }, - }, - }) -} - -func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) { - var instanceGroup compute.InstanceGroup - var instanceName = fmt.Sprintf("instancegroup-test-%s", 
acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccComputeInstanceGroup_destroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.group", &instanceGroup), - ), - }, - }, - }) -} - -func TestAccComputeInstanceGroup_network(t *testing.T) { - var instanceGroup compute.InstanceGroup - var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccComputeInstanceGroup_destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceGroup_network(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.with_instance", &instanceGroup), - testAccComputeInstanceGroup_hasCorrectNetwork( - "google_compute_instance_group.with_instance", "google_compute_network.ig_network", &instanceGroup), - testAccComputeInstanceGroup_exists( - "google_compute_instance_group.without_instance", &instanceGroup), - testAccComputeInstanceGroup_hasCorrectNetwork( - "google_compute_instance_group.without_instance", "google_compute_network.ig_network", &instanceGroup), - ), - }, - }, - }) -} - -func testAccComputeInstanceGroup_destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_instance_group" { - continue - } - _, err := config.clientCompute.InstanceGroups.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("InstanceGroup still exists") - } - } - - return nil -} - -func testAccComputeInstanceGroup_exists(n 
string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.InstanceGroups.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("InstanceGroup not found") - } - - *instanceGroup = *found - - return nil - } -} - -func testAccComputeInstanceGroup_updated(n string, size int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - // Cannot check the target pool as the instance creation is asynchronous. However, can - // check the target_size. 
- if instanceGroup.Size != size { - return fmt.Errorf("instance count incorrect") - } - - return nil - } -} - -func testAccComputeInstanceGroup_named_ports(n string, np map[string]int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - var found bool - for _, namedPort := range instanceGroup.NamedPorts { - found = false - for name, port := range np { - if namedPort.Name == name && namedPort.Port == port { - found = true - } - } - if !found { - return fmt.Errorf("named port incorrect") - } - } - - return nil - } -} - -func testAccComputeInstanceGroup_hasCorrectNetwork(nInstanceGroup string, nNetwork string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rsInstanceGroup, ok := s.RootModule().Resources[nInstanceGroup] - if !ok { - return fmt.Errorf("Not found: %s", nInstanceGroup) - } - if rsInstanceGroup.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - config.Project, rsInstanceGroup.Primary.Attributes["zone"], rsInstanceGroup.Primary.ID).Do() - if err != nil { - return err - } - - rsNetwork, ok := s.RootModule().Resources[nNetwork] - if !ok { - return fmt.Errorf("Not found: %s", nNetwork) - } - if rsNetwork.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - network, err := config.clientCompute.Networks.Get( - config.Project, rsNetwork.Primary.ID).Do() - if err != nil { - return err - } - - if instanceGroup.Network != network.SelfLink { - 
return fmt.Errorf("network incorrect: actual=%s vs expected=%s", instanceGroup.Network, network.SelfLink) - } - - return nil - } -} - -func testAccComputeInstanceGroup_basic(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "ig_instance" { - name = "%s" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_instance_group" "basic" { - description = "Terraform test instance group" - name = "%s" - zone = "us-central1-c" - instances = [ "${google_compute_instance.ig_instance.self_link}" ] - named_port { - name = "http" - port = "8080" - } - named_port { - name = "https" - port = "8443" - } - } - - resource "google_compute_instance_group" "empty" { - description = "Terraform test instance group empty" - name = "%s-empty" - zone = "us-central1-c" - named_port { - name = "http" - port = "8080" - } - named_port { - name = "https" - port = "8443" - } - }`, instance, instance, instance) -} - -func testAccComputeInstanceGroup_update(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "ig_instance" { - name = "%s-${count.index}" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - count = 1 - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_instance_group" "update" { - description = "Terraform test instance group" - name = "%s" - zone = "us-central1-c" - instances = [ "${google_compute_instance.ig_instance.self_link}" ] - named_port { - name = "http" - port = "8080" - } - named_port { - name = "https" - port = "8443" - } - }`, instance, instance) -} - -// Change IGM's instance template and target size -func testAccComputeInstanceGroup_update2(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" 
"ig_instance" { - name = "%s-${count.index}" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - count = 3 - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_instance_group" "update" { - description = "Terraform test instance group" - name = "%s" - zone = "us-central1-c" - instances = [ "${google_compute_instance.ig_instance.*.self_link}" ] - - named_port { - name = "http" - port = "8081" - } - named_port { - name = "test" - port = "8444" - } - }`, instance, instance) -} - -func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "ig_instance" { - name = "%s-1" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_instance" "ig_instance_2" { - name = "%s-2" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_instance_group" "group" { - description = "Terraform test instance group" - name = "%s" - zone = "us-central1-c" - instances = [ "${google_compute_instance.ig_instance_2.self_link}", "${google_compute_instance.ig_instance.self_link}" ] - named_port { - name = "http" - port = "8080" - } - named_port { - name = "https" - port = "8443" - } - }`, instance, instance, instance) -} - -func testAccComputeInstanceGroup_network(instance string) string { - return fmt.Sprintf(` - resource "google_compute_network" "ig_network" { - name = "%[1]s" - auto_create_subnetworks = true - } - - resource "google_compute_instance" "ig_instance" { - name = "%[1]s" - machine_type = "n1-standard-1" - can_ip_forward = false - zone = "us-central1-c" - - disk { - image 
= "debian-8-jessie-v20160803" - } - - network_interface { - network = "${google_compute_network.ig_network.name}" - } - } - - resource "google_compute_instance_group" "with_instance" { - description = "Terraform test instance group" - name = "%[1]s-with-instance" - zone = "us-central1-c" - instances = [ "${google_compute_instance.ig_instance.self_link}" ] - } - - resource "google_compute_instance_group" "without_instance" { - description = "Terraform test instance group" - name = "%[1]s-without-instance" - zone = "us-central1-c" - network = "${google_compute_network.ig_network.self_link}" - }`, instance) -} diff --git a/builtin/providers/google/resource_compute_instance_migrate.go b/builtin/providers/google/resource_compute_instance_migrate.go deleted file mode 100644 index 2b463f9a8..000000000 --- a/builtin/providers/google/resource_compute_instance_migrate.go +++ /dev/null @@ -1,154 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/terraform" -) - -func resourceComputeInstanceMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Compute Instance State v0; migrating to v1") - is, err := migrateStateV0toV1(is) - if err != nil { - return is, err - } - fallthrough - case 1: - log.Println("[INFO] Found Compute Instance State v1; migrating to v2") - is, err := migrateStateV1toV2(is) - if err != nil { - return is, err - } - return is, nil - case 2: - log.Println("[INFO] Found Compute Instance State v2; migrating to v3") - is, err := migrateStateV2toV3(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateStateV0toV1(is *terraform.InstanceState) 
(*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - // Delete old count - delete(is.Attributes, "metadata.#") - - newMetadata := make(map[string]string) - - for k, v := range is.Attributes { - if !strings.HasPrefix(k, "metadata.") { - continue - } - - // We have a key that looks like "metadata.*" and we know it's not - // metadata.# because we deleted it above, so it must be metadata.. - // from the List of Maps. Just need to convert it to a single Map by - // ditching the '' field. - kParts := strings.SplitN(k, ".", 3) - - // Sanity check: all three parts should be there and should be a number - badFormat := false - if len(kParts) != 3 { - badFormat = true - } else if _, err := strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, fmt.Errorf( - "migration error: found metadata key in unexpected format: %s", k) - } - - // Rejoin as "metadata." - newK := strings.Join([]string{kParts[0], kParts[2]}, ".") - newMetadata[newK] = v - delete(is.Attributes, k) - } - - for k, v := range newMetadata { - is.Attributes[k] = v - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - // Maps service account index to list of scopes for that sccount - newScopesMap := make(map[string][]string) - - for k, v := range is.Attributes { - if !strings.HasPrefix(k, "service_account.") { - continue - } - - if k == "service_account.#" { - continue - } - - if strings.HasSuffix(k, ".scopes.#") { - continue - } - - if strings.HasSuffix(k, ".email") { - continue - } - - // Key is now of the form service_account.%d.scopes.%d - kParts := strings.Split(k, ".") - - // Sanity check: all three parts should be there and should be a number - badFormat := false - if len(kParts) != 4 { - badFormat = true 
- } else if _, err := strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, fmt.Errorf( - "migration error: found scope key in unexpected format: %s", k) - } - - newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) - - delete(is.Attributes, k) - } - - for service_acct_index, newScopes := range newScopesMap { - for _, newScope := range newScopes { - hash := hashcode.String(canonicalizeServiceScope(newScope)) - newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) - is.Attributes[newKey] = newScope - } - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - is.Attributes["create_timeout"] = "4" - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/google/resource_compute_instance_migrate_test.go b/builtin/providers/google/resource_compute_instance_migrate_test.go deleted file mode 100644 index bce44e635..000000000 --- a/builtin/providers/google/resource_compute_instance_migrate_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestComputeInstanceMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "v0.4.2 and earlier": { - StateVersion: 0, - Attributes: map[string]string{ - "metadata.#": "2", - "metadata.0.foo": "bar", - "metadata.1.baz": "qux", - "metadata.2.with.dots": "should.work", - }, - Expected: map[string]string{ - "metadata.foo": "bar", - "metadata.baz": "qux", - "metadata.with.dots": "should.work", - }, - }, - "change scope from list to set": { - StateVersion: 1, - Attributes: map[string]string{ - "service_account.#": 
"1", - "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", - "service_account.0.scopes.#": "4", - "service_account.0.scopes.0": "https://www.googleapis.com/auth/compute", - "service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore", - "service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control", - "service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write", - }, - Expected: map[string]string{ - "service_account.#": "1", - "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", - "service_account.0.scopes.#": "4", - "service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", - "service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", - "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", - "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", - }, - }, - "add new create_timeout attribute": { - StateVersion: 2, - Attributes: map[string]string{}, - Expected: map[string]string{ - "create_timeout": "4", - }, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceComputeInstanceMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestComputeInstanceMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceComputeInstanceMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - 
is = &terraform.InstanceState{} - is, err = resourceComputeInstanceMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go deleted file mode 100644 index 7b38a5b0c..000000000 --- a/builtin/providers/google/resource_compute_instance_template.go +++ /dev/null @@ -1,835 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeInstanceTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceTemplateCreate, - Read: resourceComputeInstanceTemplateRead, - Delete: resourceComputeInstanceTemplateDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - value := v.(string) - if len(value) > 63 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 63 characters", k)) - } - return - }, - }, - - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. 
- value := v.(string) - if len(value) > 37 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - "disk": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auto_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "boot": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "device_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "disk_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "disk_size_gb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "disk_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "source_image": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - }, - }, - - "machine_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - Deprecated: "Please use `scheduling.automatic_restart` instead", - }, - - "can_ip_forward": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "description": &schema.Schema{ - 
Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "instance_description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "metadata_startup_script": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "metadata_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "network_interface": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "network_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "subnetwork": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "subnetwork_project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "access_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "nat_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - - "on_host_maintenance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "Please use `scheduling.on_host_maintenance` instead", - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "scheduling": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "preemptible": &schema.Schema{ - Type: 
schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "on_host_maintenance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "service_account": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "scopes": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - }, - }, - }, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "tags_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDisk, error) { - config := meta.(*Config) - - disksCount := d.Get("disk.#").(int) - - disks := make([]*compute.AttachedDisk, 0, disksCount) - for i := 0; i < disksCount; i++ { - prefix := fmt.Sprintf("disk.%d", i) - - // Build the disk - var disk compute.AttachedDisk - disk.Type = "PERSISTENT" - disk.Mode = "READ_WRITE" - disk.Interface = "SCSI" - disk.Boot = i == 0 - disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) - - if v, ok := d.GetOk(prefix + ".boot"); ok { - disk.Boot = v.(bool) - } - - if v, ok := d.GetOk(prefix + ".device_name"); ok { - disk.DeviceName = v.(string) - } - - if v, ok := d.GetOk(prefix + ".source"); ok { - disk.Source = v.(string) - 
} else { - disk.InitializeParams = &compute.AttachedDiskInitializeParams{} - - if v, ok := d.GetOk(prefix + ".disk_name"); ok { - disk.InitializeParams.DiskName = v.(string) - } - if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { - disk.InitializeParams.DiskSizeGb = int64(v.(int)) - } - disk.InitializeParams.DiskType = "pd-standard" - if v, ok := d.GetOk(prefix + ".disk_type"); ok { - disk.InitializeParams.DiskType = v.(string) - } - - if v, ok := d.GetOk(prefix + ".source_image"); ok { - imageName := v.(string) - imageUrl, err := resolveImage(config, imageName) - if err != nil { - return nil, fmt.Errorf( - "Error resolving image name '%s': %s", - imageName, err) - } - disk.InitializeParams.SourceImage = imageUrl - } - } - - if v, ok := d.GetOk(prefix + ".interface"); ok { - disk.Interface = v.(string) - } - - if v, ok := d.GetOk(prefix + ".mode"); ok { - disk.Mode = v.(string) - } - - if v, ok := d.GetOk(prefix + ".type"); ok { - disk.Type = v.(string) - } - - disks = append(disks, &disk) - } - - return disks, nil -} - -func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.NetworkInterface, error) { - // Build up the list of networks - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - networksCount := d.Get("network_interface.#").(int) - networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network_interface.%d", i) - - var networkName, subnetworkName, subnetworkProject string - if v, ok := d.GetOk(prefix + ".network"); ok { - networkName = v.(string) - } - if v, ok := d.GetOk(prefix + ".subnetwork"); ok { - subnetworkName = v.(string) - } - if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok { - subnetworkProject = v.(string) - } - if networkName == "" && subnetworkName == "" { - return nil, fmt.Errorf("network or subnetwork must be provided") - } - if networkName != "" && subnetworkName != "" { - 
return nil, fmt.Errorf("network or subnetwork must not both be provided") - } - - var networkLink, subnetworkLink string - if networkName != "" { - networkLink, err = getNetworkLink(d, config, prefix+".network") - if err != nil { - return nil, fmt.Errorf("Error referencing network '%s': %s", - networkName, err) - } - - } else { - // lookup subnetwork link using region and subnetwork name - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - if subnetworkProject == "" { - subnetworkProject = project - } - subnetwork, err := config.clientCompute.Subnetworks.Get( - subnetworkProject, region, subnetworkName).Do() - if err != nil { - return nil, fmt.Errorf( - "Error referencing subnetwork '%s' in region '%s': %s", - subnetworkName, region, err) - } - subnetworkLink = subnetwork.SelfLink - } - - // Build the networkInterface - var iface compute.NetworkInterface - iface.Network = networkLink - iface.Subnetwork = subnetworkLink - if v, ok := d.GetOk(prefix + ".network_ip"); ok { - iface.NetworkIP = v.(string) - } - accessConfigsCount := d.Get(prefix + ".access_config.#").(int) - iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) - for j := 0; j < accessConfigsCount; j++ { - acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) - iface.AccessConfigs[j] = &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(acPrefix + ".nat_ip").(string), - } - } - - networkInterfaces = append(networkInterfaces, &iface) - } - return networkInterfaces, nil -} - -func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceProperties := &compute.InstanceProperties{} - - instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool) - instanceProperties.Description = d.Get("instance_description").(string) - instanceProperties.MachineType = d.Get("machine_type").(string) - disks, err := 
buildDisks(d, meta) - if err != nil { - return err - } - instanceProperties.Disks = disks - - metadata, err := resourceInstanceMetadata(d) - if err != nil { - return err - } - instanceProperties.Metadata = metadata - networks, err := buildNetworks(d, meta) - if err != nil { - return err - } - instanceProperties.NetworkInterfaces = networks - - instanceProperties.Scheduling = &compute.Scheduling{} - instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" - - // Depreciated fields - if v, ok := d.GetOk("automatic_restart"); ok { - instanceProperties.Scheduling.AutomaticRestart = v.(bool) - } - - if v, ok := d.GetOk("on_host_maintenance"); ok { - instanceProperties.Scheduling.OnHostMaintenance = v.(string) - } - - forceSendFieldsScheduling := make([]string, 0, 3) - var hasSendMaintenance bool - hasSendMaintenance = false - if v, ok := d.GetOk("scheduling"); ok { - _schedulings := v.([]interface{}) - if len(_schedulings) > 1 { - return fmt.Errorf("Error, at most one `scheduling` block can be defined") - } - _scheduling := _schedulings[0].(map[string]interface{}) - - if vp, okp := _scheduling["automatic_restart"]; okp { - instanceProperties.Scheduling.AutomaticRestart = vp.(bool) - forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart") - } - - if vp, okp := _scheduling["on_host_maintenance"]; okp { - instanceProperties.Scheduling.OnHostMaintenance = vp.(string) - forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") - hasSendMaintenance = true - } - - if vp, okp := _scheduling["preemptible"]; okp { - instanceProperties.Scheduling.Preemptible = vp.(bool) - forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible") - if vp.(bool) && !hasSendMaintenance { - instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE" - forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") - } - } - } - instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling - - 
serviceAccountsCount := d.Get("service_account.#").(int) - serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) - for i := 0; i < serviceAccountsCount; i++ { - prefix := fmt.Sprintf("service_account.%d", i) - - scopesCount := d.Get(prefix + ".scopes.#").(int) - scopes := make([]string, 0, scopesCount) - for j := 0; j < scopesCount; j++ { - scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string) - scopes = append(scopes, canonicalizeServiceScope(scope)) - } - - email := "default" - if v := d.Get(prefix + ".email"); v != nil { - email = v.(string) - } - - serviceAccount := &compute.ServiceAccount{ - Email: email, - Scopes: scopes, - } - - serviceAccounts = append(serviceAccounts, serviceAccount) - } - instanceProperties.ServiceAccounts = serviceAccounts - - instanceProperties.Tags = resourceInstanceTags(d) - - var itName string - if v, ok := d.GetOk("name"); ok { - itName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - itName = resource.PrefixedUniqueId(v.(string)) - } else { - itName = resource.UniqueId() - } - instanceTemplate := compute.InstanceTemplate{ - Description: d.Get("description").(string), - Properties: instanceProperties, - Name: itName, - } - - op, err := config.clientCompute.InstanceTemplates.Insert( - project, &instanceTemplate).Do() - if err != nil { - return fmt.Errorf("Error creating instance: %s", err) - } - - // Store the ID now - d.SetId(instanceTemplate.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template") - if err != nil { - return err - } - - return resourceComputeInstanceTemplateRead(d, meta) -} - -func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(disks)) - for i, disk := range disks { - diskMap := make(map[string]interface{}) - if disk.InitializeParams != nil { - var source_img = fmt.Sprintf("disk.%d.source_image", i) - if d.Get(source_img) == nil || 
d.Get(source_img) == "" { - sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") - diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] - } else { - diskMap["source_image"] = d.Get(source_img) - } - diskMap["disk_type"] = disk.InitializeParams.DiskType - diskMap["disk_name"] = disk.InitializeParams.DiskName - diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb - } - diskMap["auto_delete"] = disk.AutoDelete - diskMap["boot"] = disk.Boot - diskMap["device_name"] = disk.DeviceName - diskMap["interface"] = disk.Interface - diskMap["source"] = disk.Source - diskMap["mode"] = disk.Mode - diskMap["type"] = disk.Type - result = append(result, diskMap) - } - return result -} - -func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string) { - result := make([]map[string]interface{}, 0, len(networkInterfaces)) - region := "" - for _, networkInterface := range networkInterfaces { - networkInterfaceMap := make(map[string]interface{}) - if networkInterface.Network != "" { - networkUrl := strings.Split(networkInterface.Network, "/") - networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1] - } - if networkInterface.NetworkIP != "" { - networkInterfaceMap["network_ip"] = networkInterface.NetworkIP - } - if networkInterface.Subnetwork != "" { - subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") - networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] - region = subnetworkUrl[len(subnetworkUrl)-3] - networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5] - } - - if networkInterface.AccessConfigs != nil { - accessConfigsMap := make([]map[string]interface{}, 0, len(networkInterface.AccessConfigs)) - for _, accessConfig := range networkInterface.AccessConfigs { - accessConfigMap := make(map[string]interface{}) - accessConfigMap["nat_ip"] = accessConfig.NatIP - - accessConfigsMap = append(accessConfigsMap, accessConfigMap) - } - 
networkInterfaceMap["access_config"] = accessConfigsMap - } - result = append(result, networkInterfaceMap) - } - return result, region -} - -func flattenScheduling(scheduling *compute.Scheduling) ([]map[string]interface{}, bool) { - result := make([]map[string]interface{}, 0, 1) - schedulingMap := make(map[string]interface{}) - schedulingMap["automatic_restart"] = scheduling.AutomaticRestart - schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance - schedulingMap["preemptible"] = scheduling.Preemptible - result = append(result, schedulingMap) - return result, scheduling.AutomaticRestart -} - -func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(serviceAccounts)) - for _, serviceAccount := range serviceAccounts { - serviceAccountMap := make(map[string]interface{}) - serviceAccountMap["email"] = serviceAccount.Email - serviceAccountMap["scopes"] = serviceAccount.Scopes - - result = append(result, serviceAccountMap) - } - return result -} - -func flattenMetadata(metadata *compute.Metadata) map[string]string { - metadataMap := make(map[string]string) - for _, item := range metadata.Items { - metadataMap[item.Key] = *item.Value - } - return metadataMap -} - -func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) - } - - // Set the metadata fingerprint if there is one. 
- if instanceTemplate.Properties.Metadata != nil { - if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { - return fmt.Errorf("Error setting metadata_fingerprint: %s", err) - } - - md := instanceTemplate.Properties.Metadata - - _md := flattenMetadata(md) - - if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { - if err = d.Set("metadata_startup_script", script); err != nil { - return fmt.Errorf("Error setting metadata_startup_script: %s", err) - } - delete(_md, "startup-script") - } - if err = d.Set("metadata", _md); err != nil { - return fmt.Errorf("Error setting metadata: %s", err) - } - } - - // Set the tags fingerprint if there is one. - if instanceTemplate.Properties.Tags != nil { - if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { - return fmt.Errorf("Error setting tags_fingerprint: %s", err) - } - } - if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err = d.Set("name", instanceTemplate.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if instanceTemplate.Properties.Disks != nil { - if err = d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)); err != nil { - return fmt.Errorf("Error setting disk: %s", err) - } - } - if err = d.Set("description", instanceTemplate.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { - return fmt.Errorf("Error setting machine_type: %s", err) - } - - if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { - return fmt.Errorf("Error setting can_ip_forward: %s", err) - } - - if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { - return fmt.Errorf("Error setting instance_description: %s", err) - } 
- if err = d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if instanceTemplate.Properties.NetworkInterfaces != nil { - networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces) - if err = d.Set("network_interface", networkInterfaces); err != nil { - return fmt.Errorf("Error setting network_interface: %s", err) - } - // region is where to look up the subnetwork if there is one attached to the instance template - if region != "" { - if err = d.Set("region", region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - } - } - if instanceTemplate.Properties.Scheduling != nil { - scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling) - if err = d.Set("scheduling", scheduling); err != nil { - return fmt.Errorf("Error setting scheduling: %s", err) - } - if err = d.Set("automatic_restart", autoRestart); err != nil { - return fmt.Errorf("Error setting automatic_restart: %s", err) - } - } - if instanceTemplate.Properties.Tags != nil { - if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { - return fmt.Errorf("Error setting tags: %s", err) - } - } - if instanceTemplate.Properties.ServiceAccounts != nil { - if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { - return fmt.Errorf("Error setting service_account: %s", err) - } - } - return nil -} - -func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.clientCompute.InstanceTemplates.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting instance template: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Instance Template") - if err != nil { - return err - } - - d.SetId("") - 
return nil -} diff --git a/builtin/providers/google/resource_compute_instance_template_test.go b/builtin/providers/google/resource_compute_instance_template_test.go deleted file mode 100644 index 62a8beef5..000000000 --- a/builtin/providers/google/resource_compute_instance_template_test.go +++ /dev/null @@ -1,608 +0,0 @@ -package google - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeInstanceTemplate_basic(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), - testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_IP(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_ip, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - 
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_networkIP(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - networkIP := "10.128.0.2" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_networkIP(networkIP), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), - testAccCheckComputeInstanceTemplateNetworkIP( - "google_compute_instance_template.foobar", networkIP, &instanceTemplate), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_disks(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_disks, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - network := "network-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_subnet_auto(network), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_subnet_custom, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), - ), - }, - }, - }) -} - -func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) { - var instanceTemplate compute.InstanceTemplate - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstanceTemplate_startup_script, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceTemplateExists( - "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"), - ), - }, - }, - }) -} - -func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_instance_template" { - continue - } - - _, err := config.clientCompute.InstanceTemplates.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Instance template still exists") - } - } - - return nil -} - -func testAccCheckComputeInstanceTemplateExists(n string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.InstanceTemplates.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Instance template not found") - } - - *instanceTemplate = *found - - return nil - } -} - -func testAccCheckComputeInstanceTemplateMetadata( - instanceTemplate *compute.InstanceTemplate, - k string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instanceTemplate.Properties.Metadata == nil { - return fmt.Errorf("no metadata") - } - - for _, item := range instanceTemplate.Properties.Metadata.Items { - if k != item.Key { - continue - } - - if item.Value != nil && v == *item.Value { - return nil - } - - return fmt.Errorf("bad 
value for %s: %s", k, *item.Value) - } - - return fmt.Errorf("metadata not found: %s", k) - } -} - -func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instanceTemplate.Properties.NetworkInterfaces { - for _, c := range i.AccessConfigs { - if c.NatIP == "" { - return fmt.Errorf("no NAT IP") - } - } - } - - return nil - } -} - -func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instanceTemplate.Properties.NetworkInterfaces { - if !strings.Contains(i.Network, network) { - return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:]) - } - } - - return nil - } -} - -func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instanceTemplate.Properties.Disks == nil { - return fmt.Errorf("no disks") - } - - for _, disk := range instanceTemplate.Properties.Disks { - if disk.InitializeParams == nil { - // Check disk source - if disk.Source == source { - if disk.AutoDelete == delete && disk.Boot == boot { - return nil - } - } - } else { - // Check source image - if disk.InitializeParams.SourceImage == source { - if disk.AutoDelete == delete && disk.Boot == boot { - return nil - } - } - } - } - - return fmt.Errorf("Disk not found: %s", source) - } -} - -func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instanceTemplate.Properties.NetworkInterfaces { - if i.Subnetwork == "" { - return fmt.Errorf("no subnet") - } - } - - return nil - } -} - -func 
testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instanceTemplate.Properties.Tags == nil { - return fmt.Errorf("no tags") - } - - for _, k := range instanceTemplate.Properties.Tags.Items { - if k == n { - return nil - } - } - - return fmt.Errorf("tag not found: %s", n) - } -} - -func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instanceTemplate.Properties.Metadata == nil && n == "" { - return nil - } else if instanceTemplate.Properties.Metadata == nil && n != "" { - return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n) - } - for _, item := range instanceTemplate.Properties.Metadata.Items { - if item.Key != "startup-script" { - continue - } - if item.Value != nil && *item.Value == n { - return nil - } else if item.Value == nil && n == "" { - return nil - } else if item.Value == nil && n != "" { - return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n) - } else if *item.Value != n { - return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value) - } - } - return fmt.Errorf("This should never be reached.") - } -} - -func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { - return func(s *terraform.State) error { - ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP - err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) - if err != nil { - return err - } - return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s) - } -} - -var testAccComputeInstanceTemplate_basic = fmt.Sprintf(` -resource "google_compute_instance_template" "foobar" { - name = "instancet-test-%s" - machine_type = 
"n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - scheduling { - preemptible = false - automatic_restart = true - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } -}`, acctest.RandString(10)) - -var testAccComputeInstanceTemplate_ip = fmt.Sprintf(` -resource "google_compute_address" "foo" { - name = "instancet-test-%s" -} - -resource "google_compute_instance_template" "foobar" { - name = "instancet-test-%s" - machine_type = "n1-standard-1" - tags = ["foo", "bar"] - - disk { - source_image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - access_config { - nat_ip = "${google_compute_address.foo.address}" - } - } - - metadata { - foo = "bar" - } -}`, acctest.RandString(10), acctest.RandString(10)) - -func testAccComputeInstanceTemplate_networkIP(networkIP string) string { - return fmt.Sprintf(` -resource "google_compute_instance_template" "foobar" { - name = "instancet-test-%s" - machine_type = "n1-standard-1" - tags = ["foo", "bar"] - - disk { - source_image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - network_ip = "%s" - } - - metadata { - foo = "bar" - } -}`, acctest.RandString(10), networkIP) -} - -var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "instancet-test-%s" - image = "debian-8-jessie-v20160803" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" -} - -resource "google_compute_instance_template" "foobar" { - name = "instancet-test-%s" - machine_type = "n1-standard-1" - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - disk_size_gb = 100 - boot = true - } - - disk { - source = "terraform-test-foobar" - auto_delete = false - boot = false - } - - network_interface { - 
network = "default" - } - - metadata { - foo = "bar" - } -}`, acctest.RandString(10), acctest.RandString(10)) - -func testAccComputeInstanceTemplate_subnet_auto(network string) string { - return fmt.Sprintf(` - resource "google_compute_network" "auto-network" { - name = "%s" - auto_create_subnetworks = true - } - - resource "google_compute_instance_template" "foobar" { - name = "instance-tpl-%s" - machine_type = "n1-standard-1" - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - disk_size_gb = 10 - boot = true - } - - network_interface { - network = "${google_compute_network.auto-network.name}" - } - - metadata { - foo = "bar" - } - }`, network, acctest.RandString(10)) -} - -var testAccComputeInstanceTemplate_subnet_custom = fmt.Sprintf(` -resource "google_compute_network" "network" { - name = "network-%s" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "subnetwork" { - name = "subnetwork-%s" - ip_cidr_range = "10.0.0.0/24" - region = "us-central1" - network = "${google_compute_network.network.self_link}" -} - -resource "google_compute_instance_template" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - region = "us-central1" - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - disk_size_gb = 10 - boot = true - } - - network_interface { - subnetwork = "${google_compute_subnetwork.subnetwork.name}" - } - - metadata { - foo = "bar" - } -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - -func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string { - return fmt.Sprintf(` - resource "google_compute_network" "network" { - name = "network-%s" - auto_create_subnetworks = false - project = "%s" - } - - resource "google_compute_subnetwork" "subnetwork" { - name = "subnetwork-%s" - ip_cidr_range = "10.0.0.0/24" - region = "us-central1" - network = "${google_compute_network.network.self_link}" - project = "%s" - } - - resource 
"google_compute_instance_template" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - region = "us-central1" - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - disk_size_gb = 10 - boot = true - } - - network_interface { - subnetwork = "${google_compute_subnetwork.subnetwork.name}" - subnetwork_project = "${google_compute_subnetwork.subnetwork.project}" - } - - metadata { - foo = "bar" - } - }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10)) -} - -var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(` -resource "google_compute_instance_template" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - disk_size_gb = 10 - boot = true - } - - metadata { - foo = "bar" - } - - network_interface{ - network = "default" - } - - metadata_startup_script = "echo 'Hello'" -}`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go deleted file mode 100644 index e91368e24..000000000 --- a/builtin/providers/google/resource_compute_instance_test.go +++ /dev/null @@ -1,1483 +0,0 @@ -package google - -import ( - "fmt" - "os" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccComputeInstance_basic_deprecated_network(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_basic1(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_basic2(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic2(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - 
}) -} - -func TestAccComputeInstance_basic3(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic3(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_basic4(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic4(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_basic5(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic5(instanceName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(&instance, "foo"), - testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_IP(t *testing.T) { - var instance compute.Instance - var ipName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_ip(ipName, instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceAccessConfigHasIP(&instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_disksWithoutAutodelete(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_disks(diskName, instanceName, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - testAccCheckComputeInstanceDisk(&instance, diskName, false, false), - ), - }, - }, - }) -} - -func TestAccComputeInstance_disksWithAutodelete(t *testing.T) { - var instance compute.Instance - var instanceName = 
fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_disks(diskName, instanceName, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - testAccCheckComputeInstanceDisk(&instance, diskName, true, false), - ), - }, - }, - }) -} - -func TestAccComputeInstance_diskEncryption(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_disks_encryption(diskName, instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - testAccCheckComputeInstanceDisk(&instance, diskName, true, false), - testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_attachedDisk(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_attachedDisk(diskName, instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceDisk(&instance, diskName, false, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_noDisk(t *testing.T) { - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_noDisk(instanceName), - ExpectError: regexp.MustCompile("At least one disk or attached_disk must be set"), - }, - }, - }) -} - -func TestAccComputeInstance_local_ssd(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_local_ssd(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.local-ssd", &instance), - testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), - ), - }, - }, - }) -} - -func TestAccComputeInstance_update_deprecated_network(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccComputeInstance_basic_deprecated_network(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - ), - }, - resource.TestStep{ - Config: testAccComputeInstance_update_deprecated_network(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceMetadata( - &instance, "bar", "baz"), - testAccCheckComputeInstanceTag(&instance, "baz"), - ), - }, - }, - }) -} - -func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - ), - }, - resource.TestStep{ - Config: testAccComputeInstance_forceNewAndChangeMetadata(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceMetadata( - &instance, "qux", "true"), - ), - }, - }, - }) -} - -func TestAccComputeInstance_update(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - 
"google_compute_instance.foobar", &instance), - ), - }, - resource.TestStep{ - Config: testAccComputeInstance_update(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceMetadata( - &instance, "bar", "baz"), - testAccCheckComputeInstanceTag(&instance, "baz"), - testAccCheckComputeInstanceAccessConfig(&instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_service_account(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_service_account(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceServiceAccount(&instance, - "https://www.googleapis.com/auth/compute.readonly"), - testAccCheckComputeInstanceServiceAccount(&instance, - "https://www.googleapis.com/auth/devstorage.read_only"), - testAccCheckComputeInstanceServiceAccount(&instance, - "https://www.googleapis.com/auth/userinfo.email"), - ), - }, - }, - }) -} - -func TestAccComputeInstance_scheduling(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_scheduling(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - ), - }, - }, - }) -} - -func 
TestAccComputeInstance_subnet_auto(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_subnet_auto(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasSubnet(&instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_subnet_custom(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_subnet_custom(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasSubnet(&instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_subnet_xpn(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_subnet_xpn(instanceName, xpn_host), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasSubnet(&instance), - ), - }, - }, - }) -} - -func 
TestAccComputeInstance_address_auto(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_address_auto(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasAnyAddress(&instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_address_custom(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var address = "10.0.200.200" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_address_custom(instanceName, address), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasAddress(&instance, address), - ), - }, - }, - }) -} - -func TestAccComputeInstance_private_image_family(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - var imageName = fmt.Sprintf("instance-testi-%s", acctest.RandString(10)) - var familyName = fmt.Sprintf("instance-testf-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccComputeInstance_private_image_family(diskName, imageName, familyName, instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists( - "google_compute_instance.foobar", &instance), - ), - }, - }, - }) -} - -func TestAccComputeInstance_invalid_disk(t *testing.T) { - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_invalid_disk(diskName, instanceName), - ExpectError: regexp.MustCompile("Error: cannot define both disk and type."), - }, - }, - }) -} - -func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) { - var instance compute.Instance - var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeInstance_basic(instanceName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists("google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceUpdateMachineType("google_compute_instance.foobar"), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], 
rs.Primary.ID).Do() - if err != nil { - return fmt.Errorf("Could not stop instance: %s", err) - } - err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting on stop") - if err != nil { - return fmt.Errorf("Could not stop instance: %s", err) - } - - machineType := compute.InstancesSetMachineTypeRequest{ - MachineType: "zones/us-central1-a/machineTypes/f1-micro", - } - - op, err = config.clientCompute.Instances.SetMachineType( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do() - if err != nil { - return fmt.Errorf("Could not change machine type: %s", err) - } - err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting machine type change") - if err != nil { - return fmt.Errorf("Could not change machine type: %s", err) - } - return nil - } -} - -func testAccCheckComputeInstanceDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_instance" { - continue - } - - _, err := config.clientCompute.Instances.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Instance still exists") - } - } - - return nil -} - -func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Instances.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Instance not found") - } - - *instance = *found - - return nil - } -} - -func testAccCheckComputeInstanceMetadata( - instance 
*compute.Instance, - k string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance.Metadata == nil { - return fmt.Errorf("no metadata") - } - - for _, item := range instance.Metadata.Items { - if k != item.Key { - continue - } - - if item.Value != nil && v == *item.Value { - return nil - } - - return fmt.Errorf("bad value for %s: %s", k, *item.Value) - } - - return fmt.Errorf("metadata not found: %s", k) - } -} - -func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instance.NetworkInterfaces { - if len(i.AccessConfigs) == 0 { - return fmt.Errorf("no access_config") - } - } - - return nil - } -} - -func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instance.NetworkInterfaces { - for _, c := range i.AccessConfigs { - if c.NatIP == "" { - return fmt.Errorf("no NAT IP") - } - } - } - - return nil - } -} - -func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance.Disks == nil { - return fmt.Errorf("no disks") - } - - for _, disk := range instance.Disks { - if strings.LastIndex(disk.Source, "/"+source) == len(disk.Source)-len(source)-1 && disk.AutoDelete == delete && disk.Boot == boot { - return nil - } - } - - return fmt.Errorf("Disk not found: %s", source) - } -} - -func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - for i, disk := range instance.Disks { - attr := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] - if disk.DiskEncryptionKey == nil && attr != "" { 
- return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: ", i, attr) - } - if disk.DiskEncryptionKey != nil && attr != disk.DiskEncryptionKey.Sha256 { - return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: %+v", - i, attr, disk.DiskEncryptionKey.Sha256) - } - } - return nil - } -} - -func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance.Tags == nil { - return fmt.Errorf("no tags") - } - - for _, k := range instance.Tags.Items { - if k == n { - return nil - } - } - - return fmt.Errorf("tag not found: %s", n) - } -} - -func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if count := len(instance.ServiceAccounts); count != 1 { - return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) - } - - for _, val := range instance.ServiceAccounts[0].Scopes { - if val == scope { - return nil - } - } - - return fmt.Errorf("Scope not found: %s", scope) - } -} - -func testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instance.NetworkInterfaces { - if i.Subnetwork == "" { - return fmt.Errorf("no subnet") - } - } - - return nil - } -} - -func testAccCheckComputeInstanceHasAnyAddress(instance *compute.Instance) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instance.NetworkInterfaces { - if i.NetworkIP == "" { - return fmt.Errorf("no address") - } - } - - return nil - } -} - -func testAccCheckComputeInstanceHasAddress(instance *compute.Instance, address string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, i := range instance.NetworkInterfaces { - if i.NetworkIP != address { - return fmt.Errorf("Wrong address found: expected %v, 
got %v", address, i.NetworkIP) - } - } - - return nil - } -} - -func testAccComputeInstance_basic_deprecated_network(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network { - source = "default" - } - - metadata { - foo = "bar" - } - }`, instance) -} - -func testAccComputeInstance_update_deprecated_network(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["baz"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network { - source = "default" - } - - metadata { - bar = "baz" - } - }`, instance) -} - -func testAccComputeInstance_basic(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - baz = "qux" - } - - create_timeout = 5 - - metadata_startup_script = "echo Hello" - }`, instance) -} - -func testAccComputeInstance_basic2(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - image = "debian-8" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, instance) -} - -func testAccComputeInstance_basic3(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", 
"bar"] - - disk { - image = "debian-cloud/debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - - - metadata { - foo = "bar" - } - }`, instance) -} - -func testAccComputeInstance_basic4(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - image = "debian-cloud/debian-8" - } - - network_interface { - network = "default" - } - - - metadata { - foo = "bar" - } - }`, instance) -} - -func testAccComputeInstance_basic5(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, instance) -} - -// Update zone to ForceNew, and change metadata k/v entirely -// Generates diff mismatch -func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - zone = "us-central1-b" - tags = ["baz"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - access_config { } - } - - metadata { - qux = "true" - } - }`, instance) -} - -// Update metadata, tags, and network_interface -func testAccComputeInstance_update(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["baz"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - access_config { } - 
} - - metadata { - bar = "baz" - } - }`, instance) -} - -func testAccComputeInstance_ip(ip, instance string) string { - return fmt.Sprintf(` - resource "google_compute_address" "foo" { - name = "%s" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["foo", "bar"] - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - access_config { - nat_ip = "${google_compute_address.foo.address}" - } - } - - metadata { - foo = "bar" - } - }`, ip, instance) -} - -func testAccComputeInstance_disks(disk, instance string, autodelete bool) string { - return fmt.Sprintf(` - resource "google_compute_disk" "foobar" { - name = "%s" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - disk { - disk = "${google_compute_disk.foobar.name}" - auto_delete = %v - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, disk, instance, autodelete) -} - -func testAccComputeInstance_disks_encryption(disk, instance string) string { - return fmt.Sprintf(` - resource "google_compute_disk" "foobar" { - name = "%s" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" - } - - disk { - disk = "${google_compute_disk.foobar.name}" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, disk, instance) -} - -func testAccComputeInstance_attachedDisk(disk, instance string) string { - return fmt.Sprintf(` - resource "google_compute_disk" "foobar" { - name = "%s" - size = 10 - type = 
"pd-ssd" - zone = "us-central1-a" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - attached_disk { - source = "${google_compute_disk.foobar.self_link}" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, disk, instance) -} - -func testAccComputeInstance_noDisk(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, instance) -} - -func testAccComputeInstance_local_ssd(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "local-ssd" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - disk { - type = "local-ssd" - scratch = true - } - - network_interface { - network = "default" - } - - }`, instance) -} - -func testAccComputeInstance_service_account(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - - service_account { - scopes = [ - "userinfo-email", - "compute-ro", - "storage-ro", - ] - } - }`, instance) -} - -func testAccComputeInstance_scheduling(instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "default" - } - - scheduling { - } - }`, instance) -} - -func testAccComputeInstance_subnet_auto(instance string) string { - return fmt.Sprintf(` - resource "google_compute_network" "inst-test-network" { - name = 
"inst-test-network-%s" - auto_create_subnetworks = true - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - network = "${google_compute_network.inst-test-network.name}" - access_config { } - } - - }`, acctest.RandString(10), instance) -} - -func testAccComputeInstance_subnet_custom(instance string) string { - return fmt.Sprintf(` - resource "google_compute_network" "inst-test-network" { - name = "inst-test-network-%s" - auto_create_subnetworks = false - } - - resource "google_compute_subnetwork" "inst-test-subnetwork" { - name = "inst-test-subnetwork-%s" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - network = "${google_compute_network.inst-test-network.self_link}" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" - access_config { } - } - - }`, acctest.RandString(10), acctest.RandString(10), instance) -} - -func testAccComputeInstance_subnet_xpn(instance, xpn_host string) string { - return fmt.Sprintf(` - resource "google_compute_network" "inst-test-network" { - name = "inst-test-network-%s" - auto_create_subnetworks = false - project = "%s" - } - - resource "google_compute_subnetwork" "inst-test-subnetwork" { - name = "inst-test-subnetwork-%s" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - network = "${google_compute_network.inst-test-network.self_link}" - project = "%s" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" - subnetwork_project = 
"${google_compute_subnetwork.inst-test-subnetwork.project}" - access_config { } - } - - }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, instance) -} - -func testAccComputeInstance_address_auto(instance string) string { - return fmt.Sprintf(` - resource "google_compute_network" "inst-test-network" { - name = "inst-test-network-%s" - } - resource "google_compute_subnetwork" "inst-test-subnetwork" { - name = "inst-test-subnetwork-%s" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - network = "${google_compute_network.inst-test-network.self_link}" - } - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" - access_config { } - } - - }`, acctest.RandString(10), acctest.RandString(10), instance) -} - -func testAccComputeInstance_address_custom(instance, address string) string { - return fmt.Sprintf(` - resource "google_compute_network" "inst-test-network" { - name = "inst-test-network-%s" - } - resource "google_compute_subnetwork" "inst-test-subnetwork" { - name = "inst-test-subnetwork-%s" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - network = "${google_compute_network.inst-test-network.self_link}" - } - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-8-jessie-v20160803" - } - - network_interface { - subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" - address = "%s" - access_config { } - } - - }`, acctest.RandString(10), acctest.RandString(10), instance, address) -} - -func testAccComputeInstance_private_image_family(disk, image, family, instance string) string { - return fmt.Sprintf(` - resource "google_compute_disk" "foobar" { - name = "%s" - zone = "us-central1-a" - image = 
"debian-8-jessie-v20160803" - } - - resource "google_compute_image" "foobar" { - name = "%s" - source_disk = "${google_compute_disk.foobar.self_link}" - family = "%s" - } - - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "${google_compute_image.foobar.family}" - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - }`, disk, image, family, instance) -} - -func testAccComputeInstance_invalid_disk(disk, instance string) string { - return fmt.Sprintf(` - resource "google_compute_instance" "foobar" { - name = "%s" - machine_type = "f1-micro" - zone = "us-central1-a" - - disk { - image = "ubuntu-os-cloud/ubuntu-1604-lts" - type = "pd-standard" - } - - disk { - disk = "${google_compute_disk.foobar.name}" - type = "pd-standard" - device_name = "xvdb" - } - - network_interface { - network = "default" - } - } - - resource "google_compute_disk" "foobar" { - name = "%s" - zone = "us-central1-a" - type = "pd-standard" - size = "1" - }`, instance, disk) -} diff --git a/builtin/providers/google/resource_compute_network.go b/builtin/providers/google/resource_compute_network.go deleted file mode 100644 index d0fef1753..000000000 --- a/builtin/providers/google/resource_compute_network.go +++ /dev/null @@ -1,168 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkCreate, - Read: resourceComputeNetworkRead, - Delete: resourceComputeNetworkDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "auto_create_subnetworks": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: 
true, - /* Ideally this would default to true as per the API, but that would cause - existing Terraform configs which have not been updated to report this as - a change. Perhaps we can bump this for a minor release bump rather than - a point release. - Default: false, */ - ConflictsWith: []string{"ipv4_range"}, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "gateway_ipv4": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ipv4_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "Please use google_compute_subnetwork resources instead.", - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // - // Possible modes: - // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. 
auto_create_subnetworks must not be - // set (enforced by ConflictsWith schema attribute) - // - 2 Distributed Mode - Create a new generation network that supports subnetworks: - // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region - // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, - // - autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) - - // Build the network parameter - network := &compute.Network{ - Name: d.Get("name").(string), - AutoCreateSubnetworks: autoCreateSubnetworks, - Description: d.Get("description").(string), - } - - if v, ok := d.GetOk("ipv4_range"); ok { - log.Printf("[DEBUG] Setting IPv4Range (%#v) for legacy network mode", v.(string)) - network.IPv4Range = v.(string) - } else { - // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise - // google will create a network in legacy mode. - network.ForceSendFields = []string{"AutoCreateSubnetworks"} - } - - log.Printf("[DEBUG] Network insert request: %#v", network) - op, err := config.clientCompute.Networks.Insert( - project, network).Do() - if err != nil { - return fmt.Errorf("Error creating network: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(network.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Network") - if err != nil { - return err - } - - return resourceComputeNetworkRead(d, meta) -} - -func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - network, err := config.clientCompute.Networks.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string))) - } - - d.Set("gateway_ipv4", network.GatewayIPv4) - d.Set("self_link", network.SelfLink) - d.Set("ipv4_range", network.IPv4Range) - 
d.Set("name", network.Name) - d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks) - - return nil -} - -func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the network - op, err := config.clientCompute.Networks.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting network: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Network") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_network_test.go b/builtin/providers/google/resource_compute_network_test.go deleted file mode 100644 index ab05a7535..000000000 --- a/builtin/providers/google/resource_compute_network_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeNetwork_basic(t *testing.T) { - var network compute.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeNetwork_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeNetworkExists( - "google_compute_network.foobar", &network), - ), - }, - }, - }) -} - -func TestAccComputeNetwork_auto_subnet(t *testing.T) { - var network compute.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeNetwork_auto_subnet, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckComputeNetworkExists( - "google_compute_network.bar", &network), - testAccCheckComputeNetworkIsAutoSubnet( - "google_compute_network.bar", &network), - ), - }, - }, - }) -} - -func TestAccComputeNetwork_custom_subnet(t *testing.T) { - var network compute.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeNetwork_custom_subnet, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeNetworkExists( - "google_compute_network.baz", &network), - testAccCheckComputeNetworkIsCustomSubnet( - "google_compute_network.baz", &network), - ), - }, - }, - }) -} - -func testAccCheckComputeNetworkDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_network" { - continue - } - - _, err := config.clientCompute.Networks.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Network still exists") - } - } - - return nil -} - -func testAccCheckComputeNetworkExists(n string, network *compute.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Networks.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Network not found") - } - - *network = *found - - return nil - } -} - -func testAccCheckComputeNetworkIsAutoSubnet(n string, network *compute.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) 
- - found, err := config.clientCompute.Networks.Get( - config.Project, network.Name).Do() - if err != nil { - return err - } - - if !found.AutoCreateSubnetworks { - return fmt.Errorf("should have AutoCreateSubnetworks = true") - } - - if found.IPv4Range != "" { - return fmt.Errorf("should not have IPv4Range") - } - - return nil - } -} - -func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Networks.Get( - config.Project, network.Name).Do() - if err != nil { - return err - } - - if found.AutoCreateSubnetworks { - return fmt.Errorf("should have AutoCreateSubnetworks = false") - } - - if found.IPv4Range != "" { - return fmt.Errorf("should not have IPv4Range") - } - - return nil - } -} - -var testAccComputeNetwork_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "network-test-%s" - ipv4_range = "10.0.0.0/16" -}`, acctest.RandString(10)) - -var testAccComputeNetwork_auto_subnet = fmt.Sprintf(` -resource "google_compute_network" "bar" { - name = "network-test-%s" - auto_create_subnetworks = true -}`, acctest.RandString(10)) - -var testAccComputeNetwork_custom_subnet = fmt.Sprintf(` -resource "google_compute_network" "baz" { - name = "network-test-%s" - auto_create_subnetworks = false -}`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_project_metadata.go b/builtin/providers/google/resource_compute_project_metadata.go deleted file mode 100644 index 07e3ee1cb..000000000 --- a/builtin/providers/google/resource_compute_project_metadata.go +++ /dev/null @@ -1,198 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeProjectMetadata() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeProjectMetadataCreate, - 
Read: resourceComputeProjectMetadataRead, - Update: resourceComputeProjectMetadataUpdate, - Delete: resourceComputeProjectMetadataDelete, - - SchemaVersion: 0, - - Schema: map[string]*schema.Schema{ - "metadata": &schema.Schema{ - Elem: schema.TypeString, - Type: schema.TypeMap, - Required: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - createMD := func() error { - // Load project service - log.Printf("[DEBUG] Loading project service: %s", projectID) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md := project.CommonInstanceMetadata - - newMDMap := d.Get("metadata").(map[string]interface{}) - // Ensure that we aren't overwriting entries that already exist - for _, kv := range md.Items { - if _, ok := newMDMap[kv.Key]; ok { - return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, projectID) - } - } - - // Append new metadata to existing metadata - for key, val := range newMDMap { - v := val.(string) - md.Items = append(md.Items, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } - - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() - - if err != nil { - return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) - } - - log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - - return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") - } - - err = MetadataRetryWrapper(createMD) - if err != nil { - return err - } - - return resourceComputeProjectMetadataRead(d, meta) -} - -func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - // Load project service - log.Printf("[DEBUG] Loading project service: %s", projectID) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID)) - } - - md := project.CommonInstanceMetadata - - if err = d.Set("metadata", MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)); err != nil { - return fmt.Errorf("Error setting metadata: %s", err) - } - - d.SetId("common_metadata") - - return nil -} - -func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - if d.HasChange("metadata") { - o, n := d.GetChange("metadata") - - updateMD := func() error { - // Load project service - log.Printf("[DEBUG] Loading project service: %s", projectID) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md := project.CommonInstanceMetadata - - MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) - - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() - - if err != nil { - return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) - } - - log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - - // Optimistic locking requires the fingerprint received to match - // the fingerprint we send the server, if there is a mismatch then we - // are working on old data, and must retry - return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") - } - - err := MetadataRetryWrapper(updateMD) - if err != nil { - return err - } - - return resourceComputeProjectMetadataRead(d, meta) - } - - return nil -} - -func 
resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - // Load project service - log.Printf("[DEBUG] Loading project service: %s", projectID) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md := project.CommonInstanceMetadata - - // Remove all items - md.Items = nil - - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() - - if err != nil { - return fmt.Errorf("Error removing metadata from project %s: %s", projectID, err) - } - - log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - - err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") - if err != nil { - return err - } - - return resourceComputeProjectMetadataRead(d, meta) -} diff --git a/builtin/providers/google/resource_compute_project_metadata_test.go b/builtin/providers/google/resource_compute_project_metadata_test.go deleted file mode 100644 index b0bfa0ea1..000000000 --- a/builtin/providers/google/resource_compute_project_metadata_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package google - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -// Add two key value pairs -func TestAccComputeProjectMetadata_basic(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - var project compute.Project - projectID := "terrafom-test-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeProjectMetadataDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", projectID, &project), - testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(projectID, 2), - ), - }, - }, - }) -} - -// Add three key value pairs, then replace one and modify a second -func TestAccComputeProjectMetadata_modify_1(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - var project compute.Project - projectID := "terrafom-test-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeProjectMetadataDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", projectID, &project), - testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), - testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"), - testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"), - testAccCheckComputeProjectMetadataSize(projectID, 3), - ), - }, - - resource.TestStep{ - Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", projectID, &project), - testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), - 
testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"), - testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"), - testAccCheckComputeProjectMetadataSize(projectID, 3), - ), - }, - }, - }) -} - -// Add two key value pairs, and replace both -func TestAccComputeProjectMetadata_modify_2(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - var project compute.Project - projectID := "terraform-test-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeProjectMetadataDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", projectID, &project), - testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(projectID, 2), - ), - }, - - resource.TestStep{ - Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", projectID, &project), - testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"), - testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"), - testAccCheckComputeProjectMetadataSize(projectID, 2), - ), - }, - }, - }) -} - -func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_project_metadata" { - continue - } - - project, err := 
config.clientCompute.Projects.Get(rs.Primary.ID).Do() - if err == nil && len(project.CommonInstanceMetadata.Items) > 0 { - return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return err - } - - if "common_metadata" != rs.Primary.ID { - return fmt.Errorf("Common metadata not found, found %s", rs.Primary.ID) - } - - *project = *found - - return nil - } -} - -func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) - } - - for _, kv := range project.CommonInstanceMetadata.Items { - if kv.Key == key { - if kv.Value != nil && *kv.Value == value { - return nil - } else { - return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)", - key, value, kv.Key, *kv.Value) - } - } - } - - return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink) - } -} - -func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(projectID).Do() - if err != nil { - return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) - } - - if size > 
len(project.CommonInstanceMetadata.Items) { - return fmt.Errorf("Error, expected at least %d metadata items, got %d", size, - len(project.CommonInstanceMetadata.Items)) - } - - return nil - } -} - -func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} - -resource "google_project_services" "services" { - project = "${google_project.project.project_id}" - services = ["compute-component.googleapis.com"] -} - -resource "google_compute_project_metadata" "fizzbuzz" { - project = "${google_project.project.project_id}" - metadata { - banana = "orange" - sofa = "darwinism" - } - depends_on = ["google_project_services.services"] -}`, projectID, name, org, billing) -} - -func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} - -resource "google_project_services" "services" { - project = "${google_project.project.project_id}" - services = ["compute-component.googleapis.com"] -} - -resource "google_compute_project_metadata" "fizzbuzz" { - project = "${google_project.project.project_id}" - metadata { - kiwi = "papaya" - finches = "darwinism" - } - depends_on = ["google_project_services.services"] -}`, projectID, name, org, billing) -} - -func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} - -resource "google_project_services" "services" { - project = "${google_project.project.project_id}" - services = ["compute-component.googleapis.com"] -} - -resource "google_compute_project_metadata" "fizzbuzz" { - project = "${google_project.project.project_id}" - metadata 
{ - paper = "pen" - genghis_khan = "french bread" - happy = "smiling" - } - depends_on = ["google_project_services.services"] -}`, projectID, name, org, billing) -} - -func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} - -resource "google_project_services" "services" { - project = "${google_project.project.project_id}" - services = ["compute-component.googleapis.com"] -} - -resource "google_compute_project_metadata" "fizzbuzz" { - project = "${google_project.project.project_id}" - metadata { - paper = "pen" - paris = "french bread" - happy = "laughing" - } - depends_on = ["google_project_services.services"] -}`, projectID, name, org, billing) -} diff --git a/builtin/providers/google/resource_compute_region_backend_service.go b/builtin/providers/google/resource_compute_region_backend_service.go deleted file mode 100644 index 682cd0fab..000000000 --- a/builtin/providers/google/resource_compute_region_backend_service.go +++ /dev/null @@ -1,311 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "regexp" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeRegionBackendService() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionBackendServiceCreate, - Read: resourceComputeRegionBackendServiceRead, - Update: resourceComputeRegionBackendServiceUpdate, - Delete: resourceComputeRegionBackendServiceDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` - if !regexp.MustCompile(re).MatchString(value) { - errors = 
append(errors, fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, re)) - } - return - }, - }, - - "health_checks": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - }, - - "backend": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Optional: true, - Set: resourceGoogleComputeRegionBackendServiceBackendHash, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "session_affinity": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "timeout_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - hc := d.Get("health_checks").(*schema.Set).List() - healthChecks := make([]string, 0, len(hc)) - for _, v := range hc { - healthChecks = append(healthChecks, v.(string)) - } - - service := compute.BackendService{ - Name: d.Get("name").(string), - HealthChecks: healthChecks, - LoadBalancingScheme: "INTERNAL", - } - - if v, ok := d.GetOk("backend"); ok { - service.Backends = expandBackends(v.(*schema.Set).List()) - } - - if v, ok := d.GetOk("description"); ok { - 
service.Description = v.(string) - } - - if v, ok := d.GetOk("protocol"); ok { - service.Protocol = v.(string) - } - - if v, ok := d.GetOk("session_affinity"); ok { - service.SessionAffinity = v.(string) - } - - if v, ok := d.GetOk("timeout_sec"); ok { - service.TimeoutSec = int64(v.(int)) - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service) - - op, err := config.clientCompute.RegionBackendServices.Insert( - project, region, &service).Do() - if err != nil { - return fmt.Errorf("Error creating backend service: %s", err) - } - - log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) - - d.SetId(service.Name) - - err = computeOperationWaitRegion(config, op, project, region, "Creating Region Backend Service") - if err != nil { - return err - } - - return resourceComputeRegionBackendServiceRead(d, meta) -} - -func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - service, err := config.clientCompute.RegionBackendServices.Get( - project, region, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string))) - } - - d.Set("description", service.Description) - d.Set("protocol", service.Protocol) - d.Set("session_affinity", service.SessionAffinity) - d.Set("timeout_sec", service.TimeoutSec) - d.Set("fingerprint", service.Fingerprint) - d.Set("self_link", service.SelfLink) - - d.Set("backend", flattenBackends(service.Backends)) - d.Set("health_checks", service.HealthChecks) - - return nil -} - -func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - hc := d.Get("health_checks").(*schema.Set).List() - healthChecks := make([]string, 0, len(hc)) - for _, v := range hc { - healthChecks = append(healthChecks, v.(string)) - } - - service := compute.BackendService{ - Name: d.Get("name").(string), - Fingerprint: d.Get("fingerprint").(string), - HealthChecks: healthChecks, - LoadBalancingScheme: "INTERNAL", - } - - // Optional things - if v, ok := d.GetOk("backend"); ok { - service.Backends = expandBackends(v.(*schema.Set).List()) - } - if v, ok := d.GetOk("description"); ok { - service.Description = v.(string) - } - if v, ok := d.GetOk("protocol"); ok { - service.Protocol = v.(string) - } - if v, ok := d.GetOk("session_affinity"); ok { - service.SessionAffinity = v.(string) - } - if v, ok := d.GetOk("timeout_sec"); ok { - service.TimeoutSec = int64(v.(int)) - } - - log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) - op, err := config.clientCompute.RegionBackendServices.Update( - project, region, d.Id(), &service).Do() - if err != nil { - return fmt.Errorf("Error updating backend service: %s", err) - } - - d.SetId(service.Name) - - err = computeOperationWaitRegion(config, op, project, region, "Updating Backend Service") - if err != nil { - return err - } - - return resourceComputeRegionBackendServiceRead(d, meta) -} - -func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - log.Printf("[DEBUG] Deleting backend service %s", d.Id()) - op, err := config.clientCompute.RegionBackendServices.Delete( - project, region, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting backend 
service: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting Backend Service") - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) - - if v, ok := m["description"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/google/resource_compute_region_backend_service_test.go b/builtin/providers/google/resource_compute_region_backend_service_test.go deleted file mode 100644 index 2abd76473..000000000 --- a/builtin/providers/google/resource_compute_region_backend_service_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeRegionBackendService_basic(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_basic(serviceName, checkName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.foobar", &svc), - ), - }, - resource.TestStep{ - Config: testAccComputeRegionBackendService_basicModified( - serviceName, checkName, extraCheckName), 
- Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.foobar", &svc), - ), - }, - }, - }) -} - -func TestAccComputeRegionBackendService_withBackend(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_withBackend( - serviceName, igName, itName, checkName, 10), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.lipsum", &svc), - ), - }, - }, - }) - - if svc.TimeoutSec != 10 { - t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec) - } - if svc.Protocol != "TCP" { - t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol) - } - if len(svc.Backends) != 1 { - t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) - } -} - -func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_withBackend( - serviceName, igName, 
itName, checkName, 10), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.lipsum", &svc), - ), - }, - resource.TestStep{ - Config: testAccComputeRegionBackendService_withBackend( - serviceName, igName, itName, checkName, 20), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.lipsum", &svc), - ), - }, - }, - }) - - if svc.TimeoutSec != 20 { - t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec) - } - if svc.Protocol != "TCP" { - t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol) - } - if len(svc.Backends) != 1 { - t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) - } -} - -func TestAccComputeRegionBackendService_withSessionAffinity(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_withSessionAffinity( - serviceName, checkName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.foobar", &svc), - ), - }, - }, - }) - - if svc.SessionAffinity != "CLIENT_IP" { - t.Errorf("Expected Protocol to be CLIENT_IP, got %q", svc.SessionAffinity) - } -} - -func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_region_backend_service" { - continue - } - - _, err := config.clientCompute.RegionBackendServices.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err 
== nil { - return fmt.Errorf("Backend service still exists") - } - } - - return nil -} - -func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.RegionBackendServices.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Backend service not found") - } - - *svc = *found - - return nil - } -} - -func testAccComputeRegionBackendService_basic(serviceName, checkName string) string { - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_health_check.zero.self_link}"] - region = "us-central1" -} - -resource "google_compute_health_check" "zero" { - name = "%s" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - port = "80" - } -} -`, serviceName, checkName) -} - -func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string { - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_health_check.one.self_link}"] - region = "us-central1" -} - -resource "google_compute_health_check" "zero" { - name = "%s" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - } -} - -resource "google_compute_health_check" "one" { - name = "%s" - check_interval_sec = 30 - timeout_sec = 30 - - tcp_health_check { - } -} -`, serviceName, checkOne, checkTwo) -} - -func testAccComputeRegionBackendService_withBackend( - serviceName, igName, itName, checkName string, timeout int64) string { - return fmt.Sprintf(` -resource 
"google_compute_region_backend_service" "lipsum" { - name = "%s" - description = "Hello World 1234" - protocol = "TCP" - region = "us-central1" - timeout_sec = %v - - backend { - group = "${google_compute_instance_group_manager.foobar.instance_group}" - } - - health_checks = ["${google_compute_health_check.default.self_link}"] -} - -resource "google_compute_instance_group_manager" "foobar" { - name = "%s" - instance_template = "${google_compute_instance_template.foobar.self_link}" - base_instance_name = "foobar" - zone = "us-central1-f" - target_size = 1 -} - -resource "google_compute_instance_template" "foobar" { - name = "%s" - machine_type = "n1-standard-1" - - network_interface { - network = "default" - } - - disk { - source_image = "debian-8-jessie-v20160803" - auto_delete = true - boot = true - } -} - -resource "google_compute_health_check" "default" { - name = "%s" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - - } -} -`, serviceName, timeout, igName, itName, checkName) -} - -func testAccComputeRegionBackendService_withSessionAffinity(serviceName, checkName string) string { - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_health_check.zero.self_link}"] - region = "us-central1" - session_affinity = "CLIENT_IP" - -} - -resource "google_compute_health_check" "zero" { - name = "%s" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - port = "80" - } -} -`, serviceName, checkName) -} diff --git a/builtin/providers/google/resource_compute_route.go b/builtin/providers/google/resource_compute_route.go deleted file mode 100644 index 90b5a2e8b..000000000 --- a/builtin/providers/google/resource_compute_route.go +++ /dev/null @@ -1,225 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeRoute() *schema.Resource { - return 
&schema.Resource{ - Create: resourceComputeRouteCreate, - Read: resourceComputeRouteRead, - Delete: resourceComputeRouteDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "dest_range": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "priority": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "next_hop_gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "next_hop_instance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "next_hop_instance_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "next_hop_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "next_hop_network": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "next_hop_vpn_tunnel": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Look up the network to attach the route to - network, err := getNetworkLink(d, config, "network") - if err != nil { - return fmt.Errorf("Error reading network: %s", 
err) - } - - // Next hop data - var nextHopInstance, nextHopIp, nextHopGateway, - nextHopVpnTunnel string - if v, ok := d.GetOk("next_hop_ip"); ok { - nextHopIp = v.(string) - } - if v, ok := d.GetOk("next_hop_gateway"); ok { - if v == "default-internet-gateway" { - nextHopGateway = fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project) - } else { - nextHopGateway = v.(string) - } - } - if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok { - nextHopVpnTunnel = v.(string) - } - if v, ok := d.GetOk("next_hop_instance"); ok { - nextInstance, err := config.clientCompute.Instances.Get( - project, - d.Get("next_hop_instance_zone").(string), - v.(string)).Do() - if err != nil { - return fmt.Errorf("Error reading instance: %s", err) - } - - nextHopInstance = nextInstance.SelfLink - } - - // Tags - var tags []string - if v := d.Get("tags").(*schema.Set); v.Len() > 0 { - tags = make([]string, v.Len()) - for i, v := range v.List() { - tags[i] = v.(string) - } - } - - // Build the route parameter - route := &compute.Route{ - Name: d.Get("name").(string), - DestRange: d.Get("dest_range").(string), - Network: network, - NextHopInstance: nextHopInstance, - NextHopVpnTunnel: nextHopVpnTunnel, - NextHopIp: nextHopIp, - NextHopGateway: nextHopGateway, - Priority: int64(d.Get("priority").(int)), - Tags: tags, - } - log.Printf("[DEBUG] Route insert request: %#v", route) - op, err := config.clientCompute.Routes.Insert( - project, route).Do() - if err != nil { - return fmt.Errorf("Error creating route: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(route.Name) - - err = computeOperationWaitGlobal(config, op, project, "Creating Route") - if err != nil { - return err - } - - return resourceComputeRouteRead(d, meta) -} - -func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - route, err := 
config.clientCompute.Routes.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Route %q", d.Get("name").(string))) - } - - d.Set("next_hop_network", route.NextHopNetwork) - d.Set("self_link", route.SelfLink) - - return nil -} - -func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the route - op, err := config.clientCompute.Routes.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting route: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Route") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_route_test.go b/builtin/providers/google/resource_compute_route_test.go deleted file mode 100644 index 24ef0cf21..000000000 --- a/builtin/providers/google/resource_compute_route_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeRoute_basic(t *testing.T) { - var route compute.Route - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouteDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRoute_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRouteExists( - "google_compute_route.foobar", &route), - ), - }, - }, - }) -} - -func TestAccComputeRoute_defaultInternetGateway(t *testing.T) { - var route compute.Route - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeRouteDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRoute_defaultInternetGateway, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRouteExists( - "google_compute_route.foobar", &route), - ), - }, - }, - }) -} - -func testAccCheckComputeRouteDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_route" { - continue - } - - _, err := config.clientCompute.Routes.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Route still exists") - } - } - - return nil -} - -func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Routes.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Route not found") - } - - *route = *found - - return nil - } -} - -var testAccComputeRoute_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "route-test-%s" - ipv4_range = "10.0.0.0/16" -} - -resource "google_compute_route" "foobar" { - name = "route-test-%s" - dest_range = "15.0.0.0/24" - network = "${google_compute_network.foobar.name}" - next_hop_ip = "10.0.1.5" - priority = 100 -}`, acctest.RandString(10), acctest.RandString(10)) - -var testAccComputeRoute_defaultInternetGateway = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "route-test-%s" - ipv4_range = "10.0.0.0/16" -} - -resource "google_compute_route" "foobar" { - name = "route-test-%s" - dest_range = "0.0.0.0/0" - network = "${google_compute_network.foobar.name}" - 
next_hop_gateway = "default-internet-gateway" - priority = 100 -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_router.go b/builtin/providers/google/resource_compute_router.go deleted file mode 100644 index 7d0e53ed6..000000000 --- a/builtin/providers/google/resource_compute_router.go +++ /dev/null @@ -1,254 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func resourceComputeRouter() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouterCreate, - Read: resourceComputeRouterRead, - Delete: resourceComputeRouterDelete, - Importer: &schema.ResourceImporter{ - State: resourceComputeRouterImportState, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: linkDiffSuppress, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "bgp": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - - "asn": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err 
- } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - routerLock := getRouterLockName(region, name) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - network, err := getNetworkLink(d, config, "network") - if err != nil { - return err - } - routersService := config.clientCompute.Routers - - router := &compute.Router{ - Name: name, - Network: network, - } - - if v, ok := d.GetOk("description"); ok { - router.Description = v.(string) - } - - if _, ok := d.GetOk("bgp"); ok { - prefix := "bgp.0" - if v, ok := d.GetOk(prefix + ".asn"); ok { - asn := v.(int) - bgp := &compute.RouterBgp{ - Asn: int64(asn), - } - router.Bgp = bgp - } - } - - op, err := routersService.Insert(project, region, router).Do() - if err != nil { - return fmt.Errorf("Error Inserting Router %s into network %s: %s", name, network, err) - } - d.SetId(fmt.Sprintf("%s/%s", region, name)) - err = computeOperationWaitRegion(config, op, project, region, "Inserting Router") - if err != nil { - d.SetId("") - return fmt.Errorf("Error Waiting to Insert Router %s into network %s: %s", name, network, err) - } - - return resourceComputeRouterRead(d, meta) -} - -func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, name).Do() - - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router %s/%s because it is gone", region, name) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading Router %s: %s", name, err) - } - - d.Set("self_link", router.SelfLink) - d.Set("network", router.Network) - - d.Set("name", router.Name) - 
d.Set("description", router.Description) - d.Set("region", region) - d.Set("project", project) - d.Set("bgp", flattenAsn(router.Bgp.Asn)) - d.SetId(fmt.Sprintf("%s/%s", region, name)) - - return nil -} - -func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - routerLock := getRouterLockName(region, name) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.clientCompute.Routers - - op, err := routersService.Delete(project, region, name).Do() - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", name, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting Router") - if err != nil { - return fmt.Errorf("Error Waiting to Delete Router %s: %s", name, err) - } - - d.SetId("") - return nil -} - -func resourceComputeRouterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid router specifier. 
Expecting {region}/{name}") - } - - d.Set("region", parts[0]) - d.Set("name", parts[1]) - - return []*schema.ResourceData{d}, nil -} - -func getRouterLink(config *Config, project string, region string, router string) (string, error) { - - if !strings.HasPrefix(router, "https://www.googleapis.com/compute/") { - // Router value provided is just the name, lookup the router SelfLink - routerData, err := config.clientCompute.Routers.Get( - project, region, router).Do() - if err != nil { - return "", fmt.Errorf("Error reading router: %s", err) - } - router = routerData.SelfLink - } - - return router, nil - -} - -func flattenAsn(asn int64) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - r := make(map[string]interface{}) - r["asn"] = asn - result = append(result, r) - return result -} diff --git a/builtin/providers/google/resource_compute_router_interface.go b/builtin/providers/google/resource_compute_router_interface.go deleted file mode 100644 index cdfa21f01..000000000 --- a/builtin/providers/google/resource_compute_router_interface.go +++ /dev/null @@ -1,269 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func resourceComputeRouterInterface() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouterInterfaceCreate, - Read: resourceComputeRouterInterfaceRead, - Delete: resourceComputeRouterInterfaceDelete, - Importer: &schema.ResourceImporter{ - State: resourceComputeRouterInterfaceImportState, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "router": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "vpn_tunnel": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: linkDiffSuppress, - }, - - "ip_range": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - ifaces := router.Interfaces - for _, iface := range ifaces { - if iface.Name == ifaceName { - d.SetId("") - return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) - } - } - - vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) - if err != nil { - return err - } - - iface := &compute.RouterInterface{Name: ifaceName, - LinkedVpnTunnel: vpnTunnel} - - if v, ok := d.GetOk("ip_range"); ok { - iface.IpRange = v.(string) - } - - log.Printf("[INFO] Adding interface %s", ifaceName) - ifaces = append(ifaces, iface) - patchRouter := &compute.Router{ - Interfaces: ifaces, - } - - log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) - op, err := 
routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - d.SetId("") - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - return resourceComputeRouterInterfaceRead(d, meta) -} - -func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - for _, iface := range router.Interfaces { - - if iface.Name == ifaceName { - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - d.Set("vpn_tunnel", iface.LinkedVpnTunnel) - d.Set("ip_range", iface.IpRange) - d.Set("region", region) - d.Set("project", project) - return nil - } - } - - log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) - d.SetId("") - return nil -} - -func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - 
return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - - return nil - } - - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - var ifaceFound bool - - newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces)) - for _, iface := range router.Interfaces { - - if iface.Name == ifaceName { - ifaceFound = true - continue - } else { - newIfaces = append(newIfaces, iface) - } - } - - if !ifaceFound { - log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) - d.SetId("") - return nil - } - - log.Printf( - "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName) - patchRouter := &compute.Router{ - Interfaces: newIfaces, - } - - log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - d.SetId("") - return nil -} - -func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) != 3 { - return nil, fmt.Errorf("Invalid router interface specifier. 
Expecting {region}/{router}/{interface}") - } - - d.Set("region", parts[0]) - d.Set("router", parts[1]) - d.Set("name", parts[2]) - - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/google/resource_compute_router_interface_test.go b/builtin/providers/google/resource_compute_router_interface_test.go deleted file mode 100644 index 7a762b91a..000000000 --- a/builtin/providers/google/resource_compute_router_interface_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeRouterInterface_basic(t *testing.T) { - testId := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterInterfaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterInterfaceBasic(testId), - Check: testAccCheckComputeRouterInterfaceExists( - "google_compute_router_interface.foobar"), - }, - resource.TestStep{ - Config: testAccComputeRouterInterfaceKeepRouter(testId), - Check: testAccCheckComputeRouterInterfaceDelete( - "google_compute_router_interface.foobar"), - }, - }, - }) -} - -func testAccCheckComputeRouterInterfaceDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - routersService := config.clientCompute.Routers - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_router" { - continue - } - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - routerName := rs.Primary.Attributes["router"] - - _, err = routersService.Get(project, region, routerName).Do() - - if err == nil { - return fmt.Errorf("Error, Router %s in region %s still 
exists", - routerName, region) - } - } - - return nil -} - -func testAccCheckComputeRouterInterfaceDelete(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - routersService := config.clientCompute.Routers - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_router_interface" { - continue - } - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - routerName := rs.Primary.Attributes["router"] - - router, err := routersService.Get(project, region, routerName).Do() - - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - ifaces := router.Interfaces - for _, iface := range ifaces { - - if iface.Name == name { - return fmt.Errorf("Interface %s still exists on router %s/%s", name, region, router.Name) - } - } - } - - return nil - } -} - -func testAccCheckComputeRouterInterfaceExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - routerName := rs.Primary.Attributes["router"] - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - for _, iface := range router.Interfaces { - - if iface.Name == name { - return nil - } - } - - return fmt.Errorf("Interface %s not 
found for router %s", name, router.Name) - } -} - -func testAccComputeRouterInterfaceBasic(testId string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-interface-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_address" "foobar" { - name = "router-interface-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_vpn_gateway" "foobar" { - name = "router-interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_forwarding_rule" "foobar_esp" { - name = "router-interface-test-%s-1" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "router-interface-test-%s-2" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "router-interface-test-%s-3" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_router" "foobar"{ - name = "router-interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - resource 
"google_compute_vpn_tunnel" "foobar" { - name = "router-interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" - } - resource "google_compute_router_interface" "foobar" { - name = "router-interface-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" - } - `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) -} - -func testAccComputeRouterInterfaceKeepRouter(testId string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-interface-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_address" "foobar" { - name = "router-interface-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_vpn_gateway" "foobar" { - name = "router-interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_forwarding_rule" "foobar_esp" { - name = "router-interface-test-%s-1" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "router-interface-test-%s-2" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" 
- target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "router-interface-test-%s-3" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_router" "foobar"{ - name = "router-interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - resource "google_compute_vpn_tunnel" "foobar" { - name = "router-interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" - } - `, testId, testId, testId, testId, testId, testId, testId, testId, testId) -} diff --git a/builtin/providers/google/resource_compute_router_peer.go b/builtin/providers/google/resource_compute_router_peer.go deleted file mode 100644 index 0b1fcfa53..000000000 --- a/builtin/providers/google/resource_compute_router_peer.go +++ /dev/null @@ -1,290 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func resourceComputeRouterPeer() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouterPeerCreate, - Read: resourceComputeRouterPeerRead, - Delete: resourceComputeRouterPeerDelete, - Importer: &schema.ResourceImporter{ - State: resourceComputeRouterPeerImportState, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "router": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - "interface": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "peer_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "peer_asn": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "advertised_route_priority": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - peerName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - peers := router.BgpPeers - for _, peer := range peers { - if peer.Name == peerName { - d.SetId("") - return fmt.Errorf("Router %s has peer %s already", routerName, peerName) - } - } - - ifaceName := d.Get("interface").(string) - - peer := &compute.RouterBgpPeer{Name: peerName, - 
InterfaceName: ifaceName} - - if v, ok := d.GetOk("peer_ip_address"); ok { - peer.PeerIpAddress = v.(string) - } - - if v, ok := d.GetOk("peer_asn"); ok { - peer.PeerAsn = int64(v.(int)) - } - - if v, ok := d.GetOk("advertised_route_priority"); ok { - peer.AdvertisedRoutePriority = int64(v.(int)) - } - - log.Printf("[INFO] Adding peer %s", peerName) - peers = append(peers, peer) - patchRouter := &compute.Router{ - BgpPeers: peers, - } - - log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - d.SetId("") - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - return resourceComputeRouterPeerRead(d, meta) -} - -func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - peerName := d.Get("name").(string) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - for _, peer := range router.BgpPeers { - - if peer.Name == peerName { - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) - d.Set("interface", 
peer.InterfaceName) - d.Set("peer_ip_address", peer.PeerIpAddress) - d.Set("peer_asn", peer.PeerAsn) - d.Set("advertised_route_priority", peer.AdvertisedRoutePriority) - d.Set("ip_address", peer.IpAddress) - d.Set("region", region) - d.Set("project", project) - return nil - } - } - - log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) - d.SetId("") - return nil -} - -func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - peerName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) - - return nil - } - - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers)) - for _, peer := range router.BgpPeers { - if peer.Name == peerName { - continue - } else { - newPeers = append(newPeers, peer) - } - } - - if len(newPeers) == len(router.BgpPeers) { - log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) - d.SetId("") - return nil - } - - log.Printf( - "[INFO] Removing peer %s from router %s/%s", peerName, region, routerName) - patchRouter := &compute.Router{ - BgpPeers: newPeers, - } - - log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newPeers) - op, err := routersService.Patch(project, region, 
router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - d.SetId("") - return nil -} - -func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) != 3 { - return nil, fmt.Errorf("Invalid router peer specifier. Expecting {region}/{router}/{peer}") - } - - d.Set("region", parts[0]) - d.Set("router", parts[1]) - d.Set("name", parts[2]) - - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/google/resource_compute_router_peer_test.go b/builtin/providers/google/resource_compute_router_peer_test.go deleted file mode 100644 index 83d676d53..000000000 --- a/builtin/providers/google/resource_compute_router_peer_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeRouterPeer_basic(t *testing.T) { - testId := acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterPeerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterPeerBasic(testId), - Check: testAccCheckComputeRouterPeerExists( - "google_compute_router_peer.foobar"), - }, - resource.TestStep{ - Config: testAccComputeRouterPeerKeepRouter(testId), - Check: testAccCheckComputeRouterPeerDelete( - "google_compute_router_peer.foobar"), - }, - }, - }) -} - -func testAccCheckComputeRouterPeerDestroy(s *terraform.State) error { - config := 
testAccProvider.Meta().(*Config) - - routersService := config.clientCompute.Routers - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_router" { - continue - } - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - routerName := rs.Primary.Attributes["router"] - - _, err = routersService.Get(project, region, routerName).Do() - - if err == nil { - return fmt.Errorf("Error, Router %s in region %s still exists", - routerName, region) - } - } - - return nil -} - -func testAccCheckComputeRouterPeerDelete(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - routersService := config.clientCompute.Routers - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_router_peer" { - continue - } - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - routerName := rs.Primary.Attributes["router"] - - router, err := routersService.Get(project, region, routerName).Do() - - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - peers := router.BgpPeers - for _, peer := range peers { - - if peer.Name == name { - return fmt.Errorf("Peer %s still exists on router %s/%s", name, region, router.Name) - } - } - } - - return nil - } -} - -func testAccCheckComputeRouterPeerExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - project, err := getTestProject(rs.Primary, config) - if err != nil { - 
return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - routerName := rs.Primary.Attributes["router"] - - routersService := config.clientCompute.Routers - router, err := routersService.Get(project, region, routerName).Do() - - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - for _, peer := range router.BgpPeers { - - if peer.Name == name { - return nil - } - } - - return fmt.Errorf("Peer %s not found for router %s", name, router.Name) - } -} - -func testAccComputeRouterPeerBasic(testId string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-peer-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_address" "foobar" { - name = "router-peer-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_vpn_gateway" "foobar" { - name = "router-peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_forwarding_rule" "foobar_esp" { - name = "router-peer-test-%s-1" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "router-peer-test-%s-2" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = 
"router-peer-test-%s-3" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_router" "foobar"{ - name = "router-peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - resource "google_compute_vpn_tunnel" "foobar" { - name = "router-peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" - } - resource "google_compute_router_interface" "foobar" { - name = "router-peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" - } - resource "google_compute_router_peer" "foobar" { - name = "router-peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - peer_ip_address = "169.254.3.2" - peer_asn = 65515 - advertised_route_priority = 100 - interface = "${google_compute_router_interface.foobar.name}" - } - `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) -} - -func testAccComputeRouterPeerKeepRouter(testId string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-peer-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_address" "foobar" { - name = "router-peer-test-%s" - region = 
"${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_vpn_gateway" "foobar" { - name = "router-peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_forwarding_rule" "foobar_esp" { - name = "router-peer-test-%s-1" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "router-peer-test-%s-2" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "router-peer-test-%s-3" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_router" "foobar"{ - name = "router-peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - resource "google_compute_vpn_tunnel" "foobar" { - name = "router-peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" - } - resource "google_compute_router_interface" "foobar" { - name = "router-peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = 
"${google_compute_vpn_tunnel.foobar.name}" - } - `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) -} diff --git a/builtin/providers/google/resource_compute_router_test.go b/builtin/providers/google/resource_compute_router_test.go deleted file mode 100644 index aee7dfe2d..000000000 --- a/builtin/providers/google/resource_compute_router_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeRouter_basic(t *testing.T) { - resourceRegion := "europe-west1" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterBasic(resourceRegion), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRouterExists( - "google_compute_router.foobar"), - resource.TestCheckResourceAttr( - "google_compute_router.foobar", "region", resourceRegion), - ), - }, - }, - }) -} - -func TestAccComputeRouter_noRegion(t *testing.T) { - providerRegion := "us-central1" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterNoRegion(providerRegion), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRouterExists( - "google_compute_router.foobar"), - resource.TestCheckResourceAttr( - "google_compute_router.foobar", "region", providerRegion), - ), - }, - }, - }) -} - -func TestAccComputeRouter_networkLink(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRouterDestroy, - 
Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRouterNetworkLink(), - Check: testAccCheckComputeRouterExists( - "google_compute_router.foobar"), - }, - }, - }) -} - -func testAccCheckComputeRouterDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - routersService := config.clientCompute.Routers - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_router" { - continue - } - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - - _, err = routersService.Get(project, region, name).Do() - - if err == nil { - return fmt.Errorf("Error, Router %s in region %s still exists", - name, region) - } - } - - return nil -} - -func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - project, err := getTestProject(rs.Primary, config) - if err != nil { - return err - } - - region, err := getTestRegion(rs.Primary, config) - if err != nil { - return err - } - - name := rs.Primary.Attributes["name"] - - routersService := config.clientCompute.Routers - _, err = routersService.Get(project, region, name).Do() - - if err != nil { - return fmt.Errorf("Error Reading Router %s: %s", name, err) - } - - return nil - } -} - -func testAccComputeRouterBasic(resourceRegion string) string { - testId := acctest.RandString(10) - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" 
- region = "%s" - } - resource "google_compute_router" "foobar" { - name = "router-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } - } - `, testId, testId, resourceRegion, testId) -} - -func testAccComputeRouterNoRegion(providerRegion string) string { - testId := acctest.RandString(10) - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "%s" - } - resource "google_compute_router" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } - } - `, testId, testId, providerRegion, testId) -} - -func testAccComputeRouterNetworkLink() string { - testId := acctest.RandString(10) - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "router-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "europe-west1" - } - resource "google_compute_router" "foobar" { - name = "router-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - `, testId, testId, testId) -} diff --git a/builtin/providers/google/resource_compute_snapshot.go b/builtin/providers/google/resource_compute_snapshot.go deleted file mode 100644 index 794d98904..000000000 --- a/builtin/providers/google/resource_compute_snapshot.go +++ /dev/null @@ -1,202 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func resourceComputeSnapshot() *schema.Resource { - return 
&schema.Resource{ - Create: resourceComputeSnapshotCreate, - Read: resourceComputeSnapshotRead, - Delete: resourceComputeSnapshotDelete, - Exists: resourceComputeSnapshotExists, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "snapshot_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "snapshot_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "source_disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "source_disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "source_disk": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "source_disk_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the snapshot parameter - snapshot := &compute.Snapshot{ - Name: d.Get("name").(string), - } - - source_disk := d.Get("source_disk").(string) - - if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { - snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} - snapshot.SnapshotEncryptionKey.RawKey = v.(string) - } - - if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok { - snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} - snapshot.SourceDiskEncryptionKey.RawKey = 
v.(string) - } - - op, err := config.clientCompute.Disks.CreateSnapshot( - project, d.Get("zone").(string), source_disk, snapshot).Do() - if err != nil { - return fmt.Errorf("Error creating snapshot: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(snapshot.Name) - - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot") - if err != nil { - return err - } - return resourceComputeSnapshotRead(d, meta) -} - -func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - snapshot, err := config.clientCompute.Snapshots.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string))) - } - - d.Set("self_link", snapshot.SelfLink) - d.Set("source_disk_link", snapshot.SourceDisk) - d.Set("name", snapshot.Name) - - if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { - d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) - } - - if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" { - d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) - } - - return nil -} - -func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the snapshot - op, err := config.clientCompute.Snapshots.Delete( - project, d.Id()).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - return fmt.Errorf("Error deleting snapshot: %s", err) - } - - err = 
computeOperationWaitGlobal(config, op, project, "Deleting Snapshot") - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return false, err - } - - _, err = config.clientCompute.Snapshots.Get( - project, d.Id()).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return false, err - } - return true, err - } - return true, nil -} diff --git a/builtin/providers/google/resource_compute_snapshot_test.go b/builtin/providers/google/resource_compute_snapshot_test.go deleted file mode 100644 index 2a29f940d..000000000 --- a/builtin/providers/google/resource_compute_snapshot_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func TestAccComputeSnapshot_basic(t *testing.T) { - snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var snapshot compute.Snapshot - diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSnapshotDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSnapshot_basic(snapshotName, diskName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSnapshotExists( - "google_compute_snapshot.foobar", &snapshot), - ), - }, - }, - }) -} - -func TestAccComputeSnapshot_encryption(t *testing.T) { - snapshotName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var snapshot compute.Snapshot - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSnapshotDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSnapshot_encryption(snapshotName, diskName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSnapshotExists( - "google_compute_snapshot.foobar", &snapshot), - ), - }, - }, - }) -} - -func testAccCheckComputeSnapshotDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_snapshot" { - continue - } - - _, err := config.clientCompute.Snapshots.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - return nil - } else if ok { - return fmt.Errorf("Error while requesting Google Cloud Plateform: http code error : %d, http message error: %s", gerr.Code, gerr.Message) - } - return fmt.Errorf("Error while requesting Google Cloud Plateform") - } - return fmt.Errorf("Snapshot still exists") - } - - return nil -} - -func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.Snapshots.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Snapshot %s not found", n) - } - - attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] - if found.SnapshotEncryptionKey != nil && 
found.SnapshotEncryptionKey.Sha256 != attr { - return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SnapshotEncryptionKey.Sha256) - } else if found.SnapshotEncryptionKey == nil && attr != "" { - return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SnapshotEncryptionKey) - } - - attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"] - if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDiskEncryptionKey.Sha256) - } else if found.SourceDiskEncryptionKey == nil && attr != "" { - return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDiskEncryptionKey) - } - - attr = rs.Primary.Attributes["source_disk_link"] - if found.SourceDisk != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDisk) - } - - foundDisk, errDisk := config.clientCompute.Disks.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do() - if errDisk != nil { - return errDisk - } - if foundDisk.SelfLink != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v", - n, attr, foundDisk.SelfLink) - } - - attr = rs.Primary.Attributes["self_link"] - if found.SelfLink != attr { - return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SelfLink) - } - - *snapshot = *found - - return nil - } -} - -func testAccComputeSnapshot_basic(snapshotName string, diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160921" - size = 10 - type = 
"pd-ssd" - zone = "us-central1-a" -} - -resource "google_compute_snapshot" "foobar" { - name = "%s" - source_disk = "${google_compute_disk.foobar.name}" - zone = "us-central1-a" -}`, diskName, snapshotName) -} - -func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string { - return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "%s" - image = "debian-8-jessie-v20160921" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" - disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" -} -resource "google_compute_snapshot" "foobar" { - name = "%s" - source_disk = "${google_compute_disk.foobar.name}" - zone = "us-central1-a" - source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" - snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" -}`, diskName, snapshotName) -} diff --git a/builtin/providers/google/resource_compute_ssl_certificate.go b/builtin/providers/google/resource_compute_ssl_certificate.go deleted file mode 100644 index 5b64ebbf7..000000000 --- a/builtin/providers/google/resource_compute_ssl_certificate.go +++ /dev/null @@ -1,175 +0,0 @@ -package google - -import ( - "fmt" - "strconv" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeSslCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSslCertificateCreate, - Read: resourceComputeSslCertificateRead, - Delete: resourceComputeSslCertificateDelete, - - Schema: map[string]*schema.Schema{ - "certificate": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // 
https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - value := v.(string) - if len(value) > 63 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 63 characters", k)) - } - return - }, - }, - - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. - value := v.(string) - if len(value) > 37 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - - "private_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - var certName string - if v, ok := d.GetOk("name"); ok { - certName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource.PrefixedUniqueId(v.(string)) - } else { - certName = resource.UniqueId() - } - - // Build the certificate parameter - cert := &compute.SslCertificate{ - Name: certName, - Certificate: d.Get("certificate").(string), - PrivateKey: d.Get("private_key").(string), - } - - if v, ok := d.GetOk("description"); ok { - cert.Description = v.(string) - } - - op, err := config.clientCompute.SslCertificates.Insert( - project, cert).Do() - - if err != nil 
{ - return fmt.Errorf("Error creating ssl certificate: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Creating SslCertificate") - if err != nil { - return err - } - - d.SetId(cert.Name) - - return resourceComputeSslCertificateRead(d, meta) -} - -func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - cert, err := config.clientCompute.SslCertificates.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SSL Certificate %q", d.Get("name").(string))) - } - - d.Set("self_link", cert.SelfLink) - d.Set("id", strconv.FormatUint(cert.Id, 10)) - - return nil -} - -func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.clientCompute.SslCertificates.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting ssl certificate: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting SslCertificate") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_ssl_certificate_test.go b/builtin/providers/google/resource_compute_ssl_certificate_test.go deleted file mode 100644 index 987282c67..000000000 --- a/builtin/providers/google/resource_compute_ssl_certificate_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeSslCertificate_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeSslCertificateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSslCertificate_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSslCertificateExists( - "google_compute_ssl_certificate.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeSslCertificate_no_name(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSslCertificateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSslCertificate_no_name, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSslCertificateExists( - "google_compute_ssl_certificate.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeSslCertificate_name_prefix(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSslCertificateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSslCertificate_name_prefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSslCertificateExists( - "google_compute_ssl_certificate.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_ssl_certificate" { - continue - } - - _, err := config.clientCompute.SslCertificates.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("SslCertificate still exists") - } - } - - return nil -} - -func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := 
testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.SslCertificates.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Certificate not found") - } - - return nil - } -} - -var testAccComputeSslCertificate_basic = fmt.Sprintf(` -resource "google_compute_ssl_certificate" "foobar" { - name = "sslcert-test-%s" - description = "very descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} -`, acctest.RandString(10)) - -var testAccComputeSslCertificate_no_name = fmt.Sprintf(` -resource "google_compute_ssl_certificate" "foobar" { - description = "really descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} -`) - -var testAccComputeSslCertificate_name_prefix = fmt.Sprintf(` -resource "google_compute_ssl_certificate" "foobar" { - name_prefix = "sslcert-test-%s-" - description = "extremely descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} -`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_subnetwork.go b/builtin/providers/google/resource_compute_subnetwork.go deleted file mode 100644 index 04db37cbd..000000000 --- a/builtin/providers/google/resource_compute_subnetwork.go +++ /dev/null @@ -1,231 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeSubnetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSubnetworkCreate, - Read: resourceComputeSubnetworkRead, - Update: resourceComputeSubnetworkUpdate, - Delete: resourceComputeSubnetworkDelete, - - Schema: map[string]*schema.Schema{ - "ip_cidr_range": &schema.Schema{ - Type: schema.TypeString, 
- Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "gateway_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "private_ip_google_access": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func createSubnetID(s *compute.Subnetwork) string { - return fmt.Sprintf("%s/%s", s.Region, s.Name) -} - -func splitSubnetID(id string) (region string, name string) { - parts := strings.Split(id, "/") - region = parts[0] - name = parts[1] - return -} - -func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - network, err := getNetworkLink(d, config, "network") - if err != nil { - return err - } - - // Build the subnetwork parameters - subnetwork := &compute.Subnetwork{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - IpCidrRange: d.Get("ip_cidr_range").(string), - PrivateIpGoogleAccess: d.Get("private_ip_google_access").(bool), - Network: network, - } - - log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) - op, err := config.clientCompute.Subnetworks.Insert( - project, region, subnetwork).Do() - - if err != nil { - return fmt.Errorf("Error creating subnetwork: %s", err) - } - - // It probably maybe worked, 
so store the ID now. ID is a combination of region + subnetwork - // name because subnetwork names are not unique in a project, per the Google docs: - // "When creating a new subnetwork, its name has to be unique in that project for that region, even across networks. - // The same name can appear twice in a project, as long as each one is in a different region." - // https://cloud.google.com/compute/docs/subnetworks - subnetwork.Region = region - d.SetId(createSubnetID(subnetwork)) - - err = computeOperationWaitRegion(config, op, project, region, "Creating Subnetwork") - if err != nil { - return err - } - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - subnetwork, err := config.clientCompute.Subnetworks.Get( - project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork %q", name)) - } - - d.Set("gateway_address", subnetwork.GatewayAddress) - d.Set("self_link", subnetwork.SelfLink) - - return nil -} - -func resourceComputeSubnetworkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("private_ip_google_access") { - subnetworksSetPrivateIpGoogleAccessRequest := &compute.SubnetworksSetPrivateIpGoogleAccessRequest{ - PrivateIpGoogleAccess: d.Get("private_ip_google_access").(bool), - } - - log.Printf("[DEBUG] Updating Subnetwork PrivateIpGoogleAccess %q: %#v", d.Id(), subnetworksSetPrivateIpGoogleAccessRequest) - op, err := config.clientCompute.Subnetworks.SetPrivateIpGoogleAccess( - project, region, 
d.Get("name").(string), subnetworksSetPrivateIpGoogleAccessRequest).Do() - if err != nil { - return fmt.Errorf("Error updating subnetwork PrivateIpGoogleAccess: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Subnetwork PrivateIpGoogleAccess") - if err != nil { - return err - } - - d.SetPartial("private_ip_google_access") - } - - d.Partial(false) - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the subnetwork - op, err := config.clientCompute.Subnetworks.Delete( - project, region, d.Get("name").(string)).Do() - if err != nil { - return fmt.Errorf("Error deleting subnetwork: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting Subnetwork") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_subnetwork_test.go b/builtin/providers/google/resource_compute_subnetwork_test.go deleted file mode 100644 index 7ad178a47..000000000 --- a/builtin/providers/google/resource_compute_subnetwork_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeSubnetwork_basic(t *testing.T) { - var subnetwork1 compute.Subnetwork - var subnetwork2 compute.Subnetwork - - cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - subnetwork1Name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - subnetwork2Name := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - subnetwork3Name := fmt.Sprintf("tf-test-%s", 
acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSubnetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSubnetwork_basic(cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSubnetworkExists( - "google_compute_subnetwork.network-ref-by-url", &subnetwork1), - testAccCheckComputeSubnetworkExists( - "google_compute_subnetwork.network-ref-by-name", &subnetwork2), - ), - }, - }, - }) -} - -func TestAccComputeSubnetwork_update(t *testing.T) { - var subnetwork compute.Subnetwork - - cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeSubnetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeSubnetwork_update1(cnName, subnetworkName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSubnetworkExists( - "google_compute_subnetwork.network-with-private-google-access", &subnetwork), - ), - }, - resource.TestStep{ - Config: testAccComputeSubnetwork_update2(cnName, subnetworkName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeSubnetworkExists( - "google_compute_subnetwork.network-with-private-google-access", &subnetwork), - ), - }, - }, - }) - - if subnetwork.PrivateIpGoogleAccess { - t.Errorf("Expected PrivateIpGoogleAccess to be false, got %v", subnetwork.PrivateIpGoogleAccess) - } -} - -func testAccCheckComputeSubnetworkDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_subnetwork" { - continue - } - - region, subnet_name := splitSubnetID(rs.Primary.ID) 
- _, err := config.clientCompute.Subnetworks.Get( - config.Project, region, subnet_name).Do() - if err == nil { - return fmt.Errorf("Network still exists") - } - } - - return nil -} - -func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - region, subnet_name := splitSubnetID(rs.Primary.ID) - found, err := config.clientCompute.Subnetworks.Get( - config.Project, region, subnet_name).Do() - if err != nil { - return err - } - - if found.Name != subnet_name { - return fmt.Errorf("Subnetwork not found") - } - - *subnetwork = *found - - return nil - } -} - -func testAccComputeSubnetwork_basic(cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "custom-test" { - name = "%s" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "network-ref-by-url" { - name = "%s" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" -} - - -resource "google_compute_subnetwork" "network-ref-by-name" { - name = "%s" - ip_cidr_range = "10.1.0.0/16" - region = "us-central1" - network = "${google_compute_network.custom-test.name}" -} - -resource "google_compute_subnetwork" "network-with-private-google-access" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" - private_ip_google_access = true -} -`, cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name) -} - -func testAccComputeSubnetwork_update1(cnName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_compute_network" "custom-test" { - name = "%s" - 
auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "network-with-private-google-access" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" - private_ip_google_access = true -} -`, cnName, subnetworkName) -} - -func testAccComputeSubnetwork_update2(cnName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_compute_network" "custom-test" { - name = "%s" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "network-with-private-google-access" { - name = "%s" - ip_cidr_range = "10.2.0.0/16" - region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" -} -`, cnName, subnetworkName) -} diff --git a/builtin/providers/google/resource_compute_target_http_proxy.go b/builtin/providers/google/resource_compute_target_http_proxy.go deleted file mode 100644 index 602c38b7c..000000000 --- a/builtin/providers/google/resource_compute_target_http_proxy.go +++ /dev/null @@ -1,165 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeTargetHttpProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetHttpProxyCreate, - Read: resourceComputeTargetHttpProxyRead, - Delete: resourceComputeTargetHttpProxyDelete, - Update: resourceComputeTargetHttpProxyUpdate, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "url_map": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": 
&schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - proxy := &compute.TargetHttpProxy{ - Name: d.Get("name").(string), - UrlMap: d.Get("url_map").(string), - } - - if v, ok := d.GetOk("description"); ok { - proxy.Description = v.(string) - } - - log.Printf("[DEBUG] TargetHttpProxy insert request: %#v", proxy) - op, err := config.clientCompute.TargetHttpProxies.Insert( - project, proxy).Do() - if err != nil { - return fmt.Errorf("Error creating TargetHttpProxy: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Creating Target Http Proxy") - if err != nil { - return err - } - - d.SetId(proxy.Name) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("url_map") { - url_map := d.Get("url_map").(string) - url_map_ref := &compute.UrlMapReference{UrlMap: url_map} - op, err := config.clientCompute.TargetHttpProxies.SetUrlMap( - project, d.Id(), url_map_ref).Do() - if err != nil { - return fmt.Errorf("Error updating target: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Updating Target Http Proxy") - if err != nil { - return err - } - - d.SetPartial("url_map") - } - - d.Partial(false) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - proxy, err := config.clientCompute.TargetHttpProxies.Get( - project, d.Id()).Do() - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("Target HTTP Proxy %q", d.Get("name").(string))) - } - - d.Set("self_link", proxy.SelfLink) - d.Set("id", strconv.FormatUint(proxy.Id, 10)) - - return nil -} - -func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the TargetHttpProxy - log.Printf("[DEBUG] TargetHttpProxy delete request") - op, err := config.clientCompute.TargetHttpProxies.Delete( - project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting TargetHttpProxy: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Target Http Proxy") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_target_http_proxy_test.go b/builtin/providers/google/resource_compute_target_http_proxy_test.go deleted file mode 100644 index 591a3eaa5..000000000 --- a/builtin/providers/google/resource_compute_target_http_proxy_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeTargetHttpProxy_basic(t *testing.T) { - target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, 
urlmap1, urlmap2), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpProxyExists( - "google_compute_target_http_proxy.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeTargetHttpProxy_update(t *testing.T) { - target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpProxyExists( - "google_compute_target_http_proxy.foobar"), - ), - }, - - resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpProxyExists( - "google_compute_target_http_proxy.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeTargetHttpProxyDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_target_http_proxy" { - continue - } - - _, err := config.clientCompute.TargetHttpProxies.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("TargetHttpProxy still exists") - } - } - - return nil -} - -func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - 
config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.TargetHttpProxies.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("TargetHttpProxy not found") - } - - return nil - } -} - -func testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2 string) string { - return fmt.Sprintf(` - resource "google_compute_target_http_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - url_map = "${google_compute_url_map.foobar1.self_link}" - } - - resource "google_compute_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - } - - resource "google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 - } - - resource "google_compute_url_map" "foobar1" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - resource "google_compute_url_map" "foobar2" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } 
- } - `, target, backend, hc, urlmap1, urlmap2) -} - -func testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2 string) string { - return fmt.Sprintf(` - resource "google_compute_target_http_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "%s" - url_map = "${google_compute_url_map.foobar2.self_link}" - } - - resource "google_compute_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - } - - resource "google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 - } - - resource "google_compute_url_map" "foobar1" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - resource "google_compute_url_map" "foobar2" { - name = "%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } - } - `, target, backend, hc, urlmap1, urlmap2) -} diff --git a/builtin/providers/google/resource_compute_target_https_proxy.go b/builtin/providers/google/resource_compute_target_https_proxy.go deleted file mode 100644 index 
7ba080e4c..000000000 --- a/builtin/providers/google/resource_compute_target_https_proxy.go +++ /dev/null @@ -1,258 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeTargetHttpsProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetHttpsProxyCreate, - Read: resourceComputeTargetHttpsProxyRead, - Delete: resourceComputeTargetHttpsProxyDelete, - Update: resourceComputeTargetHttpsProxyUpdate, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ssl_certificates": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "url_map": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - _sslCertificates := d.Get("ssl_certificates").([]interface{}) - sslCertificates := make([]string, len(_sslCertificates)) - - for i, v := range _sslCertificates { - sslCertificates[i] = v.(string) - } - - proxy := &compute.TargetHttpsProxy{ - Name: d.Get("name").(string), - UrlMap: d.Get("url_map").(string), - SslCertificates: sslCertificates, - } - - if v, ok := d.GetOk("description"); ok { - proxy.Description = v.(string) - } - - log.Printf("[DEBUG] TargetHttpsProxy insert request: %#v", proxy) - op, err := 
config.clientCompute.TargetHttpsProxies.Insert( - project, proxy).Do() - if err != nil { - return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Creating Target Https Proxy") - if err != nil { - return err - } - - d.SetId(proxy.Name) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("url_map") { - url_map := d.Get("url_map").(string) - url_map_ref := &compute.UrlMapReference{UrlMap: url_map} - op, err := config.clientCompute.TargetHttpsProxies.SetUrlMap( - project, d.Id(), url_map_ref).Do() - if err != nil { - return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy URL Map") - if err != nil { - return err - } - - d.SetPartial("url_map") - } - - if d.HasChange("ssl_certificates") { - proxy, err := config.clientCompute.TargetHttpsProxies.Get( - project, d.Id()).Do() - - _old, _new := d.GetChange("ssl_certificates") - _oldCerts := _old.([]interface{}) - _newCerts := _new.([]interface{}) - current := proxy.SslCertificates - - _oldMap := make(map[string]bool) - _newMap := make(map[string]bool) - - for _, v := range _oldCerts { - _oldMap[v.(string)] = true - } - - for _, v := range _newCerts { - _newMap[v.(string)] = true - } - - sslCertificates := make([]string, 0) - // Only modify certificates in one of our old or new states - for _, v := range current { - _, okOld := _oldMap[v] - _, okNew := _newMap[v] - - // we deleted the certificate - if okOld && !okNew { - continue - } - - sslCertificates = append(sslCertificates, v) - - // Keep track of the fact that we have added this certificate - if okNew { - delete(_newMap, v) - } - } - - // Add fresh 
certificates - for k, _ := range _newMap { - sslCertificates = append(sslCertificates, k) - } - - cert_ref := &compute.TargetHttpsProxiesSetSslCertificatesRequest{ - SslCertificates: sslCertificates, - } - op, err := config.clientCompute.TargetHttpsProxies.SetSslCertificates( - project, d.Id(), cert_ref).Do() - if err != nil { - return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy SSL certificates") - if err != nil { - return err - } - - d.SetPartial("ssl_certificate") - } - - d.Partial(false) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - proxy, err := config.clientCompute.TargetHttpsProxies.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Target HTTPS proxy %q", d.Get("name").(string))) - } - - _certs := d.Get("ssl_certificates").([]interface{}) - current := proxy.SslCertificates - - _certMap := make(map[string]bool) - _newCerts := make([]interface{}, 0) - - for _, v := range _certs { - _certMap[v.(string)] = true - } - - // Store intersection of server certificates and user defined certificates - for _, v := range current { - if _, ok := _certMap[v]; ok { - _newCerts = append(_newCerts, v) - } - } - - d.Set("ssl_certificates", _newCerts) - d.Set("self_link", proxy.SelfLink) - d.Set("id", strconv.FormatUint(proxy.Id, 10)) - - return nil -} - -func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the TargetHttpsProxy - log.Printf("[DEBUG] TargetHttpsProxy delete request") - op, err := config.clientCompute.TargetHttpsProxies.Delete( - 
project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) - } - - err = computeOperationWaitGlobal(config, op, project, "Deleting Target Https Proxy") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_target_https_proxy_test.go b/builtin/providers/google/resource_compute_target_https_proxy_test.go deleted file mode 100644 index f8d731f08..000000000 --- a/builtin/providers/google/resource_compute_target_https_proxy_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeTargetHttpsProxy_basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetHttpsProxy_basic1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpsProxyExists( - "google_compute_target_https_proxy.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeTargetHttpsProxy_update(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetHttpsProxy_basic1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpsProxyExists( - "google_compute_target_https_proxy.foobar"), - ), - }, - - resource.TestStep{ - Config: testAccComputeTargetHttpsProxy_basic2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetHttpsProxyExists( - "google_compute_target_https_proxy.foobar"), - ), - }, - }, - }) -} - -func 
testAccCheckComputeTargetHttpsProxyDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_target_https_proxy" { - continue - } - - _, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("TargetHttpsProxy still exists") - } - } - - return nil -} - -func testAccCheckComputeTargetHttpsProxyExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("TargetHttpsProxy not found") - } - - return nil - } -} - -var testAccComputeTargetHttpsProxy_basic1 = fmt.Sprintf(` -resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "httpsproxy-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] -} - -resource "google_compute_backend_service" "foobar" { - name = "httpsproxy-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "httpsproxy-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "httpsproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = 
"${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } -} - -resource "google_compute_ssl_certificate" "foobar1" { - name = "httpsproxy-test-%s" - description = "very descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} - -resource "google_compute_ssl_certificate" "foobar2" { - name = "httpsproxy-test-%s" - description = "very descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - -var testAccComputeTargetHttpsProxy_basic2 = fmt.Sprintf(` -resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "httpsproxy-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] -} - -resource "google_compute_backend_service" "foobar" { - name = "httpsproxy-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "httpsproxy-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "httpsproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = 
"${google_compute_backend_service.foobar.self_link}" - } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } -} - -resource "google_compute_ssl_certificate" "foobar1" { - name = "httpsproxy-test-%s" - description = "very descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} - -resource "google_compute_ssl_certificate" "foobar2" { - name = "httpsproxy-test-%s" - description = "very descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go deleted file mode 100644 index 8f3b2219a..000000000 --- a/builtin/providers/google/resource_compute_target_pool.go +++ /dev/null @@ -1,445 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeTargetPool() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetPoolCreate, - Read: resourceComputeTargetPoolRead, - Delete: resourceComputeTargetPoolDelete, - Update: resourceComputeTargetPoolUpdate, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "backup_pool": { - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "failover_ratio": { - Type: schema.TypeFloat, - Optional: true, - ForceNew: true, - }, - - "health_checks": { - Type: 
schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "instances": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "session_affinity": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "NONE", - }, - }, - } -} - -func convertStringArr(ifaceArr []interface{}) []string { - var arr []string - for _, v := range ifaceArr { - if v == nil { - continue - } - arr = append(arr, v.(string)) - } - return arr -} - -// Healthchecks need to exist before being referred to from the target pool. -func convertHealthChecks(config *Config, project string, names []string) ([]string, error) { - urls := make([]string, len(names)) - for i, name := range names { - // Look up the healthcheck - res, err := config.clientCompute.HttpHealthChecks.Get(project, name).Do() - if err != nil { - return nil, fmt.Errorf("Error reading HealthCheck: %s", err) - } - urls[i] = res.SelfLink - } - return urls, nil -} - -// Instances do not need to exist yet, so we simply generate URLs. 
-// Instances can be full URLS or zone/name -func convertInstancesToUrls(config *Config, project string, names []string) ([]string, error) { - urls := make([]string, len(names)) - for i, name := range names { - if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { - urls[i] = name - } else { - splitName := strings.Split(name, "/") - if len(splitName) != 2 { - return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) - } else { - urls[i] = fmt.Sprintf( - "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", - project, splitName[0], splitName[1]) - } - } - } - return urls, nil -} - -func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - hchkUrls, err := convertHealthChecks( - config, project, convertStringArr(d.Get("health_checks").([]interface{}))) - if err != nil { - return err - } - - instanceUrls, err := convertInstancesToUrls( - config, project, convertStringArr(d.Get("instances").([]interface{}))) - if err != nil { - return err - } - - // Build the parameter - tpool := &compute.TargetPool{ - BackupPool: d.Get("backup_pool").(string), - Description: d.Get("description").(string), - HealthChecks: hchkUrls, - Instances: instanceUrls, - Name: d.Get("name").(string), - SessionAffinity: d.Get("session_affinity").(string), - } - if d.Get("failover_ratio") != nil { - tpool.FailoverRatio = d.Get("failover_ratio").(float64) - } - log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) - op, err := config.clientCompute.TargetPools.Insert( - project, region, tpool).Do() - if err != nil { - return fmt.Errorf("Error creating TargetPool: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(tpool.Name) - - err = computeOperationWaitRegion(config, op, project, region, 
"Creating Target Pool") - if err != nil { - return err - } - return resourceComputeTargetPoolRead(d, meta) -} - -func calcAddRemove(from []string, to []string) ([]string, []string) { - add := make([]string, 0) - remove := make([]string, 0) - for _, u := range to { - found := false - for _, v := range from { - if u == v { - found = true - break - } - } - if !found { - add = append(add, u) - } - } - for _, u := range from { - found := false - for _, v := range to { - if u == v { - found = true - break - } - } - if !found { - remove = append(remove, u) - } - } - return add, remove -} - -func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("health_checks") { - - from_, to_ := d.GetChange("health_checks") - from := convertStringArr(from_.([]interface{})) - to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertHealthChecks(config, project, from) - if err != nil { - return err - } - toUrls, err := convertHealthChecks(config, project, to) - if err != nil { - return err - } - add, remove := calcAddRemove(fromUrls, toUrls) - - removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ - HealthChecks: make([]*compute.HealthCheckReference, len(remove)), - } - for i, v := range remove { - removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} - } - op, err := config.clientCompute.TargetPools.RemoveHealthCheck( - project, region, d.Id(), removeReq).Do() - if err != nil { - return fmt.Errorf("Error updating health_check: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") - if err != nil { - return err - } - addReq := &compute.TargetPoolsAddHealthCheckRequest{ - HealthChecks: make([]*compute.HealthCheckReference, len(add)), - } - for i, v := range 
add { - addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} - } - op, err = config.clientCompute.TargetPools.AddHealthCheck( - project, region, d.Id(), addReq).Do() - if err != nil { - return fmt.Errorf("Error updating health_check: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") - if err != nil { - return err - } - d.SetPartial("health_checks") - } - - if d.HasChange("instances") { - - from_, to_ := d.GetChange("instances") - from := convertStringArr(from_.([]interface{})) - to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertInstancesToUrls(config, project, from) - if err != nil { - return err - } - toUrls, err := convertInstancesToUrls(config, project, to) - if err != nil { - return err - } - add, remove := calcAddRemove(fromUrls, toUrls) - - addReq := &compute.TargetPoolsAddInstanceRequest{ - Instances: make([]*compute.InstanceReference, len(add)), - } - for i, v := range add { - addReq.Instances[i] = &compute.InstanceReference{Instance: v} - } - op, err := config.clientCompute.TargetPools.AddInstance( - project, region, d.Id(), addReq).Do() - if err != nil { - return fmt.Errorf("Error updating instances: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") - if err != nil { - return err - } - removeReq := &compute.TargetPoolsRemoveInstanceRequest{ - Instances: make([]*compute.InstanceReference, len(remove)), - } - for i, v := range remove { - removeReq.Instances[i] = &compute.InstanceReference{Instance: v} - } - op, err = config.clientCompute.TargetPools.RemoveInstance( - project, region, d.Id(), removeReq).Do() - if err != nil { - return fmt.Errorf("Error updating instances: %s", err) - } - err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") - if err != nil { - return err - } - d.SetPartial("instances") - } - - if d.HasChange("backup_pool") { - bpool_name := 
d.Get("backup_pool").(string) - tref := &compute.TargetReference{ - Target: bpool_name, - } - op, err := config.clientCompute.TargetPools.SetBackup( - project, region, d.Id(), tref).Do() - if err != nil { - return fmt.Errorf("Error updating backup_pool: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") - if err != nil { - return err - } - d.SetPartial("backup_pool") - } - - d.Partial(false) - - return resourceComputeTargetPoolRead(d, meta) -} - -func convertInstancesFromUrls(urls []string) []string { - result := make([]string, 0, len(urls)) - for _, url := range urls { - urlArray := strings.Split(url, "/") - instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) - result = append(result, instance) - } - return result -} - -func convertHealthChecksFromUrls(urls []string) []string { - result := make([]string, 0, len(urls)) - for _, url := range urls { - urlArray := strings.Split(url, "/") - healthCheck := fmt.Sprintf("%s", urlArray[len(urlArray)-1]) - result = append(result, healthCheck) - } - return result -} - -func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - tpool, err := config.clientCompute.TargetPools.Get( - project, region, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) - } - - regionUrl := strings.Split(tpool.Region, "/") - d.Set("self_link", tpool.SelfLink) - d.Set("backup_pool", tpool.BackupPool) - d.Set("description", tpool.Description) - d.Set("failover_ratio", tpool.FailoverRatio) - if tpool.HealthChecks != nil { - d.Set("health_checks", convertHealthChecksFromUrls(tpool.HealthChecks)) - } else { - d.Set("health_checks", nil) - } - if tpool.Instances != nil { - d.Set("instances", 
convertInstancesFromUrls(tpool.Instances)) - } else { - d.Set("instances", nil) - } - d.Set("name", tpool.Name) - d.Set("region", regionUrl[len(regionUrl)-1]) - d.Set("session_affinity", tpool.SessionAffinity) - d.Set("project", project) - return nil -} - -func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Delete the TargetPool - op, err := config.clientCompute.TargetPools.Delete( - project, region, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting TargetPool: %s", err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting Target Pool") - if err != nil { - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_compute_target_pool_test.go b/builtin/providers/google/resource_compute_target_pool_test.go deleted file mode 100644 index 056a571b1..000000000 --- a/builtin/providers/google/resource_compute_target_pool_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeTargetPool_basic(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeTargetPoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeTargetPool_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeTargetPoolExists( - "google_compute_target_pool.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if 
rs.Type != "google_compute_target_pool" { - continue - } - - _, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("TargetPool still exists") - } - } - - return nil -} - -func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("TargetPool not found") - } - - return nil - } -} - -var testAccComputeTargetPool_basic = fmt.Sprintf(` -resource "google_compute_http_health_check" "foobar" { - name = "healthcheck-test-%s" - host = "example.com" -} - -resource "google_compute_target_pool" "foobar" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "tpool-test-%s" - session_affinity = "CLIENT_IP_PROTO" - health_checks = [ - "${google_compute_http_health_check.foobar.name}" - ] -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_url_map.go b/builtin/providers/google/resource_compute_url_map.go deleted file mode 100644 index 3c5740e06..000000000 --- a/builtin/providers/google/resource_compute_url_map.go +++ /dev/null @@ -1,692 +0,0 @@ -package google - -import ( - "fmt" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" -) - -func resourceComputeUrlMap() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeUrlMapCreate, - Read: resourceComputeUrlMapRead, - Update: resourceComputeUrlMapUpdate, - Delete: 
resourceComputeUrlMapDelete, - - Schema: map[string]*schema.Schema{ - "default_service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "host_rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - // TODO(evandbrown): Enable when lists support validation - //ValidateFunc: validateHostRules, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "hosts": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "path_matcher": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "path_matcher": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "default_service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "path_rule": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "paths": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - 
"test": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func createHostRule(v interface{}) *compute.HostRule { - _hostRule := v.(map[string]interface{}) - - _hosts := _hostRule["hosts"].([]interface{}) - hosts := make([]string, len(_hosts)) - - for i, v := range _hosts { - hosts[i] = v.(string) - } - - pathMatcher := _hostRule["path_matcher"].(string) - - hostRule := &compute.HostRule{ - Hosts: hosts, - PathMatcher: pathMatcher, - } - - if v, ok := _hostRule["description"]; ok { - hostRule.Description = v.(string) - } - - return hostRule -} - -func createPathMatcher(v interface{}) *compute.PathMatcher { - _pathMatcher := v.(map[string]interface{}) - - _pathRules := _pathMatcher["path_rule"].([]interface{}) - pathRules := make([]*compute.PathRule, len(_pathRules)) - - for ip, vp := range _pathRules { - _pathRule := vp.(map[string]interface{}) - - _paths := _pathRule["paths"].([]interface{}) - paths := make([]string, len(_paths)) - - for ipp, vpp := range _paths { - paths[ipp] = vpp.(string) - } - - service := _pathRule["service"].(string) - - pathRule := &compute.PathRule{ - Paths: paths, - Service: service, - } - - pathRules[ip] = pathRule - } - - name := _pathMatcher["name"].(string) - defaultService := _pathMatcher["default_service"].(string) - - pathMatcher := &compute.PathMatcher{ - PathRules: pathRules, - Name: name, - DefaultService: defaultService, - } - - if vp, okp := _pathMatcher["description"]; okp { - pathMatcher.Description = vp.(string) - } - - return pathMatcher -} - -func createUrlMapTest(v interface{}) *compute.UrlMapTest { - _test 
:= v.(map[string]interface{}) - - host := _test["host"].(string) - path := _test["path"].(string) - service := _test["service"].(string) - - test := &compute.UrlMapTest{ - Host: host, - Path: path, - Service: service, - } - - if vp, okp := _test["description"]; okp { - test.Description = vp.(string) - } - - return test -} - -func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - defaultService := d.Get("default_service").(string) - - urlMap := &compute.UrlMap{ - Name: name, - DefaultService: defaultService, - } - - if v, ok := d.GetOk("description"); ok { - urlMap.Description = v.(string) - } - - _hostRules := d.Get("host_rule").(*schema.Set) - urlMap.HostRules = make([]*compute.HostRule, _hostRules.Len()) - - for i, v := range _hostRules.List() { - urlMap.HostRules[i] = createHostRule(v) - } - - _pathMatchers := d.Get("path_matcher").([]interface{}) - urlMap.PathMatchers = make([]*compute.PathMatcher, len(_pathMatchers)) - - for i, v := range _pathMatchers { - urlMap.PathMatchers[i] = createPathMatcher(v) - } - - _tests := make([]interface{}, 0) - if v, ok := d.GetOk("test"); ok { - _tests = v.([]interface{}) - } - urlMap.Tests = make([]*compute.UrlMapTest, len(_tests)) - - for i, v := range _tests { - urlMap.Tests[i] = createUrlMapTest(v) - } - - op, err := config.clientCompute.UrlMaps.Insert(project, urlMap).Do() - - if err != nil { - return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) - } - - err = computeOperationWaitGlobal(config, op, project, "Insert Url Map") - - if err != nil { - return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) - } - - return resourceComputeUrlMapRead(d, meta) -} - -func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != 
nil { - return err - } - - name := d.Get("name").(string) - - urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("URL Map %q", d.Get("name").(string))) - } - - d.SetId(name) - d.Set("self_link", urlMap.SelfLink) - d.Set("id", strconv.FormatUint(urlMap.Id, 10)) - d.Set("fingerprint", urlMap.Fingerprint) - - hostRuleMap := make(map[string]*compute.HostRule) - for _, v := range urlMap.HostRules { - hostRuleMap[v.PathMatcher] = v - } - - /* Only read host rules into our TF state that we have defined */ - _hostRules := d.Get("host_rule").(*schema.Set).List() - _newHostRules := make([]interface{}, 0) - for _, v := range _hostRules { - _hostRule := v.(map[string]interface{}) - _pathMatcher := _hostRule["path_matcher"].(string) - - /* Delete local entries that are no longer found on the GCE server */ - if hostRule, ok := hostRuleMap[_pathMatcher]; ok { - _newHostRule := make(map[string]interface{}) - _newHostRule["path_matcher"] = _pathMatcher - - hostsSet := make(map[string]bool) - for _, host := range hostRule.Hosts { - hostsSet[host] = true - } - - /* Only store hosts we are keeping track of */ - _newHosts := make([]interface{}, 0) - for _, vp := range _hostRule["hosts"].([]interface{}) { - if _, okp := hostsSet[vp.(string)]; okp { - _newHosts = append(_newHosts, vp) - } - } - - _newHostRule["hosts"] = _newHosts - _newHostRule["description"] = hostRule.Description - - _newHostRules = append(_newHostRules, _newHostRule) - } - } - - d.Set("host_rule", _newHostRules) - - pathMatcherMap := make(map[string]*compute.PathMatcher) - for _, v := range urlMap.PathMatchers { - pathMatcherMap[v.Name] = v - } - - /* Only read path matchers into our TF state that we have defined */ - _pathMatchers := d.Get("path_matcher").([]interface{}) - _newPathMatchers := make([]interface{}, 0) - for _, v := range _pathMatchers { - _pathMatcher := v.(map[string]interface{}) - _name := 
_pathMatcher["name"].(string) - - if pathMatcher, ok := pathMatcherMap[_name]; ok { - _newPathMatcher := make(map[string]interface{}) - _newPathMatcher["name"] = _name - _newPathMatcher["default_service"] = pathMatcher.DefaultService - _newPathMatcher["description"] = pathMatcher.Description - - _newPathRules := make([]interface{}, len(pathMatcher.PathRules)) - for ip, pathRule := range pathMatcher.PathRules { - _newPathRule := make(map[string]interface{}) - _newPathRule["service"] = pathRule.Service - _paths := make([]interface{}, len(pathRule.Paths)) - - for ipp, vpp := range pathRule.Paths { - _paths[ipp] = vpp - } - - _newPathRule["paths"] = _paths - - _newPathRules[ip] = _newPathRule - } - - _newPathMatcher["path_rule"] = _newPathRules - _newPathMatchers = append(_newPathMatchers, _newPathMatcher) - } - } - - d.Set("path_matcher", _newPathMatchers) - - testMap := make(map[string]*compute.UrlMapTest) - for _, v := range urlMap.Tests { - testMap[fmt.Sprintf("%s/%s", v.Host, v.Path)] = v - } - - _tests := make([]interface{}, 0) - /* Only read tests into our TF state that we have defined */ - if v, ok := d.GetOk("test"); ok { - _tests = v.([]interface{}) - } - _newTests := make([]interface{}, 0) - for _, v := range _tests { - _test := v.(map[string]interface{}) - _host := _test["host"].(string) - _path := _test["path"].(string) - - /* Delete local entries that are no longer found on the GCE server */ - if test, ok := testMap[fmt.Sprintf("%s/%s", _host, _path)]; ok { - _newTest := make(map[string]interface{}) - _newTest["host"] = _host - _newTest["path"] = _path - _newTest["description"] = test.Description - _newTest["service"] = test.Service - - _newTests = append(_newTests, _newTest) - } - } - - d.Set("test", _newTests) - - return nil -} - -func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - 
urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() - if err != nil { - return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) - } - - urlMap.DefaultService = d.Get("default_service").(string) - - if v, ok := d.GetOk("description"); ok { - urlMap.Description = v.(string) - } - - if d.HasChange("host_rule") { - _oldHostRules, _newHostRules := d.GetChange("host_rule") - _oldHostRulesMap := make(map[string]interface{}) - _newHostRulesMap := make(map[string]interface{}) - - for _, v := range _oldHostRules.(*schema.Set).List() { - _hostRule := v.(map[string]interface{}) - _oldHostRulesMap[_hostRule["path_matcher"].(string)] = v - } - - for _, v := range _newHostRules.(*schema.Set).List() { - _hostRule := v.(map[string]interface{}) - _newHostRulesMap[_hostRule["path_matcher"].(string)] = v - } - - newHostRules := make([]*compute.HostRule, 0) - /* Decide which host rules to keep */ - for _, v := range urlMap.HostRules { - /* If it's in the old state, we have ownership over the host rule */ - if vOld, ok := _oldHostRulesMap[v.PathMatcher]; ok { - if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { - /* Adjust for any changes made to this rule */ - _newHostRule := vNew.(map[string]interface{}) - _oldHostRule := vOld.(map[string]interface{}) - _newHostsSet := make(map[string]bool) - _oldHostsSet := make(map[string]bool) - - hostRule := &compute.HostRule{ - PathMatcher: v.PathMatcher, - } - - for _, v := range _newHostRule["hosts"].([]interface{}) { - _newHostsSet[v.(string)] = true - } - - for _, v := range _oldHostRule["hosts"].([]interface{}) { - _oldHostsSet[v.(string)] = true - } - - /* Only add hosts that have been added locally or are new, - * not touching those from the GCE server state */ - for _, host := range v.Hosts { - _, okNew := _newHostsSet[host] - _, okOld := _oldHostsSet[host] - - /* Drop deleted hosts */ - if okOld && !okNew { - continue - } - - hostRule.Hosts = append(hostRule.Hosts, host) - - /* Kep track of the fact that 
this host was added */ - delete(_newHostsSet, host) - } - - /* Now add in the brand new entries */ - for host, _ := range _newHostsSet { - hostRule.Hosts = append(hostRule.Hosts, host) - } - - if v, ok := _newHostRule["description"]; ok { - hostRule.Description = v.(string) - } - - newHostRules = append(newHostRules, hostRule) - - /* Record that we've include this host rule */ - delete(_newHostRulesMap, v.PathMatcher) - } else { - /* It's been deleted */ - continue - } - } else { - if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { - newHostRules = append(newHostRules, createHostRule(vNew)) - - /* Record that we've include this host rule */ - delete(_newHostRulesMap, v.PathMatcher) - } else { - /* It wasn't created or modified locally */ - newHostRules = append(newHostRules, v) - } - } - } - - /* Record brand new host rules (ones not deleted above) */ - for _, v := range _newHostRulesMap { - newHostRules = append(newHostRules, createHostRule(v)) - } - - urlMap.HostRules = newHostRules - } - - if d.HasChange("path_matcher") { - _oldPathMatchers, _newPathMatchers := d.GetChange("path_matcher") - _oldPathMatchersMap := make(map[string]interface{}) - _newPathMatchersMap := make(map[string]interface{}) - - for _, v := range _oldPathMatchers.([]interface{}) { - _pathMatcher := v.(map[string]interface{}) - _oldPathMatchersMap[_pathMatcher["name"].(string)] = v - } - - for _, v := range _newPathMatchers.([]interface{}) { - _pathMatcher := v.(map[string]interface{}) - _newPathMatchersMap[_pathMatcher["name"].(string)] = v - } - - newPathMatchers := make([]*compute.PathMatcher, 0) - /* Decide which path matchers to keep */ - for _, v := range urlMap.PathMatchers { - /* If it's in the old state, we have ownership over the host rule */ - _, okOld := _oldPathMatchersMap[v.Name] - vNew, okNew := _newPathMatchersMap[v.Name] - - /* Drop deleted entries */ - if okOld && !okNew { - continue - } - - /* Don't change entries that don't belong to us */ - if !okNew { - newPathMatchers 
= append(newPathMatchers, v) - } else { - newPathMatchers = append(newPathMatchers, createPathMatcher(vNew)) - - delete(_newPathMatchersMap, v.Name) - } - } - - /* Record brand new host rules */ - for _, v := range _newPathMatchersMap { - newPathMatchers = append(newPathMatchers, createPathMatcher(v)) - } - - urlMap.PathMatchers = newPathMatchers - } - - if d.HasChange("test") { - _oldTests, _newTests := d.GetChange("test") - _oldTestsMap := make(map[string]interface{}) - _newTestsMap := make(map[string]interface{}) - - for _, v := range _oldTests.([]interface{}) { - _test := v.(map[string]interface{}) - ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) - _oldTestsMap[ident] = v - } - - for _, v := range _newTests.([]interface{}) { - _test := v.(map[string]interface{}) - ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) - _newTestsMap[ident] = v - } - - newTests := make([]*compute.UrlMapTest, 0) - /* Decide which path matchers to keep */ - for _, v := range urlMap.Tests { - ident := fmt.Sprintf("%s/%s", v.Host, v.Path) - /* If it's in the old state, we have ownership over the host rule */ - _, okOld := _oldTestsMap[ident] - vNew, okNew := _newTestsMap[ident] - - /* Drop deleted entries */ - if okOld && !okNew { - continue - } - - /* Don't change entries that don't belong to us */ - if !okNew { - newTests = append(newTests, v) - } else { - newTests = append(newTests, createUrlMapTest(vNew)) - - delete(_newTestsMap, ident) - } - } - - /* Record brand new host rules */ - for _, v := range _newTestsMap { - newTests = append(newTests, createUrlMapTest(v)) - } - - urlMap.Tests = newTests - } - op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do() - - if err != nil { - return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err) - } - - err = computeOperationWaitGlobal(config, op, project, "Update Url Map") - - if err != nil { - return fmt.Errorf("Error, failed waitng to update 
Url Map %s: %s", name, err) - } - - return resourceComputeUrlMapRead(d, meta) -} - -func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - op, err := config.clientCompute.UrlMaps.Delete(project, name).Do() - - if err != nil { - return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) - } - - err = computeOperationWaitGlobal(config, op, project, "Delete Url Map") - - if err != nil { - return fmt.Errorf("Error, failed waitng to delete Url Map %s: %s", name, err) - } - - return nil -} - -func validateHostRules(v interface{}, k string) (ws []string, es []error) { - pathMatchers := make(map[string]bool) - hostRules := v.([]interface{}) - for _, hri := range hostRules { - hr := hri.(map[string]interface{}) - pm := hr["path_matcher"].(string) - if pathMatchers[pm] { - es = append(es, fmt.Errorf("Multiple host_rule entries with the same path_matcher are not allowed. 
Please collapse all hosts with the same path_matcher into one host_rule")) - return - } - pathMatchers[pm] = true - } - return -} diff --git a/builtin/providers/google/resource_compute_url_map_test.go b/builtin/providers/google/resource_compute_url_map_test.go deleted file mode 100644 index ea763cd2d..000000000 --- a/builtin/providers/google/resource_compute_url_map_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeUrlMap_basic(t *testing.T) { - bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeUrlMapDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeUrlMapExists( - "google_compute_url_map.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { - bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeUrlMapDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeUrlMapExists( - "google_compute_url_map.foobar"), - ), - }, - - resource.TestStep{ - Config: testAccComputeUrlMap_basic2(bsName, 
hcName, umName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeUrlMapExists( - "google_compute_url_map.foobar"), - ), - }, - }, - }) -} - -func TestAccComputeUrlMap_advanced(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeUrlMapDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeUrlMap_advanced1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeUrlMapExists( - "google_compute_url_map.foobar"), - ), - }, - - resource.TestStep{ - Config: testAccComputeUrlMap_advanced2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeUrlMapExists( - "google_compute_url_map.foobar"), - ), - }, - }, - }) -} - -func testAccCheckComputeUrlMapDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_url_map" { - continue - } - - _, err := config.clientCompute.UrlMaps.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Url map still exists") - } - } - - return nil -} - -func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.UrlMaps.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Url map not found") - } - return nil - } -} - -func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { - return fmt.Sprintf(` -resource "google_compute_backend_service" "foobar" { - name = "urlmap-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource 
"google_compute_http_health_check" "zero" { - name = "urlmap-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "urlmap-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } -} -`, bsName, hcName, umName) -} - -func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string { - return fmt.Sprintf(` -resource "google_compute_backend_service" "foobar" { - name = "urlmap-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "urlmap-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "urlmap-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "blip" - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blip" - path_rule { - paths = ["/*", "/home"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - test { - host = "mysite.com" - path = "/test" - service = "${google_compute_backend_service.foobar.self_link}" - } -} -`, bsName, hcName, umName) -} - -var testAccComputeUrlMap_advanced1 = fmt.Sprintf(` -resource "google_compute_backend_service" "foobar" { - name = "urlmap-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource 
"google_compute_http_health_check" "zero" { - name = "urlmap-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "urlmap-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "blop" - } - - host_rule { - hosts = ["myfavoritesite.com"] - path_matcher = "blip" - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blop" - path_rule { - paths = ["/*", "/home"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blip" - path_rule { - paths = ["/*", "/home"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - -var testAccComputeUrlMap_advanced2 = fmt.Sprintf(` -resource "google_compute_backend_service" "foobar" { - name = "urlmap-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "urlmap-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "urlmap-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "blep" - } - - host_rule { - hosts = ["myfavoritesite.com"] - path_matcher = "blip" - } - - host_rule { - hosts = ["myleastfavoritesite.com"] - path_matcher = "blub" - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blep" - path_rule { - paths = ["/home"] - service = "${google_compute_backend_service.foobar.self_link}" - } - - path_rule { - paths = ["/login"] - service = 
"${google_compute_backend_service.foobar.self_link}" - } - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blub" - path_rule { - paths = ["/*", "/blub"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } - - path_matcher { - default_service = "${google_compute_backend_service.foobar.self_link}" - name = "blip" - path_rule { - paths = ["/*", "/home"] - service = "${google_compute_backend_service.foobar.self_link}" - } - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_vpn_gateway.go b/builtin/providers/google/resource_compute_vpn_gateway.go deleted file mode 100644 index 5b23eaa49..000000000 --- a/builtin/providers/google/resource_compute_vpn_gateway.go +++ /dev/null @@ -1,157 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/compute/v1" -) - -func resourceComputeVpnGateway() *schema.Resource { - return &schema.Resource{ - // Unfortunately, the VPNGatewayService does not support update - // operations. 
This is why everything is marked forcenew - Create: resourceComputeVpnGatewayCreate, - Read: resourceComputeVpnGatewayRead, - Delete: resourceComputeVpnGatewayDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - network, err := getNetworkLink(d, config, "network") - if err != nil { - return err - } - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) - - vpnGateway := &compute.TargetVpnGateway{ - Name: name, - Network: network, - } - - if v, ok := d.GetOk("description"); ok { - vpnGateway.Description = v.(string) - } - - op, err := vpnGatewaysService.Insert(project, region, vpnGateway).Do() - if err != nil { - return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Gateway") - if err != nil { - return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err) - } - - return resourceComputeVpnGatewayRead(d, meta) -} - -func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config 
:= meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) - vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway %q", d.Get("name").(string))) - } - - d.Set("self_link", vpnGateway.SelfLink) - d.SetId(name) - - return nil -} - -func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) - - op, err := vpnGatewaysService.Delete(project, region, name).Do() - if err != nil { - return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Gateway") - if err != nil { - return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err) - } - - return nil -} diff --git a/builtin/providers/google/resource_compute_vpn_gateway_test.go b/builtin/providers/google/resource_compute_vpn_gateway_test.go deleted file mode 100644 index 7a38f6ad8..000000000 --- a/builtin/providers/google/resource_compute_vpn_gateway_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/compute/v1" -) - -func TestAccComputeVpnGateway_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckComputeVpnGatewayDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeVpnGateway_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeVpnGatewayExists( - "google_compute_vpn_gateway.foobar"), - testAccCheckComputeVpnGatewayExists( - "google_compute_vpn_gateway.baz"), - ), - }, - }, - }) -} - -func testAccCheckComputeVpnGatewayDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - project := config.Project - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_network" { - continue - } - - region := rs.Primary.Attributes["region"] - name := rs.Primary.Attributes["name"] - - _, err := vpnGatewaysService.Get(project, region, name).Do() - - if err == nil { - return fmt.Errorf("Error, VPN Gateway %s in region %s still exists", - name, region) - } - } - - return nil -} - -func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - name := rs.Primary.Attributes["name"] - region := rs.Primary.Attributes["region"] - project := config.Project - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) - _, err := vpnGatewaysService.Get(project, region, name).Do() - - if err != nil { - return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) - } - - return nil - } -} - -var testAccComputeVpnGateway_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "gateway-test-%s" - ipv4_range = "10.0.0.0/16" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "gateway-test-%s" - network = 
"${google_compute_network.foobar.self_link}" - region = "us-central1" -} -resource "google_compute_vpn_gateway" "baz" { - name = "gateway-test-%s" - network = "${google_compute_network.foobar.name}" - region = "us-central1" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_compute_vpn_tunnel.go b/builtin/providers/google/resource_compute_vpn_tunnel.go deleted file mode 100644 index b62aadd1d..000000000 --- a/builtin/providers/google/resource_compute_vpn_tunnel.go +++ /dev/null @@ -1,373 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "net" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/compute/v1" -) - -func resourceComputeVpnTunnel() *schema.Resource { - return &schema.Resource{ - // Unfortunately, the VPNTunnelService does not support update - // operations. This is why everything is marked forcenew - Create: resourceComputeVpnTunnelCreate, - Read: resourceComputeVpnTunnelRead, - Delete: resourceComputeVpnTunnelDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "peer_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validatePeerAddr, - }, - - "shared_secret": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "target_vpn_gateway": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "detailed_status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ike_version": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - ForceNew: true, - }, - - "local_traffic_selector": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Computed: true, - Elem: &schema.Schema{Type: 
schema.TypeString}, - Set: schema.HashString, - }, - - "remote_traffic_selector": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "router": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - peerIp := d.Get("peer_ip").(string) - sharedSecret := d.Get("shared_secret").(string) - targetVpnGateway := d.Get("target_vpn_gateway").(string) - ikeVersion := d.Get("ike_version").(int) - - if ikeVersion < 1 || ikeVersion > 2 { - return fmt.Errorf("Only IKE version 1 or 2 supported, not %d", ikeVersion) - } - - // Build up the list of sources - var localTrafficSelectors []string - if v := d.Get("local_traffic_selector").(*schema.Set); v.Len() > 0 { - localTrafficSelectors = make([]string, v.Len()) - for i, v := range v.List() { - localTrafficSelectors[i] = v.(string) - } - } - - var remoteTrafficSelectors []string - if v := d.Get("remote_traffic_selector").(*schema.Set); v.Len() > 0 { - remoteTrafficSelectors = make([]string, v.Len()) - for i, v := range v.List() { - remoteTrafficSelectors[i] = v.(string) - } - } - - vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) - - vpnTunnel := &compute.VpnTunnel{ - Name: name, - PeerIp: peerIp, - SharedSecret: sharedSecret, - TargetVpnGateway: targetVpnGateway, - 
IkeVersion: int64(ikeVersion), - LocalTrafficSelector: localTrafficSelectors, - RemoteTrafficSelector: remoteTrafficSelectors, - } - - if v, ok := d.GetOk("description"); ok { - vpnTunnel.Description = v.(string) - } - - if v, ok := d.GetOk("router"); ok { - routerLink, err := getRouterLink(config, project, region, v.(string)) - if err != nil { - return err - } - vpnTunnel.Router = routerLink - } - - op, err := vpnTunnelsService.Insert(project, region, vpnTunnel).Do() - if err != nil { - return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Tunnel") - if err != nil { - return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err) - } - - return resourceComputeVpnTunnelRead(d, meta) -} - -func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) - - vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VPN Tunnel %q", d.Get("name").(string))) - } - - localTrafficSelectors := []string{} - for _, lts := range vpnTunnel.LocalTrafficSelector { - localTrafficSelectors = append(localTrafficSelectors, lts) - } - d.Set("local_traffic_selector", localTrafficSelectors) - - remoteTrafficSelectors := []string{} - for _, rts := range vpnTunnel.RemoteTrafficSelector { - remoteTrafficSelectors = append(remoteTrafficSelectors, rts) - } - d.Set("remote_traffic_selector", remoteTrafficSelectors) - - d.Set("detailed_status", vpnTunnel.DetailedStatus) - d.Set("self_link", vpnTunnel.SelfLink) - - d.SetId(name) - - return nil -} - -func resourceComputeVpnTunnelDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) - - op, err := vpnTunnelsService.Delete(project, region, name).Do() - if err != nil { - return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Tunnel") - if err != nil { - return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err) - } - - return nil -} - -// validatePeerAddr returns false if a tunnel's peer_ip property -// is invalid. Currently, only addresses that collide with RFC -// 5735 (https://tools.ietf.org/html/rfc5735) fail validation. -func validatePeerAddr(i interface{}, val string) ([]string, []error) { - ip := net.ParseIP(i.(string)) - if ip == nil { - return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} - } - for _, test := range invalidPeerAddrs { - if bytes.Compare(ip, test.from) >= 0 && bytes.Compare(ip, test.to) <= 0 { - return nil, []error{fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)} - } - } - return nil, nil -} - -// invalidPeerAddrs is a collection of IP addres ranges that represent -// a conflict with RFC 5735 (https://tools.ietf.org/html/rfc5735#page-3). -// CIDR range notations in the RFC were converted to a (from, to) pair -// for easy checking with bytes.Compare. 
-var invalidPeerAddrs = []struct { - from net.IP - to net.IP -}{ - { - from: net.ParseIP("0.0.0.0"), - to: net.ParseIP("0.255.255.255"), - }, - { - from: net.ParseIP("10.0.0.0"), - to: net.ParseIP("10.255.255.255"), - }, - { - from: net.ParseIP("127.0.0.0"), - to: net.ParseIP("127.255.255.255"), - }, - { - from: net.ParseIP("169.254.0.0"), - to: net.ParseIP("169.254.255.255"), - }, - { - from: net.ParseIP("172.16.0.0"), - to: net.ParseIP("172.31.255.255"), - }, - { - from: net.ParseIP("192.0.0.0"), - to: net.ParseIP("192.0.0.255"), - }, - { - from: net.ParseIP("192.0.2.0"), - to: net.ParseIP("192.0.2.255"), - }, - { - from: net.ParseIP("192.88.99.0"), - to: net.ParseIP("192.88.99.255"), - }, - { - from: net.ParseIP("192.168.0.0"), - to: net.ParseIP("192.168.255.255"), - }, - { - from: net.ParseIP("198.18.0.0"), - to: net.ParseIP("198.19.255.255"), - }, - { - from: net.ParseIP("198.51.100.0"), - to: net.ParseIP("198.51.100.255"), - }, - { - from: net.ParseIP("203.0.113.0"), - to: net.ParseIP("203.0.113.255"), - }, - { - from: net.ParseIP("224.0.0.0"), - to: net.ParseIP("239.255.255.255"), - }, - { - from: net.ParseIP("240.0.0.0"), - to: net.ParseIP("255.255.255.255"), - }, - { - from: net.ParseIP("255.255.255.255"), - to: net.ParseIP("255.255.255.255"), - }, -} - -func getVpnTunnelLink(config *Config, project string, region string, tunnel string) (string, error) { - - if !strings.HasPrefix(tunnel, "https://www.googleapis.com/compute/") { - // Tunnel value provided is just the name, lookup the tunnel SelfLink - tunnelData, err := config.clientCompute.VpnTunnels.Get( - project, region, tunnel).Do() - if err != nil { - return "", fmt.Errorf("Error reading tunnel: %s", err) - } - tunnel = tunnelData.SelfLink - } - - return tunnel, nil - -} - -func getVpnTunnelName(vpntunnel string) (string, error) { - - if strings.HasPrefix(vpntunnel, "https://www.googleapis.com/compute/") { - // extract the VPN tunnel name from SelfLink URL - vpntunnelName := 
vpntunnel[strings.LastIndex(vpntunnel, "/")+1:] - if vpntunnelName == "" { - return "", fmt.Errorf("VPN tunnel url not valid") - } - return vpntunnelName, nil - } - - return vpntunnel, nil -} diff --git a/builtin/providers/google/resource_compute_vpn_tunnel_test.go b/builtin/providers/google/resource_compute_vpn_tunnel_test.go deleted file mode 100644 index d2399fa3e..000000000 --- a/builtin/providers/google/resource_compute_vpn_tunnel_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/compute/v1" -) - -func TestAccComputeVpnTunnel_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeVpnTunnelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeVpnTunnel_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeVpnTunnelExists( - "google_compute_vpn_tunnel.foobar"), - resource.TestCheckResourceAttr( - "google_compute_vpn_tunnel.foobar", "local_traffic_selector.#", "1"), - resource.TestCheckResourceAttr( - "google_compute_vpn_tunnel.foobar", "remote_traffic_selector.#", "2"), - ), - }, - }, - }) -} - -func TestAccComputeVpnTunnel_router(t *testing.T) { - router := fmt.Sprintf("tunnel-test-router-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeVpnTunnelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeVpnTunnelRouter(router), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeVpnTunnelExists( - "google_compute_vpn_tunnel.foobar"), - resource.TestCheckResourceAttr( - "google_compute_vpn_tunnel.foobar", "router", router), - ), - }, - }, - 
}) -} - -func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeVpnTunnelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeVpnTunnelDefaultTrafficSelectors, - Check: testAccCheckComputeVpnTunnelExists( - "google_compute_vpn_tunnel.foobar"), - }, - }, - }) -} - -func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - project := config.Project - - vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_network" { - continue - } - - region := rs.Primary.Attributes["region"] - name := rs.Primary.Attributes["name"] - - _, err := vpnTunnelsService.Get(project, region, name).Do() - - if err == nil { - return fmt.Errorf("Error, VPN Tunnel %s in region %s still exists", - name, region) - } - } - - return nil -} - -func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - name := rs.Primary.Attributes["name"] - region := rs.Primary.Attributes["region"] - project := config.Project - - vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) - _, err := vpnTunnelsService.Get(project, region, name).Do() - - if err != nil { - return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) - } - - return nil - } -} - -var testAccComputeVpnTunnel_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "tunnel-test-%s" -} -resource "google_compute_subnetwork" "foobar" { - name = "tunnel-test-%s" - network = 
"${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "tunnel-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - local_traffic_selector = ["${google_compute_subnetwork.foobar.ip_cidr_range}"] - remote_traffic_selector = ["192.168.0.0/24", "192.168.1.0/24"] -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10)) - -func 
testAccComputeVpnTunnelRouter(router string) string { - testId := acctest.RandString(10) - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "tunnel-test-%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_address" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_vpn_gateway" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" - } - resource "google_compute_forwarding_rule" "foobar_esp" { - name = "tunnel-test-%s-1" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "tunnel-test-%s-2" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "tunnel-test-%s-3" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" - } - resource "google_compute_router" "foobar"{ - name = "%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } - } - resource "google_compute_vpn_tunnel" "foobar" { - name = "tunnel-test-%s" - region = 
"${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" - } - `, testId, testId, testId, testId, testId, testId, testId, router, testId) -} - -var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "tunnel-test-%s" - auto_create_subnetworks = "true" -} -resource "google_compute_address" "foobar" { - name = "tunnel-test-%s" - region = "us-central1" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_address.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "tunnel-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" -}`, 
acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10)) diff --git a/builtin/providers/google/resource_container_cluster.go b/builtin/providers/google/resource_container_cluster.go deleted file mode 100644 index cdb2de037..000000000 --- a/builtin/providers/google/resource_container_cluster.go +++ /dev/null @@ -1,711 +0,0 @@ -package google - -import ( - "fmt" - "log" - "net" - "regexp" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/container/v1" -) - -var ( - instanceGroupManagerURL = regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/([a-z][a-z0-9-]{5}(?:[-a-z0-9]{0,23}[a-z0-9])?)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)") -) - -func resourceContainerCluster() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerClusterCreate, - Read: resourceContainerClusterRead, - Update: resourceContainerClusterUpdate, - Delete: resourceContainerClusterDelete, - - Schema: map[string]*schema.Schema{ - "master_auth": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_certificate": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "client_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - "cluster_ca_certificate": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Sensitive: true, - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, 
errors []error) { - value := v.(string) - - if len(value) > 40 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 40 characters", k)) - } - if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q can only contain lowercase letters, numbers and hyphens", k)) - } - if !regexp.MustCompile("^[a-z]").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must start with a letter", k)) - } - if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must end with a number or a letter", k)) - } - return - }, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "additional_zones": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "cluster_ipv4_cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, ipnet, err := net.ParseCIDR(value) - - if err != nil || ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf( - "%q must contain a valid CIDR", k)) - } - return - }, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "instance_group_urls": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "logging_service": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "monitoring_service": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: 
true, - }, - - "network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "default", - ForceNew: true, - }, - "subnetwork": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "addons_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "http_load_balancing": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "horizontal_pod_autoscaling": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - }, - }, - }, - "node_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "machine_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "disk_size_gb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if value < 10 { - errors = append(errors, fmt.Errorf( - "%q cannot be less than 10", k)) - } - return - }, - }, - - "local_ssd_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - if value < 0 { - errors = append(errors, fmt.Errorf( - "%q cannot be negative", k)) - } - return - }, - }, - - "oauth_scopes": &schema.Schema{ - 
Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - }, - - "service_account": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: schema.TypeString, - }, - - "image_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - - "node_version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "node_pool": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"node_pool.name_prefix"}, - ForceNew: true, - }, - - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zoneName := d.Get("zone").(string) - clusterName := d.Get("name").(string) - - cluster := &container.Cluster{ - Name: clusterName, - InitialNodeCount: int64(d.Get("initial_node_count").(int)), - } - - if v, ok := d.GetOk("master_auth"); ok { - masterAuths := v.([]interface{}) - masterAuth := masterAuths[0].(map[string]interface{}) - 
cluster.MasterAuth = &container.MasterAuth{ - Password: masterAuth["password"].(string), - Username: masterAuth["username"].(string), - } - } - - if v, ok := d.GetOk("node_version"); ok { - cluster.InitialClusterVersion = v.(string) - } - - if v, ok := d.GetOk("additional_zones"); ok { - locationsList := v.([]interface{}) - locations := []string{} - for _, v := range locationsList { - location := v.(string) - locations = append(locations, location) - if location == zoneName { - return fmt.Errorf("additional_zones should not contain the original 'zone'.") - } - } - locations = append(locations, zoneName) - cluster.Locations = locations - } - - if v, ok := d.GetOk("cluster_ipv4_cidr"); ok { - cluster.ClusterIpv4Cidr = v.(string) - } - - if v, ok := d.GetOk("description"); ok { - cluster.Description = v.(string) - } - - if v, ok := d.GetOk("logging_service"); ok { - cluster.LoggingService = v.(string) - } - - if v, ok := d.GetOk("monitoring_service"); ok { - cluster.MonitoringService = v.(string) - } - - if _, ok := d.GetOk("network"); ok { - network, err := getNetworkName(d, "network") - if err != nil { - return err - } - cluster.Network = network - } - - if v, ok := d.GetOk("subnetwork"); ok { - cluster.Subnetwork = v.(string) - } - - if v, ok := d.GetOk("addons_config"); ok { - addonsConfig := v.([]interface{})[0].(map[string]interface{}) - cluster.AddonsConfig = &container.AddonsConfig{} - - if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{ - Disabled: addon["disabled"].(bool), - } - } - - if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{ - Disabled: addon["disabled"].(bool), - } - } - } - if v, ok := 
d.GetOk("node_config"); ok { - nodeConfigs := v.([]interface{}) - if len(nodeConfigs) > 1 { - return fmt.Errorf("Cannot specify more than one node_config.") - } - nodeConfig := nodeConfigs[0].(map[string]interface{}) - - cluster.NodeConfig = &container.NodeConfig{} - - if v, ok = nodeConfig["machine_type"]; ok { - cluster.NodeConfig.MachineType = v.(string) - } - - if v, ok = nodeConfig["disk_size_gb"]; ok { - cluster.NodeConfig.DiskSizeGb = int64(v.(int)) - } - - if v, ok = nodeConfig["local_ssd_count"]; ok { - cluster.NodeConfig.LocalSsdCount = int64(v.(int)) - } - - if v, ok := nodeConfig["oauth_scopes"]; ok { - scopesList := v.([]interface{}) - scopes := []string{} - for _, v := range scopesList { - scopes = append(scopes, canonicalizeServiceScope(v.(string))) - } - - cluster.NodeConfig.OauthScopes = scopes - } - - if v, ok = nodeConfig["service_account"]; ok { - cluster.NodeConfig.ServiceAccount = v.(string) - } - - if v, ok = nodeConfig["metadata"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - cluster.NodeConfig.Metadata = m - } - - if v, ok = nodeConfig["image_type"]; ok { - cluster.NodeConfig.ImageType = v.(string) - } - } - - nodePoolsCount := d.Get("node_pool.#").(int) - if nodePoolsCount > 0 { - nodePools := make([]*container.NodePool, 0, nodePoolsCount) - for i := 0; i < nodePoolsCount; i++ { - prefix := fmt.Sprintf("node_pool.%d", i) - - nodeCount := d.Get(prefix + ".initial_node_count").(int) - - var name string - if v, ok := d.GetOk(prefix + ".name"); ok { - name = v.(string) - } else if v, ok := d.GetOk(prefix + ".name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.UniqueId() - } - - nodePool := &container.NodePool{ - Name: name, - InitialNodeCount: int64(nodeCount), - } - - nodePools = append(nodePools, nodePool) - } - cluster.NodePools = nodePools - } - - req := &container.CreateClusterRequest{ - Cluster: cluster, - } - - op, err := 
config.clientContainer.Projects.Zones.Clusters.Create( - project, zoneName, req).Do() - if err != nil { - return err - } - - // Wait until it's created - waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3) - if waitErr != nil { - // The resource didn't actually create - d.SetId("") - return waitErr - } - - log.Printf("[INFO] GKE cluster %s has been created", clusterName) - - d.SetId(clusterName) - - return resourceContainerClusterRead(d, meta) -} - -func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zoneName := d.Get("zone").(string) - - cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( - project, zoneName, d.Get("name").(string)).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) - } - - d.Set("name", cluster.Name) - d.Set("zone", cluster.Zone) - - locations := []string{} - if len(cluster.Locations) > 1 { - for _, location := range cluster.Locations { - if location != cluster.Zone { - locations = append(locations, location) - } - } - } - d.Set("additional_zones", locations) - - d.Set("endpoint", cluster.Endpoint) - - masterAuth := []map[string]interface{}{ - map[string]interface{}{ - "username": cluster.MasterAuth.Username, - "password": cluster.MasterAuth.Password, - "client_certificate": cluster.MasterAuth.ClientCertificate, - "client_key": cluster.MasterAuth.ClientKey, - "cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate, - }, - } - d.Set("master_auth", masterAuth) - - d.Set("initial_node_count", cluster.InitialNodeCount) - d.Set("node_version", cluster.CurrentNodeVersion) - d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr) - d.Set("description", cluster.Description) - d.Set("logging_service", cluster.LoggingService) - d.Set("monitoring_service", cluster.MonitoringService) - 
d.Set("network", d.Get("network").(string)) - d.Set("subnetwork", cluster.Subnetwork) - d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) - d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools)) - - if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { - return err - } else { - d.Set("instance_group_urls", igUrls) - } - - return nil -} - -func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zoneName := d.Get("zone").(string) - clusterName := d.Get("name").(string) - desiredNodeVersion := d.Get("node_version").(string) - - req := &container.UpdateClusterRequest{ - Update: &container.ClusterUpdate{ - DesiredNodeVersion: desiredNodeVersion, - }, - } - op, err := config.clientContainer.Projects.Zones.Clusters.Update( - project, zoneName, clusterName, req).Do() - if err != nil { - return err - } - - // Wait until it's updated - waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2) - if waitErr != nil { - return waitErr - } - - log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), - desiredNodeVersion) - - return resourceContainerClusterRead(d, meta) -} - -func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zoneName := d.Get("zone").(string) - clusterName := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) - op, err := config.clientContainer.Projects.Zones.Clusters.Delete( - project, zoneName, clusterName).Do() - if err != nil { - return err - } - - // Wait until it's deleted - waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3) - if waitErr != nil { - return waitErr - } - 
- log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) - - d.SetId("") - - return nil -} - -// container engine's API currently mistakenly returns the instance group manager's -// URL instead of the instance group's URL in its responses. This shim detects that -// error, and corrects it, by fetching the instance group manager URL and retrieving -// the instance group manager, then using that to look up the instance group URL, which -// is then substituted. -// -// This should be removed when the API response is fixed. -func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) { - instanceGroupURLs := make([]string, 0, len(igmUrls)) - for _, u := range igmUrls { - if !instanceGroupManagerURL.MatchString(u) { - instanceGroupURLs = append(instanceGroupURLs, u) - continue - } - matches := instanceGroupManagerURL.FindStringSubmatch(u) - instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() - if err != nil { - return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) - } - instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) - } - return instanceGroupURLs, nil -} - -func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { - config := []map[string]interface{}{ - map[string]interface{}{ - "machine_type": c.MachineType, - "disk_size_gb": c.DiskSizeGb, - "local_ssd_count": c.LocalSsdCount, - "service_account": c.ServiceAccount, - "metadata": c.Metadata, - "image_type": c.ImageType, - }, - } - - if len(c.OauthScopes) > 0 { - config[0]["oauth_scopes"] = c.OauthScopes - } - - return config -} - -func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} { - count := len(c) - - nodePools := make([]map[string]interface{}, 0, count) - - for i, np := range c { - nodePool := map[string]interface{}{ - "name": np.Name, - "name_prefix": 
d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)), - "initial_node_count": np.InitialNodeCount, - } - nodePools = append(nodePools, nodePool) - } - - return nodePools -} diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go deleted file mode 100644 index 295dd4e54..000000000 --- a/builtin/providers/google/resource_container_cluster_test.go +++ /dev/null @@ -1,619 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "strconv" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccContainerCluster_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.primary"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withMasterAuth(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withMasterAuth, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_master_auth"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withAdditionalZones(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withAdditionalZones, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - 
"google_container_cluster.with_additional_zones"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withVersion(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withVersion, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_version"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withNodeConfig(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withNodeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_node_config"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withNodeConfigScopeAlias, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_node_config_scope_alias"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_network(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_networkRef, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_net_ref_by_url"), - testAccCheckContainerCluster( - 
"google_container_cluster.with_net_ref_by_name"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_backend(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_backendRef, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.primary"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withNodePoolBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_node_pool"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withNodePoolNamePrefix, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_node_pool_name_prefix"), - ), - }, - }, - }) -} - -func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerCluster_withNodePoolMultiple, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerCluster( - "google_container_cluster.with_node_pool_multiple"), - ), - }, - }, - }) -} - -func 
testAccCheckContainerClusterDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_container_cluster" { - continue - } - - attributes := rs.Primary.Attributes - _, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() - if err == nil { - return fmt.Errorf("Cluster still exists") - } - } - - return nil -} - -func testAccCheckContainerCluster(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - attributes, err := getResourceAttributes(n, s) - if err != nil { - return err - } - - config := testAccProvider.Meta().(*Config) - cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() - if err != nil { - return err - } - - if cluster.Name != attributes["name"] { - return fmt.Errorf("Cluster %s not found, found %s instead", attributes["name"], cluster.Name) - } - - type clusterTestField struct { - tf_attr string - gcp_attr interface{} - } - - var igUrls []string - if igUrls, err = getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { - return err - } - clusterTests := []clusterTestField{ - {"initial_node_count", strconv.FormatInt(cluster.InitialNodeCount, 10)}, - {"master_auth.0.client_certificate", cluster.MasterAuth.ClientCertificate}, - {"master_auth.0.client_key", cluster.MasterAuth.ClientKey}, - {"master_auth.0.cluster_ca_certificate", cluster.MasterAuth.ClusterCaCertificate}, - {"master_auth.0.password", cluster.MasterAuth.Password}, - {"master_auth.0.username", cluster.MasterAuth.Username}, - {"zone", cluster.Zone}, - {"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr}, - {"description", cluster.Description}, - {"endpoint", cluster.Endpoint}, - {"instance_group_urls", igUrls}, - {"logging_service", cluster.LoggingService}, - {"monitoring_service", cluster.MonitoringService}, - 
{"subnetwork", cluster.Subnetwork}, - {"node_config.0.machine_type", cluster.NodeConfig.MachineType}, - {"node_config.0.disk_size_gb", strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)}, - {"node_config.0.local_ssd_count", strconv.FormatInt(cluster.NodeConfig.LocalSsdCount, 10)}, - {"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes}, - {"node_config.0.service_account", cluster.NodeConfig.ServiceAccount}, - {"node_config.0.metadata", cluster.NodeConfig.Metadata}, - {"node_config.0.image_type", cluster.NodeConfig.ImageType}, - {"node_version", cluster.CurrentNodeVersion}, - } - - // Remove Zone from additional_zones since that's what the resource writes in state - additionalZones := []string{} - for _, location := range cluster.Locations { - if location != cluster.Zone { - additionalZones = append(additionalZones, location) - } - } - clusterTests = append(clusterTests, clusterTestField{"additional_zones", additionalZones}) - - // AddonsConfig is neither Required or Computed, so the API may return nil for it - if cluster.AddonsConfig != nil { - if cluster.AddonsConfig.HttpLoadBalancing != nil { - clusterTests = append(clusterTests, clusterTestField{"addons_config.0.http_load_balancing.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HttpLoadBalancing.Disabled)}) - } - if cluster.AddonsConfig.HorizontalPodAutoscaling != nil { - clusterTests = append(clusterTests, clusterTestField{"addons_config.0.horizontal_pod_autoscaling.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HorizontalPodAutoscaling.Disabled)}) - } - } - - for i, np := range cluster.NodePools { - prefix := fmt.Sprintf("node_pool.%d.", i) - clusterTests = append(clusterTests, - clusterTestField{prefix + "name", np.Name}, - clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)}) - } - - for _, attrs := range clusterTests { - if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { - return fmt.Errorf(c) - } - } - - // Network has to 
be done separately in order to normalize the two values - tf, err := getNetworkNameFromSelfLink(attributes["network"]) - if err != nil { - return err - } - gcp, err := getNetworkNameFromSelfLink(cluster.Network) - if err != nil { - return err - } - if tf != gcp { - return fmt.Errorf(matchError("network", tf, gcp)) - } - - return nil - } -} - -func getResourceAttributes(n string, s *terraform.State) (map[string]string, error) { - rs, ok := s.RootModule().Resources[n] - if !ok { - return nil, fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return nil, fmt.Errorf("No ID is set") - } - - return rs.Primary.Attributes, nil -} - -func checkMatch(attributes map[string]string, attr string, gcp interface{}) string { - if gcpList, ok := gcp.([]string); ok { - return checkListMatch(attributes, attr, gcpList) - } - if gcpMap, ok := gcp.(map[string]string); ok { - return checkMapMatch(attributes, attr, gcpMap) - } - tf := attributes[attr] - if tf != gcp { - return matchError(attr, tf, gcp) - } - return "" -} - -func checkListMatch(attributes map[string]string, attr string, gcpList []string) string { - num, err := strconv.Atoi(attributes[attr+".#"]) - if err != nil { - return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) - } - if num != len(gcpList) { - return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList)) - } - - for i, gcp := range gcpList { - if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp { - return matchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp) - } - } - - return "" -} - -func checkMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string { - num, err := strconv.Atoi(attributes[attr+".%"]) - if err != nil { - return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) - } - if num != len(gcpMap) { - return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap)) - } - - 
for k, gcp := range gcpMap { - if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp { - return matchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp) - } - } - - return "" -} - -func matchError(attr, tf string, gcp interface{}) string { - return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp) -} - -var testAccContainerCluster_basic = fmt.Sprintf(` -resource "google_container_cluster" "primary" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 3 -}`, acctest.RandString(10)) - -var testAccContainerCluster_withMasterAuth = fmt.Sprintf(` -resource "google_container_cluster" "with_master_auth" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 3 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_withAdditionalZones = fmt.Sprintf(` -resource "google_container_cluster" "with_additional_zones" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 1 - - additional_zones = [ - "us-central1-b", - "us-central1-c" - ] - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_withVersion = fmt.Sprintf(` -data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" -} - -resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" - node_version = "${data.google_container_engine_versions.central1a.latest_node_version}" - initial_node_count = 1 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_withNodeConfig = fmt.Sprintf(` -resource "google_container_cluster" "with_node_config" { - name = "cluster-test-%s" - zone = "us-central1-f" - initial_node_count = 1 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_config { - machine_type = "n1-standard-1" - 
disk_size_gb = 15 - local_ssd_count = 1 - oauth_scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring" - ] - service_account = "default" - metadata { - foo = "bar" - } - image_type = "CONTAINER_VM" - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_withNodeConfigScopeAlias = fmt.Sprintf(` -resource "google_container_cluster" "with_node_config_scope_alias" { - name = "cluster-test-%s" - zone = "us-central1-f" - initial_node_count = 1 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_config { - machine_type = "g1-small" - disk_size_gb = 15 - oauth_scopes = [ "compute-rw", "storage-ro", "logging-write", "monitoring" ] - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_networkRef = fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "container-net-%s" - auto_create_subnetworks = true -} - -resource "google_container_cluster" "with_net_ref_by_url" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 1 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - network = "${google_compute_network.container_network.self_link}" -} - -resource "google_container_cluster" "with_net_ref_by_name" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 1 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - network = "${google_compute_network.container_network.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - -var testAccContainerCluster_backendRef = fmt.Sprintf(` -resource "google_compute_backend_service" "my-backend-service" { - name = "terraform-test-%s" - port_name = "http" - protocol = "HTTP" - - backend { - group = "${element(google_container_cluster.primary.instance_group_urls, 1)}" - } - - health_checks = 
["${google_compute_http_health_check.default.self_link}"] -} - -resource "google_compute_http_health_check" "default" { - name = "terraform-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_container_cluster" "primary" { - name = "terraform-test-%s" - zone = "us-central1-a" - initial_node_count = 3 - - additional_zones = [ - "us-central1-b", - "us-central1-c", - ] - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - ] - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - -var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(` -resource "google_container_cluster" "with_node_pool" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_pool { - name = "tf-cluster-nodepool-test-%s" - initial_node_count = 2 - } -}`, acctest.RandString(10), acctest.RandString(10)) - -var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(` -resource "google_container_cluster" "with_node_pool_name_prefix" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_pool { - name_prefix = "tf-np-test" - initial_node_count = 2 - } -}`, acctest.RandString(10)) - -var testAccContainerCluster_withNodePoolMultiple = fmt.Sprintf(` -resource "google_container_cluster" "with_node_pool_multiple" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } - - node_pool { - name = "tf-cluster-nodepool-test-%s" - initial_node_count = 2 - } - - node_pool { - name = "tf-cluster-nodepool-test-%s" - initial_node_count = 3 
- } -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_container_node_pool.go b/builtin/providers/google/resource_container_node_pool.go deleted file mode 100644 index 24f2c97a7..000000000 --- a/builtin/providers/google/resource_container_node_pool.go +++ /dev/null @@ -1,191 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/container/v1" - "google.golang.org/api/googleapi" -) - -func resourceContainerNodePool() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerNodePoolCreate, - Read: resourceContainerNodePoolRead, - Delete: resourceContainerNodePoolDelete, - Exists: resourceContainerNodePoolExists, - - Schema: map[string]*schema.Schema{ - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"name_prefix"}, - ForceNew: true, - }, - - "name_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cluster": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - cluster := d.Get("cluster").(string) - nodeCount := d.Get("initial_node_count").(int) - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - name = 
resource.PrefixedUniqueId(v.(string)) - } else { - name = resource.UniqueId() - } - - nodePool := &container.NodePool{ - Name: name, - InitialNodeCount: int64(nodeCount), - } - - req := &container.CreateNodePoolRequest{ - NodePool: nodePool, - } - - op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do() - - if err != nil { - return fmt.Errorf("Error creating NodePool: %s", err) - } - - waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3) - if waitErr != nil { - // The resource didn't actually create - d.SetId("") - return waitErr - } - - log.Printf("[INFO] GKE NodePool %s has been created", name) - - d.SetId(name) - - return resourceContainerNodePoolRead(d, meta) -} - -func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - name := d.Get("name").(string) - cluster := d.Get("cluster").(string) - - nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - project, zone, cluster, name).Do() - if err != nil { - return fmt.Errorf("Error reading NodePool: %s", err) - } - - d.Set("name", nodePool.Name) - d.Set("initial_node_count", nodePool.InitialNodeCount) - - return nil -} - -func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("zone").(string) - name := d.Get("name").(string) - cluster := d.Get("cluster").(string) - - op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete( - project, zone, cluster, name).Do() - if err != nil { - return fmt.Errorf("Error deleting NodePool: %s", err) - } - - // Wait until it's deleted - waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2) - if 
waitErr != nil { - return waitErr - } - - log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id()) - - d.SetId("") - - return nil -} - -func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return false, err - } - - zone := d.Get("zone").(string) - name := d.Get("name").(string) - cluster := d.Get("cluster").(string) - - _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - project, zone, cluster, name).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Container NodePool %q because it's gone", name) - // The resource doesn't exist anymore - return false, err - } - // There was some other error in reading the resource - return true, err - } - return true, nil -} diff --git a/builtin/providers/google/resource_container_node_pool_test.go b/builtin/providers/google/resource_container_node_pool_test.go deleted file mode 100644 index a6b0da809..000000000 --- a/builtin/providers/google/resource_container_node_pool_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package google - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccContainerNodePool_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerNodePoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContainerNodePool_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckContainerNodePoolMatches("google_container_node_pool.np"), - ), - }, - }, - }) -} - -func testAccCheckContainerNodePoolDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range 
s.RootModule().Resources { - if rs.Type != "google_container_node_pool" { - continue - } - - attributes := rs.Primary.Attributes - _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() - if err == nil { - return fmt.Errorf("NodePool still exists") - } - } - - return nil -} - -func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - attributes := rs.Primary.Attributes - found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() - if err != nil { - return err - } - - if found.Name != attributes["name"] { - return fmt.Errorf("NodePool not found") - } - - inc, err := strconv.Atoi(attributes["initial_node_count"]) - if err != nil { - return err - } - if found.InitialNodeCount != int64(inc) { - return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. 
GCP State: %d", - attributes["initial_node_count"], found.InitialNodeCount) - } - return nil - } -} - -var testAccContainerNodePool_basic = fmt.Sprintf(` -resource "google_container_cluster" "cluster" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" - initial_node_count = 3 - - master_auth { - username = "mr.yoda" - password = "adoy.rm" - } -} - -resource "google_container_node_pool" "np" { - name = "tf-nodepool-test-%s" - zone = "us-central1-a" - cluster = "${google_container_cluster.cluster.name}" - initial_node_count = 2 -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_dns_managed_zone.go b/builtin/providers/google/resource_dns_managed_zone.go deleted file mode 100644 index a934460c0..000000000 --- a/builtin/providers/google/resource_dns_managed_zone.go +++ /dev/null @@ -1,127 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/dns/v1" -) - -func resourceDnsManagedZone() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsManagedZoneCreate, - Read: resourceDnsManagedZoneRead, - Delete: resourceDnsManagedZoneDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "dns_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Managed by Terraform", - }, - - "name_servers": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. 
- - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Build the parameter - zone := &dns.ManagedZone{ - Name: d.Get("name").(string), - DnsName: d.Get("dns_name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - zone.Description = v.(string) - } - if v, ok := d.GetOk("dns_name"); ok { - zone.DnsName = v.(string) - } - - log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) - zone, err = config.clientDns.ManagedZones.Create(project, zone).Do() - if err != nil { - return fmt.Errorf("Error creating DNS ManagedZone: %s", err) - } - - d.SetId(zone.Name) - - return resourceDnsManagedZoneRead(d, meta) -} - -func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := config.clientDns.ManagedZones.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DNS Managed Zone %q", d.Get("name").(string))) - } - - d.Set("name_servers", zone.NameServers) - d.Set("name", zone.Name) - d.Set("dns_name", zone.DnsName) - d.Set("description", zone.Description) - - return nil -} - -func resourceDnsManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - err = config.clientDns.ManagedZones.Delete(project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting DNS ManagedZone: %s", err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_dns_managed_zone_test.go b/builtin/providers/google/resource_dns_managed_zone_test.go deleted file mode 100644 index 
73d55128f..000000000 --- a/builtin/providers/google/resource_dns_managed_zone_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/dns/v1" -) - -func TestAccDnsManagedZone_basic(t *testing.T) { - var zone dns.ManagedZone - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsManagedZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsManagedZone_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsManagedZoneExists( - "google_dns_managed_zone.foobar", &zone), - ), - }, - }, - }) -} - -func testAccCheckDnsManagedZoneDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_dns_zone" { - continue - } - - _, err := config.clientDns.ManagedZones.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("DNS ManagedZone still exists") - } - } - - return nil -} - -func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientDns.ManagedZones.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("DNS Zone not found") - } - - *zone = *found - - return nil - } -} - -var testAccDnsManagedZone_basic = fmt.Sprintf(` -resource "google_dns_managed_zone" "foobar" { - name = "mzone-test-%s" - dns_name = "hashicorptest.com." 
-}`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_dns_record_set.go b/builtin/providers/google/resource_dns_record_set.go deleted file mode 100644 index 0f322bd86..000000000 --- a/builtin/providers/google/resource_dns_record_set.go +++ /dev/null @@ -1,249 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/dns/v1" -) - -func resourceDnsRecordSet() *schema.Resource { - return &schema.Resource{ - Create: resourceDnsRecordSetCreate, - Read: resourceDnsRecordSetRead, - Delete: resourceDnsRecordSetDelete, - Update: resourceDnsRecordSetUpdate, - - Schema: map[string]*schema.Schema{ - "managed_zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "rrdatas": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - - // Build the change - chg := &dns.Change{ - Additions: []*dns.ResourceRecordSet{ - &dns.ResourceRecordSet{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Ttl: int64(d.Get("ttl").(int)), - Rrdatas: rrdata(d), - }, - }, - } - - log.Printf("[DEBUG] DNS Record create request: %#v", chg) - chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() - if err != nil { - return fmt.Errorf("Error creating DNS RecordSet: %s", err) - } - - d.SetId(chg.Id) - - w 
:= &DnsChangeWaiter{ - Service: config.clientDns, - Change: chg, - Project: project, - ManagedZone: zone, - } - _, err = w.Conf().WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - return resourceDnsRecordSetRead(d, meta) -} - -func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - - // name and type are effectively the 'key' - name := d.Get("name").(string) - dnsType := d.Get("type").(string) - - resp, err := config.clientDns.ResourceRecordSets.List( - project, zone).Name(name).Type(dnsType).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) - } - if len(resp.Rrsets) == 0 { - // The resource doesn't exist anymore - d.SetId("") - return nil - } - - if len(resp.Rrsets) > 1 { - return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) - } - - d.Set("ttl", resp.Rrsets[0].Ttl) - d.Set("rrdatas", resp.Rrsets[0].Rrdatas) - - return nil -} - -func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - - // Build the change - chg := &dns.Change{ - Deletions: []*dns.ResourceRecordSet{ - &dns.ResourceRecordSet{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Ttl: int64(d.Get("ttl").(int)), - Rrdatas: rrdata(d), - }, - }, - } - - log.Printf("[DEBUG] DNS Record delete request: %#v", chg) - chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() - if err != nil { - return fmt.Errorf("Error deleting DNS RecordSet: %s", err) - } - - w := &DnsChangeWaiter{ - Service: config.clientDns, - Change: chg, - Project: project, - ManagedZone: zone, - } - _, err = 
w.Conf().WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - d.SetId("") - return nil -} - -func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - recordName := d.Get("name").(string) - - oldTtl, newTtl := d.GetChange("ttl") - oldType, newType := d.GetChange("type") - - oldCountRaw, _ := d.GetChange("rrdatas.#") - oldCount := oldCountRaw.(int) - - chg := &dns.Change{ - Deletions: []*dns.ResourceRecordSet{ - &dns.ResourceRecordSet{ - Name: recordName, - Type: oldType.(string), - Ttl: int64(oldTtl.(int)), - Rrdatas: make([]string, oldCount), - }, - }, - Additions: []*dns.ResourceRecordSet{ - &dns.ResourceRecordSet{ - Name: recordName, - Type: newType.(string), - Ttl: int64(newTtl.(int)), - Rrdatas: rrdata(d), - }, - }, - } - - for i := 0; i < oldCount; i++ { - rrKey := fmt.Sprintf("rrdatas.%d", i) - oldRR, _ := d.GetChange(rrKey) - chg.Deletions[0].Rrdatas[i] = oldRR.(string) - } - log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) - chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() - if err != nil { - return fmt.Errorf("Error changing DNS RecordSet: %s", err) - } - - w := &DnsChangeWaiter{ - Service: config.clientDns, - Change: chg, - Project: project, - ManagedZone: zone, - } - if _, err = w.Conf().WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - return resourceDnsRecordSetRead(d, meta) -} - -func rrdata( - d *schema.ResourceData, -) []string { - rrdatasCount := d.Get("rrdatas.#").(int) - data := make([]string, rrdatasCount) - for i := 0; i < rrdatasCount; i++ { - data[i] = d.Get(fmt.Sprintf("rrdatas.%d", i)).(string) - } - return data -} diff --git 
a/builtin/providers/google/resource_dns_record_set_test.go b/builtin/providers/google/resource_dns_record_set_test.go deleted file mode 100644 index 35e1ac347..000000000 --- a/builtin/providers/google/resource_dns_record_set_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDnsRecordSet_basic(t *testing.T) { - zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsRecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - }, - }) -} - -func TestAccDnsRecordSet_modify(t *testing.T) { - zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsRecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - }, - }) -} - -func 
TestAccDnsRecordSet_changeType(t *testing.T) { - zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDnsRecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - resource.TestStep{ - Config: testAccDnsRecordSet_bigChange(zoneName, 600), - Check: resource.ComposeTestCheckFunc( - testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar", zoneName), - ), - }, - }, - }) -} - -func testAccCheckDnsRecordSetDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - // Deletion of the managed_zone implies everything is gone - if rs.Type == "google_dns_managed_zone" { - _, err := config.clientDns.ManagedZones.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("DNS ManagedZone still exists") - } - } - } - - return nil -} - -func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceType] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - dnsName := rs.Primary.Attributes["name"] - dnsType := rs.Primary.Attributes["type"] - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - resp, err := config.clientDns.ResourceRecordSets.List( - config.Project, resourceName).Name(dnsName).Type(dnsType).Do() - if err != nil { - return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) - } - switch len(resp.Rrsets) { - case 0: - // The resource doesn't exist anymore - return fmt.Errorf("DNS RecordSet not 
found") - case 1: - return nil - default: - return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) - } - } -} - -func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { - return fmt.Sprintf(` - resource "google_dns_managed_zone" "parent-zone" { - name = "%s" - dns_name = "hashicorptest.com." - description = "Test Description" - } - resource "google_dns_record_set" "foobar" { - managed_zone = "${google_dns_managed_zone.parent-zone.name}" - name = "test-record.hashicorptest.com." - type = "A" - rrdatas = ["127.0.0.1", "%s"] - ttl = %d - } - `, zoneName, addr2, ttl) -} - -func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { - return fmt.Sprintf(` - resource "google_dns_managed_zone" "parent-zone" { - name = "%s" - dns_name = "hashicorptest.com." - description = "Test Description" - } - resource "google_dns_record_set" "foobar" { - managed_zone = "${google_dns_managed_zone.parent-zone.name}" - name = "test-record.hashicorptest.com." - type = "CNAME" - rrdatas = ["www.terraform.io."] - ttl = %d - } - `, zoneName, ttl) -} diff --git a/builtin/providers/google/resource_google_project.go b/builtin/providers/google/resource_google_project.go deleted file mode 100644 index 4e71d0d46..000000000 --- a/builtin/providers/google/resource_google_project.go +++ /dev/null @@ -1,230 +0,0 @@ -package google - -import ( - "fmt" - "log" - "net/http" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/cloudbilling/v1" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/googleapi" -) - -// resourceGoogleProject returns a *schema.Resource that allows a customer -// to declare a Google Cloud Project resource. 
-func resourceGoogleProject() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - Create: resourceGoogleProjectCreate, - Read: resourceGoogleProjectRead, - Update: resourceGoogleProjectUpdate, - Delete: resourceGoogleProjectDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - MigrateState: resourceGoogleProjectMigrateState, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "The id field has been removed. Use project_id instead.", - }, - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "skip_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "org_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", - }, - "policy_etag": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", - }, - "number": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "billing_account": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - var pid string - var err error - pid = d.Get("project_id").(string) - - log.Printf("[DEBUG]: Creating new project %q", pid) - project := &cloudresourcemanager.Project{ - ProjectId: pid, - Name: d.Get("name").(string), - Parent: &cloudresourcemanager.ResourceId{ - Id: d.Get("org_id").(string), - Type: "organization", - }, - } - - op, err := 
config.clientResourceManager.Projects.Create(project).Do() - if err != nil { - return fmt.Errorf("Error creating project %s (%s): %s.", project.ProjectId, project.Name, err) - } - - d.SetId(pid) - - // Wait for the operation to complete - waitErr := resourceManagerOperationWait(config, op, "project to create") - if waitErr != nil { - // The resource wasn't actually created - d.SetId("") - return waitErr - } - - // Set the billing account - if v, ok := d.GetOk("billing_account"); ok { - name := v.(string) - ba := cloudbilling.ProjectBillingInfo{ - BillingAccountName: "billingAccounts/" + name, - } - _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() - if err != nil { - d.Set("billing_account", "") - if _err, ok := err.(*googleapi.Error); ok { - return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err) - } - return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err) - } - } - - return resourceGoogleProjectRead(d, meta) -} - -func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - pid := d.Id() - - // Read the project - p, err := config.clientResourceManager.Projects.Get(pid).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) - } - - d.Set("project_id", pid) - d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) - d.Set("name", p.Name) - - if p.Parent != nil { - d.Set("org_id", p.Parent.Id) - } - - // Read the billing account - ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() - if err != nil { - return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) - } - if ba.BillingAccountName != "" { - // BillingAccountName is contains the resource name of the billing account - // associated with the project, if any. 
For example, - // `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not - // the `billingAccounts/` prefix, so we need to remove that. If the - // prefix ever changes, we'll validate to make sure it's something we - // recognize. - _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") - if ba.BillingAccountName == _ba { - return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName) - } - d.Set("billing_account", _ba) - } - return nil -} - -func prefixedProject(pid string) string { - return "projects/" + pid -} - -func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - pid := d.Id() - - // Read the project - // we need the project even though refresh has already been called - // because the API doesn't support patch, so we need the actual object - p, err := config.clientResourceManager.Projects.Get(pid).Do() - if err != nil { - if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { - return fmt.Errorf("Project %q does not exist.", pid) - } - return fmt.Errorf("Error checking project %q: %s", pid, err) - } - - // Project name has changed - if ok := d.HasChange("name"); ok { - p.Name = d.Get("name").(string) - // Do update on project - p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() - if err != nil { - return fmt.Errorf("Error updating project %q: %s", p.Name, err) - } - } - - // Billing account has changed - if ok := d.HasChange("billing_account"); ok { - name := d.Get("billing_account").(string) - ba := cloudbilling.ProjectBillingInfo{ - BillingAccountName: "billingAccounts/" + name, - } - _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() - if err != nil { - d.Set("billing_account", "") - if _err, ok := err.(*googleapi.Error); ok { - return fmt.Errorf("Error updating billing account %q for 
project %q: %v", name, prefixedProject(pid), _err) - } - return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), err) - } - } - return nil -} - -func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - // Only delete projects if skip_delete isn't set - if !d.Get("skip_delete").(bool) { - pid := d.Id() - _, err := config.clientResourceManager.Projects.Delete(pid).Do() - if err != nil { - return fmt.Errorf("Error deleting project %q: %s", pid, err) - } - } - d.SetId("") - return nil -} diff --git a/builtin/providers/google/resource_google_project_iam_policy.go b/builtin/providers/google/resource_google_project_iam_policy.go deleted file mode 100644 index 4b2ec79b7..000000000 --- a/builtin/providers/google/resource_google_project_iam_policy.go +++ /dev/null @@ -1,419 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -func resourceGoogleProjectIamPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleProjectIamPolicyCreate, - Read: resourceGoogleProjectIamPolicyRead, - Update: resourceGoogleProjectIamPolicyUpdate, - Delete: resourceGoogleProjectIamPolicyDelete, - - Schema: map[string]*schema.Schema{ - "project": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy_data": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: jsonPolicyDiffSuppress, - }, - "authoritative": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "etag": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "restore_policy": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "disable_project": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - }, - } -} - -func resourceGoogleProjectIamPolicyCreate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - pid := d.Get("project").(string) - // Get the policy in the template - p, err := getResourceIamPolicy(d) - if err != nil { - return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) - } - - // An authoritative policy is applied without regard for any existing IAM - // policy. - if v, ok := d.GetOk("authoritative"); ok && v.(bool) { - log.Printf("[DEBUG] Setting authoritative IAM policy for project %q", pid) - err := setProjectIamPolicy(p, config, pid) - if err != nil { - return err - } - } else { - log.Printf("[DEBUG] Setting non-authoritative IAM policy for project %q", pid) - // This is a non-authoritative policy, meaning it should be merged with - // any existing policy - ep, err := getProjectIamPolicy(pid, config) - if err != nil { - return err - } - - // First, subtract the policy defined in the template from the - // current policy in the project, and save the result. This will - // allow us to restore the original policy at some point (which - // assumes that Terraform owns any common policy that exists in - // the template and project at create time. 
- rp := subtractIamPolicy(ep, p) - rps, err := json.Marshal(rp) - if err != nil { - return fmt.Errorf("Error marshaling restorable IAM policy: %v", err) - } - d.Set("restore_policy", string(rps)) - - // Merge the policies together - mb := mergeBindings(append(p.Bindings, rp.Bindings...)) - ep.Bindings = mb - if err = setProjectIamPolicy(ep, config, pid); err != nil { - return fmt.Errorf("Error applying IAM policy to project: %v", err) - } - } - d.SetId(pid) - return resourceGoogleProjectIamPolicyRead(d, meta) -} - -func resourceGoogleProjectIamPolicyRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Reading google_project_iam_policy") - config := meta.(*Config) - pid := d.Get("project").(string) - - p, err := getProjectIamPolicy(pid, config) - if err != nil { - return err - } - - var bindings []*cloudresourcemanager.Binding - if v, ok := d.GetOk("restore_policy"); ok { - var restored cloudresourcemanager.Policy - // if there's a restore policy, subtract it from the policy_data - err := json.Unmarshal([]byte(v.(string)), &restored) - if err != nil { - return fmt.Errorf("Error unmarshaling restorable IAM policy: %v", err) - } - subtracted := subtractIamPolicy(p, &restored) - bindings = subtracted.Bindings - } else { - bindings = p.Bindings - } - // we only marshal the bindings, because only the bindings get set in the config - pBytes, err := json.Marshal(&cloudresourcemanager.Policy{Bindings: bindings}) - if err != nil { - return fmt.Errorf("Error marshaling IAM policy: %v", err) - } - log.Printf("[DEBUG]: Setting etag=%s", p.Etag) - d.Set("etag", p.Etag) - d.Set("policy_data", string(pBytes)) - return nil -} - -func resourceGoogleProjectIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Updating google_project_iam_policy") - config := meta.(*Config) - pid := d.Get("project").(string) - - // Get the policy in the template - p, err := getResourceIamPolicy(d) - if err != nil { - return fmt.Errorf("Could 
not get valid 'policy_data' from resource: %v", err) - } - pBytes, _ := json.Marshal(p) - log.Printf("[DEBUG] Got policy from config: %s", string(pBytes)) - - // An authoritative policy is applied without regard for any existing IAM - // policy. - if v, ok := d.GetOk("authoritative"); ok && v.(bool) { - log.Printf("[DEBUG] Updating authoritative IAM policy for project %q", pid) - err := setProjectIamPolicy(p, config, pid) - if err != nil { - return fmt.Errorf("Error setting project IAM policy: %v", err) - } - d.Set("restore_policy", "") - } else { - log.Printf("[DEBUG] Updating non-authoritative IAM policy for project %q", pid) - // Get the previous policy from state - pp, err := getPrevResourceIamPolicy(d) - if err != nil { - return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) - } - ppBytes, _ := json.Marshal(pp) - log.Printf("[DEBUG] Got previous version of changed project IAM policy: %s", string(ppBytes)) - - // Get the existing IAM policy from the API - ep, err := getProjectIamPolicy(pid, config) - if err != nil { - return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) - } - epBytes, _ := json.Marshal(ep) - log.Printf("[DEBUG] Got existing version of changed IAM policy from project API: %s", string(epBytes)) - - // Subtract the previous and current policies from the policy retrieved from the API - rp := subtractIamPolicy(ep, pp) - rpBytes, _ := json.Marshal(rp) - log.Printf("[DEBUG] After subtracting the previous policy from the existing policy, remaining policies: %s", string(rpBytes)) - rp = subtractIamPolicy(rp, p) - rpBytes, _ = json.Marshal(rp) - log.Printf("[DEBUG] After subtracting the remaining policies from the config policy, remaining policies: %s", string(rpBytes)) - rps, err := json.Marshal(rp) - if err != nil { - return fmt.Errorf("Error marhsaling restorable IAM policy: %v", err) - } - d.Set("restore_policy", string(rps)) - - // Merge the policies together - mb := 
mergeBindings(append(p.Bindings, rp.Bindings...)) - ep.Bindings = mb - if err = setProjectIamPolicy(ep, config, pid); err != nil { - return fmt.Errorf("Error applying IAM policy to project: %v", err) - } - } - - return resourceGoogleProjectIamPolicyRead(d, meta) -} - -func resourceGoogleProjectIamPolicyDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Deleting google_project_iam_policy") - config := meta.(*Config) - pid := d.Get("project").(string) - - // Get the existing IAM policy from the API - ep, err := getProjectIamPolicy(pid, config) - if err != nil { - return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) - } - // Deleting an authoritative policy will leave the project with no policy, - // and unaccessible by anyone without org-level privs. For this reason, the - // "disable_project" property must be set to true, forcing the user to ack - // this outcome - if v, ok := d.GetOk("authoritative"); ok && v.(bool) { - if v, ok := d.GetOk("disable_project"); !ok || !v.(bool) { - return fmt.Errorf("You must set 'disable_project' to true before deleting an authoritative IAM policy") - } - ep.Bindings = make([]*cloudresourcemanager.Binding, 0) - - } else { - // A non-authoritative policy should set the policy to the value of "restore_policy" in state - // Get the previous policy from state - rp, err := getRestoreIamPolicy(d) - if err != nil { - return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) - } - ep.Bindings = rp.Bindings - } - if err = setProjectIamPolicy(ep, config, pid); err != nil { - return fmt.Errorf("Error applying IAM policy to project: %v", err) - } - d.SetId("") - return nil -} - -// Subtract all bindings in policy b from policy a, and return the result -func subtractIamPolicy(a, b *cloudresourcemanager.Policy) *cloudresourcemanager.Policy { - am := rolesToMembersMap(a.Bindings) - - for _, b := range b.Bindings { - if _, ok := am[b.Role]; ok { - for _, m := 
range b.Members { - delete(am[b.Role], m) - } - if len(am[b.Role]) == 0 { - delete(am, b.Role) - } - } - } - a.Bindings = rolesToMembersBinding(am) - return a -} - -func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pid string) error { - // Apply the policy - pbytes, _ := json.Marshal(policy) - log.Printf("[DEBUG] Setting policy %#v for project: %s", string(pbytes), pid) - _, err := config.clientResourceManager.Projects.SetIamPolicy(pid, - &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do() - - if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q. Policy is %#v, error is %s", pid, policy, err) - } - return nil -} - -// Get a cloudresourcemanager.Policy from a schema.ResourceData -func getResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { - ps := d.Get("policy_data").(string) - // The policy string is just a marshaled cloudresourcemanager.Policy. - policy := &cloudresourcemanager.Policy{} - if err := json.Unmarshal([]byte(ps), policy); err != nil { - return nil, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) - } - return policy, nil -} - -// Get the previous cloudresourcemanager.Policy from a schema.ResourceData if the -// resource has changed -func getPrevResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { - var policy *cloudresourcemanager.Policy = &cloudresourcemanager.Policy{} - if d.HasChange("policy_data") { - v, _ := d.GetChange("policy_data") - if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { - return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) - } - } - return policy, nil -} - -// Get the restore_policy that can be used to restore a project's IAM policy to its -// state before it was adopted into Terraform -func getRestoreIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { - if v, ok := d.GetOk("restore_policy"); ok { - policy := &cloudresourcemanager.Policy{} 
- if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { - return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) - } - return policy, nil - } - return nil, fmt.Errorf("Resource does not have a 'restore_policy' attribute defined.") -} - -// Retrieve the existing IAM Policy for a Project -func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { - p, err := config.clientResourceManager.Projects.GetIamPolicy(project, - &cloudresourcemanager.GetIamPolicyRequest{}).Do() - - if err != nil { - return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) - } - return p, nil -} - -// Convert a map of roles->members to a list of Binding -func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { - bindings := make([]*cloudresourcemanager.Binding, 0) - for role, members := range m { - b := cloudresourcemanager.Binding{ - Role: role, - Members: make([]string, 0), - } - for m, _ := range members { - b.Members = append(b.Members, m) - } - bindings = append(bindings, &b) - } - return bindings -} - -// Map a role to a map of members, allowing easy merging of multiple bindings. 
-func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool { - bm := make(map[string]map[string]bool) - // Get each binding - for _, b := range bindings { - // Initialize members map - if _, ok := bm[b.Role]; !ok { - bm[b.Role] = make(map[string]bool) - } - // Get each member (user/principal) for the binding - for _, m := range b.Members { - // Add the member - bm[b.Role][m] = true - } - } - return bm -} - -// Merge multiple Bindings such that Bindings with the same Role result in -// a single Binding with combined Members -func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { - bm := rolesToMembersMap(bindings) - rb := make([]*cloudresourcemanager.Binding, 0) - - for role, members := range bm { - var b cloudresourcemanager.Binding - b.Role = role - b.Members = make([]string, 0) - for m, _ := range members { - b.Members = append(b.Members, m) - } - rb = append(rb, &b) - } - - return rb -} - -func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - var oldPolicy, newPolicy cloudresourcemanager.Policy - if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil { - log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) - return false - } - if err := json.Unmarshal([]byte(new), &newPolicy); err != nil { - log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) - return false - } - oldPolicy.Bindings = mergeBindings(oldPolicy.Bindings) - newPolicy.Bindings = mergeBindings(newPolicy.Bindings) - if newPolicy.Etag != oldPolicy.Etag { - return false - } - if newPolicy.Version != oldPolicy.Version { - return false - } - if len(newPolicy.Bindings) != len(oldPolicy.Bindings) { - return false - } - sort.Sort(sortableBindings(newPolicy.Bindings)) - sort.Sort(sortableBindings(oldPolicy.Bindings)) - for pos, newBinding := range newPolicy.Bindings { - oldBinding := oldPolicy.Bindings[pos] - if oldBinding.Role != newBinding.Role { - return false - } - 
if len(oldBinding.Members) != len(newBinding.Members) { - return false - } - sort.Strings(oldBinding.Members) - sort.Strings(newBinding.Members) - for i, newMember := range newBinding.Members { - oldMember := oldBinding.Members[i] - if newMember != oldMember { - return false - } - } - } - return true -} - -type sortableBindings []*cloudresourcemanager.Binding - -func (b sortableBindings) Len() int { - return len(b) -} -func (b sortableBindings) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} -func (b sortableBindings) Less(i, j int) bool { - return b[i].Role < b[j].Role -} diff --git a/builtin/providers/google/resource_google_project_iam_policy_test.go b/builtin/providers/google/resource_google_project_iam_policy_test.go deleted file mode 100644 index 24052c961..000000000 --- a/builtin/providers/google/resource_google_project_iam_policy_test.go +++ /dev/null @@ -1,707 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/cloudresourcemanager/v1" -) - -func TestSubtractIamPolicy(t *testing.T) { - table := []struct { - a *cloudresourcemanager.Policy - b *cloudresourcemanager.Policy - expect cloudresourcemanager.Policy - }{ - { - a: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - }, - }, - }, - }, - b: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "3", - "4", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - }, - }, - }, - }, - expect: cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - }, - }, - }, - }, - }, - { - a: &cloudresourcemanager.Policy{ - Bindings: 
[]*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - }, - }, - }, - }, - b: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - }, - }, - }, - }, - expect: cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{}, - }, - }, - { - a: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - "3", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - "3", - }, - }, - }, - }, - b: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "3", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - "3", - }, - }, - }, - }, - expect: cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "2", - }, - }, - }, - }, - }, - { - a: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - "3", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - "3", - }, - }, - }, - }, - b: &cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{ - { - Role: "a", - Members: []string{ - "1", - "2", - "3", - }, - }, - { - Role: "b", - Members: []string{ - "1", - "2", - "3", - }, - }, - }, - }, - expect: cloudresourcemanager.Policy{ - Bindings: []*cloudresourcemanager.Binding{}, - }, - }, - } - - for _, test := range table { - c := subtractIamPolicy(test.a, test.b) - sort.Sort(sortableBindings(c.Bindings)) - for i, _ := range c.Bindings { - sort.Strings(c.Bindings[i].Members) - } - - if !reflect.DeepEqual(derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) { - t.Errorf("\ngot %+v\nexpected %+v", derefBindings(c.Bindings), 
derefBindings(test.expect.Bindings)) - } - } -} - -// Test that an IAM policy can be applied to a project -func TestAccGoogleProjectIamPolicy_basic(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project - resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccGoogleProjectExistingPolicy(pid), - ), - }, - // Apply an IAM policy from a data source. The application - // merges policies, so we validate the expected state. - resource.TestStep{ - Config: testAccGoogleProjectAssociatePolicyBasic(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectIamPolicyIsMerged("google_project_iam_policy.acceptance", "data.google_iam_policy.admin", pid), - ), - }, - // Finally, remove the custom IAM policy from config and apply, then - // confirm that the project is in its original state. 
- resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccGoogleProjectExistingPolicy(pid), - ), - }, - }, - }) -} - -// Test that a non-collapsed IAM policy doesn't perpetually diff -func TestAccGoogleProjectIamPolicy_expanded(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGoogleProjectAssociatePolicyExpanded(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid), - ), - }, - }, - }) -} - -func getStatePrimaryResource(s *terraform.State, res, expectedID string) (*terraform.InstanceState, error) { - // Get the project resource - resource, ok := s.RootModule().Resources[res] - if !ok { - return nil, fmt.Errorf("Not found: %s", res) - } - if resource.Primary.Attributes["id"] != expectedID && expectedID != "" { - return nil, fmt.Errorf("Expected project %q to match ID %q in state", resource.Primary.ID, expectedID) - } - return resource.Primary, nil -} - -func getGoogleProjectIamPolicyFromResource(resource *terraform.InstanceState) (cloudresourcemanager.Policy, error) { - var p cloudresourcemanager.Policy - ps, ok := resource.Attributes["policy_data"] - if !ok { - return p, fmt.Errorf("Resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", resource.ID, resource.Attributes) - } - if err := json.Unmarshal([]byte(ps), &p); err != nil { - return p, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) - } - return p, nil -} - -func getGoogleProjectIamPolicyFromState(s *terraform.State, res, expectedID string) (cloudresourcemanager.Policy, error) { - project, err := getStatePrimaryResource(s, res, expectedID) - if err != nil { - return cloudresourcemanager.Policy{}, err - } - return getGoogleProjectIamPolicyFromResource(project) -} - -func compareBindings(a, b []*cloudresourcemanager.Binding) bool { - a = mergeBindings(a) - b = mergeBindings(b) - sort.Sort(sortableBindings(a)) - sort.Sort(sortableBindings(b)) - return reflect.DeepEqual(derefBindings(a), derefBindings(b)) -} - -func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) - if err != nil { - return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) - } - policyPolicy, err := getGoogleProjectIamPolicyFromState(s, policyRes, "") - if err != nil { - return fmt.Errorf("Error retrieving IAM policy for data_policy from state: %s", err) - } - - // The bindings in both policies should be identical - if !compareBindings(projectPolicy.Bindings, policyPolicy.Bindings) { - return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectPolicy.Bindings), derefBindings(policyPolicy.Bindings)) - } - return nil - } -} - -func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - err := testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid)(s) - if err != nil { - return err - } - - projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) - if err != 
nil { - return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) - } - - // Merge the project policy in Terraform state with the policy the project had before the config was applied - var expected []*cloudresourcemanager.Binding - expected = append(expected, originalPolicy.Bindings...) - expected = append(expected, projectPolicy.Bindings...) - expected = mergeBindings(expected) - - // Retrieve the actual policy from the project - c := testAccProvider.Meta().(*Config) - actual, err := getProjectIamPolicy(pid, c) - if err != nil { - return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) - } - // The bindings should match, indicating the policy was successfully applied and merged - if !compareBindings(actual.Bindings, expected) { - return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actual.Bindings), derefBindings(expected)) - } - - return nil - } -} - -func TestIamRolesToMembersBinding(t *testing.T) { - table := []struct { - expect []*cloudresourcemanager.Binding - input map[string]map[string]bool - }{ - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{}, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{}, - }, - }, - } - - for _, test := range table { - got := rolesToMembersBinding(test.input) - - sort.Sort(sortableBindings(got)) - for i, _ := range got { - 
sort.Strings(got[i].Members) - } - - if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) { - t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect)) - } - } -} -func TestIamRolesToMembersMap(t *testing.T) { - table := []struct { - input []*cloudresourcemanager.Binding - expect map[string]map[string]bool - }{ - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-1", - "member-2", - }, - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{}, - }, - }, - } - - for _, test := range table { - got := rolesToMembersMap(test.input) - if !reflect.DeepEqual(got, test.expect) { - t.Errorf("got %+v, expected %+v", got, test.expect) - } - } -} - -func TestIamMergeBindings(t *testing.T) { - table := []struct { - input []*cloudresourcemanager.Binding - expect []cloudresourcemanager.Binding - }{ - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - { - Role: "role-1", - Members: []string{ - "member-3", - }, - }, - }, - expect: []cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-3", - }, - }, - }, - }, - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-3", - "member-4", - }, - }, - { - Role: "role-1", - Members: []string{ - "member-2", - "member-1", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-1", - }, 
- }, - { - Role: "role-1", - Members: []string{ - "member-5", - }, - }, - { - Role: "role-3", - Members: []string{ - "member-1", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-2", - }, - }, - }, - expect: []cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-3", - "member-4", - "member-5", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-1", - "member-2", - }, - }, - { - Role: "role-3", - Members: []string{ - "member-1", - }, - }, - }, - }, - } - - for _, test := range table { - got := mergeBindings(test.input) - sort.Sort(sortableBindings(got)) - for i, _ := range got { - sort.Strings(got[i].Members) - } - - if !reflect.DeepEqual(derefBindings(got), test.expect) { - t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect) - } - } -} - -func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { - db := make([]cloudresourcemanager.Binding, len(b)) - - for i, v := range b { - db[i] = *v - sort.Strings(db[i].Members) - } - return db -} - -// Confirm that a project has an IAM policy with at least 1 binding -func testAccGoogleProjectExistingPolicy(pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := testAccProvider.Meta().(*Config) - var err error - originalPolicy, err = getProjectIamPolicy(pid, c) - if err != nil { - return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) - } - if len(originalPolicy.Bindings) == 0 { - return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. 
This is likely an error in the test code that is not properly identifying the IAM policy of a project.") - } - return nil - } -} - -func testAccGoogleProjectAssociatePolicyBasic(pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} -resource "google_project_iam_policy" "acceptance" { - project = "${google_project.acceptance.id}" - policy_data = "${data.google_iam_policy.admin.policy_data}" -} -data "google_iam_policy" "admin" { - binding { - role = "roles/storage.objectViewer" - members = [ - "user:evanbrown@google.com", - ] - } - binding { - role = "roles/compute.instanceAdmin" - members = [ - "user:evanbrown@google.com", - "user:evandbrown@gmail.com", - ] - } -} -`, pid, name, org) -} - -func testAccGoogleProject_create(pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -}`, pid, name, org) -} - -func testAccGoogleProject_createBilling(pid, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -}`, pid, name, org, billing) -} - -func testAccGoogleProjectAssociatePolicyExpanded(pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} -resource "google_project_iam_policy" "acceptance" { - project = "${google_project.acceptance.id}" - policy_data = "${data.google_iam_policy.expanded.policy_data}" - authoritative = false -} -data "google_iam_policy" "expanded" { - binding { - role = "roles/viewer" - members = [ - "user:paddy@carvers.co", - ] - } - - binding { - role = "roles/viewer" - members = [ - "user:paddy@hashicorp.com", - ] - } -}`, pid, name, org) -} diff --git a/builtin/providers/google/resource_google_project_migrate.go 
b/builtin/providers/google/resource_google_project_migrate.go deleted file mode 100644 index 09fccd311..000000000 --- a/builtin/providers/google/resource_google_project_migrate.go +++ /dev/null @@ -1,47 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if s.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return s, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Google Project State v0; migrating to v1") - s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) - if err != nil { - return s, err - } - return s, nil - default: - return s, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -// This migration adjusts google_project resources to include several additional attributes -// required to support project creation/deletion that was added in V1. -func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) - - s.Attributes["skip_delete"] = "true" - s.Attributes["project_id"] = s.ID - - if s.Attributes["policy_data"] != "" { - p, err := getProjectIamPolicy(s.ID, config) - if err != nil { - return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) - } - s.Attributes["policy_etag"] = p.Etag - } - - log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) - return s, nil -} diff --git a/builtin/providers/google/resource_google_project_migrate_test.go b/builtin/providers/google/resource_google_project_migrate_test.go deleted file mode 100644 index 8aeff3640..000000000 --- a/builtin/providers/google/resource_google_project_migrate_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package google - -import ( - "testing" - - 
"github.com/hashicorp/terraform/terraform" -) - -func TestGoogleProjectMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "deprecate policy_data and support creation/deletion": { - StateVersion: 0, - Attributes: map[string]string{}, - Expected: map[string]string{ - "project_id": "test-project", - "skip_delete": "true", - }, - Meta: &Config{}, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "test-project", - Attributes: tc.Attributes, - } - is, err := resourceGoogleProjectMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestGoogleProjectMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta *Config - - // should handle nil - is, err := resourceGoogleProjectMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceGoogleProjectMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/google/resource_google_project_services.go b/builtin/providers/google/resource_google_project_services.go deleted file mode 100644 index 3a9c66730..000000000 --- a/builtin/providers/google/resource_google_project_services.go +++ /dev/null @@ -1,229 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/servicemanagement/v1" -) - -func resourceGoogleProjectServices() *schema.Resource { - return &schema.Resource{ - Create: 
resourceGoogleProjectServicesCreate, - Read: resourceGoogleProjectServicesRead, - Update: resourceGoogleProjectServicesUpdate, - Delete: resourceGoogleProjectServicesDelete, - - Schema: map[string]*schema.Schema{ - "project": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "services": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -// These services can only be enabled as a side-effect of enabling other services, -// so don't bother storing them in the config or using them for diffing. -var ignore = map[string]struct{}{ - "containeranalysis.googleapis.com": struct{}{}, - "dataproc-control.googleapis.com": struct{}{}, - "source.googleapis.com": struct{}{}, -} - -func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - pid := d.Get("project").(string) - - // Get services from config - cfgServices := getConfigServices(d) - - // Get services from API - apiServices, err := getApiServices(pid, config) - if err != nil { - return fmt.Errorf("Error creating services: %v", err) - } - - // This call disables any APIs that aren't defined in cfgServices, - // and enables all of those that are - err = reconcileServices(cfgServices, apiServices, config, pid) - if err != nil { - return fmt.Errorf("Error creating services: %v", err) - } - - d.SetId(pid) - return resourceGoogleProjectServicesRead(d, meta) -} - -func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - services, err := getApiServices(d.Id(), config) - if err != nil { - return err - } - - d.Set("services", services) - return nil -} - -func resourceGoogleProjectServicesUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Updating google_project_services") - config := meta.(*Config) - pid := d.Get("project").(string) - - // Get services from 
config - cfgServices := getConfigServices(d) - - // Get services from API - apiServices, err := getApiServices(pid, config) - if err != nil { - return fmt.Errorf("Error updating services: %v", err) - } - - // This call disables any APIs that aren't defined in cfgServices, - // and enables all of those that are - err = reconcileServices(cfgServices, apiServices, config, pid) - if err != nil { - return fmt.Errorf("Error updating services: %v", err) - } - - return resourceGoogleProjectServicesRead(d, meta) -} - -func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Deleting google_project_services") - config := meta.(*Config) - services := resourceServices(d) - for _, s := range services { - disableService(s, d.Id(), config) - } - d.SetId("") - return nil -} - -// This function ensures that the services enabled for a project exactly match that -// in a config by disabling any services that are returned by the API but not present -// in the config -func reconcileServices(cfgServices, apiServices []string, config *Config, pid string) error { - // Helper to convert slice to map - m := func(vals []string) map[string]struct{} { - sm := make(map[string]struct{}) - for _, s := range vals { - sm[s] = struct{}{} - } - return sm - } - - cfgMap := m(cfgServices) - apiMap := m(apiServices) - - for k, _ := range apiMap { - if _, ok := cfgMap[k]; !ok { - // The service in the API is not in the config; disable it. 
- err := disableService(k, pid, config) - if err != nil { - return err - } - } else { - // The service exists in the config and the API, so we don't need - // to re-enable it - delete(cfgMap, k) - } - } - - for k, _ := range cfgMap { - err := enableService(k, pid, config) - if err != nil { - return err - } - } - return nil -} - -// Retrieve services defined in a config -func getConfigServices(d *schema.ResourceData) (services []string) { - if v, ok := d.GetOk("services"); ok { - for _, svc := range v.(*schema.Set).List() { - services = append(services, svc.(string)) - } - } - return -} - -// Retrieve a project's services from the API -func getApiServices(pid string, config *Config) ([]string, error) { - apiServices := make([]string, 0) - // Get services from the API - token := "" - for paginate := true; paginate; { - svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do() - if err != nil { - return apiServices, err - } - for _, v := range svcResp.Services { - if _, ok := ignore[v.ServiceName]; !ok { - apiServices = append(apiServices, v.ServiceName) - } - } - token = svcResp.NextPageToken - paginate = token != "" - } - return apiServices, nil -} - -func enableService(s, pid string, config *Config) error { - esr := newEnableServiceRequest(pid) - sop, err := config.clientServiceMan.Services.Enable(s, esr).Do() - if err != nil { - return fmt.Errorf("Error enabling service %q for project %q: %v", s, pid, err) - } - // Wait for the operation to complete - waitErr := serviceManagementOperationWait(config, sop, "api to enable") - if waitErr != nil { - return waitErr - } - return nil -} -func disableService(s, pid string, config *Config) error { - dsr := newDisableServiceRequest(pid) - sop, err := config.clientServiceMan.Services.Disable(s, dsr).Do() - if err != nil { - return fmt.Errorf("Error disabling service %q for project %q: %v", s, pid, err) - } - // Wait for the operation to complete - waitErr := 
serviceManagementOperationWait(config, sop, "api to disable") - if waitErr != nil { - return waitErr - } - return nil -} - -func newEnableServiceRequest(pid string) *servicemanagement.EnableServiceRequest { - return &servicemanagement.EnableServiceRequest{ConsumerId: "project:" + pid} -} - -func newDisableServiceRequest(pid string) *servicemanagement.DisableServiceRequest { - return &servicemanagement.DisableServiceRequest{ConsumerId: "project:" + pid} -} - -func resourceServices(d *schema.ResourceData) []string { - // Calculate the tags - var services []string - if s := d.Get("services"); s != nil { - ss := s.(*schema.Set) - services = make([]string, ss.Len()) - for i, v := range ss.List() { - services[i] = v.(string) - } - } - return services -} diff --git a/builtin/providers/google/resource_google_project_services_test.go b/builtin/providers/google/resource_google_project_services_test.go deleted file mode 100644 index e8af051cd..000000000 --- a/builtin/providers/google/resource_google_project_services_test.go +++ /dev/null @@ -1,291 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "os" - "reflect" - "sort" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/servicemanagement/v1" -) - -// Test that services can be enabled and disabled on a project -func TestAccGoogleProjectServices_basic(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"} - services2 := []string{"cloudresourcemanager.googleapis.com"} - oobService := "iam.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with some services - resource.TestStep{ - Config: testAccGoogleProjectAssociateServicesBasic(services1, pid, pname, org), - 
Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services1, pid), - ), - }, - // Update services to remove one - resource.TestStep{ - Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, pid), - ), - }, - // Add a service out-of-band and ensure it is removed - resource.TestStep{ - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - enableService(oobService, pid, config) - }, - Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, pid), - ), - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// sevices not represented in config -func TestAccGoogleProjectServices_authoritative(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - services := []string{"cloudresourcemanager.googleapis.com"} - oobService := "iam.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no services - resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. 
- resource.TestStep{ - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - enableService(oobService, pid, config) - }, - Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// sevices, some which are represented in the config and others -// that are not -func TestAccGoogleProjectServices_authoritative2(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"} - services := []string{"iam.googleapis.com"} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no services - resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. - resource.TestStep{ - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - for _, s := range oobServices { - enableService(s, pid, config) - } - }, - Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com) -// don't end up causing diffs when they are enabled as a side-effect of a different service's -// enablement. 
-func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - pid := "terraform-" + acctest.RandString(10) - services := []string{ - "dataproc.googleapis.com", - // The following services are enabled as a side-effect of dataproc's enablement - "storage-component.googleapis.com", - "deploymentmanager.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "compute-component.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "storage-api.googleapis.com", - "pubsub.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -func TestAccGoogleProjectServices_manyServices(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - pid := "terraform-" + acctest.RandString(10) - services := []string{ - "bigquery-json.googleapis.com", - "cloudbuild.googleapis.com", - "cloudfunctions.googleapis.com", - "cloudresourcemanager.googleapis.com", - "cloudtrace.googleapis.com", - "compute-component.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "dataflow.googleapis.com", - "dataproc.googleapis.com", - "deploymentmanager.googleapis.com", - "dns.googleapis.com", - "endpoints.googleapis.com", - "iam.googleapis.com", - "logging.googleapis.com", - "ml.googleapis.com", - "monitoring.googleapis.com", - "pubsub.googleapis.com", - "replicapool.googleapis.com", - 
"replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "runtimeconfig.googleapis.com", - "servicecontrol.googleapis.com", - "servicemanagement.googleapis.com", - "sourcerepo.googleapis.com", - "spanner.googleapis.com", - "storage-api.googleapis.com", - "storage-component.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] -} -`, pid, name, org, testStringsToString(services)) -} - -func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] -} -`, pid, name, org, billing, testStringsToString(services)) -} - -func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - apiServices, err := getApiServices(pid, config) - if err != nil { - return fmt.Errorf("Error listing services for project %q: %v", pid, err) - } - - sort.Strings(services) - sort.Strings(apiServices) - if !reflect.DeepEqual(services, apiServices) { - return fmt.Errorf("Services 
in config (%v) do not exactly match services returned by API (%v)", services, apiServices) - } - - return nil - } -} - -func testStringsToString(s []string) string { - var b bytes.Buffer - for i, v := range s { - b.WriteString(fmt.Sprintf("\"%s\"", v)) - if i < len(s)-1 { - b.WriteString(",") - } - } - r := b.String() - log.Printf("[DEBUG]: Converted list of strings to %s", r) - return b.String() -} - -func testManagedServicesToString(svcs []*servicemanagement.ManagedService) string { - var b bytes.Buffer - for _, s := range svcs { - b.WriteString(s.ServiceName) - } - return b.String() -} diff --git a/builtin/providers/google/resource_google_project_test.go b/builtin/providers/google/resource_google_project_test.go deleted file mode 100644 index fea4c7465..000000000 --- a/builtin/providers/google/resource_google_project_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package google - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ( - org = multiEnvSearch([]string{ - "GOOGLE_ORG", - }) - - pname = "Terraform Acceptance Tests" - originalPolicy *cloudresourcemanager.Policy -) - -func multiEnvSearch(ks []string) string { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v - } - } - return "" -} - -// Test that a Project resource can be created and an IAM policy -// associated -func TestAccGoogleProject_create(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // This step imports an existing project - resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - }, - 
}) -} - -// Test that a Project resource can be created with an associated -// billing account -func TestAccGoogleProject_createBilling(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // This step creates a new project with a billing account - resource.TestStep{ - Config: testAccGoogleProject_createBilling(pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), - ), - }, - }, - }) -} - -// Test that a Project resource can be created and updated -// with billing account information -func TestAccGoogleProject_updateBilling(t *testing.T) { - skipIfEnvNotSet(t, - []string{ - "GOOGLE_ORG", - "GOOGLE_BILLING_ACCOUNT", - "GOOGLE_BILLING_ACCOUNT_2", - }..., - ) - - billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") - billingId2 := os.Getenv("GOOGLE_BILLING_ACCOUNT_2") - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // This step creates a new project without a billing account - resource.TestStep{ - Config: testAccGoogleProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Update to include a billing account - resource.TestStep{ - Config: testAccGoogleProject_createBilling(pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), - ), - }, - // Update to a different billing account - resource.TestStep{ - Config: 
testAccGoogleProject_createBilling(pid, pname, org, billingId2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId2), - ), - }, - }, - }) -} - -// Test that a Project resource merges the IAM policies that already -// exist, and won't lock people out. -func TestAccGoogleProject_merge(t *testing.T) { - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // when policy_data is set, merge - { - Config: testAccGoogleProject_toMerge(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - testAccCheckGoogleProjectHasMoreBindingsThan(pid, 1), - ), - }, - // when policy_data is unset, restore to what it was - { - Config: testAccGoogleProject_mergeEmpty(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - testAccCheckGoogleProjectHasMoreBindingsThan(pid, 0), - ), - }, - }, - }) -} - -func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[r] - if !ok { - return fmt.Errorf("Not found: %s", r) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.ID != pid { - return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID) - } - - return nil - } -} - -func testAccCheckGoogleProjectHasBillingAccount(r, pid, billingId string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[r] - if !ok { - return fmt.Errorf("Not found: %s", r) - } - - // State should match expected - if rs.Primary.Attributes["billing_account"] != billingId { - return fmt.Errorf("Billing ID in state (%s) does not match expected value (%s)", 
rs.Primary.Attributes["billing_account"], billingId) - } - - // Actual value in API should match state and expected - // Read the billing account - config := testAccProvider.Meta().(*Config) - ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() - if err != nil { - return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) - } - if billingId != strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") { - return fmt.Errorf("Billing ID returned by API (%s) did not match expected value (%s)", ba.BillingAccountName, billingId) - } - return nil - } -} - -func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resource.TestCheckFunc { - return func(s *terraform.State) error { - policy, err := getProjectIamPolicy(pid, testAccProvider.Meta().(*Config)) - if err != nil { - return err - } - if len(policy.Bindings) <= count { - return fmt.Errorf("Expected more than %d bindings, got %d: %#v", count, len(policy.Bindings), policy.Bindings) - } - return nil - } -} - -func testAccGoogleProject_toMerge(pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} - -resource "google_project_iam_policy" "acceptance" { - project = "${google_project.acceptance.project_id}" - policy_data = "${data.google_iam_policy.acceptance.policy_data}" -} - -data "google_iam_policy" "acceptance" { - binding { - role = "roles/storage.objectViewer" - members = [ - "user:evanbrown@google.com", - ] - } -}`, pid, name, org) -} - -func testAccGoogleProject_mergeEmpty(pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -}`, pid, name, org) -} - -func skipIfEnvNotSet(t *testing.T, envs ...string) { - for _, k := range envs { - if os.Getenv(k) == "" { - t.Skipf("Environment variable %s is not set", k) - } - } -} diff --git 
a/builtin/providers/google/resource_google_service_account.go b/builtin/providers/google/resource_google_service_account.go deleted file mode 100644 index 6e3e6abe1..000000000 --- a/builtin/providers/google/resource_google_service_account.go +++ /dev/null @@ -1,311 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/iam/v1" -) - -func resourceGoogleServiceAccount() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleServiceAccountCreate, - Read: resourceGoogleServiceAccountRead, - Delete: resourceGoogleServiceAccountDelete, - Update: resourceGoogleServiceAccountUpdate, - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "unique_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "account_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "display_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "policy_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - aid := d.Get("account_id").(string) - displayName := d.Get("display_name").(string) - - sa := &iam.ServiceAccount{ - DisplayName: displayName, - } - - r := &iam.CreateServiceAccountRequest{ - AccountId: aid, - ServiceAccount: sa, - } - - sa, err = config.clientIAM.Projects.ServiceAccounts.Create("projects/"+project, r).Do() - if err != nil { - return fmt.Errorf("Error creating service account: %s", err) - } - - d.SetId(sa.Name) - - // Apply the IAM policy if it 
is set - if pString, ok := d.GetOk("policy_data"); ok { - // The policy string is just a marshaled cloudresourcemanager.Policy. - // Unmarshal it to a struct. - var policy iam.Policy - if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil { - return err - } - - // Retrieve existing IAM policy from project. This will be merged - // with the policy defined here. - // TODO(evanbrown): Add an 'authoritative' flag that allows policy - // in manifest to overwrite existing policy. - p, err := getServiceAccountIamPolicy(sa.Name, config) - if err != nil { - return fmt.Errorf("Could not find service account %q when applying IAM policy: %s", sa.Name, err) - } - log.Printf("[DEBUG] Got existing bindings for service account: %#v", p.Bindings) - - // Merge the existing policy bindings with those defined in this manifest. - p.Bindings = saMergeBindings(append(p.Bindings, policy.Bindings...)) - - // Apply the merged policy - log.Printf("[DEBUG] Setting new policy for service account: %#v", p) - _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(sa.Name, - &iam.SetIamPolicyRequest{Policy: p}).Do() - - if err != nil { - return fmt.Errorf("Error applying IAM policy for service account %q: %s", sa.Name, err) - } - } - return resourceGoogleServiceAccountRead(d, meta) -} - -func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Confirm the service account exists - sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) - } - - d.Set("email", sa.Email) - d.Set("unique_id", sa.UniqueId) - d.Set("name", sa.Name) - d.Set("display_name", sa.DisplayName) - return nil -} - -func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - name := d.Id() - _, err := config.clientIAM.Projects.ServiceAccounts.Delete(name).Do() - if err != 
nil { - return err - } - d.SetId("") - return nil -} - -func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - var err error - if ok := d.HasChange("display_name"); ok { - sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() - if err != nil { - return fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err) - } - _, err = config.clientIAM.Projects.ServiceAccounts.Update(d.Id(), - &iam.ServiceAccount{ - DisplayName: d.Get("display_name").(string), - Etag: sa.Etag, - }).Do() - if err != nil { - return fmt.Errorf("Error updating service account %q: %s", d.Id(), err) - } - } - - if ok := d.HasChange("policy_data"); ok { - // The policy string is just a marshaled cloudresourcemanager.Policy. - // Unmarshal it to a struct that contains the old and new policies - oldP, newP := d.GetChange("policy_data") - oldPString := oldP.(string) - newPString := newP.(string) - - // JSON Unmarshaling would fail - if oldPString == "" { - oldPString = "{}" - } - if newPString == "" { - newPString = "{}" - } - - log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", string(oldPString), string(newPString)) - - var oldPolicy, newPolicy iam.Policy - if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil { - return err - } - if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { - return err - } - - // Find any Roles and Members that were removed (i.e., those that are present - // in the old but absent in the new - oldMap := saRolesToMembersMap(oldPolicy.Bindings) - newMap := saRolesToMembersMap(newPolicy.Bindings) - deleted := make(map[string]map[string]bool) - - // Get each role and its associated members in the old state - for role, members := range oldMap { - // Initialize map for role - if _, ok := deleted[role]; !ok { - deleted[role] = make(map[string]bool) - } - // The role exists in the new state - if _, ok := newMap[role]; ok { - // Check each memeber - for member, _ 
:= range members { - // Member does not exist in new state, so it was deleted - if _, ok = newMap[role][member]; !ok { - deleted[role][member] = true - } - } - } else { - // This indicates an entire role was deleted. Mark all members - // for delete. - for member, _ := range members { - deleted[role][member] = true - } - } - } - log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted) - - // Retrieve existing IAM policy from project. This will be merged - // with the policy in the current state - // TODO(evanbrown): Add an 'authoritative' flag that allows policy - // in manifest to overwrite existing policy. - p, err := getServiceAccountIamPolicy(d.Id(), config) - if err != nil { - return err - } - log.Printf("[DEBUG] Got existing bindings from service account %q: %#v", d.Id(), p.Bindings) - - // Merge existing policy with policy in the current state - log.Printf("[DEBUG] Merging new bindings from service account %q: %#v", d.Id(), newPolicy.Bindings) - mergedBindings := saMergeBindings(append(p.Bindings, newPolicy.Bindings...)) - - // Remove any roles and members that were explicitly deleted - mergedBindingsMap := saRolesToMembersMap(mergedBindings) - for role, members := range deleted { - for member, _ := range members { - delete(mergedBindingsMap[role], member) - } - } - - p.Bindings = saRolesToMembersBinding(mergedBindingsMap) - log.Printf("[DEBUG] Setting new policy for project: %#v", p) - - dump, _ := json.MarshalIndent(p.Bindings, " ", " ") - log.Printf(string(dump)) - _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(d.Id(), - &iam.SetIamPolicyRequest{Policy: p}).Do() - - if err != nil { - return fmt.Errorf("Error applying IAM policy for service account %q: %s", d.Id(), err) - } - } - return nil -} - -// Retrieve the existing IAM Policy for a service account -func getServiceAccountIamPolicy(sa string, config *Config) (*iam.Policy, error) { - p, err := config.clientIAM.Projects.ServiceAccounts.GetIamPolicy(sa).Do() - - if err != nil { 
- return nil, fmt.Errorf("Error retrieving IAM policy for service account %q: %s", sa, err) - } - return p, nil -} - -// Convert a map of roles->members to a list of Binding -func saRolesToMembersBinding(m map[string]map[string]bool) []*iam.Binding { - bindings := make([]*iam.Binding, 0) - for role, members := range m { - b := iam.Binding{ - Role: role, - Members: make([]string, 0), - } - for m, _ := range members { - b.Members = append(b.Members, m) - } - bindings = append(bindings, &b) - } - return bindings -} - -// Map a role to a map of members, allowing easy merging of multiple bindings. -func saRolesToMembersMap(bindings []*iam.Binding) map[string]map[string]bool { - bm := make(map[string]map[string]bool) - // Get each binding - for _, b := range bindings { - // Initialize members map - if _, ok := bm[b.Role]; !ok { - bm[b.Role] = make(map[string]bool) - } - // Get each member (user/principal) for the binding - for _, m := range b.Members { - // Add the member - bm[b.Role][m] = true - } - } - return bm -} - -// Merge multiple Bindings such that Bindings with the same Role result in -// a single Binding with combined Members -func saMergeBindings(bindings []*iam.Binding) []*iam.Binding { - bm := saRolesToMembersMap(bindings) - rb := make([]*iam.Binding, 0) - - for role, members := range bm { - var b iam.Binding - b.Role = role - b.Members = make([]string, 0) - for m, _ := range members { - b.Members = append(b.Members, m) - } - rb = append(rb, &b) - } - - return rb -} diff --git a/builtin/providers/google/resource_google_service_account_test.go b/builtin/providers/google/resource_google_service_account_test.go deleted file mode 100644 index 6377be39f..000000000 --- a/builtin/providers/google/resource_google_service_account_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - 
-var ( - projectId = multiEnvSearch([]string{ - "GOOGLE_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", - }) -) - -// Test that a service account resource can be created, updated, and destroyed -func TestAccGoogleServiceAccount_basic(t *testing.T) { - accountId := "a" + acctest.RandString(10) - displayName := "Terraform Test" - displayName2 := "Terraform Test Update" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // The first step creates a basic service account - resource.TestStep{ - Config: testAccGoogleServiceAccountBasic(accountId, displayName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountExists("google_service_account.acceptance"), - ), - }, - // The second step updates the service account - resource.TestStep{ - Config: testAccGoogleServiceAccountBasic(accountId, displayName2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance", displayName2), - ), - }, - }, - }) -} - -// Test that a service account resource can be created with a policy, updated, -// and destroyed. 
-func TestAccGoogleServiceAccount_createPolicy(t *testing.T) { - accountId := "a" + acctest.RandString(10) - displayName := "Terraform Test" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // The first step creates a basic service account with an IAM policy - resource.TestStep{ - Config: testAccGoogleServiceAccountPolicy(accountId, projectId), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), - ), - }, - // The second step updates the service account with no IAM policy - resource.TestStep{ - Config: testAccGoogleServiceAccountBasic(accountId, displayName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0), - ), - }, - // The final step re-applies the IAM policy - resource.TestStep{ - Config: testAccGoogleServiceAccountPolicy(accountId, projectId), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), - ), - }, - }, - }) -} - -func testAccCheckGoogleServiceAccountPolicyCount(r string, n int) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := testAccProvider.Meta().(*Config) - p, err := getServiceAccountIamPolicy(s.RootModule().Resources[r].Primary.ID, c) - if err != nil { - return fmt.Errorf("Failed to retrieve IAM Policy for service account: %s", err) - } - if len(p.Bindings) != n { - return fmt.Errorf("The service account has %v bindings but %v were expected", len(p.Bindings), n) - } - return nil - } -} - -func testAccCheckGoogleServiceAccountExists(r string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[r] - if !ok { - return fmt.Errorf("Not found: %s", r) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - return nil - } -} - -func 
testAccCheckGoogleServiceAccountNameModified(r, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[r] - if !ok { - return fmt.Errorf("Not found: %s", r) - } - - if rs.Primary.Attributes["display_name"] != n { - return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], n) - } - - return nil - } -} - -func testAccGoogleServiceAccountBasic(account, name string) string { - t := `resource "google_service_account" "acceptance" { - account_id = "%v" - display_name = "%v" - }` - return fmt.Sprintf(t, account, name) -} - -func testAccGoogleServiceAccountPolicy(account, name string) string { - - t := `resource "google_service_account" "acceptance" { - account_id = "%v" - display_name = "%v" - policy_data = "${data.google_iam_policy.service_account.policy_data}" -} - -data "google_iam_policy" "service_account" { - binding { - role = "roles/iam.serviceAccountActor" - members = [ - "serviceAccount:%v@%v.iam.gserviceaccount.com", - ] - } -}` - - return fmt.Sprintf(t, account, name, account, projectId) -} diff --git a/builtin/providers/google/resource_pubsub_subscription.go b/builtin/providers/google/resource_pubsub_subscription.go deleted file mode 100644 index 04c0414b0..000000000 --- a/builtin/providers/google/resource_pubsub_subscription.go +++ /dev/null @@ -1,150 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/pubsub/v1" -) - -func resourcePubsubSubscription() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubSubscriptionCreate, - Read: resourcePubsubSubscriptionRead, - Delete: resourcePubsubSubscriptionDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "topic": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ack_deadline_seconds": &schema.Schema{ - Type: 
schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "push_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attributes": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: schema.TypeString, - }, - - "push_endpoint": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func cleanAdditionalArgs(args map[string]interface{}) map[string]string { - cleaned_args := make(map[string]string) - for k, v := range args { - cleaned_args[k] = v.(string) - } - return cleaned_args -} - -func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := fmt.Sprintf("projects/%s/subscriptions/%s", project, d.Get("name").(string)) - computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("topic").(string)) - - // process optional parameters - var ackDeadlineSeconds int64 - ackDeadlineSeconds = 10 - if v, ok := d.GetOk("ack_deadline_seconds"); ok { - ackDeadlineSeconds = int64(v.(int)) - } - - var subscription *pubsub.Subscription - if v, ok := d.GetOk("push_config"); ok { - push_configs := v.([]interface{}) - - if len(push_configs) > 1 { - return fmt.Errorf("At most one PushConfig is allowed per subscription!") - } - - push_config := push_configs[0].(map[string]interface{}) - attributes := push_config["attributes"].(map[string]interface{}) - attributesClean := cleanAdditionalArgs(attributes) - pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)} - subscription = 
&pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig} - } else { - subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name} - } - - call := config.clientPubsub.Projects.Subscriptions.Create(name, subscription) - res, err := call.Do() - if err != nil { - return err - } - - d.SetId(res.Name) - d.Set("path", name) - - return nil -} - -func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Id() - call := config.clientPubsub.Projects.Subscriptions.Get(name) - _, err := call.Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Subscription %q", name)) - } - - return nil -} - -func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Id() - call := config.clientPubsub.Projects.Subscriptions.Delete(name) - _, err := call.Do() - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/google/resource_pubsub_subscription_test.go b/builtin/providers/google/resource_pubsub_subscription_test.go deleted file mode 100644 index 01230656f..000000000 --- a/builtin/providers/google/resource_pubsub_subscription_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPubsubSubscriptionCreate(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPubsubSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPubsubSubscription, - Check: resource.ComposeTestCheckFunc( - testAccPubsubSubscriptionExists( - "google_pubsub_subscription.foobar_sub"), - 
resource.TestCheckResourceAttrSet("google_pubsub_subscription.foobar_sub", "path"), - ), - }, - }, - }) -} - -func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_pubsub_subscription" { - continue - } - - config := testAccProvider.Meta().(*Config) - sub, _ := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() - if sub != nil { - return fmt.Errorf("Subscription still present") - } - } - - return nil -} - -func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - config := testAccProvider.Meta().(*Config) - _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() - if err != nil { - return fmt.Errorf("Subscription does not exist") - } - - return nil - } -} - -var testAccPubsubSubscription = fmt.Sprintf(` -resource "google_pubsub_topic" "foobar_sub" { - name = "pssub-test-%s" -} - -resource "google_pubsub_subscription" "foobar_sub" { - name = "pssub-test-%s" - topic = "${google_pubsub_topic.foobar_sub.name}" - ack_deadline_seconds = 20 -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_pubsub_topic.go b/builtin/providers/google/resource_pubsub_topic.go deleted file mode 100644 index ba78a6f74..000000000 --- a/builtin/providers/google/resource_pubsub_topic.go +++ /dev/null @@ -1,78 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/pubsub/v1" -) - -func resourcePubsubTopic() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubTopicCreate, - Read: resourcePubsubTopicRead, - Delete: resourcePubsubTopicDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("name").(string)) - topic := &pubsub.Topic{} - - call := config.clientPubsub.Projects.Topics.Create(name, topic) - res, err := call.Do() - if err != nil { - return err - } - - d.SetId(res.Name) - - return nil -} - -func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Id() - call := config.clientPubsub.Projects.Topics.Get(name) - _, err := call.Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Topic %q", name)) - } - - return nil -} - -func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Id() - call := config.clientPubsub.Projects.Topics.Delete(name) - _, err := call.Do() - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/google/resource_pubsub_topic_test.go b/builtin/providers/google/resource_pubsub_topic_test.go deleted file mode 100644 index 1d03aae0c..000000000 --- a/builtin/providers/google/resource_pubsub_topic_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPubsubTopicCreate(t *testing.T) { - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPubsubTopicDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPubsubTopic, - Check: 
resource.ComposeTestCheckFunc( - testAccPubsubTopicExists( - "google_pubsub_topic.foobar"), - ), - }, - }, - }) -} - -func testAccCheckPubsubTopicDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_pubsub_topic" { - continue - } - - config := testAccProvider.Meta().(*Config) - topic, _ := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() - if topic != nil { - return fmt.Errorf("Topic still present") - } - } - - return nil -} - -func testAccPubsubTopicExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - config := testAccProvider.Meta().(*Config) - _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() - if err != nil { - return fmt.Errorf("Topic does not exist") - } - - return nil - } -} - -var testAccPubsubTopic = fmt.Sprintf(` -resource "google_pubsub_topic" "foobar" { - name = "pstopic-test-%s" -}`, acctest.RandString(10)) diff --git a/builtin/providers/google/resource_sql_database.go b/builtin/providers/google/resource_sql_database.go deleted file mode 100644 index a6b034aa5..000000000 --- a/builtin/providers/google/resource_sql_database.go +++ /dev/null @@ -1,135 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/sqladmin/v1beta4" -) - -func resourceSqlDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourceSqlDatabaseCreate, - Read: resourceSqlDatabaseRead, - Delete: resourceSqlDatabaseDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - database_name := d.Get("name").(string) - instance_name := d.Get("instance").(string) - - db := &sqladmin.Database{ - Name: database_name, - Instance: instance_name, - } - - mutexKV.Lock(instanceMutexKey(project, instance_name)) - defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) - op, err := config.clientSqlAdmin.Databases.Insert(project, instance_name, - db).Do() - - if err != nil { - return fmt.Errorf("Error, failed to insert "+ - "database %s into instance %s: %s", database_name, - instance_name, err) - } - - err = sqladminOperationWait(config, op, "Insert Database") - - if err != nil { - return fmt.Errorf("Error, failure waiting for insertion of %s "+ - "into %s: %s", database_name, instance_name, err) - } - - return resourceSqlDatabaseRead(d, meta) -} - -func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - database_name := d.Get("name").(string) - instance_name := d.Get("instance").(string) - - db, err := config.clientSqlAdmin.Databases.Get(project, instance_name, - database_name).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL Database %q in instance %q", database_name, instance_name)) - } - - d.Set("self_link", db.SelfLink) - d.SetId(instance_name + ":" + database_name) - - return nil -} - -func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - database_name := d.Get("name").(string) - instance_name := d.Get("instance").(string) - - 
mutexKV.Lock(instanceMutexKey(project, instance_name)) - defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) - op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name, - database_name).Do() - - if err != nil { - return fmt.Errorf("Error, failed to delete"+ - "database %s in instance %s: %s", database_name, - instance_name, err) - } - - err = sqladminOperationWait(config, op, "Delete Database") - - if err != nil { - return fmt.Errorf("Error, failure waiting for deletion of %s "+ - "in %s: %s", database_name, instance_name, err) - } - - return nil -} diff --git a/builtin/providers/google/resource_sql_database_instance.go b/builtin/providers/google/resource_sql_database_instance.go deleted file mode 100644 index 109c25a88..000000000 --- a/builtin/providers/google/resource_sql_database_instance.go +++ /dev/null @@ -1,1178 +0,0 @@ -package google - -import ( - "fmt" - "log" - "regexp" - "strings" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/googleapi" - "google.golang.org/api/sqladmin/v1beta4" -) - -func resourceSqlDatabaseInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceSqlDatabaseInstanceCreate, - Read: resourceSqlDatabaseInstanceRead, - Update: resourceSqlDatabaseInstanceUpdate, - Delete: resourceSqlDatabaseInstanceDelete, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "settings": &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "version": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - "tier": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "activation_policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "authorized_gae_applications": &schema.Schema{ - Type: schema.TypeList, - Optional: true, 
- Elem: &schema.Schema{Type: schema.TypeString}, - }, - "backup_configuration": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "binary_log_enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "start_time": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "crash_safe_replication": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "database_flags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "disk_autoresize": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - DiffSuppressFunc: suppressFirstGen, - }, - "disk_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "disk_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "ip_configuration": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "authorized_networks": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expiration_time": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "ipv4_enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "require_ssl": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "location_preference": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: 
&schema.Resource{ - Schema: map[string]*schema.Schema{ - "follow_gae_application": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "maintenance_window": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - return validateNumericRange(v, k, 1, 7) - }, - }, - "hour": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - return validateNumericRange(v, k, 0, 23) - }, - }, - "update_track": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "pricing_plan": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "replication_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "database_version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "MYSQL_5_6", - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "time_to_retire": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "master_instance_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "replica_configuration": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: 
&schema.Resource{ - Schema: map[string]*schema.Schema{ - "ca_certificate": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "client_certificate": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "client_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "connect_retry_interval": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "dump_file_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "failover_target": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "master_heartbeat_period": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "ssl_cipher": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "verify_server_certificate": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// Suppress diff with any disk_autoresize value on 1st Generation Instances -func suppressFirstGen(k, old, new string, d *schema.ResourceData) bool { - settingsList := d.Get("settings").([]interface{}) - - settings := settingsList[0].(map[string]interface{}) - tier := settings["tier"].(string) - matched, err := regexp.MatchString("db*", tier) - if err != nil { - log.Printf("[ERR] error with regex in diff supression for disk_autoresize: %s", err) - } - if !matched { - log.Printf("[DEBUG] suppressing diff on disk_autoresize due to 1st gen instance type") - return true - } - return false -} - -func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error 
{ - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - databaseVersion := d.Get("database_version").(string) - - _settingsList := d.Get("settings").([]interface{}) - - _settings := _settingsList[0].(map[string]interface{}) - settings := &sqladmin.Settings{ - Tier: _settings["tier"].(string), - ForceSendFields: []string{"StorageAutoResize"}, - } - - if v, ok := _settings["activation_policy"]; ok { - settings.ActivationPolicy = v.(string) - } - - if v, ok := _settings["authorized_gae_applications"]; ok { - settings.AuthorizedGaeApplications = make([]string, 0) - for _, app := range v.([]interface{}) { - settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications, - app.(string)) - } - } - - if v, ok := _settings["backup_configuration"]; ok { - _backupConfigurationList := v.([]interface{}) - if len(_backupConfigurationList) > 1 { - return fmt.Errorf("At most one backup_configuration block is allowed") - } - - if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { - settings.BackupConfiguration = &sqladmin.BackupConfiguration{} - _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) - - if vp, okp := _backupConfiguration["binary_log_enabled"]; okp { - settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) - } - - if vp, okp := _backupConfiguration["enabled"]; okp { - settings.BackupConfiguration.Enabled = vp.(bool) - } - - if vp, okp := _backupConfiguration["start_time"]; okp { - settings.BackupConfiguration.StartTime = vp.(string) - } - } - } - - if v, ok := _settings["crash_safe_replication"]; ok { - settings.CrashSafeReplicationEnabled = v.(bool) - } - - settings.StorageAutoResize = _settings["disk_autoresize"].(bool) - - if v, ok := _settings["disk_size"]; ok && v.(int) > 0 { - settings.DataDiskSizeGb = int64(v.(int)) - } - - if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { - 
settings.DataDiskType = v.(string) - } - - if v, ok := _settings["database_flags"]; ok { - settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) - _databaseFlagsList := v.([]interface{}) - for _, _flag := range _databaseFlagsList { - _entry := _flag.(map[string]interface{}) - flag := &sqladmin.DatabaseFlags{} - if vp, okp := _entry["name"]; okp { - flag.Name = vp.(string) - } - - if vp, okp := _entry["value"]; okp { - flag.Value = vp.(string) - } - - settings.DatabaseFlags = append(settings.DatabaseFlags, flag) - } - } - - if v, ok := _settings["ip_configuration"]; ok { - _ipConfigurationList := v.([]interface{}) - if len(_ipConfigurationList) > 1 { - return fmt.Errorf("At most one ip_configuration block is allowed") - } - - if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { - settings.IpConfiguration = &sqladmin.IpConfiguration{} - _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) - - if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { - settings.IpConfiguration.Ipv4Enabled = vp.(bool) - } - - if vp, okp := _ipConfiguration["require_ssl"]; okp { - settings.IpConfiguration.RequireSsl = vp.(bool) - } - - if vp, okp := _ipConfiguration["authorized_networks"]; okp { - settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) - _authorizedNetworksList := vp.([]interface{}) - for _, _acl := range _authorizedNetworksList { - _entry := _acl.(map[string]interface{}) - entry := &sqladmin.AclEntry{} - - if vpp, okpp := _entry["expiration_time"]; okpp { - entry.ExpirationTime = vpp.(string) - } - - if vpp, okpp := _entry["name"]; okpp { - entry.Name = vpp.(string) - } - - if vpp, okpp := _entry["value"]; okpp { - entry.Value = vpp.(string) - } - - settings.IpConfiguration.AuthorizedNetworks = append( - settings.IpConfiguration.AuthorizedNetworks, entry) - } - } - } - } - - if v, ok := _settings["location_preference"]; ok { - _locationPreferenceList := v.([]interface{}) - if len(_locationPreferenceList) > 1 { - 
return fmt.Errorf("At most one location_preference block is allowed") - } - - if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { - settings.LocationPreference = &sqladmin.LocationPreference{} - _locationPreference := _locationPreferenceList[0].(map[string]interface{}) - - if vp, okp := _locationPreference["follow_gae_application"]; okp { - settings.LocationPreference.FollowGaeApplication = vp.(string) - } - - if vp, okp := _locationPreference["zone"]; okp { - settings.LocationPreference.Zone = vp.(string) - } - } - } - - if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { - settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} - _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) - - if vp, okp := _maintenanceWindow["day"]; okp { - settings.MaintenanceWindow.Day = int64(vp.(int)) - } - - if vp, okp := _maintenanceWindow["hour"]; okp { - settings.MaintenanceWindow.Hour = int64(vp.(int)) - } - - if vp, ok := _maintenanceWindow["update_track"]; ok { - if len(vp.(string)) > 0 { - settings.MaintenanceWindow.UpdateTrack = vp.(string) - } - } - } - - if v, ok := _settings["pricing_plan"]; ok { - settings.PricingPlan = v.(string) - } - - if v, ok := _settings["replication_type"]; ok { - settings.ReplicationType = v.(string) - } - - instance := &sqladmin.DatabaseInstance{ - Region: region, - Settings: settings, - DatabaseVersion: databaseVersion, - } - - if v, ok := d.GetOk("name"); ok { - instance.Name = v.(string) - } else { - instance.Name = resource.UniqueId() - d.Set("name", instance.Name) - } - - if v, ok := d.GetOk("replica_configuration"); ok { - _replicaConfigurationList := v.([]interface{}) - - if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { - replicaConfiguration := &sqladmin.ReplicaConfiguration{} - mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{} - _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) - - if vp, 
okp := _replicaConfiguration["failover_target"]; okp { - replicaConfiguration.FailoverTarget = vp.(bool) - } - - if vp, okp := _replicaConfiguration["ca_certificate"]; okp { - mySqlReplicaConfiguration.CaCertificate = vp.(string) - } - - if vp, okp := _replicaConfiguration["client_certificate"]; okp { - mySqlReplicaConfiguration.ClientCertificate = vp.(string) - } - - if vp, okp := _replicaConfiguration["client_key"]; okp { - mySqlReplicaConfiguration.ClientKey = vp.(string) - } - - if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp { - mySqlReplicaConfiguration.ConnectRetryInterval = int64(vp.(int)) - } - - if vp, okp := _replicaConfiguration["dump_file_path"]; okp { - mySqlReplicaConfiguration.DumpFilePath = vp.(string) - } - - if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp { - mySqlReplicaConfiguration.MasterHeartbeatPeriod = int64(vp.(int)) - } - - if vp, okp := _replicaConfiguration["password"]; okp { - mySqlReplicaConfiguration.Password = vp.(string) - } - - if vp, okp := _replicaConfiguration["ssl_cipher"]; okp { - mySqlReplicaConfiguration.SslCipher = vp.(string) - } - - if vp, okp := _replicaConfiguration["username"]; okp { - mySqlReplicaConfiguration.Username = vp.(string) - } - - if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp { - mySqlReplicaConfiguration.VerifyServerCertificate = vp.(bool) - } - - replicaConfiguration.MysqlReplicaConfiguration = mySqlReplicaConfiguration - instance.ReplicaConfiguration = replicaConfiguration - } - } - - if v, ok := d.GetOk("master_instance_name"); ok { - instance.MasterInstanceName = v.(string) - } - - op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { - return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name) - } else { - return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) - } - } - - 
err = sqladminOperationWait(config, op, "Create Instance") - if err != nil { - return err - } - - err = resourceSqlDatabaseInstanceRead(d, meta) - if err != nil { - return err - } - - // If a root user exists with a wildcard ('%') hostname, delete it. - users, err := config.clientSqlAdmin.Users.List(project, instance.Name).Do() - if err != nil { - return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) - } - for _, u := range users.Items { - if u.Name == "root" && u.Host == "%" { - op, err = config.clientSqlAdmin.Users.Delete(project, instance.Name, u.Host, u.Name).Do() - if err != nil { - return fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) - } - err = sqladminOperationWait(config, op, "Delete default root User") - if err != nil { - return err - } - } - } - - return nil -} - -func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - instance, err := config.clientSqlAdmin.Instances.Get(project, - d.Get("name").(string)).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) - } - - _settingsList := d.Get("settings").([]interface{}) - _settings := _settingsList[0].(map[string]interface{}) - - settings := instance.Settings - _settings["version"] = settings.SettingsVersion - _settings["tier"] = settings.Tier - - // Take care to only update attributes that the user has defined explicitly - if v, ok := _settings["activation_policy"]; ok && len(v.(string)) > 0 { - _settings["activation_policy"] = settings.ActivationPolicy - } - - if v, ok := _settings["authorized_gae_applications"]; ok && len(v.([]interface{})) > 0 { - _authorized_gae_applications := make([]interface{}, 0) - for _, app := range settings.AuthorizedGaeApplications { - 
_authorized_gae_applications = append(_authorized_gae_applications, app) - } - _settings["authorized_gae_applications"] = _authorized_gae_applications - } - - if v, ok := _settings["backup_configuration"]; ok { - _backupConfigurationList := v.([]interface{}) - if len(_backupConfigurationList) > 1 { - return fmt.Errorf("At most one backup_configuration block is allowed") - } - - if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { - _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) - - if vp, okp := _backupConfiguration["binary_log_enabled"]; okp && vp != nil { - _backupConfiguration["binary_log_enabled"] = settings.BackupConfiguration.BinaryLogEnabled - } - - if vp, okp := _backupConfiguration["enabled"]; okp && vp != nil { - _backupConfiguration["enabled"] = settings.BackupConfiguration.Enabled - } - - if vp, okp := _backupConfiguration["start_time"]; okp && len(vp.(string)) > 0 { - _backupConfiguration["start_time"] = settings.BackupConfiguration.StartTime - } - - _backupConfigurationList[0] = _backupConfiguration - _settings["backup_configuration"] = _backupConfigurationList - } - } - - if v, ok := _settings["crash_safe_replication"]; ok && v != nil { - _settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled - } - - _settings["disk_autoresize"] = settings.StorageAutoResize - - if v, ok := _settings["disk_size"]; ok && v != nil { - if v.(int) > 0 && settings.DataDiskSizeGb < int64(v.(int)) { - _settings["disk_size"] = settings.DataDiskSizeGb - } - } - - if v, ok := _settings["disk_type"]; ok && v != nil { - if len(v.(string)) > 0 { - _settings["disk_type"] = settings.DataDiskType - } - } - - if v, ok := _settings["database_flags"]; ok && len(v.([]interface{})) > 0 { - _flag_map := make(map[string]string) - // First keep track of localy defined flag pairs - for _, _flag := range _settings["database_flags"].([]interface{}) { - _entry := _flag.(map[string]interface{}) - 
_flag_map[_entry["name"].(string)] = _entry["value"].(string) - } - - _database_flags := make([]interface{}, 0) - // Next read the flag pairs from the server, and reinsert those that - // correspond to ones defined locally - for _, entry := range settings.DatabaseFlags { - if _, okp := _flag_map[entry.Name]; okp { - _entry := make(map[string]interface{}) - _entry["name"] = entry.Name - _entry["value"] = entry.Value - _database_flags = append(_database_flags, _entry) - } - } - _settings["database_flags"] = _database_flags - } - - if v, ok := _settings["ip_configuration"]; ok { - _ipConfigurationList := v.([]interface{}) - if len(_ipConfigurationList) > 1 { - return fmt.Errorf("At most one ip_configuration block is allowed") - } - - if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { - _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) - - if vp, okp := _ipConfiguration["ipv4_enabled"]; okp && vp != nil { - _ipConfiguration["ipv4_enabled"] = settings.IpConfiguration.Ipv4Enabled - } - - if vp, okp := _ipConfiguration["require_ssl"]; okp && vp != nil { - _ipConfiguration["require_ssl"] = settings.IpConfiguration.RequireSsl - } - - if vp, okp := _ipConfiguration["authorized_networks"]; okp && vp != nil { - _authorizedNetworksList := vp.([]interface{}) - _ipc_map := make(map[string]interface{}) - // First keep track of locally defined ip configurations - for _, _ipc := range _authorizedNetworksList { - if _ipc == nil { - continue - } - _entry := _ipc.(map[string]interface{}) - if _entry["value"] == nil { - continue - } - _value := make(map[string]interface{}) - _value["name"] = _entry["name"] - _value["expiration_time"] = _entry["expiration_time"] - // We key on value, since that is the only required part of - // this 3-tuple - _ipc_map[_entry["value"].(string)] = _value - } - _authorized_networks := make([]interface{}, 0) - // Next read the network tuples from the server, and reinsert those that - // correspond to ones defined locally 
- for _, entry := range settings.IpConfiguration.AuthorizedNetworks { - if _, okp := _ipc_map[entry.Value]; okp { - _entry := make(map[string]interface{}) - _entry["value"] = entry.Value - _entry["name"] = entry.Name - _entry["expiration_time"] = entry.ExpirationTime - _authorized_networks = append(_authorized_networks, _entry) - } - } - _ipConfiguration["authorized_networks"] = _authorized_networks - } - _ipConfigurationList[0] = _ipConfiguration - _settings["ip_configuration"] = _ipConfigurationList - } - } - - if v, ok := _settings["location_preference"]; ok && len(v.([]interface{})) > 0 { - _locationPreferenceList := v.([]interface{}) - if len(_locationPreferenceList) > 1 { - return fmt.Errorf("At most one location_preference block is allowed") - } - - if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil && - settings.LocationPreference != nil { - _locationPreference := _locationPreferenceList[0].(map[string]interface{}) - - if vp, okp := _locationPreference["follow_gae_application"]; okp && vp != nil { - _locationPreference["follow_gae_application"] = - settings.LocationPreference.FollowGaeApplication - } - - if vp, okp := _locationPreference["zone"]; okp && vp != nil { - _locationPreference["zone"] = settings.LocationPreference.Zone - } - - _locationPreferenceList[0] = _locationPreference - _settings["location_preference"] = _locationPreferenceList[0] - } - } - - if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 && - settings.MaintenanceWindow != nil { - _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) - - if vp, okp := _maintenanceWindow["day"]; okp && vp != nil { - _maintenanceWindow["day"] = settings.MaintenanceWindow.Day - } - - if vp, okp := _maintenanceWindow["hour"]; okp && vp != nil { - _maintenanceWindow["hour"] = settings.MaintenanceWindow.Hour - } - - if vp, ok := _maintenanceWindow["update_track"]; ok && vp != nil { - if len(vp.(string)) > 0 { - _maintenanceWindow["update_track"] 
= settings.MaintenanceWindow.UpdateTrack - } - } - } - - if v, ok := _settings["pricing_plan"]; ok && len(v.(string)) > 0 { - _settings["pricing_plan"] = settings.PricingPlan - } - - if v, ok := _settings["replication_type"]; ok && len(v.(string)) > 0 { - _settings["replication_type"] = settings.ReplicationType - } - - _settingsList[0] = _settings - d.Set("settings", _settingsList) - - if v, ok := d.GetOk("replica_configuration"); ok && v != nil { - _replicaConfigurationList := v.([]interface{}) - if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { - _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) - - if vp, okp := _replicaConfiguration["failover_target"]; okp && vp != nil { - _replicaConfiguration["failover_target"] = instance.ReplicaConfiguration.FailoverTarget - } - - // Don't attempt to assign anything from instance.ReplicaConfiguration.MysqlReplicaConfiguration, - // since those fields are set on create and then not stored. 
See description at - // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances - - _replicaConfigurationList[0] = _replicaConfiguration - d.Set("replica_configuration", _replicaConfigurationList) - } - } - - _ipAddresses := make([]interface{}, len(instance.IpAddresses)) - - for i, ip := range instance.IpAddresses { - _ipAddress := make(map[string]interface{}) - - _ipAddress["ip_address"] = ip.IpAddress - _ipAddress["time_to_retire"] = ip.TimeToRetire - - _ipAddresses[i] = _ipAddress - } - - d.Set("ip_address", _ipAddresses) - - if v, ok := d.GetOk("master_instance_name"); ok && v != nil { - d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")) - } - - d.Set("self_link", instance.SelfLink) - d.SetId(instance.Name) - - return nil -} - -func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.Partial(true) - - instance, err := config.clientSqlAdmin.Instances.Get(project, - d.Get("name").(string)).Do() - - if err != nil { - return fmt.Errorf("Error retrieving instance %s: %s", - d.Get("name").(string), err) - } - - if d.HasChange("settings") { - _oListCast, _settingsListCast := d.GetChange("settings") - _oList := _oListCast.([]interface{}) - _o := _oList[0].(map[string]interface{}) - _settingsList := _settingsListCast.([]interface{}) - - _settings := _settingsList[0].(map[string]interface{}) - settings := &sqladmin.Settings{ - Tier: _settings["tier"].(string), - SettingsVersion: instance.Settings.SettingsVersion, - ForceSendFields: []string{"StorageAutoResize"}, - } - - if v, ok := _settings["activation_policy"]; ok { - settings.ActivationPolicy = v.(string) - } - - if v, ok := _settings["authorized_gae_applications"]; ok { - settings.AuthorizedGaeApplications = make([]string, 0) - for _, app := range v.([]interface{}) { - settings.AuthorizedGaeApplications = 
append(settings.AuthorizedGaeApplications, - app.(string)) - } - } - - if v, ok := _settings["backup_configuration"]; ok { - _backupConfigurationList := v.([]interface{}) - if len(_backupConfigurationList) > 1 { - return fmt.Errorf("At most one backup_configuration block is allowed") - } - - if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { - settings.BackupConfiguration = &sqladmin.BackupConfiguration{} - _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) - - if vp, okp := _backupConfiguration["binary_log_enabled"]; okp { - settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) - } - - if vp, okp := _backupConfiguration["enabled"]; okp { - settings.BackupConfiguration.Enabled = vp.(bool) - } - - if vp, okp := _backupConfiguration["start_time"]; okp { - settings.BackupConfiguration.StartTime = vp.(string) - } - } - } - - if v, ok := _settings["crash_safe_replication"]; ok { - settings.CrashSafeReplicationEnabled = v.(bool) - } - - settings.StorageAutoResize = _settings["disk_autoresize"].(bool) - - if v, ok := _settings["disk_size"]; ok { - if v.(int) > 0 && int64(v.(int)) > instance.Settings.DataDiskSizeGb { - settings.DataDiskSizeGb = int64(v.(int)) - } - } - - if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { - settings.DataDiskType = v.(string) - } - - _oldDatabaseFlags := make([]interface{}, 0) - if ov, ook := _o["database_flags"]; ook { - _oldDatabaseFlags = ov.([]interface{}) - } - - if v, ok := _settings["database_flags"]; ok || len(_oldDatabaseFlags) > 0 { - oldDatabaseFlags := settings.DatabaseFlags - settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) - _databaseFlagsList := make([]interface{}, 0) - if v != nil { - _databaseFlagsList = v.([]interface{}) - } - - _odbf_map := make(map[string]interface{}) - for _, _dbf := range _oldDatabaseFlags { - _entry := _dbf.(map[string]interface{}) - _odbf_map[_entry["name"].(string)] = true - } - - // First read the flags from the server, 
and reinsert those that - // were not previously defined - for _, entry := range oldDatabaseFlags { - _, ok_old := _odbf_map[entry.Name] - if !ok_old { - settings.DatabaseFlags = append( - settings.DatabaseFlags, entry) - } - } - // finally, insert only those that were previously defined - // and are still defined. - for _, _flag := range _databaseFlagsList { - _entry := _flag.(map[string]interface{}) - flag := &sqladmin.DatabaseFlags{} - if vp, okp := _entry["name"]; okp { - flag.Name = vp.(string) - } - - if vp, okp := _entry["value"]; okp { - flag.Value = vp.(string) - } - - settings.DatabaseFlags = append(settings.DatabaseFlags, flag) - } - } - - if v, ok := _settings["ip_configuration"]; ok { - _ipConfigurationList := v.([]interface{}) - if len(_ipConfigurationList) > 1 { - return fmt.Errorf("At most one ip_configuration block is allowed") - } - - if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { - settings.IpConfiguration = &sqladmin.IpConfiguration{} - _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) - - if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { - settings.IpConfiguration.Ipv4Enabled = vp.(bool) - } - - if vp, okp := _ipConfiguration["require_ssl"]; okp { - settings.IpConfiguration.RequireSsl = vp.(bool) - } - - _oldAuthorizedNetworkList := make([]interface{}, 0) - if ov, ook := _o["ip_configuration"]; ook { - _oldIpConfList := ov.([]interface{}) - if len(_oldIpConfList) > 0 { - _oldIpConf := _oldIpConfList[0].(map[string]interface{}) - if ovp, ookp := _oldIpConf["authorized_networks"]; ookp { - _oldAuthorizedNetworkList = ovp.([]interface{}) - } - } - } - - if vp, okp := _ipConfiguration["authorized_networks"]; okp || len(_oldAuthorizedNetworkList) > 0 { - oldAuthorizedNetworks := instance.Settings.IpConfiguration.AuthorizedNetworks - settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) - - _authorizedNetworksList := make([]interface{}, 0) - if vp != nil { - _authorizedNetworksList = 
vp.([]interface{}) - } - _oipc_map := make(map[string]interface{}) - for _, _ipc := range _oldAuthorizedNetworkList { - _entry := _ipc.(map[string]interface{}) - _oipc_map[_entry["value"].(string)] = true - } - // Next read the network tuples from the server, and reinsert those that - // were not previously defined - for _, entry := range oldAuthorizedNetworks { - _, ok_old := _oipc_map[entry.Value] - if !ok_old { - settings.IpConfiguration.AuthorizedNetworks = append( - settings.IpConfiguration.AuthorizedNetworks, entry) - } - } - // finally, update old entries and insert new ones - // and are still defined. - for _, _ipc := range _authorizedNetworksList { - _entry := _ipc.(map[string]interface{}) - entry := &sqladmin.AclEntry{} - - if vpp, okpp := _entry["expiration_time"]; okpp { - entry.ExpirationTime = vpp.(string) - } - - if vpp, okpp := _entry["name"]; okpp { - entry.Name = vpp.(string) - } - - if vpp, okpp := _entry["value"]; okpp { - entry.Value = vpp.(string) - } - - settings.IpConfiguration.AuthorizedNetworks = append( - settings.IpConfiguration.AuthorizedNetworks, entry) - } - } - } - } - - if v, ok := _settings["location_preference"]; ok { - _locationPreferenceList := v.([]interface{}) - if len(_locationPreferenceList) > 1 { - return fmt.Errorf("At most one location_preference block is allowed") - } - - if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { - settings.LocationPreference = &sqladmin.LocationPreference{} - _locationPreference := _locationPreferenceList[0].(map[string]interface{}) - - if vp, okp := _locationPreference["follow_gae_application"]; okp { - settings.LocationPreference.FollowGaeApplication = vp.(string) - } - - if vp, okp := _locationPreference["zone"]; okp { - settings.LocationPreference.Zone = vp.(string) - } - } - } - - if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { - settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} - _maintenanceWindow := 
v.([]interface{})[0].(map[string]interface{}) - - if vp, okp := _maintenanceWindow["day"]; okp { - settings.MaintenanceWindow.Day = int64(vp.(int)) - } - - if vp, okp := _maintenanceWindow["hour"]; okp { - settings.MaintenanceWindow.Hour = int64(vp.(int)) - } - - if vp, ok := _maintenanceWindow["update_track"]; ok { - if len(vp.(string)) > 0 { - settings.MaintenanceWindow.UpdateTrack = vp.(string) - } - } - } - - if v, ok := _settings["pricing_plan"]; ok { - settings.PricingPlan = v.(string) - } - - if v, ok := _settings["replication_type"]; ok { - settings.ReplicationType = v.(string) - } - - instance.Settings = settings - } - - d.Partial(false) - - op, err := config.clientSqlAdmin.Instances.Update(project, instance.Name, instance).Do() - if err != nil { - return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) - } - - err = sqladminOperationWait(config, op, "Create Instance") - if err != nil { - return err - } - - return resourceSqlDatabaseInstanceRead(d, meta) -} - -func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.clientSqlAdmin.Instances.Delete(project, d.Get("name").(string)).Do() - - if err != nil { - return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) - } - - err = sqladminOperationWait(config, op, "Delete Instance") - if err != nil { - return err - } - - return nil -} - -func validateNumericRange(v interface{}, k string, min int, max int) (ws []string, errors []error) { - value := v.(int) - if min > value || value > max { - errors = append(errors, fmt.Errorf( - "%q outside range %d-%d.", k, min, max)) - } - return -} - -func instanceMutexKey(project, instance_name string) string { - return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) -} diff --git 
a/builtin/providers/google/resource_sql_database_instance_test.go b/builtin/providers/google/resource_sql_database_instance_test.go deleted file mode 100644 index 4ff5192da..000000000 --- a/builtin/providers/google/resource_sql_database_instance_test.go +++ /dev/null @@ -1,821 +0,0 @@ -package google - -/** - * Note! You must run these tests once at a time. Google Cloud SQL does - * not allow you to reuse a database for a short time after you reserved it, - * and for this reason the tests will fail if the same config is used serveral - * times in short succession. - */ - -import ( - "fmt" - "log" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/sqladmin/v1beta4" -) - -func init() { - resource.AddTestSweepers("gcp_sql_db_instance", &resource.Sweeper{ - Name: "gcp_sql_db_instance", - F: testSweepDatabases, - }) -} - -func testSweepDatabases(region string) error { - config, err := sharedConfigForRegion(region) - if err != nil { - return fmt.Errorf("error getting shared config for region: %s", err) - } - - err = config.loadAndValidate() - if err != nil { - log.Fatalf("error loading: %s", err) - } - - found, err := config.clientSqlAdmin.Instances.List(config.Project).Do() - if err != nil { - log.Fatalf("error listing databases: %s", err) - } - - if len(found.Items) == 0 { - log.Printf("No databases found") - return nil - } - - for _, d := range found.Items { - var testDbInstance bool - for _, testName := range []string{"tf-lw-", "sqldatabasetest"} { - // only destroy instances we know to fit our test naming pattern - if strings.HasPrefix(d.Name, testName) { - testDbInstance = true - } - } - - if !testDbInstance { - continue - } - - log.Printf("Destroying SQL Instance (%s)", d.Name) - - // replicas need to be stopped and destroyed before destroying a master - // instance. 
The ordering slice tracks replica databases for a given master - // and we call destroy on them before destroying the master - var ordering []string - for _, replicaName := range d.ReplicaNames { - // need to stop replication before being able to destroy a database - op, err := config.clientSqlAdmin.Instances.StopReplica(config.Project, replicaName).Do() - - if err != nil { - return fmt.Errorf("error, failed to stop replica instance (%s) for instance (%s): %s", replicaName, d.Name, err) - } - - err = sqladminOperationWait(config, op, "Stop Replica") - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - log.Printf("Replication operation not found") - } else { - return err - } - } - - ordering = append(ordering, replicaName) - } - - // ordering has a list of replicas (or none), now add the primary to the end - ordering = append(ordering, d.Name) - - for _, db := range ordering { - // destroy instances, replicas first - op, err := config.clientSqlAdmin.Instances.Delete(config.Project, db).Do() - - if err != nil { - if strings.Contains(err.Error(), "409") { - // the GCP api can return a 409 error after the delete operation - // reaches a successful end - log.Printf("Operation not found, got 409 response") - continue - } - - return fmt.Errorf("Error, failed to delete instance %s: %s", db, err) - } - - err = sqladminOperationWait(config, op, "Delete Instance") - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - log.Printf("SQL instance not found") - continue - } - return err - } - } - } - - return nil -} - -func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { - var instance sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_basic, 
databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) { - var instance sqladmin.DatabaseInstance - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_basic2, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_basic3(t *testing.T) { - var instance sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_basic3, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( - &instance), - ), - }, - }, - }) -} -func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { - var instance sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_settings, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_slave(t *testing.T) { - var instance sqladmin.DatabaseInstance - masterID := acctest.RandInt() - slaveID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_slave, masterID, slaveID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance_master", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance_master", &instance), - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance_slave", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance_slave", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_diskspecs(t *testing.T) { - var instance sqladmin.DatabaseInstance - masterID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_diskspecs, masterID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - 
testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_maintenance(t *testing.T) { - var instance sqladmin.DatabaseInstance - masterID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_maintenance, masterID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { - var instance sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_basic, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_settings, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { - var instance 
sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_settings, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_basic, databaseID), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseInstanceExists( - "google_sql_database_instance.instance", &instance), - testAccCheckGoogleSqlDatabaseInstanceEquals( - "google_sql_database_instance.instance", &instance), - ), - }, - }, - }) -} - -// GH-4222 -func TestAccGoogleSqlDatabaseInstance_authNets(t *testing.T) { - // var instance sqladmin.DatabaseInstance - databaseID := acctest.RandInt() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_authNets_step1, databaseID), - }, - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_authNets_step2, databaseID), - }, - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_authNets_step1, databaseID), - }, - }, - }) -} - -// Tests that a SQL instance can be referenced from more than one other resource without -// throwing an error during provisioning, see #9018. 
-func TestAccGoogleSqlDatabaseInstance_multipleOperations(t *testing.T) { - databaseID, instanceID, userID := acctest.RandString(8), acctest.RandString(8), acctest.RandString(8) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testGoogleSqlDatabaseInstance_multipleOperations, databaseID, instanceID, userID), - }, - }, - }) -} - -func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, - instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - attributes := rs.Primary.Attributes - - server := instance.Name - local := attributes["name"] - if server != local { - return fmt.Errorf("Error name mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.Tier - local = attributes["settings.0.tier"] - if server != local { - return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local) - } - - server = strings.TrimPrefix(instance.MasterInstanceName, instance.Project+":") - local = attributes["master_instance_name"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.ActivationPolicy - local = attributes["settings.0.activation_policy"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.activation_policy mismatch, (%s, %s)", server, local) - } - - if instance.Settings.BackupConfiguration != nil { - server = strconv.FormatBool(instance.Settings.BackupConfiguration.BinaryLogEnabled) - local = attributes["settings.0.backup_configuration.0.binary_log_enabled"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error 
settings.backup_configuration.binary_log_enabled mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatBool(instance.Settings.BackupConfiguration.Enabled) - local = attributes["settings.0.backup_configuration.0.enabled"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.backup_configuration.enabled mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.BackupConfiguration.StartTime - local = attributes["settings.0.backup_configuration.0.start_time"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.backup_configuration.start_time mismatch, (%s, %s)", server, local) - } - } - - server = strconv.FormatBool(instance.Settings.CrashSafeReplicationEnabled) - local = attributes["settings.0.crash_safe_replication"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatBool(instance.Settings.StorageAutoResize) - local = attributes["settings.0.disk_autoresize"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.disk_autoresize mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatInt(instance.Settings.DataDiskSizeGb, 10) - local = attributes["settings.0.disk_size"] - if server != local && len(server) > 0 && len(local) > 0 && local != "0" { - return fmt.Errorf("Error settings.disk_size mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.DataDiskType - local = attributes["settings.0.disk_type"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.disk_type mismatch, (%s, %s)", server, local) - } - - if instance.Settings.IpConfiguration != nil { - server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled) - local = attributes["settings.0.ip_configuration.0.ipv4_enabled"] - if server != local && 
len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.ip_configuration.ipv4_enabled mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatBool(instance.Settings.IpConfiguration.RequireSsl) - local = attributes["settings.0.ip_configuration.0.require_ssl"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.ip_configuration.require_ssl mismatch, (%s, %s)", server, local) - } - } - - if instance.Settings.LocationPreference != nil { - server = instance.Settings.LocationPreference.FollowGaeApplication - local = attributes["settings.0.location_preference.0.follow_gae_application"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.location_preference.follow_gae_application mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.LocationPreference.Zone - local = attributes["settings.0.location_preference.0.zone"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.location_preference.zone mismatch, (%s, %s)", server, local) - } - } - - if instance.Settings.MaintenanceWindow != nil { - server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Day, 10) - local = attributes["settings.0.maintenance_window.0.day"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.maintenance_window.day mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Hour, 10) - local = attributes["settings.0.maintenance_window.0.hour"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.maintenance_window.hour mismatch, (%s, %s)", server, local) - } - - server = instance.Settings.MaintenanceWindow.UpdateTrack - local = attributes["settings.0.maintenance_window.0.update_track"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error 
settings.maintenance_window.update_track mismatch, (%s, %s)", server, local) - } - } - - server = instance.Settings.PricingPlan - local = attributes["settings.0.pricing_plan"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local) - } - - if instance.ReplicaConfiguration != nil { - server = strconv.FormatBool(instance.ReplicaConfiguration.FailoverTarget) - local = attributes["replica_configuration.0.failover_target"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.failover_target mismatch, (%s, %s)", server, local) - } - } - - return nil - } -} - -func testAccCheckGoogleSqlDatabaseInstanceExists(n string, - instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - found, err := config.clientSqlAdmin.Instances.Get(config.Project, - rs.Primary.Attributes["name"]).Do() - - *instance = *found - - if err != nil { - return fmt.Errorf("Not found: %s", n) - } - - return nil - } -} - -func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - config := testAccProvider.Meta().(*Config) - if rs.Type != "google_sql_database_instance" { - continue - } - - _, err := config.clientSqlAdmin.Instances.Get(config.Project, - rs.Primary.Attributes["name"]).Do() - if err == nil { - return fmt.Errorf("Database Instance still exists") - } - } - - return nil -} - -func testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( - instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - users, err := config.clientSqlAdmin.Users.List(config.Project, instance.Name).Do() - - if err != nil { - return 
fmt.Errorf("Could not list database users for %q: %s", instance.Name, err) - } - - for _, u := range users.Items { - if u.Name == "root" && u.Host == "%" { - return fmt.Errorf("%v@%v user still exists", u.Name, u.Host) - } - } - - return nil - } -} - -var testGoogleSqlDatabaseInstance_basic = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - } -} -` - -var testGoogleSqlDatabaseInstance_basic2 = ` -resource "google_sql_database_instance" "instance" { - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - } -} -` -var testGoogleSqlDatabaseInstance_basic3 = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central" - settings { - tier = "db-f1-micro" - } -} -` - -var testGoogleSqlDatabaseInstance_settings = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - replication_type = "ASYNCHRONOUS" - location_preference { - zone = "us-central1-f" - } - - ip_configuration { - ipv4_enabled = "true" - authorized_networks { - value = "108.12.12.12" - name = "misc" - expiration_time = "2017-11-15T16:19:00.094Z" - } - } - - backup_configuration { - enabled = "true" - start_time = "19:19" - } - - activation_policy = "ON_DEMAND" - } -} -` - -// Note - this test is not feasible to run unless we generate -// backups first. 
-var testGoogleSqlDatabaseInstance_replica = ` -resource "google_sql_database_instance" "instance_master" { - name = "tf-lw-%d" - database_version = "MYSQL_5_6" - region = "us-east1" - - settings { - tier = "D0" - crash_safe_replication = true - - backup_configuration { - enabled = true - start_time = "00:00" - binary_log_enabled = true - } - } -} - -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - database_version = "MYSQL_5_6" - region = "us-central" - - settings { - tier = "D0" - } - - master_instance_name = "${google_sql_database_instance.instance_master.name}" - - replica_configuration { - ca_certificate = "${file("~/tmp/fake.pem")}" - client_certificate = "${file("~/tmp/fake.pem")}" - client_key = "${file("~/tmp/fake.pem")}" - connect_retry_interval = 100 - master_heartbeat_period = 10000 - password = "password" - username = "username" - ssl_cipher = "ALL" - verify_server_certificate = false - } -} -` - -var testGoogleSqlDatabaseInstance_slave = ` -resource "google_sql_database_instance" "instance_master" { - name = "tf-lw-%d" - region = "us-central1" - - settings { - tier = "db-f1-micro" - - backup_configuration { - enabled = true - binary_log_enabled = true - } - } -} - -resource "google_sql_database_instance" "instance_slave" { - name = "tf-lw-%d" - region = "us-central1" - - master_instance_name = "${google_sql_database_instance.instance_master.name}" - - settings { - tier = "db-f1-micro" - } -} -` - -var testGoogleSqlDatabaseInstance_diskspecs = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central1" - - settings { - tier = "db-f1-micro" - disk_autoresize = true - disk_size = 15 - disk_type = "PD_HDD" - } -} -` - -var testGoogleSqlDatabaseInstance_maintenance = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central1" - - settings { - tier = "db-f1-micro" - - maintenance_window { - day = 7 - hour = 3 - update_track = "canary" - } - } -} -` - 
-var testGoogleSqlDatabaseInstance_authNets_step1 = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - - ip_configuration { - ipv4_enabled = "true" - authorized_networks { - value = "108.12.12.12" - name = "misc" - expiration_time = "2017-11-15T16:19:00.094Z" - } - } - } -} -` - -var testGoogleSqlDatabaseInstance_authNets_step2 = ` -resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - - ip_configuration { - ipv4_enabled = "true" - } - } -} -` - -var testGoogleSqlDatabaseInstance_multipleOperations = ` -resource "google_sql_database_instance" "instance" { - name = "tf-test-%s" - region = "us-central" - settings { - tier = "D0" - crash_safe_replication = false - } -} - -resource "google_sql_database" "database" { - name = "tf-test-%s" - instance = "${google_sql_database_instance.instance.name}" -} - -resource "google_sql_user" "user" { - name = "tf-test-%s" - instance = "${google_sql_database_instance.instance.name}" - host = "google.com" - password = "hunter2" -} -` diff --git a/builtin/providers/google/resource_sql_database_test.go b/builtin/providers/google/resource_sql_database_test.go deleted file mode 100644 index 509fa1de1..000000000 --- a/builtin/providers/google/resource_sql_database_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/sqladmin/v1beta4" -) - -func TestAccGoogleSqlDatabase_basic(t *testing.T) { - var database sqladmin.Database - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, - Steps: []resource.TestStep{ 
- resource.TestStep{ - Config: testGoogleSqlDatabase_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlDatabaseExists( - "google_sql_database.database", &database), - testAccCheckGoogleSqlDatabaseEquals( - "google_sql_database.database", &database), - ), - }, - }, - }) -} - -func testAccCheckGoogleSqlDatabaseEquals(n string, - database *sqladmin.Database) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - database_name := rs.Primary.Attributes["name"] - instance_name := rs.Primary.Attributes["instance"] - - if database_name != database.Name { - return fmt.Errorf("Error name mismatch, (%s, %s)", database_name, database.Name) - } - - if instance_name != database.Instance { - return fmt.Errorf("Error instance_name mismatch, (%s, %s)", instance_name, database.Instance) - } - - return nil - } -} - -func testAccCheckGoogleSqlDatabaseExists(n string, - database *sqladmin.Database) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - database_name := rs.Primary.Attributes["name"] - instance_name := rs.Primary.Attributes["instance"] - found, err := config.clientSqlAdmin.Databases.Get(config.Project, - instance_name, database_name).Do() - - if err != nil { - return fmt.Errorf("Not found: %s: %s", n, err) - } - - *database = *found - - return nil - } -} - -func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - config := testAccProvider.Meta().(*Config) - if rs.Type != "google_sql_database" { - continue - } - - database_name := rs.Primary.Attributes["name"] - instance_name := rs.Primary.Attributes["instance"] - _, err := config.clientSqlAdmin.Databases.Get(config.Project, - instance_name, database_name).Do() - - 
if err == nil { - return fmt.Errorf("Database resource still exists") - } - } - - return nil -} - -var testGoogleSqlDatabase_basic = fmt.Sprintf(` -resource "google_sql_database_instance" "instance" { - name = "sqldatabasetest%s" - region = "us-central" - settings { - tier = "D0" - } -} - -resource "google_sql_database" "database" { - name = "sqldatabasetest%s" - instance = "${google_sql_database_instance.instance.name}" -} -`, acctest.RandString(10), acctest.RandString(10)) diff --git a/builtin/providers/google/resource_sql_user.go b/builtin/providers/google/resource_sql_user.go deleted file mode 100644 index bc98f2bb7..000000000 --- a/builtin/providers/google/resource_sql_user.go +++ /dev/null @@ -1,221 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/sqladmin/v1beta4" -) - -func resourceSqlUser() *schema.Resource { - return &schema.Resource{ - Create: resourceSqlUserCreate, - Read: resourceSqlUserRead, - Update: resourceSqlUserUpdate, - Delete: resourceSqlUserDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - SchemaVersion: 1, - MigrateState: resourceSqlUserMigrateState, - - Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - instance := 
d.Get("instance").(string) - password := d.Get("password").(string) - host := d.Get("host").(string) - - user := &sqladmin.User{ - Name: name, - Instance: instance, - Password: password, - Host: host, - } - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - op, err := config.clientSqlAdmin.Users.Insert(project, instance, - user).Do() - - if err != nil { - return fmt.Errorf("Error, failed to insert "+ - "user %s into instance %s: %s", name, instance, err) - } - - d.SetId(fmt.Sprintf("%s/%s", instance, name)) - - err = sqladminOperationWait(config, op, "Insert User") - - if err != nil { - return fmt.Errorf("Error, failure waiting for insertion of %s "+ - "into %s: %s", name, instance, err) - } - - return resourceSqlUserRead(d, meta) -} - -func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceAndName := strings.SplitN(d.Id(), "/", 2) - if len(instanceAndName) != 2 { - return fmt.Errorf( - "Wrong number of arguments when specifying imported id. Expected: 2. Saw: %d. 
Expected Input: $INSTANCENAME/$SQLUSERNAME Input: %s", - len(instanceAndName), - d.Id()) - } - - instance := instanceAndName[0] - name := instanceAndName[1] - - users, err := config.clientSqlAdmin.Users.List(project, instance).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) - } - - var user *sqladmin.User - for _, currentUser := range users.Items { - if currentUser.Name == name { - user = currentUser - break - } - } - - if user == nil { - log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - d.Set("host", user.Host) - d.Set("instance", user.Instance) - d.Set("name", user.Name) - return nil -} - -func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("password") { - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - instance := d.Get("instance").(string) - host := d.Get("host").(string) - password := d.Get("password").(string) - - user := &sqladmin.User{ - Name: name, - Instance: instance, - Password: password, - Host: host, - } - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - op, err := config.clientSqlAdmin.Users.Update(project, instance, host, name, - user).Do() - - if err != nil { - return fmt.Errorf("Error, failed to update"+ - "user %s into user %s: %s", name, instance, err) - } - - err = sqladminOperationWait(config, op, "Insert User") - - if err != nil { - return fmt.Errorf("Error, failure waiting for update of %s "+ - "in %s: %s", name, instance, err) - } - - return resourceSqlUserRead(d, meta) - } - - return nil -} - -func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := 
d.Get("name").(string) - instance := d.Get("instance").(string) - host := d.Get("host").(string) - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do() - - if err != nil { - return fmt.Errorf("Error, failed to delete"+ - "user %s in instance %s: %s", name, - instance, err) - } - - err = sqladminOperationWait(config, op, "Delete User") - - if err != nil { - return fmt.Errorf("Error, failure waiting for deletion of %s "+ - "in %s: %s", name, instance, err) - } - - return nil -} diff --git a/builtin/providers/google/resource_sql_user_migrate.go b/builtin/providers/google/resource_sql_user_migrate.go deleted file mode 100644 index 7f52771ad..000000000 --- a/builtin/providers/google/resource_sql_user_migrate.go +++ /dev/null @@ -1,39 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceSqlUserMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Google Sql User State v0; migrating to v1") - is, err := migrateSqlUserStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateSqlUserStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - name := is.Attributes["name"] - instance := is.Attributes["instance"] - is.ID = fmt.Sprintf("%s/%s", instance, name) - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/google/resource_sql_user_migrate_test.go 
b/builtin/providers/google/resource_sql_user_migrate_test.go deleted file mode 100644 index 5e03d8d75..000000000 --- a/builtin/providers/google/resource_sql_user_migrate_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestSqlUserMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - ID string - ExpectedID string - }{ - "change id from $NAME to $INSTANCENAME.$NAME": { - StateVersion: 0, - Attributes: map[string]string{ - "name": "tf-user", - "instance": "tf-instance", - }, - Expected: map[string]string{ - "name": "tf-user", - "instance": "tf-instance", - }, - Meta: &Config{}, - ID: "tf-user", - ExpectedID: "tf-instance/tf-user", - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: tc.ID, - Attributes: tc.Attributes, - } - is, err := resourceSqlUserMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - if is.ID != tc.ExpectedID { - t.Fatalf("bad ID.\n\n expected: %s\n got: %s", tc.ExpectedID, is.ID) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) - } - } - } -} - -func TestSqlUserMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta *Config - - // should handle nil - is, err := resourceSqlUserMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceSqlUserMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/google/resource_sql_user_test.go 
b/builtin/providers/google/resource_sql_user_test.go deleted file mode 100644 index 0b91b398c..000000000 --- a/builtin/providers/google/resource_sql_user_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGoogleSqlUser_basic(t *testing.T) { - user := acctest.RandString(10) - instance := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleSqlUser_basic(instance, user), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlUserExists("google_sql_user.user"), - ), - }, - }, - }) -} - -func TestAccGoogleSqlUser_update(t *testing.T) { - user := acctest.RandString(10) - instance := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleSqlUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleSqlUser_basic(instance, user), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlUserExists("google_sql_user.user"), - ), - }, - - resource.TestStep{ - Config: testGoogleSqlUser_basic2(instance, user), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleSqlUserExists("google_sql_user.user"), - ), - }, - }, - }) -} - -func testAccCheckGoogleSqlUserExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - name := rs.Primary.Attributes["name"] - instance := rs.Primary.Attributes["instance"] - host := rs.Primary.Attributes["host"] - users, err := 
config.clientSqlAdmin.Users.List(config.Project, - instance).Do() - - for _, user := range users.Items { - if user.Name == name && user.Host == host { - return nil - } - } - - return fmt.Errorf("Not found: %s: %s", n, err) - } -} - -func testAccGoogleSqlUserDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - config := testAccProvider.Meta().(*Config) - if rs.Type != "google_sql_database" { - continue - } - - name := rs.Primary.Attributes["name"] - instance := rs.Primary.Attributes["instance"] - host := rs.Primary.Attributes["host"] - users, err := config.clientSqlAdmin.Users.List(config.Project, - instance).Do() - - for _, user := range users.Items { - if user.Name == name && user.Host == host { - return fmt.Errorf("User still %s exists %s", name, err) - } - } - - return nil - } - - return nil -} - -func testGoogleSqlUser_basic(instance, user string) string { - return fmt.Sprintf(` - resource "google_sql_database_instance" "instance" { - name = "i%s" - region = "us-central" - settings { - tier = "D0" - } - } - - resource "google_sql_user" "user" { - name = "user%s" - instance = "${google_sql_database_instance.instance.name}" - host = "google.com" - password = "hunter2" - } - `, instance, user) -} - -func testGoogleSqlUser_basic2(instance, user string) string { - return fmt.Sprintf(` - resource "google_sql_database_instance" "instance" { - name = "i%s" - region = "us-central" - settings { - tier = "D0" - } - } - - resource "google_sql_user" "user" { - name = "user%s" - instance = "${google_sql_database_instance.instance.name}" - host = "google.com" - password = "oops" - } - `, instance, user) -} diff --git a/builtin/providers/google/resource_storage_bucket.go b/builtin/providers/google/resource_storage_bucket.go deleted file mode 100644 index b60b76acb..000000000 --- a/builtin/providers/google/resource_storage_bucket.go +++ /dev/null @@ -1,380 +0,0 @@ -package google - -import ( - "errors" - "fmt" - "log" - "time" - - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/googleapi" - "google.golang.org/api/storage/v1" -) - -func resourceStorageBucket() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageBucketCreate, - Read: resourceStorageBucketRead, - Update: resourceStorageBucketUpdate, - Delete: resourceStorageBucketDelete, - Importer: &schema.ResourceImporter{ - State: resourceStorageBucketStateImporter, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "force_destroy": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "location": &schema.Schema{ - Type: schema.TypeString, - Default: "US", - Optional: true, - ForceNew: true, - }, - - "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "storage_class": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "STANDARD", - ForceNew: true, - }, - - "website": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "main_page_suffix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "not_found_page": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "cors": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "origin": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: 
schema.TypeString, - }, - }, - "method": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "response_header": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "max_age_seconds": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Get the bucket and acl - bucket := d.Get("name").(string) - location := d.Get("location").(string) - - // Create a bucket, setting the acl, location and name. - sb := &storage.Bucket{Name: bucket, Location: location} - - if v, ok := d.GetOk("storage_class"); ok { - sb.StorageClass = v.(string) - } - - if v, ok := d.GetOk("website"); ok { - websites := v.([]interface{}) - - if len(websites) > 1 { - return fmt.Errorf("At most one website block is allowed") - } - - sb.Website = &storage.BucketWebsite{} - - website := websites[0].(map[string]interface{}) - - if v, ok := website["not_found_page"]; ok { - sb.Website.NotFoundPage = v.(string) - } - - if v, ok := website["main_page_suffix"]; ok { - sb.Website.MainPageSuffix = v.(string) - } - } - - if v, ok := d.GetOk("cors"); ok { - sb.Cors = expandCors(v.([]interface{})) - } - - var res *storage.Bucket - - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - call := config.clientStorage.Buckets.Insert(project, sb) - if v, ok := d.GetOk("predefined_acl"); ok { - call = call.PredefinedAcl(v.(string)) - } - - res, err = call.Do() - if err == nil { - return nil - } - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { - return resource.RetryableError(gerr) - } - return resource.NonRetryableError(err) - }) - - if err != nil { - fmt.Printf("Error creating bucket %s: %v", bucket, err) - return err - } - 
- log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) - - d.SetId(res.Id) - return resourceStorageBucketRead(d, meta) -} - -func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - sb := &storage.Bucket{} - - if d.HasChange("website") { - if v, ok := d.GetOk("website"); ok { - websites := v.([]interface{}) - - if len(websites) > 1 { - return fmt.Errorf("At most one website block is allowed") - } - - // Setting fields to "" to be explicit that the PATCH call will - // delete this field. - if len(websites) == 0 { - sb.Website.NotFoundPage = "" - sb.Website.MainPageSuffix = "" - } else { - website := websites[0].(map[string]interface{}) - sb.Website = &storage.BucketWebsite{} - if v, ok := website["not_found_page"]; ok { - sb.Website.NotFoundPage = v.(string) - } else { - sb.Website.NotFoundPage = "" - } - - if v, ok := website["main_page_suffix"]; ok { - sb.Website.MainPageSuffix = v.(string) - } else { - sb.Website.MainPageSuffix = "" - } - } - } - } - - if v, ok := d.GetOk("cors"); ok { - sb.Cors = expandCors(v.([]interface{})) - } - - res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do() - - if err != nil { - return err - } - - log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) - - // Assign the bucket ID as the resource ID - d.Set("self_link", res.SelfLink) - d.SetId(res.Id) - - return nil -} - -func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Get the bucket and acl - bucket := d.Get("name").(string) - res, err := config.clientStorage.Buckets.Get(bucket).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) - } - - log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) - - // Update the bucket ID according to the resource ID - d.Set("self_link", res.SelfLink) - 
d.Set("url", fmt.Sprintf("gs://%s", bucket)) - d.Set("storage_class", res.StorageClass) - d.Set("location", res.Location) - d.Set("cors", flattenCors(res.Cors)) - d.SetId(res.Id) - return nil -} - -func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Get the bucket - bucket := d.Get("name").(string) - - for { - res, err := config.clientStorage.Objects.List(bucket).Do() - if err != nil { - fmt.Printf("Error Objects.List failed: %v", err) - return err - } - - if len(res.Items) != 0 { - if d.Get("force_destroy").(bool) { - // purge the bucket... - log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") - - for _, object := range res.Items { - log.Printf("[DEBUG] Found %s", object.Name) - if err := config.clientStorage.Objects.Delete(bucket, object.Name).Do(); err != nil { - log.Fatalf("Error trying to delete object: %s %s\n\n", object.Name, err) - } else { - log.Printf("Object deleted: %s \n\n", object.Name) - } - } - - } else { - delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true") - log.Printf("Error! 
%s : %s\n\n", bucket, delete_err) - return delete_err - } - } else { - break // 0 items, bucket empty - } - } - - // remove empty bucket - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - err := config.clientStorage.Buckets.Delete(bucket).Do() - if err == nil { - return nil - } - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { - return resource.RetryableError(gerr) - } - return resource.NonRetryableError(err) - }) - if err != nil { - fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err) - return err - } - log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) - - return nil -} - -func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil -} - -func expandCors(configured []interface{}) []*storage.BucketCors { - corsRules := make([]*storage.BucketCors, 0, len(configured)) - for _, raw := range configured { - data := raw.(map[string]interface{}) - corsRule := storage.BucketCors{ - Origin: convertSchemaArrayToStringArray(data["origin"].([]interface{})), - Method: convertSchemaArrayToStringArray(data["method"].([]interface{})), - ResponseHeader: convertSchemaArrayToStringArray(data["response_header"].([]interface{})), - MaxAgeSeconds: int64(data["max_age_seconds"].(int)), - } - - corsRules = append(corsRules, &corsRule) - } - return corsRules -} - -func convertSchemaArrayToStringArray(input []interface{}) []string { - output := make([]string, 0, len(input)) - for _, val := range input { - output = append(output, val.(string)) - } - - return output -} - -func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} { - corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) - for _, corsRule := range corsRules { - data := map[string]interface{}{ - "origin": corsRule.Origin, - "method": corsRule.Method, - "response_header": corsRule.ResponseHeader, - "max_age_seconds": 
corsRule.MaxAgeSeconds, - } - - corsRulesSchema = append(corsRulesSchema, data) - } - return corsRulesSchema -} diff --git a/builtin/providers/google/resource_storage_bucket_acl.go b/builtin/providers/google/resource_storage_bucket_acl.go deleted file mode 100644 index 428c1cecf..000000000 --- a/builtin/providers/google/resource_storage_bucket_acl.go +++ /dev/null @@ -1,294 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/storage/v1" -) - -func resourceStorageBucketAcl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageBucketAclCreate, - Read: resourceStorageBucketAclRead, - Update: resourceStorageBucketAclUpdate, - Delete: resourceStorageBucketAclDelete, - - Schema: map[string]*schema.Schema{ - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "default_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "role_entity": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -type RoleEntity struct { - Role string - Entity string -} - -func getBucketAclId(bucket string) string { - return bucket + "-acl" -} - -func getRoleEntityPair(role_entity string) (*RoleEntity, error) { - split := strings.Split(role_entity, ":") - if len(split) != 2 { - return nil, fmt.Errorf("Error, each role entity pair must be " + - "formatted as ROLE:entity") - } - - return &RoleEntity{Role: split[0], Entity: split[1]}, nil -} - -func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - predefined_acl := "" - default_acl := "" - role_entity := make([]interface{}, 0) - - if v, ok := d.GetOk("predefined_acl"); ok { - predefined_acl = 
v.(string) - } - - if v, ok := d.GetOk("role_entity"); ok { - role_entity = v.([]interface{}) - } - - if v, ok := d.GetOk("default_acl"); ok { - default_acl = v.(string) - } - - if len(predefined_acl) > 0 { - if len(role_entity) > 0 { - return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\"") - } - - res, err := config.clientStorage.Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - res, err = config.clientStorage.Buckets.Update(bucket, - res).PredefinedAcl(predefined_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } else if len(role_entity) > 0 { - for _, v := range role_entity { - pair, err := getRoleEntityPair(v.(string)) - - bucketAccessControl := &storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) - - _, err = config.clientStorage.BucketAccessControls.Insert(bucket, bucketAccessControl).Do() - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - return resourceStorageBucketAclRead(d, meta) - } - - if len(default_acl) > 0 { - res, err := config.clientStorage.Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - res, err = config.clientStorage.Buckets.Update(bucket, - res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } - - return nil -} - -func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - - // Predefined ACLs cannot easily be parsed once they have been processed - // by the GCP server - if _, ok := 
d.GetOk("predefined_acl"); !ok { - role_entity := make([]interface{}, 0) - re_local := d.Get("role_entity").([]interface{}) - re_local_map := make(map[string]string) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - re_local_map[res.Entity] = res.Role - } - - res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) - } - - for _, v := range res.Items { - log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity) - // We only store updates to the locally defined access controls - if _, in := re_local_map[v.Entity]; in { - role_entity = append(role_entity, fmt.Sprintf("%s:%s", v.Role, v.Entity)) - log.Printf("[DEBUG]: saving re %s-%s", v.Role, v.Entity) - } - } - - d.Set("role_entity", role_entity) - } - - d.SetId(getBucketAclId(bucket)) - return nil -} - -func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - - if d.HasChange("role_entity") { - o, n := d.GetChange("role_entity") - old_re, new_re := o.([]interface{}), n.([]interface{}) - - old_re_map := make(map[string]string) - for _, v := range old_re { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - old_re_map[res.Entity] = res.Role - } - - for _, v := range new_re { - pair, err := getRoleEntityPair(v.(string)) - - bucketAccessControl := &storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - // If the old state is missing this entity, it needs to - // be created. 
Otherwise it is updated - if _, ok := old_re_map[pair.Entity]; ok { - _, err = config.clientStorage.BucketAccessControls.Update( - bucket, pair.Entity, bucketAccessControl).Do() - } else { - _, err = config.clientStorage.BucketAccessControls.Insert( - bucket, bucketAccessControl).Do() - } - - // Now we only store the keys that have to be removed - delete(old_re_map, pair.Entity) - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - for entity, _ := range old_re_map { - log.Printf("[DEBUG]: removing entity %s", entity) - err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() - - if err != nil { - return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - return resourceStorageBucketAclRead(d, meta) - } - - if d.HasChange("default_acl") { - default_acl := d.Get("default_acl").(string) - - res, err := config.clientStorage.Buckets.Get(bucket).Do() - - if err != nil { - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - res, err = config.clientStorage.Buckets.Update(bucket, - res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } - - return nil -} - -func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - - re_local := d.Get("role_entity").([]interface{}) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - - log.Printf("[DEBUG]: removing entity %s", res.Entity) - - err = config.clientStorage.BucketAccessControls.Delete(bucket, res.Entity).Do() - - if err != nil { - return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) - } - } - - return nil -} diff --git a/builtin/providers/google/resource_storage_bucket_acl_test.go 
b/builtin/providers/google/resource_storage_bucket_acl_test.go deleted file mode 100644 index 05de2d5ed..000000000 --- a/builtin/providers/google/resource_storage_bucket_acl_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - //"google.golang.org/api/storage/v1" -) - -var roleEntityBasic1 = "OWNER:user-omeemail@gmail.com" - -var roleEntityBasic2 = "READER:user-anotheremail@gmail.com" - -var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" - -var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" - -func testBucketName() string { - return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt()) -} - -func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { - bucketName := testBucketName() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageBucketAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic1(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), - ), - }, - }, - }) -} - -func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { - bucketName := testBucketName() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageBucketAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic1(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), - ), - }, - - resource.TestStep{ - Config: 
testGoogleStorageBucketsAclBasic2(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasicDelete(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), - ), - }, - }, - }) -} - -func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { - bucketName := testBucketName() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageBucketAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic2(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic3(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_reader), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageBucketsAclBasicDelete(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), - ), - }, - }, - }) -} - -func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { - bucketName := testBucketName() - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageBucketAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsAclPredefined(bucketName), - }, - }, - }) -} - -func testAccCheckGoogleStorageBucketAclDelete(bucket, roleEntityS string) resource.TestCheckFunc { - return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) - config := testAccProvider.Meta().(*Config) - - _, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() - - if err != nil { - return nil - } - - return fmt.Errorf("Error, entity %s still exists", roleEntity.Entity) - } -} - -func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.TestCheckFunc { - return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) - config := testAccProvider.Meta().(*Config) - - res, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() - - if err != nil { - return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) - } - - if res.Role != roleEntity.Role { - return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) - } - - return nil - } -} - -func testAccGoogleStorageBucketAclDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_storage_bucket_acl" { - continue - } - - bucket := rs.Primary.Attributes["bucket"] - - _, err := config.clientStorage.BucketAccessControls.List(bucket).Do() - - if err == nil { - return fmt.Errorf("Acl for bucket %s still exists", bucket) - } - } - - return nil -} - -func testGoogleStorageBucketsAclBasic1(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_acl" "acl" { - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", 
"%s"] -} -`, bucketName, roleEntityBasic1, roleEntityBasic2) -} - -func testGoogleStorageBucketsAclBasic2(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_acl" "acl" { - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", "%s"] -} -`, bucketName, roleEntityBasic2, roleEntityBasic3_owner) -} - -func testGoogleStorageBucketsAclBasicDelete(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_acl" "acl" { - bucket = "${google_storage_bucket.bucket.name}" - role_entity = [] -} -`, bucketName) -} - -func testGoogleStorageBucketsAclBasic3(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_acl" "acl" { - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", "%s"] -} -`, bucketName, roleEntityBasic2, roleEntityBasic3_reader) -} - -func testGoogleStorageBucketsAclPredefined(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_acl" "acl" { - bucket = "${google_storage_bucket.bucket.name}" - predefined_acl = "projectPrivate" - default_acl = "projectPrivate" -} -`, bucketName) -} diff --git a/builtin/providers/google/resource_storage_bucket_object.go b/builtin/providers/google/resource_storage_bucket_object.go deleted file mode 100644 index bbf9c1f28..000000000 --- a/builtin/providers/google/resource_storage_bucket_object.go +++ /dev/null @@ -1,226 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/googleapi" - "google.golang.org/api/storage/v1" -) - -func resourceStorageBucketObject() *schema.Resource { - return &schema.Resource{ - Create: 
resourceStorageBucketObjectCreate, - Read: resourceStorageBucketObjectRead, - Delete: resourceStorageBucketObjectDelete, - - Schema: map[string]*schema.Schema{ - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cache_control": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "content_disposition": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "content_encoding": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "content_language": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "content_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source"}, - }, - - "crc32c": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "md5hash": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"content"}, - }, - - "storage_class": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - } -} - -func objectGetId(object *storage.Object) string { - return object.Bucket + "-" + object.Name -} - -func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - var media io.Reader - - if v, ok := 
d.GetOk("source"); ok { - err := error(nil) - media, err = os.Open(v.(string)) - if err != nil { - return err - } - } else if v, ok := d.GetOk("content"); ok { - media = bytes.NewReader([]byte(v.(string))) - } else { - return fmt.Errorf("Error, either \"content\" or \"string\" must be specified") - } - - objectsService := storage.NewObjectsService(config.clientStorage) - object := &storage.Object{Bucket: bucket} - - if v, ok := d.GetOk("cache_control"); ok { - object.CacheControl = v.(string) - } - - if v, ok := d.GetOk("content_disposition"); ok { - object.ContentDisposition = v.(string) - } - - if v, ok := d.GetOk("content_encoding"); ok { - object.ContentEncoding = v.(string) - } - - if v, ok := d.GetOk("content_language"); ok { - object.ContentLanguage = v.(string) - } - - if v, ok := d.GetOk("content_type"); ok { - object.ContentType = v.(string) - } - - if v, ok := d.GetOk("storage_class"); ok { - object.StorageClass = v.(string) - } - - insertCall := objectsService.Insert(bucket, object) - insertCall.Name(name) - insertCall.Media(media) - if v, ok := d.GetOk("predefined_acl"); ok { - insertCall.PredefinedAcl(v.(string)) - } - - _, err := insertCall.Do() - - if err != nil { - return fmt.Errorf("Error uploading object %s: %s", name, err) - } - - return resourceStorageBucketObjectRead(d, meta) -} - -func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := storage.NewObjectsService(config.clientStorage) - getCall := objectsService.Get(bucket, name) - - res, err := getCall.Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) - } - - d.Set("md5hash", res.Md5Hash) - d.Set("crc32c", res.Crc32c) - d.Set("cache_control", res.CacheControl) - d.Set("content_disposition", res.ContentDisposition) - d.Set("content_encoding", res.ContentEncoding) - 
d.Set("content_language", res.ContentLanguage) - d.Set("content_type", res.ContentType) - d.Set("storage_class", res.StorageClass) - - d.SetId(objectGetId(res)) - - return nil -} - -func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := storage.NewObjectsService(config.clientStorage) - - DeleteCall := objectsService.Delete(bucket, name) - err := DeleteCall.Do() - - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Bucket Object %q because it's gone", name) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error deleting contents of object %s: %s", name, err) - } - - return nil -} diff --git a/builtin/providers/google/resource_storage_bucket_object_test.go b/builtin/providers/google/resource_storage_bucket_object_test.go deleted file mode 100644 index d3eff46db..000000000 --- a/builtin/providers/google/resource_storage_bucket_object_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package google - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - "io/ioutil" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/storage/v1" -) - -var tf, err = ioutil.TempFile("", "tf-gce-test") -var bucketName = "tf-gce-bucket-test" -var objectName = "tf-gce-test" -var content = "now this is content!" 
- -func TestAccGoogleStorageObject_basic(t *testing.T) { - bucketName := testBucketName() - data := []byte("data data data") - h := md5.New() - h.Write(data) - data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) - - ioutil.WriteFile(tf.Name(), data, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if err != nil { - panic(err) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsObjectBasic(bucketName), - Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), - }, - }, - }) -} - -func TestAccGoogleStorageObject_content(t *testing.T) { - bucketName := testBucketName() - data := []byte(content) - h := md5.New() - h.Write(data) - data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) - - ioutil.WriteFile(tf.Name(), data, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if err != nil { - panic(err) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsObjectContent(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "content_type", "text/plain; charset=utf-8"), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "storage_class", "STANDARD"), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObject_withContentCharacteristics(t *testing.T) { - bucketName := testBucketName() - data := []byte(content) - h := md5.New() - h.Write(data) - data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) - ioutil.WriteFile(tf.Name(), data, 0644) - - disposition, encoding, language, content_type := "inline", "compress", "en", "binary/octet-stream" - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { - if err != nil { - panic(err) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsObject_optionalContentFields( - bucketName, disposition, encoding, language, content_type), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "content_disposition", disposition), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "content_encoding", encoding), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "content_language", language), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "content_type", content_type), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObject_cacheControl(t *testing.T) { - bucketName := testBucketName() - data := []byte(content) - h := md5.New() - h.Write(data) - data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) - ioutil.WriteFile(tf.Name(), data, 0644) - - cacheControl := "private" - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if err != nil { - panic(err) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "cache_control", cacheControl), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObject_storageClass(t *testing.T) { - bucketName := testBucketName() - data := []byte(content) - h := md5.New() - h.Write(data) - data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) 
- ioutil.WriteFile(tf.Name(), data, 0644) - - storageClass := "MULTI_REGIONAL" - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if err != nil { - panic(err) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsObject_storageClass(bucketName, storageClass), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), - resource.TestCheckResourceAttr( - "google_storage_bucket_object.object", "storage_class", storageClass), - ), - }, - }, - }) -} - -func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - objectsService := storage.NewObjectsService(config.clientStorage) - - getCall := objectsService.Get(bucket, object) - res, err := getCall.Do() - - if err != nil { - return fmt.Errorf("Error retrieving contents of object %s: %s", object, err) - } - - if md5 != res.Md5Hash { - return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash) - } - - return nil - } -} - -func testAccGoogleStorageObjectDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_storage_bucket_object" { - continue - } - - bucket := rs.Primary.Attributes["bucket"] - name := rs.Primary.Attributes["name"] - - objectsService := storage.NewObjectsService(config.clientStorage) - - getCall := objectsService.Get(bucket, name) - _, err := getCall.Do() - - if err == nil { - return fmt.Errorf("Object %s still exists", name) - } - } - - return nil -} - -func testGoogleStorageBucketsObjectContent(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource 
"google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - content = "%s" - predefined_acl = "projectPrivate" -} -`, bucketName, objectName, content) -} - -func testGoogleStorageBucketsObjectBasic(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" - predefined_acl = "projectPrivate" -} -`, bucketName, objectName, tf.Name()) -} - -func testGoogleStorageBucketsObject_optionalContentFields( - bucketName, disposition, encoding, language, content_type string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - content = "%s" - content_disposition = "%s" - content_encoding = "%s" - content_language = "%s" - content_type = "%s" -} -`, bucketName, objectName, content, disposition, encoding, language, content_type) -} - -func testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" - cache_control = "%s" -} -`, bucketName, objectName, tf.Name(), cacheControl) -} - -func testGoogleStorageBucketsObject_storageClass(bucketName string, storageClass string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - content = "%s" - storage_class = "%s" -} -`, bucketName, objectName, content, storageClass) -} diff --git a/builtin/providers/google/resource_storage_bucket_test.go 
b/builtin/providers/google/resource_storage_bucket_test.go deleted file mode 100644 index cc0518044..000000000 --- a/builtin/providers/google/resource_storage_bucket_test.go +++ /dev/null @@ -1,382 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/googleapi" - storage "google.golang.org/api/storage/v1" -) - -func TestAccStorageBucket_basic(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageBucket_basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "US"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "force_destroy", "false"), - ), - }, - }, - }) -} - -func TestAccStorageBucket_customAttributes(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageBucket_customAttributes(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "EU"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "force_destroy", "true"), - 
), - }, - }, - }) -} - -func TestAccStorageBucket_storageClass(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccStorageBucket_storageClass(bucketName, "MULTI_REGIONAL", ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "storage_class", "MULTI_REGIONAL"), - ), - }, - { - Config: testAccStorageBucket_storageClass(bucketName, "NEARLINE", ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "storage_class", "NEARLINE"), - ), - }, - { - Config: testAccStorageBucket_storageClass(bucketName, "REGIONAL", "US-CENTRAL1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "storage_class", "REGIONAL"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "US-CENTRAL1"), - ), - }, - }, - }) -} - -func TestAccStorageBucket_update(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageBucket_basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - 
resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "US"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "force_destroy", "false"), - ), - }, - resource.TestStep{ - Config: testAccStorageBucket_customAttributes(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "EU"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "force_destroy", "true"), - ), - }, - }, - }) -} - -func TestAccStorageBucket_forceDestroy(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccStorageBucket_customAttributes(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - ), - }, - resource.TestStep{ - Config: testAccStorageBucket_customAttributes(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketPutItem(bucketName), - ), - }, - resource.TestStep{ - Config: testAccStorageBucket_customAttributes("idontexist"), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketMissing(bucketName), - ), - }, - }, - }) -} - -func TestAccStorageBucket_cors(t *testing.T) { - var bucket storage.Bucket - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccStorageBucketDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testGoogleStorageBucketsCors(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - ), - }, - }, - }) - - if len(bucket.Cors) != 2 { - t.Errorf("Expected # of cors elements to be 2, got %d", len(bucket.Cors)) - } - - firstArr := bucket.Cors[0] - if firstArr.MaxAgeSeconds != 10 { - t.Errorf("Expected first block's MaxAgeSeconds to be 10, got %d", firstArr.MaxAgeSeconds) - } - - for i, v := range []string{"abc", "def"} { - if firstArr.Origin[i] != v { - t.Errorf("Expected value in first block origin to be to be %v, got %v", v, firstArr.Origin[i]) - } - } - - for i, v := range []string{"a1a"} { - if firstArr.Method[i] != v { - t.Errorf("Expected value in first block method to be to be %v, got %v", v, firstArr.Method[i]) - } - } - - for i, v := range []string{"123", "456", "789"} { - if firstArr.ResponseHeader[i] != v { - t.Errorf("Expected value in first block response headerto be to be %v, got %v", v, firstArr.ResponseHeader[i]) - } - } - - secondArr := bucket.Cors[1] - if secondArr.MaxAgeSeconds != 5 { - t.Errorf("Expected second block's MaxAgeSeconds to be 5, got %d", secondArr.MaxAgeSeconds) - } - - for i, v := range []string{"ghi", "jkl"} { - if secondArr.Origin[i] != v { - t.Errorf("Expected value in second block origin to be to be %v, got %v", v, secondArr.Origin[i]) - } - } - - for i, v := range []string{"z9z"} { - if secondArr.Method[i] != v { - t.Errorf("Expected value in second block method to be to be %v, got %v", v, secondArr.Method[i]) - } - } - - for i, v := range []string{"000"} { - if secondArr.ResponseHeader[i] != v { - t.Errorf("Expected value in second block response headerto be to be %v, got %v", v, secondArr.ResponseHeader[i]) - } - } -} - -func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Project_ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Id != rs.Primary.ID { - return fmt.Errorf("Bucket not found") - } - - if found.Name != bucketName { - return fmt.Errorf("expected name %s, got %s", bucketName, found.Name) - } - - *bucket = *found - return nil - } -} - -func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - data := bytes.NewBufferString("test") - dataReader := bytes.NewReader(data.Bytes()) - object := &storage.Object{Name: "bucketDestroyTestFile"} - - // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails - if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { - log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) - } else { - return fmt.Errorf("Objects.Insert failed: %v", err) - } - - return nil - } -} - -func testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - _, err := config.clientStorage.Buckets.Get(bucketName).Do() - if err == nil { - return fmt.Errorf("Found %s", bucketName) - } - - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - return nil - } - - return err - } -} - -func testAccStorageBucketDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_storage_bucket" { - continue - } - - _, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Bucket still exists") - 
} - } - - return nil -} - -func testAccStorageBucket_basic(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} -`, bucketName) -} - -func testAccStorageBucket_customAttributes(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - predefined_acl = "publicReadWrite" - location = "EU" - force_destroy = "true" -} -`, bucketName) -} - -func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string { - var locationBlock string - if location != "" { - locationBlock = fmt.Sprintf(` - location = "%s"`, location) - } - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - storage_class = "%s"%s -} -`, bucketName, storageClass, locationBlock) -} - -func testGoogleStorageBucketsCors(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - cors { - origin = ["abc", "def"] - method = ["a1a"] - response_header = ["123", "456", "789"] - max_age_seconds = 10 - } - - cors { - origin = ["ghi", "jkl"] - method = ["z9z"] - response_header = ["000"] - max_age_seconds = 5 - } -} -`, bucketName) -} diff --git a/builtin/providers/google/resource_storage_object_acl.go b/builtin/providers/google/resource_storage_object_acl.go deleted file mode 100644 index 718260d98..000000000 --- a/builtin/providers/google/resource_storage_object_acl.go +++ /dev/null @@ -1,249 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "google.golang.org/api/storage/v1" -) - -func resourceStorageObjectAcl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageObjectAclCreate, - Read: resourceStorageObjectAclRead, - Update: resourceStorageObjectAclUpdate, - Delete: resourceStorageObjectAclDelete, - - Schema: map[string]*schema.Schema{ - "bucket": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: 
true, - }, - - "object": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "role_entity": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func getObjectAclId(object string) string { - return object + "-acl" -} - -func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - predefined_acl := "" - role_entity := make([]interface{}, 0) - - if v, ok := d.GetOk("predefined_acl"); ok { - predefined_acl = v.(string) - } - - if v, ok := d.GetOk("role_entity"); ok { - role_entity = v.([]interface{}) - } - - if len(predefined_acl) > 0 { - if len(role_entity) > 0 { - return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\"") - } - - res, err := config.clientStorage.Objects.Get(bucket, object).Do() - - if err != nil { - return fmt.Errorf("Error reading object %s: %v", bucket, err) - } - - res, err = config.clientStorage.Objects.Update(bucket, object, - res).PredefinedAcl(predefined_acl).Do() - - if err != nil { - return fmt.Errorf("Error updating object %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } else if len(role_entity) > 0 { - for _, v := range role_entity { - pair, err := getRoleEntityPair(v.(string)) - - objectAccessControl := &storage.ObjectAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - log.Printf("[DEBUG]: setting role = %s, entity = %s", pair.Role, pair.Entity) - - _, err = config.clientStorage.ObjectAccessControls.Insert(bucket, - object, objectAccessControl).Do() - - if err != nil { - return fmt.Errorf("Error setting ACL for %s on object %s: %v", pair.Entity, object, err) - } - } - - return resourceStorageObjectAclRead(d, meta) 
- } - - return fmt.Errorf("Error, you must specify either " + - "\"predefined_acl\" or \"role_entity\"") -} - -func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - // Predefined ACLs cannot easily be parsed once they have been processed - // by the GCP server - if _, ok := d.GetOk("predefined_acl"); !ok { - role_entity := make([]interface{}, 0) - re_local := d.Get("role_entity").([]interface{}) - re_local_map := make(map[string]string) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - re_local_map[res.Entity] = res.Role - } - - res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) - } - - for _, v := range res.Items { - role := v.Role - entity := v.Entity - if _, in := re_local_map[entity]; in { - role_entity = append(role_entity, fmt.Sprintf("%s:%s", role, entity)) - log.Printf("[DEBUG]: saving re %s-%s", role, entity) - } - } - - d.Set("role_entity", role_entity) - } - - d.SetId(getObjectAclId(object)) - return nil -} - -func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - if d.HasChange("role_entity") { - o, n := d.GetChange("role_entity") - old_re, new_re := o.([]interface{}), n.([]interface{}) - - old_re_map := make(map[string]string) - for _, v := range old_re { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - old_re_map[res.Entity] = res.Role - } - - for _, v := range new_re { - pair, err := 
getRoleEntityPair(v.(string)) - - objectAccessControl := &storage.ObjectAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - // If the old state is missing this entity, it needs to - // be created. Otherwise it is updated - if _, ok := old_re_map[pair.Entity]; ok { - _, err = config.clientStorage.ObjectAccessControls.Update( - bucket, object, pair.Entity, objectAccessControl).Do() - } else { - _, err = config.clientStorage.ObjectAccessControls.Insert( - bucket, object, objectAccessControl).Do() - } - - // Now we only store the keys that have to be removed - delete(old_re_map, pair.Entity) - - if err != nil { - return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) - } - } - - for entity, _ := range old_re_map { - log.Printf("[DEBUG]: removing entity %s", entity) - err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() - - if err != nil { - return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) - } - } - - return resourceStorageObjectAclRead(d, meta) - } - - return nil -} - -func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - re_local := d.Get("role_entity").([]interface{}) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - - entity := res.Entity - - log.Printf("[DEBUG]: removing entity %s", entity) - - err = config.clientStorage.ObjectAccessControls.Delete(bucket, object, - entity).Do() - - if err != nil { - return fmt.Errorf("Error deleting entity %s ACL: %s", - entity, err) - } - } - - return nil -} diff --git a/builtin/providers/google/resource_storage_object_acl_test.go b/builtin/providers/google/resource_storage_object_acl_test.go deleted file mode 100644 index b3dfcd51e..000000000 --- a/builtin/providers/google/resource_storage_object_acl_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package 
google - -import ( - "fmt" - "io/ioutil" - "math/rand" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - //"google.golang.org/api/storage/v1" -) - -var tfObjectAcl, errObjectAcl = ioutil.TempFile("", "tf-gce-test") - -func testAclObjectName() string { - return fmt.Sprintf("%s-%d", "tf-test-acl-object", - rand.New(rand.NewSource(time.Now().UnixNano())).Int()) -} - -func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { - bucketName := testBucketName() - objectName := testAclObjectName() - objectData := []byte("data data data") - ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if errObjectAcl != nil { - panic(errObjectAcl) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic2), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { - bucketName := testBucketName() - objectName := testAclObjectName() - objectData := []byte("data data data") - ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if errObjectAcl != nil { - panic(errObjectAcl) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, 
roleEntityBasic2), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic3_owner), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic3_reader), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { - bucketName := testBucketName() - objectName := testAclObjectName() - objectData := []byte("data data data") - ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if errObjectAcl != nil { - panic(errObjectAcl) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic3_owner), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic3(bucketName, objectName), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(bucketName, - objectName, roleEntityBasic3_reader), - ), - }, - - resource.TestStep{ - Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAclDelete(bucketName, - objectName, roleEntityBasic3_reader), - ), - }, - }, - }) -} - -func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { - bucketName := testBucketName() - objectName := testAclObjectName() - objectData := []byte("data data data") - ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) - resource.Test(t, resource.TestCase{ - PreCheck: func() { - if errObjectAcl != nil { - panic(errObjectAcl) - } - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageObjectAclDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageObjectsAclPredefined(bucketName, objectName), - }, - }, - }) -} - -func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) resource.TestCheckFunc { - return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) - config := testAccProvider.Meta().(*Config) - - res, err := config.clientStorage.ObjectAccessControls.Get(bucket, - object, roleEntity.Entity).Do() - - if err != nil { - return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) - } - - if res.Role != roleEntity.Role { - return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) - } - - return nil - } -} - -func testAccCheckGoogleStorageObjectAclDelete(bucket, object, roleEntityS string) resource.TestCheckFunc { - return func(s *terraform.State) error { - roleEntity, _ := getRoleEntityPair(roleEntityS) - config := testAccProvider.Meta().(*Config) - - _, err := config.clientStorage.ObjectAccessControls.Get(bucket, - object, roleEntity.Entity).Do() - - if err != nil { - return nil - } - - return fmt.Errorf("Error, Entity still exists %s", roleEntity.Entity) - } -} - -func 
testAccGoogleStorageObjectAclDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_storage_bucket_acl" { - continue - } - - bucket := rs.Primary.Attributes["bucket"] - object := rs.Primary.Attributes["object"] - - _, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() - - if err == nil { - return fmt.Errorf("Acl for bucket %s still exists", bucket) - } - } - - return nil -} - -func testGoogleStorageObjectsAclBasicDelete(bucketName string, objectName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" -} - -resource "google_storage_object_acl" "acl" { - object = "${google_storage_bucket_object.object.name}" - bucket = "${google_storage_bucket.bucket.name}" - role_entity = [] -} -`, bucketName, objectName, tfObjectAcl.Name()) -} - -func testGoogleStorageObjectsAclBasic1(bucketName string, objectName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" -} - -resource "google_storage_object_acl" "acl" { - object = "${google_storage_bucket_object.object.name}" - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", "%s"] -} -`, bucketName, objectName, tfObjectAcl.Name(), - roleEntityBasic1, roleEntityBasic2) -} - -func testGoogleStorageObjectsAclBasic2(bucketName string, objectName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" -} - -resource "google_storage_object_acl" "acl" { - object 
= "${google_storage_bucket_object.object.name}" - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", "%s"] -} -`, bucketName, objectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_owner) -} - -func testGoogleStorageObjectsAclBasic3(bucketName string, objectName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" -} - -resource "google_storage_object_acl" "acl" { - object = "${google_storage_bucket_object.object.name}" - bucket = "${google_storage_bucket.bucket.name}" - role_entity = ["%s", "%s"] -} -`, bucketName, objectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_reader) -} - -func testGoogleStorageObjectsAclPredefined(bucketName string, objectName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" -} - -resource "google_storage_bucket_object" "object" { - name = "%s" - bucket = "${google_storage_bucket.bucket.name}" - source = "%s" -} - -resource "google_storage_object_acl" "acl" { - object = "${google_storage_bucket_object.object.name}" - bucket = "${google_storage_bucket.bucket.name}" - predefined_acl = "projectPrivate" -} -`, bucketName, objectName, tfObjectAcl.Name()) -} diff --git a/builtin/providers/google/resourcemanager_operation.go b/builtin/providers/google/resourcemanager_operation.go deleted file mode 100644 index 32c6d3435..000000000 --- a/builtin/providers/google/resourcemanager_operation.go +++ /dev/null @@ -1,64 +0,0 @@ -package google - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/cloudresourcemanager/v1" -) - -type ResourceManagerOperationWaiter struct { - Service *cloudresourcemanager.Service - Op *cloudresourcemanager.Operation -} - -func (w *ResourceManagerOperationWaiter) RefreshFunc() 
resource.StateRefreshFunc { - return func() (interface{}, string, error) { - op, err := w.Service.Operations.Get(w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) - - return op, fmt.Sprint(op.Done), nil - } -} - -func (w *ResourceManagerOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"false"}, - Target: []string{"true"}, - Refresh: w.RefreshFunc(), - } -} - -func resourceManagerOperationWait(config *Config, op *cloudresourcemanager.Operation, activity string) error { - return resourceManagerOperationWaitTime(config, op, activity, 4) -} - -func resourceManagerOperationWaitTime(config *Config, op *cloudresourcemanager.Operation, activity string, timeoutMin int) error { - w := &ResourceManagerOperationWaiter{ - Service: config.clientResourceManager, - Op: op, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = time.Duration(timeoutMin) * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*cloudresourcemanager.Operation) - if op.Error != nil { - return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) - } - - return nil -} diff --git a/builtin/providers/google/service_scope.go b/builtin/providers/google/service_scope.go deleted file mode 100644 index 45bcf6004..000000000 --- a/builtin/providers/google/service_scope.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -func canonicalizeServiceScope(scope string) string { - // This is a convenience map of short names used by the gcloud tool - // to the GCE auth endpoints they alias to. 
- scopeMap := map[string]string{ - "bigquery": "https://www.googleapis.com/auth/bigquery", - "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", - "cloud-source-repos": "https://www.googleapis.com/auth/source.full_control", - "cloud-source-repos-ro": "https://www.googleapis.com/auth/source.read_only", - "compute-ro": "https://www.googleapis.com/auth/compute.readonly", - "compute-rw": "https://www.googleapis.com/auth/compute", - "datastore": "https://www.googleapis.com/auth/datastore", - "logging-write": "https://www.googleapis.com/auth/logging.write", - "monitoring": "https://www.googleapis.com/auth/monitoring", - "monitoring-write": "https://www.googleapis.com/auth/monitoring.write", - "pubsub": "https://www.googleapis.com/auth/pubsub", - "service-control": "https://www.googleapis.com/auth/servicecontrol", - "service-management": "https://www.googleapis.com/auth/service.management.readonly", - "sql": "https://www.googleapis.com/auth/sqlservice", - "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", - "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", - "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", - "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", - "taskqueue": "https://www.googleapis.com/auth/taskqueue", - "trace-append": "https://www.googleapis.com/auth/trace.append", - "trace-ro": "https://www.googleapis.com/auth/trace.readonly", - "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", - "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", - } - - if matchedURL, ok := scopeMap[scope]; ok { - return matchedURL - } - - return scope -} diff --git a/builtin/providers/google/serviceman_operation.go b/builtin/providers/google/serviceman_operation.go deleted file mode 100644 index 299cd1e86..000000000 --- a/builtin/providers/google/serviceman_operation.go 
+++ /dev/null @@ -1,67 +0,0 @@ -package google - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/servicemanagement/v1" -) - -type ServiceManagementOperationWaiter struct { - Service *servicemanagement.APIService - Op *servicemanagement.Operation -} - -func (w *ServiceManagementOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *servicemanagement.Operation - var err error - - op, err = w.Service.Operations.Get(w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) - - return op, fmt.Sprint(op.Done), nil - } -} - -func (w *ServiceManagementOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"false"}, - Target: []string{"true"}, - Refresh: w.RefreshFunc(), - } -} - -func serviceManagementOperationWait(config *Config, op *servicemanagement.Operation, activity string) error { - return serviceManagementOperationWaitTime(config, op, activity, 4) -} - -func serviceManagementOperationWaitTime(config *Config, op *servicemanagement.Operation, activity string, timeoutMin int) error { - w := &ServiceManagementOperationWaiter{ - Service: config.clientServiceMan, - Op: op, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = time.Duration(timeoutMin) * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*servicemanagement.Operation) - if op.Error != nil { - return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) - } - - return nil -} diff --git a/builtin/providers/google/sqladmin_operation.go b/builtin/providers/google/sqladmin_operation.go deleted file mode 100644 index 3e91caf5e..000000000 --- 
a/builtin/providers/google/sqladmin_operation.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/sqladmin/v1beta4" -) - -type SqlAdminOperationWaiter struct { - Service *sqladmin.Service - Op *sqladmin.Operation - Project string -} - -func (w *SqlAdminOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *sqladmin.Operation - var err error - - log.Printf("[DEBUG] self_link: %s", w.Op.SelfLink) - op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) - - return op, op.Status, nil - } -} - -func (w *SqlAdminOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Refresh: w.RefreshFunc(), - } -} - -// SqlAdminOperationError wraps sqladmin.OperationError and implements the -// error interface so it can be returned. 
-type SqlAdminOperationError sqladmin.OperationErrors - -func (e SqlAdminOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - -func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity string) error { - w := &SqlAdminOperationWaiter{ - Service: config.clientSqlAdmin, - Op: op, - Project: config.Project, - } - - state := w.Conf() - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - state.Delay = 5 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s (op %s): %s", activity, op.Name, err) - } - - op = opRaw.(*sqladmin.Operation) - if op.Error != nil { - return SqlAdminOperationError(*op.Error) - } - - return nil -} diff --git a/builtin/providers/google/test-fixtures/fake_account.json b/builtin/providers/google/test-fixtures/fake_account.json deleted file mode 100644 index f3362d6d2..000000000 --- a/builtin/providers/google/test-fixtures/fake_account.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "private_key_id": "foo", - "private_key": "bar", - "client_email": "foo@bar.com", - "client_id": "id@foo.com", - "type": "service_account" -} diff --git a/builtin/providers/google/test-fixtures/fake_client.json b/builtin/providers/google/test-fixtures/fake_client.json deleted file mode 100644 index d88fe4cd7..000000000 --- a/builtin/providers/google/test-fixtures/fake_client.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "web": { - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "client_secret": "foo", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "client_email": "foo@developer.gserviceaccount.com", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/foo@developer.gserviceaccount.com", - "client_id": "foo.apps.googleusercontent.com", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs" - } -} diff --git 
a/builtin/providers/google/test-fixtures/ssl_cert/test.crt b/builtin/providers/google/test-fixtures/ssl_cert/test.crt deleted file mode 100644 index 122d22d85..000000000 --- a/builtin/providers/google/test-fixtures/ssl_cert/test.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDgjCCAmoCCQCPrrFCwXharzANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMC -VVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYDVQQHDANOWUMxFTATBgNVBAoMDE9y -Z2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlvbjEQMA4GA1UEAwwHTXkgTmFtZTEX -MBUGCSqGSIb3DQEJARYIbWVAbWUubWUwHhcNMTUxMTIwMTM0MTIwWhcNMTYxMTE5 -MTM0MTIwWjCBgjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYD -VQQHDANOWUMxFTATBgNVBAoMDE9yZ2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlv -bjEQMA4GA1UEAwwHTXkgTmFtZTEXMBUGCSqGSIb3DQEJARYIbWVAbWUubWUwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDbTuIV7EySLAijNAnsXG7HO/m4 -pu1Yy2sWWcqIifaSq0pL3JUGmWRKFRTb4msFIuKrkvsMLxWy6zIOnx0okRb7sTKb -XLBiN7zjSLCD6k31zlllO0GHkPu923VeGZ52xlIWxo22R2yoRuddD0YkQPctV7q9 -H7sKJq2141Ut9reMT2LKVRPlzf8wTcv+F+cAc3/i9Tib90GqclGrwk6XE59RBgzT -m9V7b/V+uusDtj6T3/ne5MHnq4g6lUz4mE7FneDVealjx7fHXtWSmR7dfbJilJj1 -foR/wPBeopdR5wAZS26bHjFIBMqAc7AgxbXdMorEDIY4i2OFjPTu22YYtmFZAgMB -AAEwDQYJKoZIhvcNAQELBQADggEBAHmgedgYDSIPiyaZnCWG56jFqYtHYS5xMOFS -T4FBEPsqgjbSYgjiugeQ37+nsbg/NQf4Z/Ca9CS20f7et8pjZWYqbqdGbifHSUAP -MsR3MK/8EsNVskioufvgExNrqHbcJD8aKrBHAyA6NbjaTnnBPrwdfcXxnWdpPNOh -yG6xSdi807t2e7dX59Nr6Fg6DHd9XPEM7VL/k5RBQyBf1ZgrO9cwA2jl8UtWKpaa -fO24S7Acwggi9TjJnyHOhWh21DEUEQG+czXAd5/LSjynTcI7xmuyfEgqJPIrskPv -OqM8II/iNr9Zglvp6hlmzIWnhgwLZiEljYGuMRNhr21jlHsCCYY= ------END CERTIFICATE----- diff --git a/builtin/providers/google/test-fixtures/ssl_cert/test.csr b/builtin/providers/google/test-fixtures/ssl_cert/test.csr deleted file mode 100644 index dee9945ed..000000000 --- a/builtin/providers/google/test-fixtures/ssl_cert/test.csr +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICyDCCAbACAQAwgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXctWW9yazEM 
-MAoGA1UEBwwDTllDMRUwEwYDVQQKDAxPcmdhbml6YXRpb24xEDAOBgNVBAsMB1Nl -Y3Rpb24xEDAOBgNVBAMMB015IE5hbWUxFzAVBgkqhkiG9w0BCQEWCG1lQG1lLm1l -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA207iFexMkiwIozQJ7Fxu -xzv5uKbtWMtrFlnKiIn2kqtKS9yVBplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW -+7Eym1ywYje840iwg+pN9c5ZZTtBh5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3 -LVe6vR+7CiatteNVLfa3jE9iylUT5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOf -UQYM05vVe2/1frrrA7Y+k9/53uTB56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2y -YpSY9X6Ef8DwXqKXUecAGUtumx4xSATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZh -WQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAGtNMtOtE7gUP5DbkZNxPsoGazkM -c3//gjH3MsTFzQ39r1uNq3fnbBBoYeQnsI05Bf7kSEVeT6fzdl5aBhOWxFF6uyTI -TZzcH9kvZ2IwFDbsa6vqrIJ6jIkpCIfPR8wN5LlBca9oZwJnt4ejF3RB5YBfnmeo -t5JXTbxGRvPBVRZCfJgcxcn731m1Rc8c9wud2IaNWiLob2J/92BJhSt/aiYps/TJ -ww5dRi6zhpxhR+RjlstG3C6oeYeQlSgzeBjhRcxtPHQWfcVfRLCtubqvuUQPcpw2 -YqMujh4vyKo+JEtqI8gqp4Bu0HVI1vr1vhblntFrQb0kueqV94HarE0uH+c= ------END CERTIFICATE REQUEST----- diff --git a/builtin/providers/google/test-fixtures/ssl_cert/test.key b/builtin/providers/google/test-fixtures/ssl_cert/test.key deleted file mode 100644 index 92dd45137..000000000 --- a/builtin/providers/google/test-fixtures/ssl_cert/test.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA207iFexMkiwIozQJ7Fxuxzv5uKbtWMtrFlnKiIn2kqtKS9yV -BplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW+7Eym1ywYje840iwg+pN9c5ZZTtB -h5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3LVe6vR+7CiatteNVLfa3jE9iylUT -5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOfUQYM05vVe2/1frrrA7Y+k9/53uTB -56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2yYpSY9X6Ef8DwXqKXUecAGUtumx4x -SATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZhWQIDAQABAoIBABEjzyOrfiiGbH5k -2MmyR64mj9PQqAgijdIHXn7hWXYJERtwt+z2HBJ2J1UwEvEp0tFaAWjoXSfInfbq -lJrRDBzLsorV6asjdA3HZpRIwaMOZ4oz4WE5AZPLDRc3pVzfDxdcmUK/vkxAjmCF -ixPWR/sxOhUB39phP35RsByRhbLfdGQkSspmD41imASqdqG96wsuc9Rk1Qjx9szr -kUxZkQGKUkRz4yQCwTR4+w2I21/cT5kxwM/KZG5f62tqB9urtFuTONrm7Z7xJv1T -BkHxQJxtsGhG8Dp8RB3t5PLou39xaBrjS5lpzJYtzrja25XGNEuONiQlWEDmk7li 
-acJWPQECgYEA98hjLlSO2sudUI36kJWc9CBqFznnUD2hIWRBM/Xc7mBhFGWxoxGm -f2xri91XbfH3oICIIBs52AdCyfjYbpF0clq8pSL+gHzRQTLcLUKVz3BxnxJAxyIG -QYPxmtMLVSzB5eZh+bPvcCyzd2ALDE1vFClQI/BcK/2dsJcXP2gSqdECgYEA4pTA -3okbdWOutnOwakyfVAbXjMx81D9ii2ZGHbuPY4PSD/tAe8onkEzHJgvinjddbi9p -oGwFhPqgfdWX7YNz5qsj9HP6Ehy7dw/EwvmX49yHsere85LiPMn/T9KkK0Pbn+HY -+0Q+ov/2wV3J7zPo8fffyQYizUKexGUN3XspGQkCgYEArFsMeobBE/q8g/MuzvHz -SnFduqhBebRU59hH7q/gLUSHYtvWM7ssWMh/Crw9e7HrcQ7XIZYup1FtqPZa/pZZ -LM5nGGt+IrwwBq0tMKJ3eOMbde4Jdzr4pQv1vJ9+65GFkritgDckn5/IeoopRTZ7 -xMd0AnvIcaUp0lNXDXkEOnECgYAk2C2YwlDdwOzrLFrWnkkWX9pzQdlWpkv/AQ2L -zjEd7JSfFqtAtfnDBEkqDaq3MaeWwEz70jT/j8XDUJVZARQ6wT+ig615foSZcs37 -Kp0hZ34FV30TvKHfYrWKpGUfx/QRxqcDDPDmjprwjLDGnflWR4lzZfUIzbmFlC0y -A9IGCQKBgH3ieP6nYCJexppvdxoycFkp3bSPr26MOCvACNsa+wJxBo59Zxs0YAmJ -9f6OOdUExueRY5iZCy0KPSgjYj96RuR0gV3cKc/WdOot4Ypgc/TK+r/UPDM2VAHk -yJuxkyXdOrstesxZIxpourS3kONtQUqMFmdqQeBngZl4v7yBtiRW ------END RSA PRIVATE KEY----- diff --git a/builtin/providers/grafana/provider.go b/builtin/providers/grafana/provider.go deleted file mode 100644 index bd29c7201..000000000 --- a/builtin/providers/grafana/provider.go +++ /dev/null @@ -1,41 +0,0 @@ -package grafana - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - gapi "github.com/apparentlymart/go-grafana-api" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("GRAFANA_URL", nil), - Description: "URL of the root of the target Grafana server.", - }, - "auth": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("GRAFANA_AUTH", nil), - Description: "Credentials for accessing the Grafana API.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "grafana_dashboard": ResourceDashboard(), - "grafana_data_source": ResourceDataSource(), - }, - - 
ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - return gapi.New( - d.Get("auth").(string), - d.Get("url").(string), - ) -} diff --git a/builtin/providers/grafana/provider_test.go b/builtin/providers/grafana/provider_test.go deleted file mode 100644 index 9daba4c72..000000000 --- a/builtin/providers/grafana/provider_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package grafana - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need a Grafana server. -// Grafana can be downloaded here: http://grafana.org/download/ -// -// The tests will need an API key to authenticate with the server. To create -// one, use the menu for one of your installation's organizations (The -// "Main Org." is fine if you've just done a fresh installation to run these -// tests) to reach the "API Keys" admin page. -// -// Giving the API key the Admin role is the easiest way to ensure enough -// access is granted to run all of the tests. 
-// -// Once you've created the API key, set the GRAFANA_URL and GRAFANA_AUTH -// environment variables to the Grafana base URL and the API key respectively, -// and then run: -// make testacc TEST=./builtin/providers/grafana - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "grafana": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("GRAFANA_URL"); v == "" { - t.Fatal("GRAFANA_URL must be set for acceptance tests") - } - if v := os.Getenv("GRAFANA_AUTH"); v == "" { - t.Fatal("GRAFANA_AUTH must be set for acceptance tests") - } -} diff --git a/builtin/providers/grafana/resource_dashboard.go b/builtin/providers/grafana/resource_dashboard.go deleted file mode 100644 index aaac23da1..000000000 --- a/builtin/providers/grafana/resource_dashboard.go +++ /dev/null @@ -1,127 +0,0 @@ -package grafana - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - - gapi "github.com/apparentlymart/go-grafana-api" -) - -func ResourceDashboard() *schema.Resource { - return &schema.Resource{ - Create: CreateDashboard, - Delete: DeleteDashboard, - Read: ReadDashboard, - - Schema: map[string]*schema.Schema{ - "slug": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "config_json": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: NormalizeDashboardConfigJSON, - ValidateFunc: ValidateDashboardConfigJSON, - }, - }, - } -} - -func CreateDashboard(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gapi.Client) - - model := 
prepareDashboardModel(d.Get("config_json").(string)) - - resp, err := client.SaveDashboard(model, false) - if err != nil { - return err - } - - d.SetId(resp.Slug) - - return ReadDashboard(d, meta) -} - -func ReadDashboard(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gapi.Client) - - slug := d.Id() - - dashboard, err := client.Dashboard(slug) - if err != nil { - return err - } - - configJSONBytes, err := json.Marshal(dashboard.Model) - if err != nil { - return err - } - - configJSON := NormalizeDashboardConfigJSON(string(configJSONBytes)) - - d.SetId(dashboard.Meta.Slug) - d.Set("slug", dashboard.Meta.Slug) - d.Set("config_json", configJSON) - - return nil -} - -func DeleteDashboard(d *schema.ResourceData, meta interface{}) error { - client := meta.(*gapi.Client) - - slug := d.Id() - return client.DeleteDashboard(slug) -} - -func prepareDashboardModel(configJSON string) map[string]interface{} { - configMap := map[string]interface{}{} - err := json.Unmarshal([]byte(configJSON), &configMap) - if err != nil { - // The validate function should've taken care of this. - panic(fmt.Errorf("Invalid JSON got into prepare func")) - } - - delete(configMap, "id") - configMap["version"] = 0 - - return configMap -} - -func ValidateDashboardConfigJSON(configI interface{}, k string) ([]string, []error) { - configJSON := configI.(string) - configMap := map[string]interface{}{} - err := json.Unmarshal([]byte(configJSON), &configMap) - if err != nil { - return nil, []error{err} - } - return nil, nil -} - -func NormalizeDashboardConfigJSON(configI interface{}) string { - configJSON := configI.(string) - - configMap := map[string]interface{}{} - err := json.Unmarshal([]byte(configJSON), &configMap) - if err != nil { - // The validate function should've taken care of this. - return "" - } - - // Some properties are managed by this provider and are thus not - // significant when included in the JSON. 
- delete(configMap, "id") - delete(configMap, "version") - - ret, err := json.Marshal(configMap) - if err != nil { - // Should never happen. - return configJSON - } - - return string(ret) -} diff --git a/builtin/providers/grafana/resource_dashboard_test.go b/builtin/providers/grafana/resource_dashboard_test.go deleted file mode 100644 index f02a756cd..000000000 --- a/builtin/providers/grafana/resource_dashboard_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package grafana - -import ( - "fmt" - "regexp" - "testing" - - gapi "github.com/apparentlymart/go-grafana-api" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDashboard_basic(t *testing.T) { - var dashboard gapi.Dashboard - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDashboardCheckDestroy(&dashboard), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDashboardConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccDashboardCheckExists("grafana_dashboard.test", &dashboard), - resource.TestMatchResourceAttr( - "grafana_dashboard.test", "id", regexp.MustCompile(`terraform-acceptance-test.*`), - ), - ), - }, - }, - }) -} - -func testAccDashboardCheckExists(rn string, dashboard *gapi.Dashboard) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("resource id not set") - } - - client := testAccProvider.Meta().(*gapi.Client) - gotDashboard, err := client.Dashboard(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting dashboard: %s", err) - } - - *dashboard = *gotDashboard - - return nil - } -} - -func testAccDashboardCheckDestroy(dashboard *gapi.Dashboard) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := 
testAccProvider.Meta().(*gapi.Client) - _, err := client.Dashboard(dashboard.Meta.Slug) - if err == nil { - return fmt.Errorf("dashboard still exists") - } - return nil - } -} - -// The "id" and "version" properties in the config below are there to test -// that we correctly normalize them away. They are not actually used by this -// resource, since it uses slugs for identification and never modifies an -// existing dashboard. -const testAccDashboardConfig_basic = ` -resource "grafana_dashboard" "test" { - config_json = < -1 { - plan = plan[:idx] - } - } - } - - d.Set("name", addon.Name) - d.Set("plan", plan) - d.Set("provider_id", addon.ProviderID) - if err := d.Set("config_vars", addon.ConfigVars); err != nil { - return err - } - - return nil -} - -func resourceHerokuAddonUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - - if d.HasChange("plan") { - ad, err := client.AddOnUpdate( - context.TODO(), app, d.Id(), heroku.AddOnUpdateOpts{Plan: d.Get("plan").(string)}) - if err != nil { - return err - } - - // Store the new ID - d.SetId(ad.ID) - } - - return resourceHerokuAddonRead(d, meta) -} - -func resourceHerokuAddonDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting Addon: %s", d.Id()) - - // Destroy the app - _, err := client.AddOnDelete(context.TODO(), d.Get("app").(string), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting addon: %s", err) - } - - d.SetId("") - return nil -} - -func resourceHerokuAddonRetrieve(id string, client *heroku.Service) (*heroku.AddOn, error) { - addon, err := client.AddOnInfo(context.TODO(), id) - - if err != nil { - return nil, fmt.Errorf("Error retrieving addon: %s", err) - } - - return addon, nil -} - -func resourceHerokuAddonRetrieveByApp(app string, id string, client *heroku.Service) (*heroku.AddOn, error) { - addon, err := client.AddOnInfoByApp(context.TODO(), app, id) - 
- if err != nil { - return nil, fmt.Errorf("Error retrieving addon: %s", err) - } - - return addon, nil -} - -// AddOnStateRefreshFunc returns a resource.StateRefreshFunc that is used to -// watch an AddOn. -func AddOnStateRefreshFunc(client *heroku.Service, appID, addOnID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - addon, err := resourceHerokuAddonRetrieveByApp(appID, addOnID, client) - - if err != nil { - return nil, "", err - } - - // The type conversion here can be dropped when the vendored version of - // heroku-go is updated. - return (*heroku.AddOn)(addon), addon.State, nil - } -} diff --git a/builtin/providers/heroku/resource_heroku_addon_test.go b/builtin/providers/heroku/resource_heroku_addon_test.go deleted file mode 100644 index 22ae2bf1f..000000000 --- a/builtin/providers/heroku/resource_heroku_addon_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "testing" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuAddon_Basic(t *testing.T) { - var addon heroku.AddOn - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAddonDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAddonConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAddonExists("heroku_addon.foobar", &addon), - testAccCheckHerokuAddonAttributes(&addon, "deployhooks:http"), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "config.0.url", "http://google.com"), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "app", appName), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "plan", "deployhooks:http"), - ), - }, - }, - }) -} - 
-// GH-198 -func TestAccHerokuAddon_noPlan(t *testing.T) { - var addon heroku.AddOn - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAddonDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAddonConfig_no_plan(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAddonExists("heroku_addon.foobar", &addon), - testAccCheckHerokuAddonAttributes(&addon, "memcachier:dev"), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "app", appName), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "plan", "memcachier"), - ), - }, - { - Config: testAccCheckHerokuAddonConfig_no_plan(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAddonExists("heroku_addon.foobar", &addon), - testAccCheckHerokuAddonAttributes(&addon, "memcachier:dev"), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "app", appName), - resource.TestCheckResourceAttr( - "heroku_addon.foobar", "plan", "memcachier"), - ), - }, - }, - }) -} - -func testAccCheckHerokuAddonDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_addon" { - continue - } - - _, err := client.AddOnInfoByApp(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Addon still exists") - } - } - - return nil -} - -func testAccCheckHerokuAddonAttributes(addon *heroku.AddOn, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if addon.Plan.Name != n { - return fmt.Errorf("Bad plan: %s", addon.Plan.Name) - } - - return nil - } -} - -func testAccCheckHerokuAddonExists(n string, addon *heroku.AddOn) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return 
fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Addon ID is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundAddon, err := client.AddOnInfoByApp(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err != nil { - return err - } - - if foundAddon.ID != rs.Primary.ID { - return fmt.Errorf("Addon not found") - } - - *addon = *foundAddon - - return nil - } -} - -func testAccCheckHerokuAddonConfig_basic(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" -} - -resource "heroku_addon" "foobar" { - app = "${heroku_app.foobar.name}" - plan = "deployhooks:http" - config { - url = "http://google.com" - } -}`, appName) -} - -func testAccCheckHerokuAddonConfig_no_plan(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" -} - -resource "heroku_addon" "foobar" { - app = "${heroku_app.foobar.name}" - plan = "memcachier" -}`, appName) -} diff --git a/builtin/providers/heroku/resource_heroku_app.go b/builtin/providers/heroku/resource_heroku_app.go deleted file mode 100644 index 575f8f08b..000000000 --- a/builtin/providers/heroku/resource_heroku_app.go +++ /dev/null @@ -1,565 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - - "github.com/cyberdelia/heroku-go/v3" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" -) - -// herokuApplication is a value type used to hold the details of an -// application. 
We use this for common storage of values needed for the -// heroku.App and heroku.OrganizationApp types -type herokuApplication struct { - Name string - Region string - Space string - Stack string - GitURL string - WebURL string - OrganizationName string - Locked bool -} - -// type application is used to store all the details of a heroku app -type application struct { - Id string // Id of the resource - - App *herokuApplication // The heroku application - Client *heroku.Service // Client to interact with the heroku API - Vars map[string]string // The vars on the application - Buildpacks []string // The application's buildpack names or URLs - Organization bool // is the application organization app -} - -// Updates the application to have the latest from remote -func (a *application) Update() error { - var errs []error - var err error - - if !a.Organization { - app, err := a.Client.AppInfo(context.TODO(), a.Id) - if err != nil { - errs = append(errs, err) - } else { - a.App = &herokuApplication{} - a.App.Name = app.Name - a.App.Region = app.Region.Name - a.App.Stack = app.Stack.Name - a.App.GitURL = app.GitURL - a.App.WebURL = app.WebURL - } - } else { - app, err := a.Client.OrganizationAppInfo(context.TODO(), a.Id) - if err != nil { - errs = append(errs, err) - } else { - // No inheritance between OrganizationApp and App is killing it :/ - a.App = &herokuApplication{} - a.App.Name = app.Name - a.App.Region = app.Region.Name - a.App.Stack = app.Stack.Name - a.App.GitURL = app.GitURL - a.App.WebURL = app.WebURL - if app.Space != nil { - a.App.Space = app.Space.Name - } - if app.Organization != nil { - a.App.OrganizationName = app.Organization.Name - } else { - log.Println("[DEBUG] Something is wrong - didn't get information about organization name, while the app is marked as being so") - } - a.App.Locked = app.Locked - } - } - - a.Buildpacks, err = retrieveBuildpacks(a.Id, a.Client) - if err != nil { - errs = append(errs, err) - } - - a.Vars, err = 
retrieveConfigVars(a.Id, a.Client) - if err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil -} - -func resourceHerokuApp() *schema.Resource { - return &schema.Resource{ - Create: switchHerokuAppCreate, - Read: resourceHerokuAppRead, - Update: resourceHerokuAppUpdate, - Delete: resourceHerokuAppDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHerokuAppImport, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "space": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "stack": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "buildpacks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "config_vars": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeMap, - }, - }, - - "all_config_vars": { - Type: schema.TypeMap, - Computed: true, - }, - - "git_url": { - Type: schema.TypeString, - Computed: true, - }, - - "web_url": { - Type: schema.TypeString, - Computed: true, - }, - - "heroku_hostname": { - Type: schema.TypeString, - Computed: true, - }, - - "organization": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "locked": { - Type: schema.TypeBool, - Optional: true, - }, - - "personal": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func isOrganizationApp(d *schema.ResourceData) bool { - v := d.Get("organization").([]interface{}) - return len(v) > 0 && v[0] != nil -} - -func resourceHerokuAppImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - client := m.(*heroku.Service) - - 
app, err := client.AppInfo(context.TODO(), d.Id()) - if err != nil { - return nil, err - } - - // Flag organization apps by setting the organization name - if app.Organization != nil { - d.Set("organization", []map[string]interface{}{ - {"name": app.Organization.Name}, - }) - } - - return []*schema.ResourceData{d}, nil -} - -func switchHerokuAppCreate(d *schema.ResourceData, meta interface{}) error { - if isOrganizationApp(d) { - return resourceHerokuOrgAppCreate(d, meta) - } - - return resourceHerokuAppCreate(d, meta) -} - -func resourceHerokuAppCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - // Build up our creation options - opts := heroku.AppCreateOpts{} - - if v, ok := d.GetOk("name"); ok { - vs := v.(string) - log.Printf("[DEBUG] App name: %s", vs) - opts.Name = &vs - } - if v, ok := d.GetOk("region"); ok { - vs := v.(string) - log.Printf("[DEBUG] App region: %s", vs) - opts.Region = &vs - } - if v, ok := d.GetOk("stack"); ok { - vs := v.(string) - log.Printf("[DEBUG] App stack: %s", vs) - opts.Stack = &vs - } - - log.Printf("[DEBUG] Creating Heroku app...") - a, err := client.AppCreate(context.TODO(), opts) - if err != nil { - return err - } - - d.SetId(a.Name) - log.Printf("[INFO] App ID: %s", d.Id()) - - if err := performAppPostCreateTasks(d, client); err != nil { - return err - } - - return resourceHerokuAppRead(d, meta) -} - -func resourceHerokuOrgAppCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - // Build up our creation options - opts := heroku.OrganizationAppCreateOpts{} - - v := d.Get("organization").([]interface{}) - if len(v) > 1 { - return fmt.Errorf("Error Creating Heroku App: Only 1 Heroku Organization is permitted") - } - orgDetails := v[0].(map[string]interface{}) - - if v := orgDetails["name"]; v != nil { - vs := v.(string) - log.Printf("[DEBUG] Organization name: %s", vs) - opts.Organization = &vs - } - - if v := orgDetails["personal"]; v != nil { - 
vs := v.(bool) - log.Printf("[DEBUG] Organization Personal: %t", vs) - opts.Personal = &vs - } - - if v := orgDetails["locked"]; v != nil { - vs := v.(bool) - log.Printf("[DEBUG] Organization locked: %t", vs) - opts.Locked = &vs - } - - if v := d.Get("name"); v != nil { - vs := v.(string) - log.Printf("[DEBUG] App name: %s", vs) - opts.Name = &vs - } - if v, ok := d.GetOk("region"); ok { - vs := v.(string) - log.Printf("[DEBUG] App region: %s", vs) - opts.Region = &vs - } - if v, ok := d.GetOk("space"); ok { - vs := v.(string) - log.Printf("[DEBUG] App space: %s", vs) - opts.Space = &vs - } - if v, ok := d.GetOk("stack"); ok { - vs := v.(string) - log.Printf("[DEBUG] App stack: %s", vs) - opts.Stack = &vs - } - - log.Printf("[DEBUG] Creating Heroku app...") - a, err := client.OrganizationAppCreate(context.TODO(), opts) - if err != nil { - return err - } - - d.SetId(a.Name) - log.Printf("[INFO] App ID: %s", d.Id()) - - if err := performAppPostCreateTasks(d, client); err != nil { - return err - } - - return resourceHerokuAppRead(d, meta) -} - -func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - configVars := make(map[string]string) - care := make(map[string]struct{}) - for _, v := range d.Get("config_vars").([]interface{}) { - for k := range v.(map[string]interface{}) { - care[k] = struct{}{} - } - } - - // Only track buildpacks when set in the configuration. - _, buildpacksConfigured := d.GetOk("buildpacks") - - organizationApp := isOrganizationApp(d) - - // Only set the config_vars that we have set in the configuration. - // The "all_config_vars" field has all of them. 
- app, err := resourceHerokuAppRetrieve(d.Id(), organizationApp, client) - if err != nil { - return err - } - - for k, v := range app.Vars { - if _, ok := care[k]; ok { - configVars[k] = v - } - } - var configVarsValue []map[string]string - if len(configVars) > 0 { - configVarsValue = []map[string]string{configVars} - } - - d.Set("name", app.App.Name) - d.Set("stack", app.App.Stack) - d.Set("region", app.App.Region) - d.Set("git_url", app.App.GitURL) - d.Set("web_url", app.App.WebURL) - if buildpacksConfigured { - d.Set("buildpacks", app.Buildpacks) - } - d.Set("config_vars", configVarsValue) - d.Set("all_config_vars", app.Vars) - if organizationApp { - d.Set("space", app.App.Space) - - orgDetails := map[string]interface{}{ - "name": app.App.OrganizationName, - "locked": app.App.Locked, - "personal": false, - } - err := d.Set("organization", []interface{}{orgDetails}) - if err != nil { - return err - } - } - - // We know that the hostname on heroku will be the name+herokuapp.com - // You need this to do things like create DNS CNAME records - d.Set("heroku_hostname", fmt.Sprintf("%s.herokuapp.com", app.App.Name)) - - return nil -} - -func resourceHerokuAppUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - // If name changed, update it - if d.HasChange("name") { - v := d.Get("name").(string) - opts := heroku.AppUpdateOpts{ - Name: &v, - } - - renamedApp, err := client.AppUpdate(context.TODO(), d.Id(), opts) - if err != nil { - return err - } - - // Store the new ID - d.SetId(renamedApp.Name) - } - - // If the config vars changed, then recalculate those - if d.HasChange("config_vars") { - o, n := d.GetChange("config_vars") - if o == nil { - o = []interface{}{} - } - if n == nil { - n = []interface{}{} - } - - err := updateConfigVars( - d.Id(), client, o.([]interface{}), n.([]interface{})) - if err != nil { - return err - } - } - - if d.HasChange("buildpacks") { - err := updateBuildpacks(d.Id(), client, 
d.Get("buildpacks").([]interface{})) - if err != nil { - return err - } - } - - return resourceHerokuAppRead(d, meta) -} - -func resourceHerokuAppDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting App: %s", d.Id()) - _, err := client.AppDelete(context.TODO(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting App: %s", err) - } - - d.SetId("") - return nil -} - -func resourceHerokuAppRetrieve(id string, organization bool, client *heroku.Service) (*application, error) { - app := application{Id: id, Client: client, Organization: organization} - - err := app.Update() - - if err != nil { - return nil, fmt.Errorf("Error retrieving app: %s", err) - } - - return &app, nil -} - -func retrieveBuildpacks(id string, client *heroku.Service) ([]string, error) { - results, err := client.BuildpackInstallationList(context.TODO(), id, nil) - - if err != nil { - return nil, err - } - - buildpacks := []string{} - for _, installation := range results { - buildpacks = append(buildpacks, installation.Buildpack.Name) - } - - return buildpacks, nil -} - -func retrieveConfigVars(id string, client *heroku.Service) (map[string]string, error) { - vars, err := client.ConfigVarInfoForApp(context.TODO(), id) - - if err != nil { - return nil, err - } - - nonNullVars := map[string]string{} - for k, v := range vars { - if v != nil { - nonNullVars[k] = *v - } - } - - return nonNullVars, nil -} - -// Updates the config vars for from an expanded configuration. 
-func updateConfigVars( - id string, - client *heroku.Service, - o []interface{}, - n []interface{}) error { - vars := make(map[string]*string) - - for _, v := range o { - if v != nil { - for k := range v.(map[string]interface{}) { - vars[k] = nil - } - } - } - for _, v := range n { - if v != nil { - for k, v := range v.(map[string]interface{}) { - val := v.(string) - vars[k] = &val - } - } - } - - log.Printf("[INFO] Updating config vars: *%#v", vars) - if _, err := client.ConfigVarUpdate(context.TODO(), id, vars); err != nil { - return fmt.Errorf("Error updating config vars: %s", err) - } - - return nil -} - -func updateBuildpacks(id string, client *heroku.Service, v []interface{}) error { - opts := heroku.BuildpackInstallationUpdateOpts{ - Updates: []struct { - Buildpack string `json:"buildpack" url:"buildpack,key"` - }{}} - - for _, buildpack := range v { - opts.Updates = append(opts.Updates, struct { - Buildpack string `json:"buildpack" url:"buildpack,key"` - }{ - Buildpack: buildpack.(string), - }) - } - - if _, err := client.BuildpackInstallationUpdate(context.TODO(), id, opts); err != nil { - return fmt.Errorf("Error updating buildpacks: %s", err) - } - - return nil -} - -// performAppPostCreateTasks performs post-create tasks common to both org and non-org apps. 
-func performAppPostCreateTasks(d *schema.ResourceData, client *heroku.Service) error { - if v, ok := d.GetOk("config_vars"); ok { - if err := updateConfigVars(d.Id(), client, nil, v.([]interface{})); err != nil { - return err - } - } - - if v, ok := d.GetOk("buildpacks"); ok { - if err := updateBuildpacks(d.Id(), client, v.([]interface{})); err != nil { - return err - } - } - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_app_feature.go b/builtin/providers/heroku/resource_heroku_app_feature.go deleted file mode 100644 index 9718fdc67..000000000 --- a/builtin/providers/heroku/resource_heroku_app_feature.go +++ /dev/null @@ -1,101 +0,0 @@ -package heroku - -import ( - "context" - "log" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuAppFeature() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuAppFeatureCreate, - Update: resourceHerokuAppFeatureUpdate, - Read: resourceHerokuAppFeatureRead, - Delete: resourceHerokuAppFeatureDelete, - - Schema: map[string]*schema.Schema{ - "app": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func resourceHerokuAppFeatureRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app, id := parseCompositeID(d.Id()) - - feature, err := client.AppFeatureInfo(context.TODO(), app, id) - if err != nil { - return err - } - - d.Set("app", app) - d.Set("name", feature.Name) - d.Set("enabled", feature.Enabled) - - return nil -} - -func resourceHerokuAppFeatureCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - featureName := d.Get("name").(string) - enabled := d.Get("enabled").(bool) - - opts := 
heroku.AppFeatureUpdateOpts{Enabled: enabled} - - log.Printf("[DEBUG] Feature set configuration: %#v, %#v", featureName, opts) - - feature, err := client.AppFeatureUpdate(context.TODO(), app, featureName, opts) - if err != nil { - return err - } - - d.SetId(buildCompositeID(app, feature.ID)) - - return resourceHerokuAppFeatureRead(d, meta) -} - -func resourceHerokuAppFeatureUpdate(d *schema.ResourceData, meta interface{}) error { - if d.HasChange("enabled") { - return resourceHerokuAppFeatureCreate(d, meta) - } - - return resourceHerokuAppFeatureRead(d, meta) -} - -func resourceHerokuAppFeatureDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app, id := parseCompositeID(d.Id()) - featureName := d.Get("name").(string) - - log.Printf("[INFO] Deleting app feature %s (%s) for app %s", featureName, id, app) - opts := heroku.AppFeatureUpdateOpts{Enabled: false} - _, err := client.AppFeatureUpdate(context.TODO(), app, id, opts) - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_app_feature_test.go b/builtin/providers/heroku/resource_heroku_app_feature_test.go deleted file mode 100644 index 27e856cbd..000000000 --- a/builtin/providers/heroku/resource_heroku_app_feature_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "testing" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuAppFeature(t *testing.T) { - var feature heroku.AppFeature - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuFeatureDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuFeature_basic(appName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature), - testAccCheckHerokuFeatureEnabled(&feature, true), - resource.TestCheckResourceAttr( - "heroku_app_feature.runtime_metrics", "enabled", "true", - ), - ), - }, - { - Config: testAccCheckHerokuFeature_disabled(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature), - testAccCheckHerokuFeatureEnabled(&feature, false), - resource.TestCheckResourceAttr( - "heroku_app_feature.runtime_metrics", "enabled", "false", - ), - ), - }, - }, - }) -} - -func testAccCheckHerokuFeatureDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_app_feature" { - continue - } - - _, err := client.AppFeatureInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Feature still exists") - } - } - - return nil -} - -func testAccCheckHerokuFeatureExists(n string, feature *heroku.AppFeature) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No feature ID is set") - } - - app, id := parseCompositeID(rs.Primary.ID) - if app != rs.Primary.Attributes["app"] { - return fmt.Errorf("Bad app: %s", app) - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundFeature, err := client.AppFeatureInfo(context.TODO(), app, id) - if err != nil { - return err - } - - if foundFeature.ID != id { - return fmt.Errorf("Feature not found") - } - - *feature = *foundFeature - return nil - } -} - -func testAccCheckHerokuFeatureEnabled(feature *heroku.AppFeature, enabled bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if feature.Enabled != enabled { - return fmt.Errorf("Bad 
enabled: %v", feature.Enabled) - } - - return nil - } -} - -func testAccCheckHerokuFeature_basic(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "example" { - name = "%s" - region = "us" -} - -resource "heroku_app_feature" "runtime_metrics" { - app = "${heroku_app.example.name}" - name = "log-runtime-metrics" -} -`, appName) -} - -func testAccCheckHerokuFeature_disabled(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "example" { - name = "%s" - region = "us" -} - -resource "heroku_app_feature" "runtime_metrics" { - app = "${heroku_app.example.name}" - name = "log-runtime-metrics" - enabled = false -} -`, appName) -} diff --git a/builtin/providers/heroku/resource_heroku_app_test.go b/builtin/providers/heroku/resource_heroku_app_test.go deleted file mode 100644 index 1a28f1813..000000000 --- a/builtin/providers/heroku/resource_heroku_app_test.go +++ /dev/null @@ -1,595 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuApp_Basic(t *testing.T) { - var app heroku.App - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributes(&app, appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO", "bar"), - ), - }, - }, - }) -} - -func TestAccHerokuApp_NameChange(t *testing.T) { - var app heroku.App - appName := 
fmt.Sprintf("tftest-%s", acctest.RandString(10)) - appName2 := fmt.Sprintf("%s-v2", appName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributes(&app, appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO", "bar"), - ), - }, - { - Config: testAccCheckHerokuAppConfig_updated(appName2), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributesUpdated(&app, appName2), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", appName2), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO", "bing"), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.BAZ", "bar"), - ), - }, - }, - }) -} - -func TestAccHerokuApp_NukeVars(t *testing.T) { - var app heroku.App - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributes(&app, appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO", "bar"), - ), - }, - { - Config: testAccCheckHerokuAppConfig_no_vars(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", 
&app), - testAccCheckHerokuAppAttributesNoVars(&app, appName), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", appName), - resource.TestCheckNoResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO"), - ), - }, - }, - }) -} - -func TestAccHerokuApp_Buildpacks(t *testing.T) { - var app heroku.App - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_go(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppBuildpacks(appName, false), - resource.TestCheckResourceAttr("heroku_app.foobar", "buildpacks.0", "heroku/go"), - ), - }, - { - Config: testAccCheckHerokuAppConfig_multi(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppBuildpacks(appName, true), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "buildpacks.0", "https://github.com/heroku/heroku-buildpack-multi-procfile"), - resource.TestCheckResourceAttr("heroku_app.foobar", "buildpacks.1", "heroku/go"), - ), - }, - { - Config: testAccCheckHerokuAppConfig_no_vars(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppNoBuildpacks(appName), - resource.TestCheckNoResourceAttr("heroku_app.foobar", "buildpacks.0"), - ), - }, - }, - }) -} - -func TestAccHerokuApp_ExternallySetBuildpacks(t *testing.T) { - var app heroku.App - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_no_vars(appName), - 
Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppNoBuildpacks(appName), - resource.TestCheckNoResourceAttr("heroku_app.foobar", "buildpacks.0"), - ), - }, - { - PreConfig: testAccInstallUnconfiguredBuildpack(t, appName), - Config: testAccCheckHerokuAppConfig_no_vars(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppBuildpacks(appName, false), - resource.TestCheckNoResourceAttr("heroku_app.foobar", "buildpacks.0"), - ), - }, - }, - }) -} - -func TestAccHerokuApp_Organization(t *testing.T) { - var app heroku.OrganizationApp - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - org := os.Getenv("HEROKU_ORGANIZATION") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if org == "" { - t.Skip("HEROKU_ORGANIZATION is not set; skipping test.") - } - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_organization(appName, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExistsOrg("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributesOrg(&app, appName, "", org), - ), - }, - }, - }) -} - -func TestAccHerokuApp_Space(t *testing.T) { - var app heroku.OrganizationApp - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - org := os.Getenv("HEROKU_SPACES_ORGANIZATION") - spaceName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if org == "" { - t.Skip("HEROKU_ORGANIZATION is not set; skipping test.") - } - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuAppDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuAppConfig_space(appName, spaceName, org), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckHerokuAppExistsOrg("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributesOrg(&app, appName, spaceName, org), - ), - }, - }, - }) -} - -func testAccCheckHerokuAppDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_app" { - continue - } - - _, err := client.AppInfo(context.TODO(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("App still exists") - } - } - - return nil -} - -func testAccCheckHerokuAppAttributes(app *heroku.App, appName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - if app.Region.Name != "us" { - return fmt.Errorf("Bad region: %s", app.Region.Name) - } - - if app.Stack.Name != "heroku-16" { - return fmt.Errorf("Bad stack: %s", app.Stack.Name) - } - - if app.Name != appName { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfoForApp(context.TODO(), app.Name) - if err != nil { - return err - } - - if vars["FOO"] == nil || *vars["FOO"] != "bar" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - return nil - } -} - -func testAccCheckHerokuAppAttributesUpdated(app *heroku.App, appName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - if app.Name != appName { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfoForApp(context.TODO(), app.Name) - if err != nil { - return err - } - - // Make sure we kept the old one - if vars["FOO"] == nil || *vars["FOO"] != "bing" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - if vars["BAZ"] == nil || *vars["BAZ"] != "bar" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - return nil - - } -} - -func testAccCheckHerokuAppAttributesNoVars(app *heroku.App, appName string) resource.TestCheckFunc { - return func(s *terraform.State) 
error { - client := testAccProvider.Meta().(*heroku.Service) - - if app.Name != appName { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfoForApp(context.TODO(), app.Name) - if err != nil { - return err - } - - if len(vars) != 0 { - return fmt.Errorf("vars exist: %v", vars) - } - - return nil - } -} - -func testAccCheckHerokuAppBuildpacks(appName string, multi bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - results, err := client.BuildpackInstallationList(context.TODO(), appName, nil) - if err != nil { - return err - } - - buildpacks := []string{} - for _, installation := range results { - buildpacks = append(buildpacks, installation.Buildpack.Name) - } - - if multi { - herokuMulti := "https://github.com/heroku/heroku-buildpack-multi-procfile" - if len(buildpacks) != 2 || buildpacks[0] != herokuMulti || buildpacks[1] != "heroku/go" { - return fmt.Errorf("Bad buildpacks: %v", buildpacks) - } - - return nil - } - - if len(buildpacks) != 1 || buildpacks[0] != "heroku/go" { - return fmt.Errorf("Bad buildpacks: %v", buildpacks) - } - - return nil - } -} - -func testAccCheckHerokuAppNoBuildpacks(appName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - results, err := client.BuildpackInstallationList(context.TODO(), appName, nil) - if err != nil { - return err - } - - buildpacks := []string{} - for _, installation := range results { - buildpacks = append(buildpacks, installation.Buildpack.Name) - } - - if len(buildpacks) != 0 { - return fmt.Errorf("Bad buildpacks: %v", buildpacks) - } - - return nil - } -} - -func testAccCheckHerokuAppAttributesOrg(app *heroku.OrganizationApp, appName, space, org string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - if app.Region.Name != "us" && app.Region.Name 
!= "virginia" { - return fmt.Errorf("Bad region: %s", app.Region.Name) - } - - var appSpace string - if app.Space != nil { - appSpace = app.Space.Name - } - - if appSpace != space { - return fmt.Errorf("Bad space: %s", appSpace) - } - - if app.Stack.Name != "heroku-16" { - return fmt.Errorf("Bad stack: %s", app.Stack.Name) - } - - if app.Name != appName { - return fmt.Errorf("Bad name: %s", app.Name) - } - - if app.Organization == nil || app.Organization.Name != org { - return fmt.Errorf("Bad org: %v", app.Organization) - } - - vars, err := client.ConfigVarInfoForApp(context.TODO(), app.Name) - if err != nil { - return err - } - - if vars["FOO"] == nil || *vars["FOO"] != "bar" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - return nil - } -} - -func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundApp, err := client.AppInfo(context.TODO(), rs.Primary.ID) - - if err != nil { - return err - } - - if foundApp.Name != rs.Primary.ID { - return fmt.Errorf("App not found") - } - - *app = *foundApp - - return nil - } -} - -func testAccCheckHerokuAppExistsOrg(n string, app *heroku.OrganizationApp) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundApp, err := client.OrganizationAppInfo(context.TODO(), rs.Primary.ID) - - if err != nil { - return err - } - - if foundApp.Name != rs.Primary.ID { - return fmt.Errorf("App not found") - } - - *app = *foundApp - - return nil - } -} - -func 
testAccInstallUnconfiguredBuildpack(t *testing.T, appName string) func() { - return func() { - client := testAccProvider.Meta().(*heroku.Service) - - opts := heroku.BuildpackInstallationUpdateOpts{ - Updates: []struct { - Buildpack string `json:"buildpack" url:"buildpack,key"` - }{ - {Buildpack: "heroku/go"}, - }, - } - - _, err := client.BuildpackInstallationUpdate(context.TODO(), appName, opts) - if err != nil { - t.Fatalf("Error updating buildpacks: %s", err) - } - } -} - -func testAccCheckHerokuAppConfig_basic(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" - - config_vars { - FOO = "bar" - } -}`, appName) -} - -func testAccCheckHerokuAppConfig_go(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" - - buildpacks = ["heroku/go"] -}`, appName) -} - -func testAccCheckHerokuAppConfig_multi(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" - - buildpacks = [ - "https://github.com/heroku/heroku-buildpack-multi-procfile", - "heroku/go" - ] -}`, appName) -} - -func testAccCheckHerokuAppConfig_updated(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" - - config_vars { - FOO = "bing" - BAZ = "bar" - } -}`, appName) -} - -func testAccCheckHerokuAppConfig_no_vars(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" -}`, appName) -} - -func testAccCheckHerokuAppConfig_organization(appName, org string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" - - organization { - name = "%s" - } - - config_vars { - FOO = "bar" - } -}`, appName, org) -} - -func testAccCheckHerokuAppConfig_space(appName, spaceName, org string) string { - return fmt.Sprintf(` -resource "heroku_space" "foobar" { - name = "%s" - organization = "%s" - region 
= "virginia" -} -resource "heroku_app" "foobar" { - name = "%s" - space = "${heroku_space.foobar.name}" - region = "virginia" - - organization { - name = "%s" - } - - config_vars { - FOO = "bar" - } -}`, spaceName, org, appName, org) -} diff --git a/builtin/providers/heroku/resource_heroku_cert.go b/builtin/providers/heroku/resource_heroku_cert.go deleted file mode 100644 index 764ee7c2a..000000000 --- a/builtin/providers/heroku/resource_heroku_cert.go +++ /dev/null @@ -1,133 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuCert() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuCertCreate, - Read: resourceHerokuCertRead, - Update: resourceHerokuCertUpdate, - Delete: resourceHerokuCertDelete, - - Schema: map[string]*schema.Schema{ - "app": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "certificate_chain": { - Type: schema.TypeString, - Required: true, - }, - - "private_key": { - Type: schema.TypeString, - Required: true, - }, - - "cname": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceHerokuCertCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - preprocess := true - opts := heroku.SSLEndpointCreateOpts{ - CertificateChain: d.Get("certificate_chain").(string), - Preprocess: &preprocess, - PrivateKey: d.Get("private_key").(string)} - - log.Printf("[DEBUG] SSL Certificate create configuration: %#v, %#v", app, opts) - a, err := client.SSLEndpointCreate(context.TODO(), app, opts) - if err != nil { - return fmt.Errorf("Error creating SSL endpoint: %s", err) - } - - d.SetId(a.ID) - log.Printf("[INFO] SSL Certificate ID: %s", d.Id()) - - return resourceHerokuCertRead(d, meta) -} - -func resourceHerokuCertRead(d 
*schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - cert, err := resourceHerokuSSLCertRetrieve( - d.Get("app").(string), d.Id(), client) - if err != nil { - return err - } - - d.Set("certificate_chain", cert.CertificateChain) - d.Set("name", cert.Name) - d.Set("cname", cert.CName) - - return nil -} - -func resourceHerokuCertUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - preprocess := true - rollback := false - opts := heroku.SSLEndpointUpdateOpts{ - CertificateChain: heroku.String(d.Get("certificate_chain").(string)), - Preprocess: &preprocess, - PrivateKey: heroku.String(d.Get("private_key").(string)), - Rollback: &rollback} - - if d.HasChange("certificate_chain") || d.HasChange("private_key") { - log.Printf("[DEBUG] SSL Certificate update configuration: %#v, %#v", app, opts) - _, err := client.SSLEndpointUpdate(context.TODO(), app, d.Id(), opts) - if err != nil { - return fmt.Errorf("Error updating SSL endpoint: %s", err) - } - } - - return resourceHerokuCertRead(d, meta) -} - -func resourceHerokuCertDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting SSL Cert: %s", d.Id()) - - // Destroy the app - _, err := client.SSLEndpointDelete(context.TODO(), d.Get("app").(string), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting SSL Cert: %s", err) - } - - d.SetId("") - return nil -} - -func resourceHerokuSSLCertRetrieve(app string, id string, client *heroku.Service) (*heroku.SSLEndpoint, error) { - addon, err := client.SSLEndpointInfo(context.TODO(), app, id) - - if err != nil { - return nil, fmt.Errorf("Error retrieving SSL Cert: %s", err) - } - - return addon, nil -} diff --git a/builtin/providers/heroku/resource_heroku_cert_test.go b/builtin/providers/heroku/resource_heroku_cert_test.go deleted file mode 100644 index f37cd7639..000000000 --- 
a/builtin/providers/heroku/resource_heroku_cert_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" - "testing" - "time" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// We break apart testing for EU and US because at present, Heroku deals with -// each a bit differently and the setup/teardown of separate tests seems to -// help them to perform more consistently. -// https://devcenter.heroku.com/articles/ssl-endpoint#add-certificate-and-intermediaries -// -// We also have a time.Sleep() set for the update step (step 2 of 2) in each -// region's tests. This is somewhat kludgy, but the Heroku API SSL Endpoint -// handles parts of the create and update requests asynchronously, and if you -// add a cert+key then immediately update it, and then delete it (end of test), -// there are scenarios where the operations get out of order. For now, sleeping -// on update seems to allow the test to run smoothly; in real life, this test -// case is definitely an extreme edge case. 
-func TestAccHerokuCert_EU(t *testing.T) { - var endpoint heroku.SSLEndpoint - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - wd, _ := os.Getwd() - certFile := wd + "/test-fixtures/terraform.cert" - certFile2 := wd + "/test-fixtures/terraform2.cert" - keyFile := wd + "/test-fixtures/terraform.key" - keyFile2 := wd + "/test-fixtures/terraform2.key" - - certificateChainBytes, _ := ioutil.ReadFile(certFile) - certificateChain := string(certificateChainBytes) - certificateChain2Bytes, _ := ioutil.ReadFile(certFile2) - certificateChain2 := string(certificateChain2Bytes) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuCertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuCertEUConfig(appName, certFile, keyFile), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuCertExists("heroku_cert.ssl_certificate", &endpoint), - testAccCheckHerokuCertificateChain(&endpoint, certificateChain), - resource.TestCheckResourceAttr( - "heroku_cert.ssl_certificate", - "cname", fmt.Sprintf("%s.herokuapp.com", appName)), - ), - }, - { - PreConfig: sleep(t, 15), - Config: testAccCheckHerokuCertEUConfig(appName, certFile2, keyFile2), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuCertExists("heroku_cert.ssl_certificate", &endpoint), - testAccCheckHerokuCertificateChain(&endpoint, certificateChain2), - resource.TestCheckResourceAttr( - "heroku_cert.ssl_certificate", - "cname", fmt.Sprintf("%s.herokuapp.com", appName)), - ), - }, - }, - }) -} - -func TestAccHerokuCert_US(t *testing.T) { - var endpoint heroku.SSLEndpoint - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - wd, _ := os.Getwd() - certFile := wd + "/test-fixtures/terraform.cert" - certFile2 := wd + "/test-fixtures/terraform2.cert" - keyFile := wd + "/test-fixtures/terraform.key" - keyFile2 := wd + "/test-fixtures/terraform2.key" - - certificateChainBytes, _ := 
ioutil.ReadFile(certFile) - certificateChain := string(certificateChainBytes) - certificateChain2Bytes, _ := ioutil.ReadFile(certFile2) - certificateChain2 := string(certificateChain2Bytes) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuCertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuCertUSConfig(appName, certFile2, keyFile2), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuCertExists("heroku_cert.ssl_certificate", &endpoint), - testAccCheckHerokuCertificateChain(&endpoint, certificateChain2), - resource.TestMatchResourceAttr( - "heroku_cert.ssl_certificate", - "cname", regexp.MustCompile(`herokussl`)), - ), - }, - { - PreConfig: sleep(t, 15), - Config: testAccCheckHerokuCertUSConfig(appName, certFile, keyFile), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuCertExists("heroku_cert.ssl_certificate", &endpoint), - testAccCheckHerokuCertificateChain(&endpoint, certificateChain), - resource.TestMatchResourceAttr( - "heroku_cert.ssl_certificate", - "cname", regexp.MustCompile(`herokussl`)), - ), - }, - }, - }) -} - -func testAccCheckHerokuCertEUConfig(appName, certFile, keyFile string) string { - return strings.TrimSpace(fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "eu" -} - -resource "heroku_addon" "ssl" { - app = "${heroku_app.foobar.name}" - plan = "ssl:endpoint" -} - -resource "heroku_cert" "ssl_certificate" { - app = "${heroku_app.foobar.name}" - depends_on = ["heroku_addon.ssl"] - certificate_chain="${file("%s")}" - private_key="${file("%s")}" -}`, appName, certFile, keyFile)) -} - -func testAccCheckHerokuCertUSConfig(appName, certFile, keyFile string) string { - return strings.TrimSpace(fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" -} - -resource "heroku_addon" "ssl" { - app = "${heroku_app.foobar.name}" - plan = "ssl:endpoint" -} - -resource 
"heroku_cert" "ssl_certificate" { - app = "${heroku_app.foobar.name}" - depends_on = ["heroku_addon.ssl"] - certificate_chain="${file("%s")}" - private_key="${file("%s")}" -}`, appName, certFile, keyFile)) -} - -func sleep(t *testing.T, amount time.Duration) func() { - return func() { - time.Sleep(amount * time.Second) - } -} - -func testAccCheckHerokuCertDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_cert" { - continue - } - - _, err := client.SSLEndpointInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Cerfificate still exists") - } - } - - return nil -} - -func testAccCheckHerokuCertificateChain(endpoint *heroku.SSLEndpoint, chain string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if endpoint.CertificateChain != chain { - return fmt.Errorf("Bad certificate chain: %s", endpoint.CertificateChain) - } - - return nil - } -} - -func testAccCheckHerokuCertExists(n string, endpoint *heroku.SSLEndpoint) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No SSL endpoint ID is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundEndpoint, err := client.SSLEndpointInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err != nil { - return err - } - - if foundEndpoint.ID != rs.Primary.ID { - return fmt.Errorf("SSL endpoint not found") - } - - *endpoint = *foundEndpoint - - return nil - } -} diff --git a/builtin/providers/heroku/resource_heroku_domain.go b/builtin/providers/heroku/resource_heroku_domain.go deleted file mode 100644 index 1fb9bf128..000000000 --- a/builtin/providers/heroku/resource_heroku_domain.go +++ /dev/null @@ -1,87 +0,0 @@ -package heroku - -import ( - 
"context" - "fmt" - "log" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuDomain() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuDomainCreate, - Read: resourceHerokuDomainRead, - Delete: resourceHerokuDomainDelete, - - Schema: map[string]*schema.Schema{ - "hostname": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "app": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cname": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceHerokuDomainCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - hostname := d.Get("hostname").(string) - - log.Printf("[DEBUG] Domain create configuration: %#v, %#v", app, hostname) - - do, err := client.DomainCreate(context.TODO(), app, heroku.DomainCreateOpts{Hostname: hostname}) - if err != nil { - return err - } - - d.SetId(do.ID) - d.Set("hostname", do.Hostname) - d.Set("cname", do.CName) - - log.Printf("[INFO] Domain ID: %s", d.Id()) - return nil -} - -func resourceHerokuDomainDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting Domain: %s", d.Id()) - - // Destroy the domain - _, err := client.DomainDelete(context.TODO(), d.Get("app").(string), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting domain: %s", err) - } - - return nil -} - -func resourceHerokuDomainRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - do, err := client.DomainInfo(context.TODO(), app, d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving domain: %s", err) - } - - d.Set("hostname", do.Hostname) - d.Set("cname", do.CName) - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_domain_test.go 
b/builtin/providers/heroku/resource_heroku_domain_test.go deleted file mode 100644 index e2863f9a4..000000000 --- a/builtin/providers/heroku/resource_heroku_domain_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "testing" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuDomain_Basic(t *testing.T) { - var domain heroku.Domain - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuDomainDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuDomainConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuDomainExists("heroku_domain.foobar", &domain), - testAccCheckHerokuDomainAttributes(&domain), - resource.TestCheckResourceAttr( - "heroku_domain.foobar", "hostname", "terraform.example.com"), - resource.TestCheckResourceAttr( - "heroku_domain.foobar", "app", appName), - resource.TestCheckResourceAttr( - "heroku_domain.foobar", "cname", "terraform.example.com.herokudns.com"), - ), - }, - }, - }) -} - -func testAccCheckHerokuDomainDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_domain" { - continue - } - - _, err := client.DomainInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Domain still exists") - } - } - - return nil -} - -func testAccCheckHerokuDomainAttributes(Domain *heroku.Domain) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if Domain.Hostname != "terraform.example.com" { - return fmt.Errorf("Bad hostname: %s", Domain.Hostname) - } - - return nil - } -} - -func 
testAccCheckHerokuDomainExists(n string, Domain *heroku.Domain) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Domain ID is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundDomain, err := client.DomainInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err != nil { - return err - } - - if foundDomain.ID != rs.Primary.ID { - return fmt.Errorf("Domain not found") - } - - *Domain = *foundDomain - - return nil - } -} - -func testAccCheckHerokuDomainConfig_basic(appName string) string { - return fmt.Sprintf(`resource "heroku_app" "foobar" { - name = "%s" - region = "us" -} - -resource "heroku_domain" "foobar" { - app = "${heroku_app.foobar.name}" - hostname = "terraform.example.com" -}`, appName) -} diff --git a/builtin/providers/heroku/resource_heroku_drain.go b/builtin/providers/heroku/resource_heroku_drain.go deleted file mode 100644 index fb27fa07b..000000000 --- a/builtin/providers/heroku/resource_heroku_drain.go +++ /dev/null @@ -1,102 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - "strings" - "time" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuDrain() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuDrainCreate, - Read: resourceHerokuDrainRead, - Delete: resourceHerokuDrainDelete, - - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "app": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "token": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -const retryableError = `App hasn't yet been assigned a log channel. 
Please try again momentarily.` - -func resourceHerokuDrainCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - app := d.Get("app").(string) - url := d.Get("url").(string) - - log.Printf("[DEBUG] Drain create configuration: %#v, %#v", app, url) - - var dr *heroku.LogDrain - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - d, err := client.LogDrainCreate(context.TODO(), app, heroku.LogDrainCreateOpts{URL: url}) - if err != nil { - if strings.Contains(err.Error(), retryableError) { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - dr = d - return nil - }) - if err != nil { - return err - } - - d.SetId(dr.ID) - d.Set("url", dr.URL) - d.Set("token", dr.Token) - - log.Printf("[INFO] Drain ID: %s", d.Id()) - return nil -} - -func resourceHerokuDrainDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting drain: %s", d.Id()) - - // Destroy the drain - _, err := client.LogDrainDelete(context.TODO(), d.Get("app").(string), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting drain: %s", err) - } - - return nil -} - -func resourceHerokuDrainRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - dr, err := client.LogDrainInfo(context.TODO(), d.Get("app").(string), d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving drain: %s", err) - } - - d.Set("url", dr.URL) - d.Set("token", dr.Token) - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_drain_test.go b/builtin/providers/heroku/resource_heroku_drain_test.go deleted file mode 100644 index 37614413b..000000000 --- a/builtin/providers/heroku/resource_heroku_drain_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "testing" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuDrain_Basic(t *testing.T) { - var drain heroku.LogDrain - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuDrainDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuDrainConfig_basic(appName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuDrainExists("heroku_drain.foobar", &drain), - testAccCheckHerokuDrainAttributes(&drain), - resource.TestCheckResourceAttr( - "heroku_drain.foobar", "url", "syslog://terraform.example.com:1234"), - resource.TestCheckResourceAttr( - "heroku_drain.foobar", "app", appName), - ), - }, - }, - }) -} - -func testAccCheckHerokuDrainDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_drain" { - continue - } - - _, err := client.LogDrainInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Drain still exists") - } - } - - return nil -} - -func testAccCheckHerokuDrainAttributes(Drain *heroku.LogDrain) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if Drain.URL != "syslog://terraform.example.com:1234" { - return fmt.Errorf("Bad URL: %s", Drain.URL) - } - - if Drain.Token == "" { - return fmt.Errorf("No token: %#v", Drain) - } - - return nil - } -} - -func testAccCheckHerokuDrainExists(n string, Drain *heroku.LogDrain) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Drain ID is set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundDrain, err := 
client.LogDrainInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID) - - if err != nil { - return err - } - - if foundDrain.ID != rs.Primary.ID { - return fmt.Errorf("Drain not found") - } - - *Drain = *foundDrain - - return nil - } -} - -func testAccCheckHerokuDrainConfig_basic(appName string) string { - return fmt.Sprintf(` -resource "heroku_app" "foobar" { - name = "%s" - region = "us" -} - -resource "heroku_drain" "foobar" { - app = "${heroku_app.foobar.name}" - url = "syslog://terraform.example.com:1234" -}`, appName) -} diff --git a/builtin/providers/heroku/resource_heroku_pipeline.go b/builtin/providers/heroku/resource_heroku_pipeline.go deleted file mode 100644 index 94b0959c4..000000000 --- a/builtin/providers/heroku/resource_heroku_pipeline.go +++ /dev/null @@ -1,109 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuPipeline() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuPipelineCreate, - Update: resourceHerokuPipelineUpdate, - Read: resourceHerokuPipelineRead, - Delete: resourceHerokuPipelineDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHerokuPipelineImport, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceHerokuPipelineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - client := meta.(*heroku.Service) - - p, err := client.PipelineInfo(context.TODO(), d.Id()) - if err != nil { - return nil, err - } - - d.Set("name", p.Name) - - return []*schema.ResourceData{d}, nil -} - -func resourceHerokuPipelineCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - opts := heroku.PipelineCreateOpts{ - Name: d.Get("name").(string), - } - - log.Printf("[DEBUG] Pipeline create configuration: %#v", opts) - - p, err := 
client.PipelineCreate(context.TODO(), opts) - if err != nil { - return fmt.Errorf("Error creating pipeline: %s", err) - } - - d.SetId(p.ID) - d.Set("name", p.Name) - - log.Printf("[INFO] Pipeline ID: %s", d.Id()) - - return resourceHerokuPipelineUpdate(d, meta) -} - -func resourceHerokuPipelineUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - if d.HasChange("name") { - name := d.Get("name").(string) - opts := heroku.PipelineUpdateOpts{ - Name: &name, - } - - _, err := client.PipelineUpdate(context.TODO(), d.Id(), opts) - if err != nil { - return err - } - } - - return resourceHerokuPipelineRead(d, meta) -} - -func resourceHerokuPipelineDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting pipeline: %s", d.Id()) - - _, err := client.PipelineDelete(context.TODO(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting pipeline: %s", err) - } - - return nil -} - -func resourceHerokuPipelineRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - p, err := client.PipelineInfo(context.TODO(), d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving pipeline: %s", err) - } - - d.Set("name", p.Name) - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_pipeline_coupling.go b/builtin/providers/heroku/resource_heroku_pipeline_coupling.go deleted file mode 100644 index 86443c9c1..000000000 --- a/builtin/providers/heroku/resource_heroku_pipeline_coupling.go +++ /dev/null @@ -1,105 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - - "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuPipelineCoupling() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuPipelineCouplingCreate, - Read: resourceHerokuPipelineCouplingRead, - Delete: resourceHerokuPipelineCouplingDelete, - - Importer: 
&schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "app_id": { - Type: schema.TypeString, - Computed: true, - }, - "app": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "pipeline": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateUUID, - }, - "stage": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validatePipelineStageName, - }, - }, - } -} - -func resourceHerokuPipelineCouplingCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - opts := heroku.PipelineCouplingCreateOpts{ - App: d.Get("app").(string), - Pipeline: d.Get("pipeline").(string), - Stage: d.Get("stage").(string), - } - - log.Printf("[DEBUG] PipelineCoupling create configuration: %#v", opts) - - p, err := client.PipelineCouplingCreate(context.TODO(), opts) - if err != nil { - return fmt.Errorf("Error creating pipeline: %s", err) - } - - d.SetId(p.ID) - - log.Printf("[INFO] PipelineCoupling ID: %s", d.Id()) - - return resourceHerokuPipelineCouplingRead(d, meta) -} - -func resourceHerokuPipelineCouplingDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting pipeline: %s", d.Id()) - - _, err := client.PipelineCouplingDelete(context.TODO(), d.Id()) - if err != nil { - return fmt.Errorf("Error deleting pipeline: %s", err) - } - - return nil -} - -func resourceHerokuPipelineCouplingRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - p, err := client.PipelineCouplingInfo(context.TODO(), d.Id()) - if err != nil { - return fmt.Errorf("Error retrieving pipeline: %s", err) - } - - // grab App info - app, err := client.AppInfo(context.TODO(), p.App.ID) - if err != nil { - log.Printf("[WARN] Error looking up addional App info for pipeline coupling (%s): %s", d.Id(), err) - } else { - d.Set("app", app.Name) - } 
- - d.Set("app_id", p.App.ID) - d.Set("stage", p.Stage) - d.Set("pipeline", p.Pipeline.ID) - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go b/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go deleted file mode 100644 index efdd05d1b..000000000 --- a/builtin/providers/heroku/resource_heroku_pipeline_coupling_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "testing" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuPipelineCoupling_Basic(t *testing.T) { - var coupling heroku.PipelineCoupling - - appName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - stageName := "development" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuPipelineCouplingDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuPipelineCouplingExists("heroku_pipeline_coupling.default", &coupling), - testAccCheckHerokuPipelineCouplingAttributes( - &coupling, - "heroku_pipeline.default", - stageName, - ), - ), - }, - }, - }) -} - -func testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName string) string { - return fmt.Sprintf(` -resource "heroku_app" "default" { - name = "%s" - region = "us" -} - -resource "heroku_pipeline" "default" { - name = "%s" -} - -resource "heroku_pipeline_coupling" "default" { - app = "${heroku_app.default.id}" - pipeline = "${heroku_pipeline.default.id}" - stage = "%s" -} -`, appName, pipelineName, stageName) -} - -func testAccCheckHerokuPipelineCouplingExists(n string, 
pipeline *heroku.PipelineCoupling) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No coupling ID set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundPipelineCoupling, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID) - if err != nil { - return err - } - - if foundPipelineCoupling.ID != rs.Primary.ID { - return fmt.Errorf("PipelineCoupling not found: %s != %s", foundPipelineCoupling.ID, rs.Primary.ID) - } - - *pipeline = *foundPipelineCoupling - - return nil - } -} - -func testAccCheckHerokuPipelineCouplingAttributes(coupling *heroku.PipelineCoupling, pipelineResource, stageName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - pipeline, ok := s.RootModule().Resources[pipelineResource] - if !ok { - return fmt.Errorf("Pipeline not found: %s", pipelineResource) - } - - if coupling.Pipeline.ID != pipeline.Primary.ID { - return fmt.Errorf("Bad pipeline ID: %v != %v", coupling.Pipeline.ID, pipeline.Primary.ID) - } - if coupling.Stage != stageName { - return fmt.Errorf("Bad stage: %s", coupling.Stage) - } - - return nil - } -} - -func testAccCheckHerokuPipelineCouplingDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_pipeline_coupling" { - continue - } - - _, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("PipelineCoupling still exists") - } - } - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_pipeline_test.go b/builtin/providers/heroku/resource_heroku_pipeline_test.go deleted file mode 100644 index 93f0b250c..000000000 --- a/builtin/providers/heroku/resource_heroku_pipeline_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package heroku - -import ( - "context" 
- "fmt" - "testing" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuPipeline_Basic(t *testing.T) { - var pipeline heroku.Pipeline - pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - pipelineName2 := fmt.Sprintf("%s-2", pipelineName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuPipelineDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuPipelineConfig_basic(pipelineName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuPipelineExists("heroku_pipeline.foobar", &pipeline), - resource.TestCheckResourceAttr( - "heroku_pipeline.foobar", "name", pipelineName), - ), - }, - { - Config: testAccCheckHerokuPipelineConfig_basic(pipelineName2), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "heroku_pipeline.foobar", "name", pipelineName2), - ), - }, - }, - }) -} - -func testAccCheckHerokuPipelineConfig_basic(pipelineName string) string { - return fmt.Sprintf(` -resource "heroku_pipeline" "foobar" { - name = "%s" -} -`, pipelineName) -} - -func testAccCheckHerokuPipelineExists(n string, pipeline *heroku.Pipeline) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No pipeline name set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundPipeline, err := client.PipelineInfo(context.TODO(), rs.Primary.ID) - if err != nil { - return err - } - - if foundPipeline.ID != rs.Primary.ID { - return fmt.Errorf("Pipeline not found") - } - - *pipeline = *foundPipeline - - return nil - } -} - -func testAccCheckHerokuPipelineDestroy(s *terraform.State) error { - client := 
testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_pipeline" { - continue - } - - _, err := client.PipelineInfo(context.TODO(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Pipeline still exists") - } - } - - return nil -} diff --git a/builtin/providers/heroku/resource_heroku_space.go b/builtin/providers/heroku/resource_heroku_space.go deleted file mode 100644 index 0f588fc71..000000000 --- a/builtin/providers/heroku/resource_heroku_space.go +++ /dev/null @@ -1,148 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "log" - "time" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceHerokuSpace() *schema.Resource { - return &schema.Resource{ - Create: resourceHerokuSpaceCreate, - Read: resourceHerokuSpaceRead, - Update: resourceHerokuSpaceUpdate, - Delete: resourceHerokuSpaceDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "organization": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceHerokuSpaceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - opts := heroku.SpaceCreateOpts{} - opts.Name = d.Get("name").(string) - opts.Organization = d.Get("organization").(string) - - if v, ok := d.GetOk("region"); ok { - vs := v.(string) - opts.Region = &vs - } - - space, err := client.SpaceCreate(context.TODO(), opts) - if err != nil { - return err - } - - d.SetId(space.ID) - log.Printf("[INFO] Space ID: %s", d.Id()) - - // Wait for the Space to be allocated - log.Printf("[DEBUG] Waiting for Space (%s) to be allocated", d.Id()) - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"allocating"}, - Target: []string{"allocated"}, - Refresh: SpaceStateRefreshFunc(client, d.Id()), - Timeout: 20 * time.Minute, - } - - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Space (%s) to become available: %s", d.Id(), err) - } - - return resourceHerokuSpaceRead(d, meta) -} - -func resourceHerokuSpaceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - spaceRaw, _, err := SpaceStateRefreshFunc(client, d.Id())() - if err != nil { - return err - } - space := spaceRaw.(*heroku.Space) - - setSpaceAttributes(d, space) - return nil -} - -func resourceHerokuSpaceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - if !d.HasChange("name") { - return nil - } - - name := d.Get("name").(string) - opts := heroku.SpaceUpdateOpts{Name: &name} - - space, err := client.SpaceUpdate(context.TODO(), d.Id(), opts) - if err != nil { - return err - } - - // The type conversion here can be dropped when the vendored version of - // heroku-go is updated. - setSpaceAttributes(d, (*heroku.Space)(space)) - return nil -} - -func setSpaceAttributes(d *schema.ResourceData, space *heroku.Space) { - d.Set("name", space.Name) - d.Set("organization", space.Organization.Name) - d.Set("region", space.Region.Name) -} - -func resourceHerokuSpaceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*heroku.Service) - - log.Printf("[INFO] Deleting space: %s", d.Id()) - _, err := client.SpaceDelete(context.TODO(), d.Id()) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -// SpaceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Space. 
-func SpaceStateRefreshFunc(client *heroku.Service, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - space, err := client.SpaceInfo(context.TODO(), id) - if err != nil { - return nil, "", err - } - - // The type conversion here can be dropped when the vendored version of - // heroku-go is updated. - return (*heroku.Space)(space), space.State, nil - } -} diff --git a/builtin/providers/heroku/resource_heroku_space_test.go b/builtin/providers/heroku/resource_heroku_space_test.go deleted file mode 100644 index 4f7d7e4a1..000000000 --- a/builtin/providers/heroku/resource_heroku_space_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package heroku - -import ( - "context" - "fmt" - "os" - "testing" - - heroku "github.com/cyberdelia/heroku-go/v3" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccHerokuSpace_Basic(t *testing.T) { - var space heroku.Space - spaceName := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - spaceName2 := fmt.Sprintf("tftest-%s", acctest.RandString(10)) - org := os.Getenv("HEROKU_ORGANIZATION") - - // HEROKU_SPACES_ORGANIZATION allows us to use a special Organization managed by Heroku for the - // strict purpose of testing Heroku Spaces. 
It has the following resource limits - // - 2 spaces - // - 2 apps per space - // - 2 dynos per space - spacesOrg := os.Getenv("HEROKU_SPACES_ORGANIZATION") - if spacesOrg != "" { - org = spacesOrg - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if org == "" { - t.Skip("HEROKU_ORGANIZATION is not set; skipping test.") - } - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckHerokuSpaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckHerokuSpaceConfig_basic(spaceName, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuSpaceExists("heroku_space.foobar", &space), - testAccCheckHerokuSpaceAttributes(&space, spaceName), - ), - }, - { - Config: testAccCheckHerokuSpaceConfig_basic(spaceName2, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuSpaceExists("heroku_space.foobar", &space), - testAccCheckHerokuSpaceAttributes(&space, spaceName2), - ), - }, - }, - }) -} - -func testAccCheckHerokuSpaceConfig_basic(spaceName, orgName string) string { - return fmt.Sprintf(` -resource "heroku_space" "foobar" { - name = "%s" - organization = "%s" - region = "virginia" -} -`, spaceName, orgName) -} - -func testAccCheckHerokuSpaceExists(n string, space *heroku.Space) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No space name set") - } - - client := testAccProvider.Meta().(*heroku.Service) - - foundSpace, err := client.SpaceInfo(context.TODO(), rs.Primary.ID) - if err != nil { - return err - } - - if foundSpace.ID != rs.Primary.ID { - return fmt.Errorf("Space not found") - } - - *space = *foundSpace - - return nil - } -} - -func testAccCheckHerokuSpaceAttributes(space *heroku.Space, spaceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if space.Name != spaceName { - return fmt.Errorf("Bad 
name: %s", space.Name) - } - - return nil - } -} - -func testAccCheckHerokuSpaceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Service) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "heroku_space" { - continue - } - - _, err := client.SpaceInfo(context.TODO(), rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Space still exists") - } - } - - return nil -} diff --git a/builtin/providers/heroku/test-fixtures/terraform.cert b/builtin/providers/heroku/test-fixtures/terraform.cert deleted file mode 100644 index 8f8b982f6..000000000 --- a/builtin/providers/heroku/test-fixtures/terraform.cert +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCzCCAfOgAwIBAgIJAIUuu5XX/tCRMA0GCSqGSIb3DQEBBQUAMBwxGjAYBgNV -BAMMEXd3dy50ZXJyYWZvcm0ub3JnMB4XDTE0MTIxNDIwMzA0MFoXDTI0MTIxMTIw -MzA0MFowHDEaMBgGA1UEAwwRd3d3LnRlcnJhZm9ybS5vcmcwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC7sp6oJ6czdRpl5azB7jaLCwQ38eqV2TRFPVVj -PD7cWyhV2REFtqd7vEF/AUrp3+ACvc6mLdTjDuaGVVga4oA42Qgqz5Wzkl3tnBSB -DlxFXXg+p4UjJWZPLUiOMbvHWNGthO9G1dp5h9rhqV7wJhyAlTqlnV7aaeSWcJgY -fh7xMe50BlAmh6ywpnnlZzsy4eJiJwgbglG8OU0JK+1OxdOUDe/1eUOhFPPx1U/p -25t8Z6qaI8FDLPwTVZzrvOZ0vTQSKyeA0ZhBTH1GhUqroogDlPETgkne6YqvZoxl -o8+9Wdjln2bjYe/nRWYKR5BxC46PnNJMPPJFI/VNLPYanAu5AgMBAAGjUDBOMB0G -A1UdDgQWBBQaJeROghiSQGVn30ARllECycYsczAfBgNVHSMEGDAWgBQaJeROghiS -QGVn30ARllECycYsczAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQCa -iRpZ0b4KVqDT3bqc9UZV491UdBVF9BN0CV4BLvg9KPyRcftujZu0lKFu+wGlAlYr -bV6DjqHgFXltBzIFM/y790EivkgePcFv+0HKy1O8ELLduQcigYT5AC7h34xxWBy7 -96VW6qD7/OOjvexVdKmTfXO/njdmot38/uO9TdfJPQzCHrgpzjBCcI+eBFnvQwzb -gOpMlh04U4nDeITOTbraLur1zWQjzSA9DjaGGA+IQ556MUPAS85YmJ4Jf+f8UW3o -sZmzFojLFd8EhVRZDE4tZzqo/vN0plGv/Kh/oob5Cnp6EJ3BbG6y9tzATMWz/hRo -oeTtQe3gt6gPKBS+UeBf ------END CERTIFICATE----- diff --git a/builtin/providers/heroku/test-fixtures/terraform.key b/builtin/providers/heroku/test-fixtures/terraform.key deleted file mode 100644 index 590c2068d..000000000 --- 
a/builtin/providers/heroku/test-fixtures/terraform.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAu7KeqCenM3UaZeWswe42iwsEN/Hqldk0RT1VYzw+3FsoVdkR -Bbane7xBfwFK6d/gAr3Opi3U4w7mhlVYGuKAONkIKs+Vs5Jd7ZwUgQ5cRV14PqeF -IyVmTy1IjjG7x1jRrYTvRtXaeYfa4ale8CYcgJU6pZ1e2mnklnCYGH4e8THudAZQ -JoessKZ55Wc7MuHiYicIG4JRvDlNCSvtTsXTlA3v9XlDoRTz8dVP6dubfGeqmiPB -Qyz8E1Wc67zmdL00EisngNGYQUx9RoVKq6KIA5TxE4JJ3umKr2aMZaPPvVnY5Z9m -42Hv50VmCkeQcQuOj5zSTDzyRSP1TSz2GpwLuQIDAQABAoIBAQCethLiLWV8ZXDE -6MiD02HbgJ04kR7DRr6kLZCeMLsWqR4aOUnjgudsAWuAcR9fUyagKs8qRWbV+CuF -O3UchpnVd+8oBA+ZoBI8cNYFqpbrMHYUxKIXbfBs0uWfFv6pOblS+C07wGjUisPS -PN1CQ3emYokMsV0bYp8fdmWlkD+pwhHH3vsPm4sYwbabaURRxZYLNyYd/4Czafqz -UBWbAJ+xap6t/WLJCR7goHCVX0DNtpNfzoYK5/rKpQzw0H+L1pB5yghx5GbslthB -xtTb1LjMMl4AUuU5Bv1XLzDZlS/HUYQyefhljlqJPC678KNlegRLjn2YZeZF07X1 -b/KSKDrhAoGBAPVST4JYGS9OoW4Fvu4SV7AJGW1ewHhAmzO1XFrYD8DvSjf3Cinn -ylTYwkQK9ayLLM405Sof2J55NbEakDs5sahN93mqGrk9bWPZrFHgDf0NiMoyI/0N -/ZBXhkBeg9LitBvmEWiBlGK55At0zePWVDcUtXg+d0tSJC4o0y1DoGo7AoGBAMPe -ML95QKabWsCRpGKVwhOFrEp68rZlugwJEjzubC8EXHX8dNy3IURl1j8tSM0kDGay -CDMpxOjqzVyLmLqfIghiG1nkQU7EnJdx86k3AaesHoJff3Ywi+9DwC5r/T3zg20U -Qkr1c4Yxv0Lk3IluHwjPaT/uMd4utlPB6EpAvY6bAoGAdcNBb6yiylbQn2Qat2YO -ue5kSmBFvHQnDLdu0h0N0uwLkLoCIwOl2P0EpG0uadmVdJdnusT203wUDiRWQFf9 -tHFY7wp9MZcPP/NqCROpI2Sv2YAgToW8xuF9DMFSPpWdKBdVG/m4JXxewDEd9NUa -MCa8xjAWTA3uWEo4tW3VP6kCgYAKAYvT/EnFOSKFu+r97lCf1rBajbVghAnhG4WG -/1cff8WJcYA21lQovlsXlySk9jZ7+JRaqMOacoRTOf5vajm+2+Qxz2tWrsyhH/0m -o9y9yBk259IHI6vCaV+j/3hMdeg85lAMrEVekaQHstFhY/LJ7G6gCXcatqAx3zIS -uQP2CQKBgQDGx31nzcrYcJz0EtZPJ5n4JMKrx9S4ZQ2+C7p6oDYXJl2Woh0f+c3K -X1oW1MLFChq+5Or9IVh11cePtV3a6X10Q9xSLQnXdPC9X4QsZoWOUBh5fzw301g1 -WB9ncXqqew3EPHm1G5j4hZ+Gjz/P53TLRYTEYW8fm1Rv9ga5wiJpiw== ------END RSA PRIVATE KEY----- diff --git a/builtin/providers/heroku/test-fixtures/terraform2.cert b/builtin/providers/heroku/test-fixtures/terraform2.cert deleted file mode 100644 index 41b7785ee..000000000 --- 
a/builtin/providers/heroku/test-fixtures/terraform2.cert +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE5jCCAs6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDEwhDZXJ0 -QXV0aDAeFw0xNzA1MDUwMDI4NDhaFw0yNzA1MDUwMDI4NTVaMBMxETAPBgNVBAMT -CENlcnRBdXRoMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA3f07ApBL -Infz43y96n6iWtQgk4nVEMN2Qc5a4bGm7Ha/gnNGYnv87CGpd6VNBrd07zXlOGXo -5DCly3AxjmFaHw9wzKadXmU2P91tQ4S253tFN8lirMPe+2hS5RnAPw9NsvYsU0Iy -wp/+1a2OpDGtUv00fyU9tY8M1wQo/33xgTyc+8DdEDQ08/PjCKbA5vvpfra5fE+S -g0Gcw8j6Y/iXcnj6LrTvY7I9G3doyAUezGCpAqPErVCDj5nokdJ17zs0kWbr/e4z -o7yKvQOw7majlaX/R6v8tuYvEZ8fokXR7ecGeco86d7sH+OHafOkYQgf+forGMh6 -PSJ5z3WWnLKPvzGgvqoDemGfIyfeRHGTI+e8+4DrcvJGlLS0vBFCYxqXhlvmi6xf -u9G0/zKV/VbIeJS1hTus6sYIIRMzABNwYIGGIz4eBjnO1jakowsdxfgsBvpMwVB+ -yZ+yxv/L9DCVK/VW3VfxQoQ+IDRTokfU/6yKWeonYVtTH57upJ7tRKRy2pmFCJEo -c4T7IKg5ENfGhSkKHuN8CLoWdRREh2KTTijET0qUHCWPbOl9MgW6Y4Jf2qYkggGk -CTRDk7iGuLKT6tMqVrqcmA/AhBpgZKg5N+rIbwTfIHzGNql93bc1FX0KVh7LlV64 -9ZRR4mksrk26pDnLqAfiT15aRsIrYZYdbh0CAwEAAaNFMEMwDgYDVR0PAQH/BAQD -AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFDq5vNYjPHDUyIgkTKCX -9N27PvuiMA0GCSqGSIb3DQEBCwUAA4ICAQCW4RfKiHsFLOwDTTVkQzb0G2A9Ajeg -66lWSuNidntbr+w5595G0ae3qCumaVQSNWKtMAkdahzI+0J00YGWpnqS2rJIIgD+ -8I67EpQ6wi60EUMAECNl7vJvLH/IXt6IVY1wr3ci3G56aKVZIsnaoFi2vxnwHNz3 -uOkNcj0OO4Nosw8aThzsitiddu4BVk+8AFq0cylroQzWjoCrkKOyWMyKTcK6qnS7 -ayyCK02AQPhXFYxet7NVY5hgldto2BAsijBX1Xl6XR5QGSrIQw/2nkAywVSnAGip -Ofk/njzZH9FhUnEY1C5vB2SU310LIOq1evvF8nd7csnI7wdjfHQ3m70PO6p9DGK/ -W0tET8NtuW2JV38KSNMrYxF2Hs7Il92x8JVQu9LtjiKTcJLdnnAQl0OvHUAgAmU9 -BRHOsfWD4Cxiwes0OZHuOpoghh7HV1A+JS5e9qNCCEzarQe4H2Zv8JkeBFbIaEH/ -bcT3a2Rtt6cvDw9mSXSw7p85/810n8af4T1D3aeLFdoVrpeTZVUdyFygcuWo1U4D -JxaRAyJHvi4IJmwpjehn7DoFasNefPBVVFi28QCJXFHpe0DNm8MvI5fGhyZ2uW8t -+6PDgumsZLQT7jJNl9ubYV3U0Nsymvvwqx4LY2nql0agEzJ0F9ekoA50csoxe2ir -/DOgG1nKIc186A== ------END CERTIFICATE----- diff --git a/builtin/providers/heroku/test-fixtures/terraform2.key 
b/builtin/providers/heroku/test-fixtures/terraform2.key deleted file mode 100644 index c5fee3428..000000000 --- a/builtin/providers/heroku/test-fixtures/terraform2.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEA3f07ApBLInfz43y96n6iWtQgk4nVEMN2Qc5a4bGm7Ha/gnNG -Ynv87CGpd6VNBrd07zXlOGXo5DCly3AxjmFaHw9wzKadXmU2P91tQ4S253tFN8li -rMPe+2hS5RnAPw9NsvYsU0Iywp/+1a2OpDGtUv00fyU9tY8M1wQo/33xgTyc+8Dd -EDQ08/PjCKbA5vvpfra5fE+Sg0Gcw8j6Y/iXcnj6LrTvY7I9G3doyAUezGCpAqPE -rVCDj5nokdJ17zs0kWbr/e4zo7yKvQOw7majlaX/R6v8tuYvEZ8fokXR7ecGeco8 -6d7sH+OHafOkYQgf+forGMh6PSJ5z3WWnLKPvzGgvqoDemGfIyfeRHGTI+e8+4Dr -cvJGlLS0vBFCYxqXhlvmi6xfu9G0/zKV/VbIeJS1hTus6sYIIRMzABNwYIGGIz4e -BjnO1jakowsdxfgsBvpMwVB+yZ+yxv/L9DCVK/VW3VfxQoQ+IDRTokfU/6yKWeon -YVtTH57upJ7tRKRy2pmFCJEoc4T7IKg5ENfGhSkKHuN8CLoWdRREh2KTTijET0qU -HCWPbOl9MgW6Y4Jf2qYkggGkCTRDk7iGuLKT6tMqVrqcmA/AhBpgZKg5N+rIbwTf -IHzGNql93bc1FX0KVh7LlV649ZRR4mksrk26pDnLqAfiT15aRsIrYZYdbh0CAwEA -AQKCAgAKdpIed9CiykablViaQefDIjZ63cdGKABd760G8Emu4ZX7PxW1NKTiOF/1 -fLwZsfH4CHFKbDtC7iwSX7JmRJ5r0l19t+i490pMTlKFGS9Jz9yeWYamIAFVlkA5 -/jG6hy0hX0sNjZQ46jOnvKt5f8HspHSh/Y5gDWMMi2ynRjdo4QOBNkD1L5DDYt5z -nPCAsqT5zQEHI/UC7MfHzqRGrAPvaFZadzrFVzRcJA+zRdKCzZeJwVBW3vGkhhuZ -K/NVGFRM+i3rZRvX/t4HNLJVOk9BkXZr2WZq9ISJbxednW7cqMP8X5TpbRFyG1ZZ -nxtDW4+uR6VaYLCqSwK0zZUQw7XUtbSSlxPG90dZyNkR5n3grK0wORro8zV/8BMg -qON/jxUJFLOxCBDT5jThjFm6mk9dkUF/uzubm7k5LdRP4v1J8LojEF1BrP5g8JEN -cOBwC2z10uwcSSdVPF7FMJ35EctIzFw+eA2qeYqCywTSd7J1AQ5vNcjc+2wiY2Ff -4qIDAvgaXV+g7JsmqaEFWTs4o5PwKZXp4qEU4hhZ1636vi9aGJPYcsZqWb5os/+j -OQFWGrk16gDyzC3J3Qh8cnVHQko7yrWbhMOXSAN5W/1npovhVeYOVUtCLRfog66b -ZrCmRkoFdRaQeRD3iYHu5dLye8g4k1PWB/+Uq17GjAWp12wenQKCAQEA9usfQVgt -4C2tRwsHUXaRihiY+MM7YlXDwh4qzDXQk8cfdGabR6MIvGPcpsDpxDidPHUuU0qy -NZC4y4rrZnFJaQ3YaD6lDlAuICyHqrQcYBYB7wB9gZy/Un34P8WnE93kTfDZRhd0 -7SF94lBW+cb2BjzcyUsYLf/0UVEt8kTWxP1T39fHyQEGxwJcbPvjRLZoYWkTnPZb -cx/hBk8gclVPUV0w9idNuWz4ittkEF3oEEtu+06pjP8DZ+lJ/Hn6hwJjN66oCXKH 
-9TLnvygJZjub8BKXRpbRJ989dQvGl7w+rlT/gyJvewjtn/GU6OEvFyaHh2Hgqy6y -ROM9nQgGIwtqWwKCAQEA5idim5tM6lYbR5oTeczqVTNE69gnzmQVMFML7Eb2Qnne -BFvpHuKouJksY514rSrXusirYRqhX5WU8exkn/h3T9LkRyLA3pUfBHxqcejMfuRG -MYIy9nIgiV9xEc73hZsi4xCWEgFfmS9WCB7Z46Zi3PayZg4Hrq7C69ez1drt6njQ -R09qCLUJD+DRTgEax08eZNYLeqcy20ofCGu2UZyyr3ZIS7wGLxNqzic26E24r2GG -K2KqiFH6isUS4EnthwV29EqvPcsq57A4s9Uva8xkORUhJCtY/7M1klUo/89HxVGp -OZ4+sxYqZCKcQJO8+i/4RPwF3QYZ/uh/gcZM2z9C5wKCAQAyjsQQkiiajV+8ezKd -aIS2XQD9dqQzJ1J07c5fj+lMSOpU4CmNSoGgaWYlsrxq1BjF50x7+4Bv3VkpPCGl -ES8x1oboGWOcgahgKB4DQuvIdNkigdww7NJz5p0tGaBzPezgVJ94bZcgcsoey8pz -TFzVvCKNCNZDnPP+rnuU7ql3HlPNMpaSvqYPm5knK5BGYn8O6v/8FKl28iEWNJ91 -KaibBVTgIf4VKI3fiLp9a2z34SoxRNMMrq6Y2Tiv/J3ihQehwB5iCNRzzV+MUXtT -NoNgbb4R0xGyc1BXJfkc2ouPEJJc3HEtJQ/avxF5eZo1yErZ2p2xD1erKUhVXe47 -wLufAoIBAQC853DBJXvBD0G+yFDZ9P4VRkp4hWdOuMjHbDJqEWiI8Xvv+fxilElF -krtjW9mz0GlW7uPzhKcVTDH/SzbgMlDDnOYvGPBTAPR/exrnOdu2/ug6NJJdwxi/ -iC3HHyf8anP9CR0T1DrCAZ9MdP4EIwocMQQGTdeyYdCtQNNjYRlMDTNuhFkUonq4 -pJ9GthNjqaXZv/GWD2vnn3PPNpFjdQkYiS4Xs1EkDHzqjjc7/qbqlFJKg+ZSk27f -vZebrjIeU7bqFe61+m7R0csIl58fjJhqXdRg2o9m+JGs9Ob86AYRh9As8Zym4zeS -DvJO8rP2aa8N+Alb+2kU14HoY3mrrsXbAoIBAQDK0jCxdc73h4u2B2zlX4eyHAy7 -oPpwhIjuuMVXbsR5MqlIOpD7QjqujnMTN0MSslV1GzwhfQO7cN6ijQQiWWiHzCKd -O6NqetPQnn19ddqFLWcrl/WzZdVTDeXyhAaFffy+x8dhPVUdPs/ZDXXAF43LFwly -2kSTWnfwZ5Yvi3K2SB/dO48I77qEUF370/wstdHviSttbI5HhtiRljCU6mwpms34 -4KdDCCxPleZ7Dl2m8v+FkdWkZomLi9wo/XzBo/z5RcI5gjt83OJ6pLBTcLDo7WOc -g2XM7rqQoQr8bilH+eMAZtEm/axwZHIcTTqsyt3Mp09KL65MB+581V5TnGSu ------END RSA PRIVATE KEY----- diff --git a/builtin/providers/heroku/validators.go b/builtin/providers/heroku/validators.go deleted file mode 100644 index 0b3702247..000000000 --- a/builtin/providers/heroku/validators.go +++ /dev/null @@ -1,38 +0,0 @@ -package heroku - -import ( - "fmt" - "strings" - - "github.com/satori/uuid" -) - -func validatePipelineStageName(v interface{}, k string) (ws []string, errors []error) { - validPipelineStageNames := []string{ - "review", - 
"development", - "staging", - "production", - } - - for _, s := range validPipelineStageNames { - if v == s { - return - } - } - - err := fmt.Errorf( - "%s is an invalid pipeline stage, must be one of [%s]", - v, - strings.Join(validPipelineStageNames, ", "), - ) - errors = append(errors, err) - return -} - -func validateUUID(v interface{}, k string) (ws []string, errors []error) { - if _, err := uuid.FromString(v.(string)); err != nil { - errors = append(errors, fmt.Errorf("%q is an invalid UUID: %s", k, err)) - } - return -} diff --git a/builtin/providers/heroku/validators_test.go b/builtin/providers/heroku/validators_test.go deleted file mode 100644 index 6131be8bc..000000000 --- a/builtin/providers/heroku/validators_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package heroku - -import "testing" - -func TestPipelineStage(t *testing.T) { - valid := []string{ - "review", - "development", - "staging", - "production", - } - for _, v := range valid { - _, errors := validatePipelineStageName(v, "stage") - if len(errors) != 0 { - t.Fatalf("%q should be a valid stage: %q", v, errors) - } - } - - invalid := []string{ - "foobarbaz", - "another-stage", - "", - } - for _, v := range invalid { - _, errors := validatePipelineStageName(v, "stage") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid stage", v) - } - } -} - -func TestValidateUUID(t *testing.T) { - valid := []string{ - "4812ccbc-2a2e-4c6c-bae4-a3d04ed51c0e", - } - for _, v := range valid { - _, errors := validateUUID(v, "id") - if len(errors) != 0 { - t.Fatalf("%q should be a valid UUID: %q", v, errors) - } - } - - invalid := []string{ - "foobarbaz", - "my-app-name", - } - for _, v := range invalid { - _, errors := validateUUID(v, "id") - if len(errors) == 0 { - t.Fatalf("%q should be an invalid UUID", v) - } - } -} diff --git a/builtin/providers/http/data_source.go b/builtin/providers/http/data_source.go deleted file mode 100644 index 221de4adf..000000000 --- a/builtin/providers/http/data_source.go +++ 
/dev/null @@ -1,104 +0,0 @@ -package http - -import ( - "fmt" - "io/ioutil" - "net/http" - "regexp" - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSource() *schema.Resource { - return &schema.Resource{ - Read: dataSourceRead, - - Schema: map[string]*schema.Schema{ - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "request_headers": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "body": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func dataSourceRead(d *schema.ResourceData, meta interface{}) error { - - url := d.Get("url").(string) - headers := d.Get("request_headers").(map[string]interface{}) - - client := &http.Client{} - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return fmt.Errorf("Error creating request: %s", err) - } - - for name, value := range headers { - req.Header.Set(name, value.(string)) - } - - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("Error during making a request: %s", url) - } - - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("HTTP request error. Response code: %d", resp.StatusCode) - } - - contentType := resp.Header.Get("Content-Type") - if contentType == "" || isContentTypeAllowed(contentType) == false { - return fmt.Errorf("Content-Type is not a text type. Got: %s", contentType) - } - - bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("Error while reading response body. 
%s", err) - } - - d.Set("body", string(bytes)) - d.SetId(time.Now().UTC().String()) - - return nil -} - -// This is to prevent potential issues w/ binary files -// and generally unprintable characters -// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738 -func isContentTypeAllowed(contentType string) bool { - allowedContentTypes := []*regexp.Regexp{ - regexp.MustCompile("^text/.+"), - regexp.MustCompile("^application/json$"), - } - - for _, r := range allowedContentTypes { - if r.MatchString(contentType) { - return true - } - } - - return false -} diff --git a/builtin/providers/http/data_source_test.go b/builtin/providers/http/data_source_test.go deleted file mode 100644 index 8ad73ce36..000000000 --- a/builtin/providers/http/data_source_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - "net/http/httptest" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -type TestHttpMock struct { - server *httptest.Server -} - -const testDataSourceConfig_basic = ` -data "http" "http_test" { - url = "%s/meta_%d.txt" -} - -output "body" { - value = "${data.http.http_test.body}" -} -` - -func TestDataSource_http200(t *testing.T) { - testHttpMock := setUpMockHttpServer() - - defer testHttpMock.server.Close() - - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDataSourceConfig_basic, testHttpMock.server.URL, 200), - Check: func(s *terraform.State) error { - _, ok := s.RootModule().Resources["data.http.http_test"] - if !ok { - return fmt.Errorf("missing data resource") - } - - outputs := s.RootModule().Outputs - - if outputs["body"].Value != "1.0.0" { - return fmt.Errorf( - `'body' output is %s; want '1.0.0'`, - outputs["body"].Value, - ) - } - - return nil - }, - }, - }, - }) -} - -func TestDataSource_http404(t *testing.T) { - testHttpMock := 
setUpMockHttpServer() - - defer testHttpMock.server.Close() - - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDataSourceConfig_basic, testHttpMock.server.URL, 404), - ExpectError: regexp.MustCompile("HTTP request error. Response code: 404"), - }, - }, - }) -} - -const testDataSourceConfig_withHeaders = ` -data "http" "http_test" { - url = "%s/restricted/meta_%d.txt" - - request_headers = { - "Authorization" = "Zm9vOmJhcg==" - } -} - -output "body" { - value = "${data.http.http_test.body}" -} -` - -func TestDataSource_withHeaders200(t *testing.T) { - testHttpMock := setUpMockHttpServer() - - defer testHttpMock.server.Close() - - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testDataSourceConfig_withHeaders, testHttpMock.server.URL, 200), - Check: func(s *terraform.State) error { - _, ok := s.RootModule().Resources["data.http.http_test"] - if !ok { - return fmt.Errorf("missing data resource") - } - - outputs := s.RootModule().Outputs - - if outputs["body"].Value != "1.0.0" { - return fmt.Errorf( - `'body' output is %s; want '1.0.0'`, - outputs["body"].Value, - ) - } - - return nil - }, - }, - }, - }) -} - -const testDataSourceConfig_error = ` -data "http" "http_test" { - -} -` - -func TestDataSource_compileError(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testDataSourceConfig_error, - ExpectError: regexp.MustCompile("required field is not set"), - }, - }, - }) -} - -func setUpMockHttpServer() *TestHttpMock { - Server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/meta_200.txt" { - w.WriteHeader(http.StatusOK) - w.Write([]byte("1.0.0")) - } else if r.URL.Path == "/restricted/meta_200.txt" { - if 
r.Header.Get("Authorization") == "Zm9vOmJhcg==" { - w.WriteHeader(http.StatusOK) - w.Write([]byte("1.0.0")) - } else { - w.WriteHeader(http.StatusForbidden) - } - } else if r.URL.Path == "/meta_404.txt" { - w.WriteHeader(http.StatusNotFound) - } else { - w.WriteHeader(http.StatusNotFound) - } - - w.Header().Add("Content-Type", "text/plain") - }), - ) - - return &TestHttpMock{ - server: Server, - } -} diff --git a/builtin/providers/http/provider.go b/builtin/providers/http/provider.go deleted file mode 100644 index e11b68346..000000000 --- a/builtin/providers/http/provider.go +++ /dev/null @@ -1,18 +0,0 @@ -package http - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{}, - - DataSourcesMap: map[string]*schema.Resource{ - "http": dataSource(), - }, - - ResourcesMap: map[string]*schema.Resource{}, - } -} diff --git a/builtin/providers/http/provider_test.go b/builtin/providers/http/provider_test.go deleted file mode 100644 index dd21abd45..000000000 --- a/builtin/providers/http/provider_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package http - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testProviders = map[string]terraform.ResourceProvider{ - "http": Provider(), -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/builtin/providers/icinga2/provider.go b/builtin/providers/icinga2/provider.go deleted file mode 100644 index 2f6f853e7..000000000 --- a/builtin/providers/icinga2/provider.go +++ /dev/null @@ -1,108 +0,0 @@ -package icinga2 - -import ( - "fmt" - "net/url" - "os" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/lrsmith/go-icinga2-api/iapi" -) - 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ICINGA2_API_URL", nil), - Description: descriptions["api_url"], - }, - "api_user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ICINGA2_API_USER", nil), - Description: descriptions["api_user"], - }, - "api_password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ICINGA2_API_PASSWORD", nil), - Description: descriptions["api_password"], - }, - "insecure_skip_tls_verify": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: EnvBoolDefaultFunc("ICINGA2_INSECURE_SKIP_TLS_VERIFY", false), - Description: descriptions["insecure_skip_tls_verify"], - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "icinga2_host": resourceIcinga2Host(), - "icinga2_hostgroup": resourceIcinga2Hostgroup(), - "icinga2_checkcommand": resourceIcinga2Checkcommand(), - "icinga2_service": resourceIcinga2Service(), - }, - ConfigureFunc: configureProvider, - } -} - -func configureProvider(d *schema.ResourceData) (interface{}, error) { - - config, _ := iapi.New( - d.Get("api_user").(string), - d.Get("api_password").(string), - d.Get("api_url").(string), - d.Get("insecure_skip_tls_verify").(bool), - ) - - err := validateURL(d.Get("api_url").(string)) - - if err := config.Connect(); err != nil { - return nil, err - } - - return config, err -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "api_url": "The address of the Icinga2 server.\n", - "api_user": "The user to authenticate to the Icinga2 Server as.\n", - "api_password": "The password for authenticating to the Icinga2 server.\n", - "insecure_skip_tls_verify": "Disable TLS verify when connecting to Icinga2 Server\n", - } -} - -func validateURL(urlString 
string) error { - - //ICINGA2_API_URL=https://127.0.0.1:4665/v1 - tokens, err := url.Parse(urlString) - if err != nil { - return err - } - - if tokens.Scheme != "https" { - return fmt.Errorf("Error : Requests are only allowed to use the HTTPS protocol so that traffic remains encrypted.") - } - - if tokens.Path != "/v1" { - return fmt.Errorf("Error : Invalid API version %s specified. Only v1 is currently supported.", tokens.Path) - } - - return nil -} - -// EnvBoolDefaultFunc is a helper function that returns -func EnvBoolDefaultFunc(k string, dv interface{}) schema.SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v == "true" { - return true, nil - } - - return false, nil - } -} diff --git a/builtin/providers/icinga2/provider_test.go b/builtin/providers/icinga2/provider_test.go deleted file mode 100644 index 1f09aa2a5..000000000 --- a/builtin/providers/icinga2/provider_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package icinga2 - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "icinga2": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProviderImpl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - - v := os.Getenv("ICINGA2_API_URL") - if v == "" { - t.Fatal("ICINGA2_API_URL must be set for acceptance tests") - } - - v = os.Getenv("ICINGA2_API_USER") - if v == "" { - t.Fatal("ICINGA2_API_USER must be set for acceptance tests") - } - - v = os.Getenv("ICINGA2_API_PASSWORD") - if v == "" { - t.Fatal("ICINGA2_API_PASSWORD must be set for acceptance 
tests") - } - -} diff --git a/builtin/providers/icinga2/resource_icinga2_checkcommand.go b/builtin/providers/icinga2/resource_icinga2_checkcommand.go deleted file mode 100644 index 31b09badf..000000000 --- a/builtin/providers/icinga2/resource_icinga2_checkcommand.go +++ /dev/null @@ -1,118 +0,0 @@ -package icinga2 - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func resourceIcinga2Checkcommand() *schema.Resource { - - return &schema.Resource{ - Create: resourceIcinga2CheckcommandCreate, - Read: resourceIcinga2CheckcommandRead, - Delete: resourceIcinga2CheckcommandDelete, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Name", - ForceNew: true, - }, - "command": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "templates": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "arguments": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceIcinga2CheckcommandCreate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - name := d.Get("name").(string) - command := d.Get("command").(string) - - arguments := make(map[string]string) - iterator := d.Get("arguments").(map[string]interface{}) - - for key, value := range iterator { - arguments[key] = value.(string) - } - - checkcommands, err := client.CreateCheckcommand(name, command, arguments) - if err != nil { - return err - } - - found := false - for _, checkcommand := range checkcommands { - if checkcommand.Name == name { - d.SetId(name) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to create Checkcommand %s : %s", name, err) - } - - return nil -} - -func resourceIcinga2CheckcommandRead(d *schema.ResourceData, meta interface{}) error { - - client := 
meta.(*iapi.Server) - - name := d.Get("name").(string) - - checkcommands, err := client.GetCheckcommand(name) - if err != nil { - return err - } - - found := false - for _, checkcommand := range checkcommands { - if checkcommand.Name == name { - d.SetId(name) - d.Set("command", checkcommand.Attrs.Command[0]) - d.Set("Templates", checkcommand.Attrs.Templates) - d.Set("arguments", checkcommand.Attrs.Arguments) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Read Checkcommand %s : %s", name, err) - } - - return nil -} - -func resourceIcinga2CheckcommandDelete(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - name := d.Get("name").(string) - - err := client.DeleteCheckcommand(name) - if err != nil { - return fmt.Errorf("Failed to Delete Checkcommand %s : %s", name, err) - } - - return nil -} diff --git a/builtin/providers/icinga2/resource_icinga2_checkcommand_test.go b/builtin/providers/icinga2/resource_icinga2_checkcommand_test.go deleted file mode 100644 index f3c5dca6a..000000000 --- a/builtin/providers/icinga2/resource_icinga2_checkcommand_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package icinga2 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func TestAccCreateCheckcommand(t *testing.T) { - - var testAccCreateCheckcommand = fmt.Sprintf(` - resource "icinga2_checkcommand" "checkcommand" { - name = "terraform-test-checkcommand-1" - templates = [ "plugin-check-command" ] - command = "/usr/local/bin/check_command" - arguments = { - "-I" = "$IARG$" - "-J" = "$JARG$" } - }`) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCreateCheckcommand, - Check: resource.ComposeTestCheckFunc( - testAccCheckCheckcommandExists("icinga2_checkcommand.checkcommand"), - 
testAccCheckResourceState("icinga2_checkcommand.checkcommand", "name", "terraform-test-checkcommand-1"), - testAccCheckResourceState("icinga2_checkcommand.checkcommand", "command", "/usr/local/bin/check_command"), - testAccCheckResourceState("icinga2_checkcommand.checkcommand", "arguments.%", "2"), - testAccCheckResourceState("icinga2_checkcommand.checkcommand", "arguments.-I", "$IARG$"), - testAccCheckResourceState("icinga2_checkcommand.checkcommand", "arguments.-J", "$JARG$"), - ), - }, - }, - }) -} - -func testAccCheckCheckcommandExists(rn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("Checkcommand resource not found: %s", rn) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Checkcommand resource id not set") - } - - client := testAccProvider.Meta().(*iapi.Server) - _, err := client.GetCheckcommand(resource.Primary.ID) - if err != nil { - return fmt.Errorf("Error getting getting Checkcommand: %s", err) - } - - return nil - } -} diff --git a/builtin/providers/icinga2/resource_icinga2_host.go b/builtin/providers/icinga2/resource_icinga2_host.go deleted file mode 100644 index 454537e8a..000000000 --- a/builtin/providers/icinga2/resource_icinga2_host.go +++ /dev/null @@ -1,121 +0,0 @@ -package icinga2 - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func resourceIcinga2Host() *schema.Resource { - - return &schema.Resource{ - Create: resourceIcinga2HostCreate, - Read: resourceIcinga2HostRead, - Delete: resourceIcinga2HostDelete, - Schema: map[string]*schema.Schema{ - "hostname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Hostname", - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "check_command": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - 
}, - "vars": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceIcinga2HostCreate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - hostname := d.Get("hostname").(string) - address := d.Get("address").(string) - checkCommand := d.Get("check_command").(string) - - vars := make(map[string]string) - - // Normalize from map[string]interface{} to map[string]string - iterator := d.Get("vars").(map[string]interface{}) - for key, value := range iterator { - vars[key] = value.(string) - } - - // Call CreateHost with normalized data - hosts, err := client.CreateHost(hostname, address, checkCommand, vars) - if err != nil { - return err - } - - found := false - for _, host := range hosts { - if host.Name == hostname { - d.SetId(hostname) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Create Host %s : %s", hostname, err) - } - - return nil -} - -func resourceIcinga2HostRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - hostname := d.Get("hostname").(string) - - hosts, err := client.GetHost(hostname) - if err != nil { - return err - } - - found := false - for _, host := range hosts { - if host.Name == hostname { - d.SetId(hostname) - d.Set("hostname", host.Name) - d.Set("address", host.Attrs.Address) - d.Set("check_command", host.Attrs.CheckCommand) - d.Set("vars", host.Attrs.Vars) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Read Host %s : %s", hostname, err) - } - - return nil -} - -func resourceIcinga2HostDelete(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - hostname := d.Get("hostname").(string) - - err := client.DeleteHost(hostname) - if err != nil { - return fmt.Errorf("Failed to Delete Host %s : %s", hostname, err) - } - - return nil - -} diff --git a/builtin/providers/icinga2/resource_icinga2_host_test.go 
b/builtin/providers/icinga2/resource_icinga2_host_test.go deleted file mode 100644 index c33a2e9e1..000000000 --- a/builtin/providers/icinga2/resource_icinga2_host_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package icinga2 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func TestAccCreateBasicHost(t *testing.T) { - - var testAccCreateBasicHost = fmt.Sprintf(` - resource "icinga2_host" "tf-1" { - hostname = "terraform-host-1" - address = "10.10.10.1" - check_command = "hostalive" - }`) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCreateBasicHost, - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists("icinga2_host.tf-1"), - testAccCheckResourceState("icinga2_host.tf-1", "hostname", "terraform-host-1"), - testAccCheckResourceState("icinga2_host.tf-1", "address", "10.10.10.1"), - testAccCheckResourceState("icinga2_host.tf-1", "check_command", "hostalive"), - ), - }, - }, - }) -} - -func TestAccCreateVariableHost(t *testing.T) { - - var testAccCreateVariableHost = fmt.Sprintf(` - resource "icinga2_host" "tf-3" { - hostname = "terraform-host-3" - address = "10.10.10.3" - check_command = "hostalive" - vars { - os = "linux" - osver = "1" - allowance = "none" } - }`) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCreateVariableHost, - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists("icinga2_host.tf-3"), - testAccCheckResourceState("icinga2_host.tf-3", "hostname", "terraform-host-3"), - testAccCheckResourceState("icinga2_host.tf-3", "address", "10.10.10.3"), - testAccCheckResourceState("icinga2_host.tf-3", "check_command", "hostalive"), - 
testAccCheckResourceState("icinga2_host.tf-3", "vars.%", "3"), - testAccCheckResourceState("icinga2_host.tf-3", "vars.allowance", "none"), - testAccCheckResourceState("icinga2_host.tf-3", "vars.os", "linux"), - testAccCheckResourceState("icinga2_host.tf-3", "vars.osver", "1"), - ), - }, - }, - }) -} - -func testAccCheckHostExists(rn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("Host resource not found: %s", rn) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("resource id not set") - } - - client := testAccProvider.Meta().(*iapi.Server) - _, err := client.GetHost(resource.Primary.ID) - if err != nil { - return fmt.Errorf("error getting getting host: %s", err) - } - - return nil - } - -} diff --git a/builtin/providers/icinga2/resource_icinga2_hostgroup.go b/builtin/providers/icinga2/resource_icinga2_hostgroup.go deleted file mode 100644 index 1be97c395..000000000 --- a/builtin/providers/icinga2/resource_icinga2_hostgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -package icinga2 - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func resourceIcinga2Hostgroup() *schema.Resource { - - return &schema.Resource{ - Create: resourceIcinga2HostgroupCreate, - Read: resourceIcinga2HostgroupRead, - Delete: resourceIcinga2HostgroupDelete, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "name", - ForceNew: true, - }, - "display_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Display name of Host Group", - ForceNew: true, - }, - }, - } -} - -func resourceIcinga2HostgroupCreate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - name := d.Get("name").(string) - displayName := d.Get("display_name").(string) - - hostgroups, err := client.CreateHostgroup(name, 
displayName) - if err != nil { - return err - } - - found := false - for _, hostgroup := range hostgroups { - if hostgroup.Name == name { - d.SetId(name) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Create Hostgroup %s : %s", name, err) - } - - return nil -} - -func resourceIcinga2HostgroupRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - name := d.Get("name").(string) - - hostgroups, err := client.GetHostgroup(name) - if err != nil { - return err - } - - found := false - for _, hostgroup := range hostgroups { - if hostgroup.Name == name { - d.SetId(name) - d.Set("display_name", hostgroup.Attrs.DisplayName) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Read Hostgroup %s : %s", name, err) - } - - return nil -} - -func resourceIcinga2HostgroupDelete(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - name := d.Get("name").(string) - - err := client.DeleteHostgroup(name) - if err != nil { - return fmt.Errorf("Failed to Delete Hostgroup %s : %s", name, err) - } - - return nil - -} diff --git a/builtin/providers/icinga2/resource_icinga2_hostgroup_test.go b/builtin/providers/icinga2/resource_icinga2_hostgroup_test.go deleted file mode 100644 index a06697878..000000000 --- a/builtin/providers/icinga2/resource_icinga2_hostgroup_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package icinga2 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func TestAccCreateBasicHostGroup(t *testing.T) { - - var testAccCreateBasicHostGroup = fmt.Sprintf(` - resource "icinga2_hostgroup" "tf-hg-1" { - name = "terraform-hostgroup-1" - display_name = "Terraform Test HostGroup" - }`) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: testAccCreateBasicHostGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckHostgroupExists("icinga2_hostgroup.tf-hg-1"), - testAccCheckResourceState("icinga2_hostgroup.tf-hg-1", "name", "terraform-hostgroup-1"), - testAccCheckResourceState("icinga2_hostgroup.tf-hg-1", "display_name", "Terraform Test HostGroup"), - ), - }, - }, - }) -} - -func testAccCheckHostgroupExists(rn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("Hostgroup resource not found: %s", rn) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("Hostgroup resource id not set") - } - - client := testAccProvider.Meta().(*iapi.Server) - _, err := client.GetHostgroup(resource.Primary.ID) - if err != nil { - return fmt.Errorf("Error getting getting hostgroup: %s", err) - } - - return nil - } -} diff --git a/builtin/providers/icinga2/resource_icinga2_service.go b/builtin/providers/icinga2/resource_icinga2_service.go deleted file mode 100644 index d320c0b3e..000000000 --- a/builtin/providers/icinga2/resource_icinga2_service.go +++ /dev/null @@ -1,111 +0,0 @@ -package icinga2 - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func resourceIcinga2Service() *schema.Resource { - - return &schema.Resource{ - Create: resourceIcinga2ServiceCreate, - Read: resourceIcinga2ServiceRead, - Delete: resourceIcinga2ServiceDelete, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "ServiceName", - ForceNew: true, - }, - "hostname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Hostname", - ForceNew: true, - }, - "check_command": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "CheckCommand", - ForceNew: true, - }, - }, - } -} - -func resourceIcinga2ServiceCreate(d *schema.ResourceData, meta 
interface{}) error { - - client := meta.(*iapi.Server) - - hostname := d.Get("hostname").(string) - name := d.Get("name").(string) - checkcommand := d.Get("check_command").(string) - - services, err := client.CreateService(name, hostname, checkcommand) - if err != nil { - return err - } - - found := false - for _, service := range services { - if service.Name == hostname+"!"+name { - d.SetId(hostname + "!" + name) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Create Service %s!%s : %s", hostname, name, err) - } - - return nil - -} - -func resourceIcinga2ServiceRead(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - hostname := d.Get("hostname").(string) - name := d.Get("name").(string) - - services, err := client.GetService(name, hostname) - if err != nil { - return err - } - - found := false - for _, service := range services { - if service.Name == hostname+"!"+name { - d.SetId(hostname + "!" + name) - d.Set("hostname", hostname) - d.Set("check_command", service.Attrs.CheckCommand) - found = true - } - } - - if !found { - return fmt.Errorf("Failed to Read Service %s!%s : %s", hostname, name, err) - } - - return nil - -} - -func resourceIcinga2ServiceDelete(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*iapi.Server) - - hostname := d.Get("hostname").(string) - name := d.Get("name").(string) - - err := client.DeleteService(name, hostname) - if err != nil { - return fmt.Errorf("Failed to Delete Service %s!%s : %s", hostname, name, err) - } - - return nil -} diff --git a/builtin/providers/icinga2/resource_icinga2_service_test.go b/builtin/providers/icinga2/resource_icinga2_service_test.go deleted file mode 100644 index a9c9a63f5..000000000 --- a/builtin/providers/icinga2/resource_icinga2_service_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package icinga2 - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" - "github.com/lrsmith/go-icinga2-api/iapi" -) - -func TestAccCreateService(t *testing.T) { - - var testAccCreateService = fmt.Sprintf(` - resource "icinga2_service" "tf-service-1" { - hostname = "docker-icinga2" - name = "ssh3" - check_command = "ssh" - }`) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCreateService, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists("icinga2_service.tf-service-1"), - testAccCheckResourceState("icinga2_service.tf-service-1", "hostname", "docker-icinga2"), - testAccCheckResourceState("icinga2_service.tf-service-1", "name", "ssh3"), - testAccCheckResourceState("icinga2_service.tf-service-1", "check_command", "ssh"), - ), - }, - }, - }) -} - -func testAccCheckServiceExists(rn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - resource, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("Service resource not found: %s", rn) - } - - if resource.Primary.ID == "" { - return fmt.Errorf("resource id not set") - } - - client := testAccProvider.Meta().(*iapi.Server) - tokens := strings.Split(resource.Primary.ID, "!") - - _, err := client.GetService(tokens[1], tokens[0]) - if err != nil { - return fmt.Errorf("error getting getting Service: %s", err) - } - - return nil - } - -} diff --git a/builtin/providers/icinga2/utilities_test.go b/builtin/providers/icinga2/utilities_test.go deleted file mode 100644 index 7d87102b2..000000000 --- a/builtin/providers/icinga2/utilities_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package icinga2 - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccCheckResourceState(resourceName, key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - p := rs.Primary - - if p.Attributes[key] != value { - return fmt.Errorf( - "%s != %s (actual: %s)", key, value, p.Attributes[key]) - } - - return nil - } - -} diff --git a/builtin/providers/ignition/provider.go b/builtin/providers/ignition/provider.go deleted file mode 100644 index 81462e361..000000000 --- a/builtin/providers/ignition/provider.go +++ /dev/null @@ -1,239 +0,0 @@ -package ignition - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "net/url" - "sync" - - "github.com/coreos/go-systemd/unit" - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// globalCache keeps the instances of the internal types of ignition generated -// by the different data resources with the goal to be reused by the -// ignition_config data resource. The key of the maps are a hash of the types -// calculated on the type serialized to JSON. 
-var globalCache = &cache{ - disks: make(map[string]*types.Disk, 0), - arrays: make(map[string]*types.Raid, 0), - filesystems: make(map[string]*types.Filesystem, 0), - files: make(map[string]*types.File, 0), - systemdUnits: make(map[string]*types.SystemdUnit, 0), - networkdUnits: make(map[string]*types.NetworkdUnit, 0), - users: make(map[string]*types.User, 0), - groups: make(map[string]*types.Group, 0), -} - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ - "ignition_config": resourceConfig(), - "ignition_disk": resourceDisk(), - "ignition_raid": resourceRaid(), - "ignition_filesystem": resourceFilesystem(), - "ignition_file": resourceFile(), - "ignition_systemd_unit": resourceSystemdUnit(), - "ignition_networkd_unit": resourceNetworkdUnit(), - "ignition_user": resourceUser(), - "ignition_group": resourceGroup(), - }, - ResourcesMap: map[string]*schema.Resource{ - "ignition_config": schema.DataSourceResourceShim( - "ignition_config", - resourceConfig(), - ), - "ignition_disk": schema.DataSourceResourceShim( - "ignition_disk", - resourceDisk(), - ), - "ignition_raid": schema.DataSourceResourceShim( - "ignition_raid", - resourceRaid(), - ), - "ignition_filesystem": schema.DataSourceResourceShim( - "ignition_filesystem", - resourceFilesystem(), - ), - "ignition_file": schema.DataSourceResourceShim( - "ignition_file", - resourceFile(), - ), - "ignition_systemd_unit": schema.DataSourceResourceShim( - "ignition_systemd_unit", - resourceSystemdUnit(), - ), - "ignition_networkd_unit": schema.DataSourceResourceShim( - "ignition_networkd_unit", - resourceNetworkdUnit(), - ), - "ignition_user": schema.DataSourceResourceShim( - "ignition_user", - resourceUser(), - ), - "ignition_group": schema.DataSourceResourceShim( - "ignition_group", - resourceGroup(), - ), - }, - } -} - -type cache struct { - disks map[string]*types.Disk - arrays map[string]*types.Raid - filesystems map[string]*types.Filesystem - 
files map[string]*types.File - systemdUnits map[string]*types.SystemdUnit - networkdUnits map[string]*types.NetworkdUnit - users map[string]*types.User - groups map[string]*types.Group - - sync.Mutex -} - -func (c *cache) addDisk(g *types.Disk) string { - c.Lock() - defer c.Unlock() - - id := id(g) - c.disks[id] = g - - return id -} - -func (c *cache) addRaid(r *types.Raid) string { - c.Lock() - defer c.Unlock() - - id := id(r) - c.arrays[id] = r - - return id -} - -func (c *cache) addFilesystem(f *types.Filesystem) string { - c.Lock() - defer c.Unlock() - - id := id(f) - c.filesystems[id] = f - - return id -} - -func (c *cache) addFile(f *types.File) string { - c.Lock() - defer c.Unlock() - - id := id(f) - c.files[id] = f - - return id -} - -func (c *cache) addSystemdUnit(u *types.SystemdUnit) string { - c.Lock() - defer c.Unlock() - - id := id(u) - c.systemdUnits[id] = u - - return id -} - -func (c *cache) addNetworkdUnit(u *types.NetworkdUnit) string { - c.Lock() - defer c.Unlock() - - id := id(u) - c.networkdUnits[id] = u - - return id -} - -func (c *cache) addUser(u *types.User) string { - c.Lock() - defer c.Unlock() - - id := id(u) - c.users[id] = u - - return id -} - -func (c *cache) addGroup(g *types.Group) string { - c.Lock() - defer c.Unlock() - - id := id(g) - c.groups[id] = g - - return id -} - -func id(input interface{}) string { - b, _ := json.Marshal(input) - return hash(string(b)) -} - -func hash(s string) string { - sha := sha256.Sum256([]byte(s)) - return hex.EncodeToString(sha[:]) -} - -func castSliceInterface(i []interface{}) []string { - var o []string - for _, value := range i { - o = append(o, value.(string)) - } - - return o -} - -func getUInt(d *schema.ResourceData, key string) *uint { - var uid *uint - if value, ok := d.GetOk(key); ok { - u := uint(value.(int)) - uid = &u - } - - return uid -} - -var errEmptyUnit = fmt.Errorf("invalid or empty unit content") - -func validateUnitContent(content string) error { - c := 
bytes.NewBufferString(content) - unit, err := unit.Deserialize(c) - if err != nil { - return fmt.Errorf("invalid unit content: %s", err) - } - - if len(unit) == 0 { - return errEmptyUnit - } - - return nil -} - -func buildURL(raw string) (types.Url, error) { - u, err := url.Parse(raw) - if err != nil { - return types.Url{}, err - } - - return types.Url(*u), nil -} - -func buildHash(raw string) (types.Hash, error) { - h := types.Hash{} - err := h.UnmarshalJSON([]byte(fmt.Sprintf("%q", raw))) - - return h, err -} diff --git a/builtin/providers/ignition/provider_test.go b/builtin/providers/ignition/provider_test.go deleted file mode 100644 index 6ee665bb5..000000000 --- a/builtin/providers/ignition/provider_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package ignition - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testProviders = map[string]terraform.ResourceProvider{ - "ignition": Provider(), -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestValidateUnit(t *testing.T) { - if err := validateUnitContent(""); err == nil { - t.Fatalf("error not found, expected error") - } - - if err := validateUnitContent("[foo]qux"); err == nil { - t.Fatalf("error not found, expected error") - } - - if err := validateUnitContent("[foo]\nqux=foo\nfoo"); err == nil { - t.Fatalf("error not found, expected error") - } -} diff --git a/builtin/providers/ignition/resource_ignition_config.go b/builtin/providers/ignition/resource_ignition_config.go deleted file mode 100644 index c75e50afd..000000000 --- a/builtin/providers/ignition/resource_ignition_config.go +++ /dev/null @@ -1,308 +0,0 @@ -package ignition - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/coreos/ignition/config/types" -) - -var configReferenceResource = &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "verification": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - }, -} - -func resourceConfig() *schema.Resource { - return &schema.Resource{ - Exists: resourceIgnitionFileExists, - Read: resourceIgnitionFileRead, - Schema: map[string]*schema.Schema{ - "disks": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "arrays": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "filesystems": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "files": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "systemd": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "networkd": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "users": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "groups": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "replace": &schema.Schema{ - Type: schema.TypeList, - ForceNew: true, - Optional: true, - MaxItems: 1, - Elem: configReferenceResource, - }, - "append": &schema.Schema{ - Type: schema.TypeList, - ForceNew: true, - Optional: true, - Elem: configReferenceResource, - }, - "rendered": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceIgnitionFileRead(d *schema.ResourceData, meta interface{}) error { - rendered, err := renderConfig(d, globalCache) - if err != nil { - return err - } - - if err := d.Set("rendered", rendered); err != nil 
{ - return err - } - - d.SetId(hash(rendered)) - return nil -} - -func resourceIgnitionFileExists(d *schema.ResourceData, meta interface{}) (bool, error) { - rendered, err := renderConfig(d, globalCache) - if err != nil { - return false, err - } - - return hash(rendered) == d.Id(), nil -} - -func renderConfig(d *schema.ResourceData, c *cache) (string, error) { - i, err := buildConfig(d, c) - if err != nil { - return "", err - } - - bytes, err := json.MarshalIndent(i, " ", " ") - - if err != nil { - return "", err - } - - return string(bytes), nil -} - -func buildConfig(d *schema.ResourceData, c *cache) (*types.Config, error) { - var err error - config := &types.Config{} - config.Ignition, err = buildIgnition(d) - if err != nil { - return nil, err - } - - config.Storage, err = buildStorage(d, c) - if err != nil { - return nil, err - } - - config.Systemd, err = buildSystemd(d, c) - if err != nil { - return nil, err - } - - config.Networkd, err = buildNetworkd(d, c) - if err != nil { - return nil, err - } - - config.Passwd, err = buildPasswd(d, c) - if err != nil { - return nil, err - } - - return config, nil -} - -func buildIgnition(d *schema.ResourceData) (types.Ignition, error) { - var err error - - i := types.Ignition{} - i.Version.UnmarshalJSON([]byte(`"2.0.0"`)) - - rr := d.Get("replace.0").(map[string]interface{}) - if len(rr) != 0 { - i.Config.Replace, err = buildConfigReference(rr) - if err != nil { - return i, err - } - } - - ar := d.Get("append").([]interface{}) - if len(ar) != 0 { - for _, rr := range ar { - r, err := buildConfigReference(rr.(map[string]interface{})) - if err != nil { - return i, err - } - - i.Config.Append = append(i.Config.Append, *r) - } - } - - return i, nil -} - -func buildConfigReference(raw map[string]interface{}) (*types.ConfigReference, error) { - r := &types.ConfigReference{} - - src, err := buildURL(raw["source"].(string)) - if err != nil { - return nil, err - } - - r.Source = src - - hash, err := 
buildHash(raw["verification"].(string)) - if err != nil { - return nil, err - } - - r.Verification.Hash = &hash - - return r, nil -} - -func buildStorage(d *schema.ResourceData, c *cache) (types.Storage, error) { - storage := types.Storage{} - - for _, id := range d.Get("disks").([]interface{}) { - d, ok := c.disks[id.(string)] - if !ok { - return storage, fmt.Errorf("invalid disk %q, unknown disk id", id) - } - - storage.Disks = append(storage.Disks, *d) - } - - for _, id := range d.Get("arrays").([]interface{}) { - a, ok := c.arrays[id.(string)] - if !ok { - return storage, fmt.Errorf("invalid raid %q, unknown raid id", id) - } - - storage.Arrays = append(storage.Arrays, *a) - } - - for _, id := range d.Get("filesystems").([]interface{}) { - f, ok := c.filesystems[id.(string)] - if !ok { - return storage, fmt.Errorf("invalid filesystem %q, unknown filesystem id", id) - } - - storage.Filesystems = append(storage.Filesystems, *f) - } - - for _, id := range d.Get("files").([]interface{}) { - f, ok := c.files[id.(string)] - if !ok { - return storage, fmt.Errorf("invalid file %q, unknown file id", id) - } - - storage.Files = append(storage.Files, *f) - } - - return storage, nil - -} - -func buildSystemd(d *schema.ResourceData, c *cache) (types.Systemd, error) { - systemd := types.Systemd{} - - for _, id := range d.Get("systemd").([]interface{}) { - u, ok := c.systemdUnits[id.(string)] - if !ok { - return systemd, fmt.Errorf("invalid systemd unit %q, unknown systemd unit id", id) - } - - systemd.Units = append(systemd.Units, *u) - } - - return systemd, nil - -} - -func buildNetworkd(d *schema.ResourceData, c *cache) (types.Networkd, error) { - networkd := types.Networkd{} - - for _, id := range d.Get("networkd").([]interface{}) { - u, ok := c.networkdUnits[id.(string)] - if !ok { - return networkd, fmt.Errorf("invalid networkd unit %q, unknown networkd unit id", id) - } - - networkd.Units = append(networkd.Units, *u) - } - - return networkd, nil -} - -func 
buildPasswd(d *schema.ResourceData, c *cache) (types.Passwd, error) { - passwd := types.Passwd{} - - for _, id := range d.Get("users").([]interface{}) { - u, ok := c.users[id.(string)] - if !ok { - return passwd, fmt.Errorf("invalid user %q, unknown user id", id) - } - - passwd.Users = append(passwd.Users, *u) - } - - for _, id := range d.Get("groups").([]interface{}) { - g, ok := c.groups[id.(string)] - if !ok { - return passwd, fmt.Errorf("invalid group %q, unknown group id", id) - } - - passwd.Groups = append(passwd.Groups, *g) - } - - return passwd, nil - -} diff --git a/builtin/providers/ignition/resource_ignition_config_test.go b/builtin/providers/ignition/resource_ignition_config_test.go deleted file mode 100644 index 22ea3daa0..000000000 --- a/builtin/providers/ignition/resource_ignition_config_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package ignition - -import ( - "encoding/json" - "fmt" - "regexp" - "testing" - - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestIngnitionFileReplace(t *testing.T) { - testIgnition(t, ` - data "ignition_config" "test" { - replace { - source = "foo" - verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - } - } - `, func(c *types.Config) error { - r := c.Ignition.Config.Replace - if r == nil { - return fmt.Errorf("unable to find replace config") - } - - if r.Source.String() != "foo" { - return fmt.Errorf("config.replace.source, found %q", r.Source) - } - - if r.Verification.Hash.Sum != "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" { - return fmt.Errorf("config.replace.verification, found %q", r.Verification.Hash) - } - - return nil - }) -} - -func TestIngnitionFileAppend(t *testing.T) { - testIgnition(t, ` - data "ignition_config" "test" { - 
append { - source = "foo" - verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - } - - append { - source = "foo" - verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - } - } - `, func(c *types.Config) error { - a := c.Ignition.Config.Append - if len(a) != 2 { - return fmt.Errorf("unable to find append config, expected 2") - } - - if a[0].Source.String() != "foo" { - return fmt.Errorf("config.replace.source, found %q", a[0].Source) - } - - if a[0].Verification.Hash.Sum != "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" { - return fmt.Errorf("config.replace.verification, found %q", a[0].Verification.Hash) - } - - return nil - }) -} - -func testIgnitionError(t *testing.T, input string, expectedErr *regexp.Regexp) { - resource.Test(t, resource.TestCase{ - IsUnitTest: true, - Providers: testProviders, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testTemplate, input), - ExpectError: expectedErr, - }, - }, - }) -} - -func testIgnition(t *testing.T, input string, assert func(*types.Config) error) { - check := func(s *terraform.State) error { - got := s.RootModule().Outputs["rendered"].Value.(string) - - c := &types.Config{} - err := json.Unmarshal([]byte(got), c) - if err != nil { - return err - } - - return assert(c) - } - - resource.Test(t, resource.TestCase{ - IsUnitTest: true, - Providers: testProviders, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf(testTemplate, input), - Check: check, - }, - }, - }) -} - -var testTemplate = ` -%s - -output "rendered" { - value = "${data.ignition_config.test.rendered}" -} - -` diff --git a/builtin/providers/ignition/resource_ignition_disk.go b/builtin/providers/ignition/resource_ignition_disk.go deleted file mode 100644 index 
8ef6c7e05..000000000 --- a/builtin/providers/ignition/resource_ignition_disk.go +++ /dev/null @@ -1,99 +0,0 @@ -package ignition - -import ( - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDisk() *schema.Resource { - return &schema.Resource{ - Exists: resourceDiskExists, - Read: resourceDiskRead, - Schema: map[string]*schema.Schema{ - "device": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "wipe_table": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "partition": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "label": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "number": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "start": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "type_guid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func resourceDiskRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildDisk(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceDiskExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildDisk(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildDisk(d *schema.ResourceData, c *cache) (string, error) { - var partitions []types.Partition - for _, raw := range d.Get("partition").([]interface{}) { - v := raw.(map[string]interface{}) - - partitions = append(partitions, types.Partition{ - Label: types.PartitionLabel(v["label"].(string)), - Number: v["number"].(int), - Size: 
types.PartitionDimension(v["size"].(int)), - Start: types.PartitionDimension(v["start"].(int)), - TypeGUID: types.PartitionTypeGUID(v["type_guid"].(string)), - }) - } - - return c.addDisk(&types.Disk{ - Device: types.Path(d.Get("device").(string)), - WipeTable: d.Get("wipe_table").(bool), - Partitions: partitions, - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_disk_test.go b/builtin/providers/ignition/resource_ignition_disk_test.go deleted file mode 100644 index 847fca225..000000000 --- a/builtin/providers/ignition/resource_ignition_disk_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionDisk(t *testing.T) { - testIgnition(t, ` - data "ignition_disk" "foo" { - device = "/foo" - partition { - label = "qux" - size = 42 - start = 2048 - type_guid = "01234567-89AB-CDEF-EDCB-A98765432101" - } - } - - data "ignition_config" "test" { - disks = [ - "${data.ignition_disk.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Storage.Disks) != 1 { - return fmt.Errorf("disks, found %d", len(c.Storage.Disks)) - } - - d := c.Storage.Disks[0] - if d.Device != "/foo" { - return fmt.Errorf("name, found %q", d.Device) - } - - if len(d.Partitions) != 1 { - return fmt.Errorf("parition, found %d", len(d.Partitions)) - } - - p := d.Partitions[0] - if p.Label != "qux" { - return fmt.Errorf("parition.0.label, found %q", p.Label) - } - - if p.Size != 42 { - return fmt.Errorf("parition.0.size, found %q", p.Size) - } - - if p.Start != 2048 { - return fmt.Errorf("parition.0.start, found %q", p.Start) - } - - if p.TypeGUID != "01234567-89AB-CDEF-EDCB-A98765432101" { - return fmt.Errorf("parition.0.type_guid, found %q", p.TypeGUID) - } - - return nil - }) -} - -func TestIngnitionDiskResource(t *testing.T) { - testIgnition(t, ` - resource "ignition_disk" "foo" { - device = "/foo" - partition { - label = "qux" - } - } - - data "ignition_config" "test" { - 
disks = [ - "${ignition_disk.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Storage.Disks) != 1 { - return fmt.Errorf("disks, found %d", len(c.Storage.Disks)) - } - - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_file.go b/builtin/providers/ignition/resource_ignition_file.go deleted file mode 100644 index 0f73ea6ed..000000000 --- a/builtin/providers/ignition/resource_ignition_file.go +++ /dev/null @@ -1,178 +0,0 @@ -package ignition - -import ( - "encoding/base64" - "fmt" - - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFile() *schema.Resource { - return &schema.Resource{ - Exists: resourceFileExists, - Read: resourceFileRead, - Schema: map[string]*schema.Schema{ - "filesystem": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "content": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mime": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "text/plain", - }, - - "content": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - "source": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "compression": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "verification": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "mode": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "uid": &schema.Schema{ - Type: 
schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "gid": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFileRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildFile(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildFile(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildFile(d *schema.ResourceData, c *cache) (string, error) { - _, hasContent := d.GetOk("content") - _, hasSource := d.GetOk("source") - if hasContent && hasSource { - return "", fmt.Errorf("content and source options are incompatible") - } - - if !hasContent && !hasSource { - return "", fmt.Errorf("content or source options must be present") - } - - var compression types.Compression - var source types.Url - var hash *types.Hash - var err error - - if hasContent { - source, err = encodeDataURL( - d.Get("content.0.mime").(string), - d.Get("content.0.content").(string), - ) - - if err != nil { - return "", err - } - } - - if hasSource { - source, err = buildURL(d.Get("source.0.source").(string)) - if err != nil { - return "", err - } - - compression = types.Compression(d.Get("source.0.compression").(string)) - h, err := buildHash(d.Get("source.0.verification").(string)) - if err != nil { - return "", err - } - - hash = &h - } - - return c.addFile(&types.File{ - Filesystem: d.Get("filesystem").(string), - Path: types.Path(d.Get("path").(string)), - Contents: types.FileContents{ - Compression: compression, - Source: source, - Verification: types.Verification{ - Hash: hash, - }, - }, - User: types.FileUser{ - Id: d.Get("uid").(int), - }, - Group: types.FileGroup{ - Id: d.Get("gid").(int), - }, - Mode: types.FileMode(d.Get("mode").(int)), - }), nil -} - -func encodeDataURL(mime, content string) (types.Url, error) { - base64 := 
base64.StdEncoding.EncodeToString([]byte(content)) - return buildURL( - fmt.Sprintf("data:%s;charset=utf-8;base64,%s", mime, base64), - ) -} diff --git a/builtin/providers/ignition/resource_ignition_file_test.go b/builtin/providers/ignition/resource_ignition_file_test.go deleted file mode 100644 index 811c99e65..000000000 --- a/builtin/providers/ignition/resource_ignition_file_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionFile(t *testing.T) { - testIgnition(t, ` - data "ignition_file" "foo" { - filesystem = "foo" - path = "/foo" - content { - content = "foo" - } - mode = 420 - uid = 42 - gid = 84 - } - - data "ignition_file" "qux" { - filesystem = "qux" - path = "/qux" - source { - source = "qux" - compression = "gzip" - verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - } - } - - data "ignition_config" "test" { - files = [ - "${data.ignition_file.foo.id}", - "${data.ignition_file.qux.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Storage.Files) != 2 { - return fmt.Errorf("arrays, found %d", len(c.Storage.Arrays)) - } - - f := c.Storage.Files[0] - if f.Filesystem != "foo" { - return fmt.Errorf("filesystem, found %q", f.Filesystem) - } - - if f.Path != "/foo" { - return fmt.Errorf("path, found %q", f.Path) - } - - if f.Contents.Source.String() != "data:text/plain;charset=utf-8;base64,Zm9v" { - return fmt.Errorf("contents.source, found %q", f.Contents.Source) - } - - if f.Mode != types.FileMode(420) { - return fmt.Errorf("mode, found %q", f.Mode) - } - - if f.User.Id != 42 { - return fmt.Errorf("uid, found %q", f.User.Id) - } - - if f.Group.Id != 84 { - return fmt.Errorf("gid, found %q", f.Group.Id) - } - - f = c.Storage.Files[1] - if f.Filesystem != "qux" { - return fmt.Errorf("filesystem, found %q", f.Filesystem) - } - - if f.Path != 
"/qux" { - return fmt.Errorf("path, found %q", f.Path) - } - - if f.Contents.Source.String() != "qux" { - return fmt.Errorf("contents.source, found %q", f.Contents.Source) - } - - if f.Contents.Compression != "gzip" { - return fmt.Errorf("contents.compression, found %q", f.Contents.Compression) - } - - if f.Contents.Verification.Hash.Sum != "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" { - return fmt.Errorf("config.replace.verification, found %q", f.Contents.Verification.Hash) - } - - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_filesystem.go b/builtin/providers/ignition/resource_ignition_filesystem.go deleted file mode 100644 index ce858e80c..000000000 --- a/builtin/providers/ignition/resource_ignition_filesystem.go +++ /dev/null @@ -1,122 +0,0 @@ -package ignition - -import ( - "fmt" - - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFilesystem() *schema.Resource { - return &schema.Resource{ - Exists: resourceFilesystemExists, - Read: resourceFilesystemRead, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "mount": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "create": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "force": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "options": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "path": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFilesystemRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildFilesystem(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceFilesystemExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildFilesystem(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildFilesystem(d *schema.ResourceData, c *cache) (string, error) { - var mount *types.FilesystemMount - if _, ok := d.GetOk("mount"); ok { - mount = &types.FilesystemMount{ - Device: types.Path(d.Get("mount.0.device").(string)), - Format: types.FilesystemFormat(d.Get("mount.0.format").(string)), - } - - create, hasCreate := d.GetOk("mount.0.create") - force, hasForce := d.GetOk("mount.0.force") - options, hasOptions := d.GetOk("mount.0.options") - if hasCreate || hasOptions || hasForce { - mount.Create = &types.FilesystemCreate{ - Force: force.(bool), - Options: castSliceInterface(options.([]interface{})), - } - } - - if !create.(bool) && (hasForce || hasOptions) { - return "", fmt.Errorf("create should be true when force or options is used") - } - } - - var path *types.Path - if p, ok := d.GetOk("path"); ok { - tp := types.Path(p.(string)) - path = &tp - } - - if mount != nil && path != nil { - return "", fmt.Errorf("mount and path are mutually exclusive") - } - - return c.addFilesystem(&types.Filesystem{ - Name: d.Get("name").(string), - Mount: mount, - Path: path, - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_filesystem_test.go b/builtin/providers/ignition/resource_ignition_filesystem_test.go deleted file mode 100644 index cfb698554..000000000 --- a/builtin/providers/ignition/resource_ignition_filesystem_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package ignition - -import ( - "fmt" - "regexp" - "testing" - - 
"github.com/coreos/ignition/config/types" -) - -func TestIngnitionFilesystem(t *testing.T) { - testIgnition(t, ` - data "ignition_filesystem" "foo" { - name = "foo" - path = "/foo" - } - - data "ignition_filesystem" "qux" { - name = "qux" - mount { - device = "/qux" - format = "ext4" - } - } - - data "ignition_filesystem" "baz" { - name = "baz" - mount { - device = "/baz" - format = "ext4" - create = true - } - } - - data "ignition_filesystem" "bar" { - name = "bar" - mount { - device = "/bar" - format = "ext4" - create = true - force = true - options = ["rw"] - } - } - - data "ignition_config" "test" { - filesystems = [ - "${data.ignition_filesystem.foo.id}", - "${data.ignition_filesystem.qux.id}", - "${data.ignition_filesystem.baz.id}", - "${data.ignition_filesystem.bar.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Storage.Filesystems) != 4 { - return fmt.Errorf("disks, found %d", len(c.Storage.Filesystems)) - } - - f := c.Storage.Filesystems[0] - if f.Name != "foo" { - return fmt.Errorf("name, found %q", f.Name) - } - - if f.Mount != nil { - return fmt.Errorf("mount, found %q", f.Mount.Device) - } - - if string(*f.Path) != "/foo" { - return fmt.Errorf("path, found %q", f.Path) - } - - f = c.Storage.Filesystems[1] - if f.Name != "qux" { - return fmt.Errorf("name, found %q", f.Name) - } - - if f.Mount.Device != "/qux" { - return fmt.Errorf("mount.0.device, found %q", f.Mount.Device) - } - - if f.Mount.Format != "ext4" { - return fmt.Errorf("mount.0.format, found %q", f.Mount.Format) - } - - if f.Mount.Create != nil { - return fmt.Errorf("mount, create was found %#v", f.Mount.Create) - } - - f = c.Storage.Filesystems[2] - if f.Name != "baz" { - return fmt.Errorf("name, found %q", f.Name) - } - - if f.Mount.Device != "/baz" { - return fmt.Errorf("mount.0.device, found %q", f.Mount.Device) - } - - if f.Mount.Format != "ext4" { - return fmt.Errorf("mount.0.format, found %q", f.Mount.Format) - } - - if f.Mount.Create.Force != false { - return 
fmt.Errorf("mount.0.force, found %t", f.Mount.Create.Force) - } - - f = c.Storage.Filesystems[3] - if f.Name != "bar" { - return fmt.Errorf("name, found %q", f.Name) - } - - if f.Mount.Device != "/bar" { - return fmt.Errorf("mount.0.device, found %q", f.Mount.Device) - } - - if f.Mount.Format != "ext4" { - return fmt.Errorf("mount.0.format, found %q", f.Mount.Format) - } - - if f.Mount.Create.Force != true { - return fmt.Errorf("mount.0.force, found %t", f.Mount.Create.Force) - } - - if len(f.Mount.Create.Options) != 1 || f.Mount.Create.Options[0] != "rw" { - return fmt.Errorf("mount.0.options, found %q", f.Mount.Create.Options) - } - - return nil - }) -} - -func TestIngnitionFilesystemMissingCreate(t *testing.T) { - testIgnitionError(t, ` - data "ignition_filesystem" "bar" { - name = "bar" - mount { - device = "/bar" - format = "ext4" - force = true - } - } - - data "ignition_config" "test" { - filesystems = [ - "${data.ignition_filesystem.bar.id}", - ] - } - `, regexp.MustCompile("create should be true when force or options is used")) -} diff --git a/builtin/providers/ignition/resource_ignition_group.go b/builtin/providers/ignition/resource_ignition_group.go deleted file mode 100644 index 125e97e73..000000000 --- a/builtin/providers/ignition/resource_ignition_group.go +++ /dev/null @@ -1,57 +0,0 @@ -package ignition - -import ( - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGroup() *schema.Resource { - return &schema.Resource{ - Exists: resourceGroupExists, - Read: resourceGroupRead, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "gid": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "password_hash": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceGroupRead(d *schema.ResourceData, meta interface{}) error { - id, err 
:= buildGroup(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildGroup(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildGroup(d *schema.ResourceData, c *cache) (string, error) { - return c.addGroup(&types.Group{ - Name: d.Get("name").(string), - PasswordHash: d.Get("password_hash").(string), - Gid: getUInt(d, "gid"), - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_group_test.go b/builtin/providers/ignition/resource_ignition_group_test.go deleted file mode 100644 index 2eb1abc6b..000000000 --- a/builtin/providers/ignition/resource_ignition_group_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionGroup(t *testing.T) { - testIgnition(t, ` - data "ignition_group" "foo" { - name = "foo" - password_hash = "password" - gid = 42 - } - - data "ignition_group" "qux" { - name = "qux" - } - - data "ignition_config" "test" { - groups = [ - "${data.ignition_group.foo.id}", - "${data.ignition_group.qux.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Passwd.Groups) != 2 { - return fmt.Errorf("groups, found %d", len(c.Passwd.Groups)) - } - - g := c.Passwd.Groups[0] - - if g.Name != "foo" { - return fmt.Errorf("name, found %q", g.Name) - } - - if g.PasswordHash != "password" { - return fmt.Errorf("password_hash, found %q", g.PasswordHash) - } - - if g.Gid == nil || *g.Gid != uint(42) { - return fmt.Errorf("gid, found %q", *g.Gid) - } - - g = c.Passwd.Groups[1] - - if g.Name != "qux" { - return fmt.Errorf("name, found %q", g.Name) - } - - if g.Gid != nil { - return fmt.Errorf("uid, found %d", *g.Gid) - } - - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_networkd_unit.go 
b/builtin/providers/ignition/resource_ignition_networkd_unit.go deleted file mode 100644 index 9fd40ed51..000000000 --- a/builtin/providers/ignition/resource_ignition_networkd_unit.go +++ /dev/null @@ -1,60 +0,0 @@ -package ignition - -import ( - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceNetworkdUnit() *schema.Resource { - return &schema.Resource{ - Exists: resourceNetworkdUnitExists, - Read: resourceNetworkdUnitRead, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkdUnitRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildNetworkdUnit(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceNetworkdUnitDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func resourceNetworkdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildNetworkdUnit(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildNetworkdUnit(d *schema.ResourceData, c *cache) (string, error) { - if err := validateUnitContent(d.Get("content").(string)); err != nil { - return "", err - } - - return c.addNetworkdUnit(&types.NetworkdUnit{ - Name: types.NetworkdUnitName(d.Get("name").(string)), - Contents: d.Get("content").(string), - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_networkd_unit_test.go b/builtin/providers/ignition/resource_ignition_networkd_unit_test.go deleted file mode 100644 index f6f460f86..000000000 --- a/builtin/providers/ignition/resource_ignition_networkd_unit_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func 
TestIngnitionNetworkdUnit(t *testing.T) { - testIgnition(t, ` - data "ignition_networkd_unit" "foo" { - name = "foo.link" - content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" - } - - data "ignition_config" "test" { - networkd = [ - "${data.ignition_networkd_unit.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Networkd.Units) != 1 { - return fmt.Errorf("networkd, found %d", len(c.Networkd.Units)) - } - - u := c.Networkd.Units[0] - - if u.Name != "foo.link" { - return fmt.Errorf("name, found %q", u.Name) - } - - if u.Contents != "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" { - return fmt.Errorf("content, found %q", u.Contents) - } - - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_raid.go b/builtin/providers/ignition/resource_ignition_raid.go deleted file mode 100644 index dab1a5f7c..000000000 --- a/builtin/providers/ignition/resource_ignition_raid.go +++ /dev/null @@ -1,69 +0,0 @@ -package ignition - -import ( - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceRaid() *schema.Resource { - return &schema.Resource{ - Exists: resourceRaidExists, - Read: resourceRaidRead, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "level": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "devices": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "spares": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceRaidRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildRaid(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceRaidExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildRaid(d, globalCache) - if 
err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildRaid(d *schema.ResourceData, c *cache) (string, error) { - var devices []types.Path - for _, value := range d.Get("devices").([]interface{}) { - devices = append(devices, types.Path(value.(string))) - } - - return c.addRaid(&types.Raid{ - Name: d.Get("name").(string), - Level: d.Get("level").(string), - Devices: devices, - Spares: d.Get("spares").(int), - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_raid_test.go b/builtin/providers/ignition/resource_ignition_raid_test.go deleted file mode 100644 index 67ff86a3e..000000000 --- a/builtin/providers/ignition/resource_ignition_raid_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionRaid(t *testing.T) { - testIgnition(t, ` - data "ignition_raid" "foo" { - name = "foo" - level = "raid10" - devices = ["/foo"] - spares = 42 - } - - data "ignition_config" "test" { - arrays = [ - "${data.ignition_raid.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Storage.Arrays) != 1 { - return fmt.Errorf("arrays, found %d", len(c.Storage.Arrays)) - } - - a := c.Storage.Arrays[0] - if a.Name != "foo" { - return fmt.Errorf("name, found %q", a.Name) - } - - if len(a.Devices) != 1 || a.Devices[0] != "/foo" { - return fmt.Errorf("devices, found %v", a.Devices) - } - - if a.Level != "raid10" { - return fmt.Errorf("level, found %q", a.Level) - } - - if a.Spares != 42 { - return fmt.Errorf("spares, found %q", a.Spares) - } - - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_systemd_unit.go b/builtin/providers/ignition/resource_ignition_systemd_unit.go deleted file mode 100644 index 88fe9b206..000000000 --- a/builtin/providers/ignition/resource_ignition_systemd_unit.go +++ /dev/null @@ -1,104 +0,0 @@ -package ignition - -import ( - "github.com/coreos/ignition/config/types" - 
"github.com/hashicorp/terraform/helper/schema" -) - -func resourceSystemdUnit() *schema.Resource { - return &schema.Resource{ - Exists: resourceSystemdUnitExists, - Read: resourceSystemdUnitRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "enable": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - "mask": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "content": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "dropin": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "content": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func resourceSystemdUnitRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildSystemdUnit(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceSystemdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildSystemdUnit(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildSystemdUnit(d *schema.ResourceData, c *cache) (string, error) { - var dropins []types.SystemdUnitDropIn - for _, raw := range d.Get("dropin").([]interface{}) { - value := raw.(map[string]interface{}) - - if err := validateUnitContent(value["content"].(string)); err != nil { - return "", err - } - - dropins = append(dropins, types.SystemdUnitDropIn{ - Name: types.SystemdUnitDropInName(value["name"].(string)), - Contents: value["content"].(string), - }) - } - - if err := validateUnitContent(d.Get("content").(string)); err != nil { - if err != errEmptyUnit { - return "", err - } - } - - return c.addSystemdUnit(&types.SystemdUnit{ - Name: 
types.SystemdUnitName(d.Get("name").(string)), - Contents: d.Get("content").(string), - Enable: d.Get("enable").(bool), - Mask: d.Get("mask").(bool), - DropIns: dropins, - }), nil -} diff --git a/builtin/providers/ignition/resource_ignition_systemd_unit_test.go b/builtin/providers/ignition/resource_ignition_systemd_unit_test.go deleted file mode 100644 index c1568ac11..000000000 --- a/builtin/providers/ignition/resource_ignition_systemd_unit_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionSystemdUnit(t *testing.T) { - testIgnition(t, ` - data "ignition_systemd_unit" "foo" { - name = "foo.service" - content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" - enable = false - mask = true - - dropin { - name = "foo.conf" - content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" - } - } - - data "ignition_config" "test" { - systemd = [ - "${data.ignition_systemd_unit.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Systemd.Units) != 1 { - return fmt.Errorf("systemd, found %d", len(c.Systemd.Units)) - } - - u := c.Systemd.Units[0] - - if u.Name != "foo.service" { - return fmt.Errorf("name, found %q", u.Name) - } - - if u.Contents != "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" { - return fmt.Errorf("content, found %q", u.Contents) - } - - if u.Mask != true { - return fmt.Errorf("mask, found %t", u.Mask) - } - - if u.Enable != false { - return fmt.Errorf("enable, found %t", u.Enable) - } - - if len(u.DropIns) != 1 { - return fmt.Errorf("dropins, found %q", u.DropIns) - } - - return nil - }) -} - -func TestIngnitionSystemdUnitEmptyContentWithDropIn(t *testing.T) { - testIgnition(t, ` - data "ignition_systemd_unit" "foo" { - name = "foo.service" - dropin { - name = "foo.conf" - content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n" - } - } - - data "ignition_config" "test" { - systemd = [ - 
"${data.ignition_systemd_unit.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Systemd.Units) != 1 { - return fmt.Errorf("systemd, found %d", len(c.Systemd.Units)) - } - - u := c.Systemd.Units[0] - - if u.Name != "foo.service" { - return fmt.Errorf("name, found %q", u.Name) - } - - if u.Contents != "" { - return fmt.Errorf("content, found %q", u.Contents) - } - - if len(u.DropIns) != 1 { - return fmt.Errorf("dropins, found %q", u.DropIns) - } - - return nil - }) -} - -// #11325 -func TestIgnitionSystemdUnit_emptyContent(t *testing.T) { - testIgnition(t, ` - data "ignition_systemd_unit" "foo" { - name = "foo.service" - enable = true - } - - data "ignition_config" "test" { - systemd = [ - "${data.ignition_systemd_unit.foo.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Systemd.Units) != 1 { - return fmt.Errorf("systemd, found %d", len(c.Systemd.Units)) - } - - u := c.Systemd.Units[0] - if u.Name != "foo.service" { - return fmt.Errorf("name, expected 'foo.service', found %q", u.Name) - } - if u.Contents != "" { - return fmt.Errorf("expected empty content, found %q", u.Contents) - } - if len(u.DropIns) != 0 { - return fmt.Errorf("expected 0 dropins, found %q", u.DropIns) - } - return nil - }) -} diff --git a/builtin/providers/ignition/resource_ignition_user.go b/builtin/providers/ignition/resource_ignition_user.go deleted file mode 100644 index 183e6c8c1..000000000 --- a/builtin/providers/ignition/resource_ignition_user.go +++ /dev/null @@ -1,126 +0,0 @@ -package ignition - -import ( - "reflect" - - "github.com/coreos/ignition/config/types" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUser() *schema.Resource { - return &schema.Resource{ - Exists: resourceUserExists, - Read: resourceUserRead, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "password_hash": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - 
}, - "ssh_authorized_keys": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "uid": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "gecos": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "home_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "no_create_home": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "primary_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "groups": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "no_user_group": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "no_log_init": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "shell": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceUserRead(d *schema.ResourceData, meta interface{}) error { - id, err := buildUser(d, globalCache) - if err != nil { - return err - } - - d.SetId(id) - return nil -} - -func resourceUserExists(d *schema.ResourceData, meta interface{}) (bool, error) { - id, err := buildUser(d, globalCache) - if err != nil { - return false, err - } - - return id == d.Id(), nil -} - -func buildUser(d *schema.ResourceData, c *cache) (string, error) { - uc := types.UserCreate{ - Uid: getUInt(d, "uid"), - GECOS: d.Get("gecos").(string), - Homedir: d.Get("home_dir").(string), - NoCreateHome: d.Get("no_create_home").(bool), - PrimaryGroup: d.Get("primary_group").(string), - Groups: castSliceInterface(d.Get("groups").([]interface{})), - NoUserGroup: d.Get("no_user_group").(bool), - NoLogInit: d.Get("no_log_init").(bool), - Shell: d.Get("shell").(string), - } - - puc := &uc - if reflect.DeepEqual(uc, 
types.UserCreate{}) { // check if the struct is empty - puc = nil - } - - user := types.User{ - Name: d.Get("name").(string), - PasswordHash: d.Get("password_hash").(string), - SSHAuthorizedKeys: castSliceInterface(d.Get("ssh_authorized_keys").([]interface{})), - Create: puc, - } - - return c.addUser(&user), nil -} diff --git a/builtin/providers/ignition/resource_ignition_user_test.go b/builtin/providers/ignition/resource_ignition_user_test.go deleted file mode 100644 index fa066a542..000000000 --- a/builtin/providers/ignition/resource_ignition_user_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package ignition - -import ( - "fmt" - "testing" - - "github.com/coreos/ignition/config/types" -) - -func TestIngnitionUser(t *testing.T) { - testIgnition(t, ` - data "ignition_user" "foo" { - name = "foo" - password_hash = "password" - ssh_authorized_keys = ["keys"] - uid = 42 - gecos = "gecos" - home_dir = "home" - no_create_home = true - primary_group = "primary_group" - groups = ["group"] - no_user_group = true - no_log_init = true - shell = "shell" - } - - data "ignition_user" "qux" { - name = "qux" - } - - data "ignition_config" "test" { - users = [ - "${data.ignition_user.foo.id}", - "${data.ignition_user.qux.id}", - ] - } - `, func(c *types.Config) error { - if len(c.Passwd.Users) != 2 { - return fmt.Errorf("Lenght of field Users didn't match. Expected: %d, Given: %d", 2, len(c.Passwd.Users)) - } - - u := c.Passwd.Users[0] - - if u.Name != "foo" { - return fmt.Errorf("Field Name didn't match. Expected: %s, Given: %s", "foo", u.Name) - } - - if u.PasswordHash != "password" { - return fmt.Errorf("Field PasswordHash didn't match. Expected: %s, Given: %s", "password", u.PasswordHash) - } - - if len(u.SSHAuthorizedKeys) != 1 { - return fmt.Errorf("Lenght of field SSHAuthorizedKeys didn't match. Expected: %d, Given: %d", 1, len(u.SSHAuthorizedKeys)) - } - - if u.SSHAuthorizedKeys[0] != "keys" { - return fmt.Errorf("Field SSHAuthorizedKeys didn't match. 
Expected: %s, Given: %s", "keys", u.SSHAuthorizedKeys[0]) - } - - if *u.Create.Uid != uint(42) { - return fmt.Errorf("Field Uid didn't match. Expected: %d, Given: %d", uint(42), u.Create.Uid) - } - - if u.Create.GECOS != "gecos" { - return fmt.Errorf("Field GECOS didn't match. Expected: %s, Given: %s", "gecos", u.Create.GECOS) - } - - if u.Create.Homedir != "home" { - return fmt.Errorf("Field Homedir didn't match. Expected: %s, Given: %s", "home", u.Create.Homedir) - } - - if u.Create.NoCreateHome != true { - return fmt.Errorf("Field NoCreateHome didn't match. Expected: %t, Given: %t", true, u.Create.NoCreateHome) - } - - if u.Create.PrimaryGroup != "primary_group" { - return fmt.Errorf("Field PrimaryGroup didn't match. Expected: %s, Given: %s", "primary_group", u.Create.PrimaryGroup) - } - - if len(u.Create.Groups) != 1 { - return fmt.Errorf("Lenght of field Groups didn't match. Expected: %d, Given: %d", 1, len(u.Create.Groups)) - } - - if u.Create.Groups[0] != "group" { - return fmt.Errorf("Field Groups didn't match. Expected: %s, Given: %s", "group", u.Create.Groups[0]) - } - - if u.Create.NoUserGroup != true { - return fmt.Errorf("Field NoUserGroup didn't match. Expected: %t, Given: %t", true, u.Create.NoUserGroup) - } - - if u.Create.NoLogInit != true { - return fmt.Errorf("Field NoLogInit didn't match. Expected: %t, Given: %t", true, u.Create.NoLogInit) - } - - if u.Create.Shell != "shell" { - return fmt.Errorf("Field Shell didn't match. Expected: %s, Given: %s", "shell", u.Create.Shell) - } - - u = c.Passwd.Users[1] - - if u.Name != "qux" { - return fmt.Errorf("Field Name didn't match. Expected: %s, Given: %s", "qux", u.Name) - } - - if u.Create != nil { - return fmt.Errorf("Field Create didn't match. 
Expected: %v, Given: %v", nil, u.Create) - } - - return nil - }) -} diff --git a/builtin/providers/influxdb/continuous_query.go b/builtin/providers/influxdb/continuous_query.go deleted file mode 100644 index 2bc921aa6..000000000 --- a/builtin/providers/influxdb/continuous_query.go +++ /dev/null @@ -1,120 +0,0 @@ -package influxdb - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/influxdata/influxdb/client" -) - -func resourceContinuousQuery() *schema.Resource { - return &schema.Resource{ - Create: createContinuousQuery, - Read: readContinuousQuery, - Delete: deleteContinuousQuery, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "database": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "query": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func createContinuousQuery(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - - name := d.Get("name").(string) - database := d.Get("database").(string) - - queryStr := fmt.Sprintf("CREATE CONTINUOUS QUERY %s ON %s BEGIN %s END", name, quoteIdentifier(database), d.Get("query").(string)) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.Set("name", name) - d.Set("database", database) - d.Set("query", d.Get("query").(string)) - d.SetId(fmt.Sprintf("influxdb-cq:%s", name)) - - return readContinuousQuery(d, meta) -} - -func readContinuousQuery(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - database := d.Get("database").(string) - - // InfluxDB doesn't have a command to check the existence of a single - // ContinuousQuery, so we instead must read the list of all ContinuousQuerys and see - // if ours is 
present in it. - query := client.Query{ - Command: "SHOW CONTINUOUS QUERIES", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - for _, series := range resp.Results[0].Series { - if series.Name == database { - for _, result := range series.Values { - if result[0].(string) == name { - return nil - } - } - } - } - - // If we fell out here then we didn't find our ContinuousQuery in the list. - d.SetId("") - - return nil -} - -func deleteContinuousQuery(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - database := d.Get("database").(string) - - queryStr := fmt.Sprintf("DROP CONTINUOUS QUERY %s ON %s", name, quoteIdentifier(database)) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/influxdb/continuous_query_test.go b/builtin/providers/influxdb/continuous_query_test.go deleted file mode 100644 index 78fdaa21b..000000000 --- a/builtin/providers/influxdb/continuous_query_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package influxdb - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/influxdata/influxdb/client" -) - -func TestAccInfluxDBContiuousQuery(t *testing.T) { - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccContiuousQueryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckContiuousQueryExists("influxdb_continuous_query.minnie"), - resource.TestCheckResourceAttr( - "influxdb_continuous_query.minnie", "name", "minnie", - ), - resource.TestCheckResourceAttr( - "influxdb_continuous_query.minnie", "database", "terraform-test", - ), - resource.TestCheckResourceAttr( - 
"influxdb_continuous_query.minnie", "query", "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)", - ), - ), - }, - }, - }) -} - -func testAccCheckContiuousQueryExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ContiuousQuery id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: "SHOW CONTINUOUS QUERIES", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, series := range resp.Results[0].Series { - if series.Name == rs.Primary.Attributes["database"] { - for _, result := range series.Values { - if result[0].(string) == rs.Primary.Attributes["name"] { - return nil - } - } - } - } - - return fmt.Errorf("ContiuousQuery %q does not exist", rs.Primary.Attributes["name"]) - } -} - -var testAccContiuousQueryConfig = ` - -resource "influxdb_database" "test" { - name = "terraform-test" -} - -resource "influxdb_continuous_query" "minnie" { - name = "minnie" - database = "${influxdb_database.test.name}" - query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)" -} - -` diff --git a/builtin/providers/influxdb/provider.go b/builtin/providers/influxdb/provider.go deleted file mode 100644 index 0917d22b6..000000000 --- a/builtin/providers/influxdb/provider.go +++ /dev/null @@ -1,75 +0,0 @@ -package influxdb - -import ( - "fmt" - "net/url" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/influxdata/influxdb/client" -) - -var quoteReplacer = strings.NewReplacer(`"`, `\"`) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "influxdb_database": resourceDatabase(), - "influxdb_user": resourceUser(), - "influxdb_continuous_query": resourceContinuousQuery(), - }, - - Schema: map[string]*schema.Schema{ - "url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc( - "INFLUXDB_URL", "http://localhost:8086/", - ), - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("INFLUXDB_USERNAME", ""), - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("INFLUXDB_PASSWORD", ""), - }, - }, - - ConfigureFunc: configure, - } -} - -func configure(d *schema.ResourceData) (interface{}, error) { - url, err := url.Parse(d.Get("url").(string)) - if err != nil { - return nil, fmt.Errorf("invalid InfluxDB URL: %s", err) - } - - config := client.Config{ - URL: *url, - Username: d.Get("username").(string), - Password: d.Get("password").(string), - } - - conn, err := client.NewClient(config) - if err != nil { - return nil, err - } - - _, _, err = conn.Ping() - if err != nil { - return nil, fmt.Errorf("error pinging server: %s", err) - } - - return conn, nil -} - -func quoteIdentifier(ident string) string { - return fmt.Sprintf(`%q`, quoteReplacer.Replace(ident)) -} diff --git a/builtin/providers/influxdb/provider_test.go b/builtin/providers/influxdb/provider_test.go deleted file mode 100644 index d98552712..000000000 --- a/builtin/providers/influxdb/provider_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package influxdb - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need an InfluxDB server. 
-// If you download an InfluxDB distribution and run it with its default -// settings, on the same host where the tests are being run, then these tests -// should work with no further configuration. -// -// To run the tests against a remote InfluxDB server, set the INFLUXDB_URL, -// INFLUXDB_USERNAME and INFLUXDB_PASSWORD environment variables. - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "influxdb": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} diff --git a/builtin/providers/influxdb/resource_database.go b/builtin/providers/influxdb/resource_database.go deleted file mode 100644 index 257e95054..000000000 --- a/builtin/providers/influxdb/resource_database.go +++ /dev/null @@ -1,99 +0,0 @@ -package influxdb - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/influxdata/influxdb/client" -) - -func resourceDatabase() *schema.Resource { - return &schema.Resource{ - Create: createDatabase, - Read: readDatabase, - Delete: deleteDatabase, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func createDatabase(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - - name := d.Get("name").(string) - queryStr := fmt.Sprintf("CREATE DATABASE %s", quoteIdentifier(name)) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.SetId(name) - - return nil -} - -func readDatabase(d *schema.ResourceData, meta interface{}) 
error { - conn := meta.(*client.Client) - name := d.Id() - - // InfluxDB doesn't have a command to check the existence of a single - // database, so we instead must read the list of all databases and see - // if ours is present in it. - query := client.Query{ - Command: "SHOW DATABASES", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[0] == name { - return nil - } - } - - // If we fell out here then we didn't find our database in the list. - d.SetId("") - - return nil -} - -func deleteDatabase(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Id() - - queryStr := fmt.Sprintf("DROP DATABASE %s", quoteIdentifier(name)) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/influxdb/resource_database_test.go b/builtin/providers/influxdb/resource_database_test.go deleted file mode 100644 index 07f4cf6a2..000000000 --- a/builtin/providers/influxdb/resource_database_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package influxdb - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/influxdata/influxdb/client" -) - -func TestAccInfluxDBDatabase(t *testing.T) { - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDatabaseConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckDatabaseExists("influxdb_database.test"), - resource.TestCheckResourceAttr( - "influxdb_database.test", "name", "terraform-test", - ), - ), - }, - }, - }) -} - -func testAccCheckDatabaseExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, 
ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No database id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: "SHOW DATABASES", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[0] == rs.Primary.Attributes["name"] { - return nil - } - } - - return fmt.Errorf("Database %q does not exist", rs.Primary.Attributes["name"]) - } -} - -var testAccDatabaseConfig = ` - -resource "influxdb_database" "test" { - name = "terraform-test" -} - -` diff --git a/builtin/providers/influxdb/resource_user.go b/builtin/providers/influxdb/resource_user.go deleted file mode 100644 index 21475a314..000000000 --- a/builtin/providers/influxdb/resource_user.go +++ /dev/null @@ -1,271 +0,0 @@ -package influxdb - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/influxdata/influxdb/client" -) - -func resourceUser() *schema.Resource { - return &schema.Resource{ - Create: createUser, - Read: readUser, - Update: updateUser, - Delete: deleteUser, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "admin": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "grant": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "database": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "privilege": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func createUser(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*client.Client) - - name := d.Get("name").(string) - password := d.Get("password").(string) - - is_admin := d.Get("admin").(bool) - admin_privileges := "" - if is_admin { - admin_privileges = "WITH ALL PRIVILEGES" - } - - queryStr := fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s' %s", name, password, admin_privileges) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.SetId(fmt.Sprintf("influxdb-user:%s", name)) - - if v, ok := d.GetOk("grant"); ok { - grants := v.([]interface{}) - for _, vv := range grants { - grant := vv.(map[string]interface{}) - if err := grantPrivilegeOn(conn, grant["privilege"].(string), grant["database"].(string), name); err != nil { - return err - } - } - } - - return readUser(d, meta) -} - -func exec(conn *client.Client, query string) error { - resp, err := conn.Query(client.Query{ - Command: query, - }) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - return nil -} - -func grantPrivilegeOn(conn *client.Client, privilege, database, user string) error { - return exec(conn, fmt.Sprintf("GRANT %s ON %s TO %s", privilege, quoteIdentifier(database), user)) -} - -func revokePrivilegeOn(conn *client.Client, privilege, database, user string) error { - return exec(conn, fmt.Sprintf("REVOKE %s ON %s FROM %s", privilege, quoteIdentifier(database), user)) -} - -func grantAllOn(conn *client.Client, user string) error { - return exec(conn, fmt.Sprintf("GRANT ALL PRIVILEGES TO %s", user)) -} - -func revokeAllOn(conn *client.Client, user string) error { - return exec(conn, fmt.Sprintf("REVOKE ALL PRIVILEGES FROM %s", user)) -} - -func readUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - - // InfluxDB doesn't have a command to check the existence of a single - // User, so we instead must read the list of all Users and see - // 
if ours is present in it. - query := client.Query{ - Command: "SHOW USERS", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - var found = false - for _, result := range resp.Results[0].Series[0].Values { - if result[0] == name { - found = true - d.Set("admin", result[1].(bool)) - break - } - } - - if !found { - // If we fell out here then we didn't find our User in the list. - d.SetId("") - - return nil - } - - return readGrants(d, meta) -} - -func readGrants(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - - query := client.Query{ - Command: fmt.Sprintf("SHOW GRANTS FOR %s", name), - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - var grants = []map[string]string{} - for _, result := range resp.Results[0].Series[0].Values { - if result[1].(string) != "NO PRIVILEGES" { - var grant = map[string]string{ - "database": result[0].(string), - "privilege": strings.ToLower(result[1].(string)), - } - grants = append(grants, grant) - } - } - d.Set("grant", grants) - return nil -} - -func updateUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - - if d.HasChange("admin") { - if !d.Get("admin").(bool) { - revokeAllOn(conn, name) - } else { - grantAllOn(conn, name) - } - } - - if d.HasChange("grant") { - oldGrantV, newGrantV := d.GetChange("grant") - oldGrant := oldGrantV.([]interface{}) - newGrant := newGrantV.([]interface{}) - - for _, oGV := range oldGrant { - oldGrant := oGV.(map[string]interface{}) - - exists := false - privilege := oldGrant["privilege"].(string) - for _, nGV := range newGrant { - newGrant := nGV.(map[string]interface{}) - - if newGrant["database"].(string) == oldGrant["database"].(string) { - exists = true - privilege = newGrant["privilege"].(string) - } - } - - if !exists { 
- revokePrivilegeOn(conn, oldGrant["privilege"].(string), oldGrant["database"].(string), name) - } else { - if privilege != oldGrant["privilege"].(string) { - grantPrivilegeOn(conn, privilege, oldGrant["database"].(string), name) - } - } - } - - for _, nGV := range newGrant { - newGrant := nGV.(map[string]interface{}) - - exists := false - for _, oGV := range oldGrant { - oldGrant := oGV.(map[string]interface{}) - - exists = exists || (newGrant["database"].(string) == oldGrant["database"].(string) && newGrant["privilege"].(string) == oldGrant["privilege"].(string)) - } - - if !exists { - grantPrivilegeOn(conn, newGrant["privilege"].(string), newGrant["database"].(string), name) - } - } - } - - return readUser(d, meta) -} - -func deleteUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*client.Client) - name := d.Get("name").(string) - - queryStr := fmt.Sprintf("DROP USER %s", name) - query := client.Query{ - Command: queryStr, - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - if resp.Err != nil { - return resp.Err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/influxdb/resource_user_test.go b/builtin/providers/influxdb/resource_user_test.go deleted file mode 100644 index 587796e79..000000000 --- a/builtin/providers/influxdb/resource_user_test.go +++ /dev/null @@ -1,349 +0,0 @@ -package influxdb - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/influxdata/influxdb/client" -) - -func TestAccInfluxDBUser_admin(t *testing.T) { - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_admin, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserExists("influxdb_user.test"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", 
"password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "true", - ), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_revoke, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserExists("influxdb_user.test"), - testAccCheckUserNoAdmin("influxdb_user.test"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "false", - ), - ), - }, - }, - }) -} - -func TestAccInfluxDBUser_grant(t *testing.T) { - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_grant, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserExists("influxdb_user.test"), - testAccCheckUserGrants("influxdb_user.test", "terraform-green", "READ"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "false", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "grant.#", "1", - ), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_grantUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserGrants("influxdb_user.test", "terraform-green", "WRITE"), - testAccCheckUserGrants("influxdb_user.test", "terraform-blue", "READ"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "false", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "grant.#", "2", - ), - ), - }, - }, - }) -} - -func TestAccInfluxDBUser_revoke(t *testing.T) { - resource.Test(t, 
resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_grant, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserExists("influxdb_user.test"), - testAccCheckUserGrants("influxdb_user.test", "terraform-green", "READ"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "false", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "grant.#", "1", - ), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_revoke, - Check: resource.ComposeTestCheckFunc( - testAccCheckUserGrantsEmpty("influxdb_user.test"), - resource.TestCheckResourceAttr( - "influxdb_user.test", "name", "terraform_test", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "password", "terraform", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "admin", "false", - ), - resource.TestCheckResourceAttr( - "influxdb_user.test", "grant.#", "0", - ), - ), - }, - }, - }) -} - -func testAccCheckUserExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No user id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: "SHOW USERS", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[0] == rs.Primary.Attributes["name"] { - return nil - } - } - - return fmt.Errorf("User %q does not exist", rs.Primary.Attributes["name"]) - } -} - -func testAccCheckUserNoAdmin(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No user id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: "SHOW USERS", - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[0] == rs.Primary.Attributes["name"] { - if result[1].(bool) == true { - return fmt.Errorf("User %q is admin", rs.Primary.ID) - } - - return nil - } - } - - return fmt.Errorf("User %q does not exist", rs.Primary.Attributes["name"]) - } -} - -func testAccCheckUserGrantsEmpty(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No user id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: fmt.Sprintf("SHOW GRANTS FOR %s", rs.Primary.Attributes["name"]), - } - - resp, err := conn.Query(query) - if err != nil { - return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[1].(string) != "NO PRIVILEGES" { - return fmt.Errorf("User %q still has grants: %#v", rs.Primary.ID, resp.Results[0].Series[0].Values) - } - } - - return nil - } -} - -func testAccCheckUserGrants(n, database, privilege string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No user id set") - } - - conn := testAccProvider.Meta().(*client.Client) - - query := client.Query{ - Command: fmt.Sprintf("SHOW GRANTS FOR %s", rs.Primary.Attributes["name"]), - } - - resp, err := conn.Query(query) - if err != nil { - 
return err - } - - if resp.Err != nil { - return resp.Err - } - - for _, result := range resp.Results[0].Series[0].Values { - if result[0].(string) == database && result[1].(string) == privilege { - return nil - } - } - - return fmt.Errorf("Privilege %q on %q for %q does not exist", privilege, database, rs.Primary.Attributes["name"]) - } -} - -var testAccUserConfig_admin = ` -resource "influxdb_user" "test" { - name = "terraform_test" - password = "terraform" - admin = true -} -` - -var testAccUserConfig_grant = ` -resource "influxdb_database" "green" { - name = "terraform-green" -} - -resource "influxdb_user" "test" { - name = "terraform_test" - password = "terraform" - - grant { - database = "${influxdb_database.green.name}" - privilege = "read" - } -} -` - -var testAccUserConfig_revoke = ` -resource "influxdb_database" "green" { - name = "terraform-green" -} - -resource "influxdb_user" "test" { - name = "terraform_test" - password = "terraform" - admin = false -} -` - -var testAccUserConfig_grantUpdate = ` -resource "influxdb_database" "green" { - name = "terraform-green" -} - -resource "influxdb_database" "blue" { - name = "terraform-blue" -} - -resource "influxdb_user" "test" { - name = "terraform_test" - password = "terraform" - - grant { - database = "${influxdb_database.green.name}" - privilege = "write" - } - - grant { - database = "${influxdb_database.blue.name}" - privilege = "read" - } -} -` diff --git a/builtin/providers/kubernetes/diff_supress_funcs.go b/builtin/providers/kubernetes/diff_supress_funcs.go deleted file mode 100644 index 3c22a0764..000000000 --- a/builtin/providers/kubernetes/diff_supress_funcs.go +++ /dev/null @@ -1,18 +0,0 @@ -package kubernetes - -import ( - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/resource" -) - -func suppressEquivalentResourceQuantity(k, old, new string, d *schema.ResourceData) bool { - oldQ, err := resource.ParseQuantity(old) - if err != nil { - return false - } - newQ, err := 
resource.ParseQuantity(new) - if err != nil { - return false - } - return oldQ.Cmp(newQ) == 0 -} diff --git a/builtin/providers/kubernetes/patch_operations.go b/builtin/providers/kubernetes/patch_operations.go deleted file mode 100644 index e794a1324..000000000 --- a/builtin/providers/kubernetes/patch_operations.go +++ /dev/null @@ -1,135 +0,0 @@ -package kubernetes - -import ( - "encoding/json" - "reflect" - "sort" - "strings" -) - -func diffStringMap(pathPrefix string, oldV, newV map[string]interface{}) PatchOperations { - ops := make([]PatchOperation, 0, 0) - - pathPrefix = strings.TrimRight(pathPrefix, "/") - - // This is suboptimal for adding whole new map from scratch - // or deleting the whole map, but it's actually intention. - // There may be some other map items managed outside of TF - // and we don't want to touch these. - - for k, _ := range oldV { - if _, ok := newV[k]; ok { - continue - } - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/" + k}) - } - - for k, v := range newV { - newValue := v.(string) - - if oldValue, ok := oldV[k].(string); ok { - if oldValue == newValue { - continue - } - - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/" + k, - Value: newValue, - }) - continue - } - - ops = append(ops, &AddOperation{ - Path: pathPrefix + "/" + k, - Value: newValue, - }) - } - - return ops -} - -type PatchOperations []PatchOperation - -func (po PatchOperations) MarshalJSON() ([]byte, error) { - var v []PatchOperation = po - return json.Marshal(v) -} - -func (po PatchOperations) Equal(ops []PatchOperation) bool { - var v []PatchOperation = po - - sort.Slice(v, sortByPathAsc(v)) - sort.Slice(ops, sortByPathAsc(ops)) - - return reflect.DeepEqual(v, ops) -} - -func sortByPathAsc(ops []PatchOperation) func(i, j int) bool { - return func(i, j int) bool { - return ops[i].GetPath() < ops[j].GetPath() - } -} - -type PatchOperation interface { - MarshalJSON() ([]byte, error) - GetPath() string -} - -type ReplaceOperation struct { - Path 
string `json:"path"` - Value interface{} `json:"value"` - Op string `json:"op"` -} - -func (o *ReplaceOperation) GetPath() string { - return o.Path -} - -func (o *ReplaceOperation) MarshalJSON() ([]byte, error) { - o.Op = "replace" - return json.Marshal(*o) -} - -func (o *ReplaceOperation) String() string { - b, _ := o.MarshalJSON() - return string(b) -} - -type AddOperation struct { - Path string `json:"path"` - Value interface{} `json:"value"` - Op string `json:"op"` -} - -func (o *AddOperation) GetPath() string { - return o.Path -} - -func (o *AddOperation) MarshalJSON() ([]byte, error) { - o.Op = "add" - return json.Marshal(*o) -} - -func (o *AddOperation) String() string { - b, _ := o.MarshalJSON() - return string(b) -} - -type RemoveOperation struct { - Path string `json:"path"` - Op string `json:"op"` -} - -func (o *RemoveOperation) GetPath() string { - return o.Path -} - -func (o *RemoveOperation) MarshalJSON() ([]byte, error) { - o.Op = "remove" - return json.Marshal(*o) -} - -func (o *RemoveOperation) String() string { - b, _ := o.MarshalJSON() - return string(b) -} diff --git a/builtin/providers/kubernetes/patch_operations_test.go b/builtin/providers/kubernetes/patch_operations_test.go deleted file mode 100644 index c60a5e628..000000000 --- a/builtin/providers/kubernetes/patch_operations_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" -) - -func TestDiffStringMap(t *testing.T) { - testCases := []struct { - Path string - Old map[string]interface{} - New map[string]interface{} - ExpectedOps PatchOperations - }{ - { - Path: "/parent/", - Old: map[string]interface{}{ - "one": "111", - "two": "222", - }, - New: map[string]interface{}{ - "one": "111", - "two": "222", - "three": "333", - }, - ExpectedOps: []PatchOperation{ - &AddOperation{ - Path: "/parent/three", - Value: "333", - }, - }, - }, - { - Path: "/parent/", - Old: map[string]interface{}{ - "one": "111", - "two": "222", - }, - New: map[string]interface{}{ - 
"one": "111", - "two": "abcd", - }, - ExpectedOps: []PatchOperation{ - &ReplaceOperation{ - Path: "/parent/two", - Value: "abcd", - }, - }, - }, - { - Path: "/parent/", - Old: map[string]interface{}{ - "one": "111", - "two": "222", - }, - New: map[string]interface{}{ - "two": "abcd", - "three": "333", - }, - ExpectedOps: []PatchOperation{ - &RemoveOperation{Path: "/parent/one"}, - &ReplaceOperation{ - Path: "/parent/two", - Value: "abcd", - }, - &AddOperation{ - Path: "/parent/three", - Value: "333", - }, - }, - }, - { - Path: "/parent/", - Old: map[string]interface{}{ - "one": "111", - "two": "222", - }, - New: map[string]interface{}{ - "two": "222", - }, - ExpectedOps: []PatchOperation{ - &RemoveOperation{Path: "/parent/one"}, - }, - }, - { - Path: "/parent/", - Old: map[string]interface{}{ - "one": "111", - "two": "222", - }, - New: map[string]interface{}{}, - ExpectedOps: []PatchOperation{ - &RemoveOperation{Path: "/parent/one"}, - &RemoveOperation{Path: "/parent/two"}, - }, - }, - { - Path: "/parent/", - Old: map[string]interface{}{}, - New: map[string]interface{}{ - "one": "111", - "two": "222", - }, - ExpectedOps: []PatchOperation{ - &AddOperation{ - Path: "/parent/one", - Value: "111", - }, - &AddOperation{ - Path: "/parent/two", - Value: "222", - }, - }, - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - ops := diffStringMap(tc.Path, tc.Old, tc.New) - if !tc.ExpectedOps.Equal(ops) { - t.Fatalf("Operations don't match.\nExpected: %v\nGiven: %v\n", tc.ExpectedOps, ops) - } - }) - } - -} diff --git a/builtin/providers/kubernetes/provider.go b/builtin/providers/kubernetes/provider.go deleted file mode 100644 index 0b106b892..000000000 --- a/builtin/providers/kubernetes/provider.go +++ /dev/null @@ -1,200 +0,0 @@ -package kubernetes - -import ( - "bytes" - "fmt" - "log" - "os" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/go-homedir" - 
restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "host": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_HOST", ""), - Description: "The hostname (in form of URI) of Kubernetes master.", - }, - "username": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_USER", ""), - Description: "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - }, - "password": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_PASSWORD", ""), - Description: "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - }, - "insecure": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_INSECURE", false), - Description: "Whether server should be accessed without verifying the TLS certificate.", - }, - "client_certificate": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_CERT_DATA", ""), - Description: "PEM-encoded client certificate for TLS authentication.", - }, - "client_key": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_KEY_DATA", ""), - Description: "PEM-encoded client certificate key for TLS authentication.", - }, - "cluster_ca_certificate": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CLUSTER_CA_CERT_DATA", ""), - Description: "PEM-encoded root certificates bundle for TLS authentication.", - }, - "config_path": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{ - "KUBE_CONFIG", - 
"KUBECONFIG", - }, - "~/.kube/config"), - Description: "Path to the kube config file, defaults to ~/.kube/config", - }, - "config_context": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX", ""), - }, - "config_context_auth_info": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_AUTH_INFO", ""), - Description: "", - }, - "config_context_cluster": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_CLUSTER", ""), - Description: "", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "kubernetes_config_map": resourceKubernetesConfigMap(), - "kubernetes_horizontal_pod_autoscaler": resourceKubernetesHorizontalPodAutoscaler(), - "kubernetes_limit_range": resourceKubernetesLimitRange(), - "kubernetes_namespace": resourceKubernetesNamespace(), - "kubernetes_persistent_volume": resourceKubernetesPersistentVolume(), - "kubernetes_persistent_volume_claim": resourceKubernetesPersistentVolumeClaim(), - "kubernetes_pod": resourceKubernetesPod(), - "kubernetes_resource_quota": resourceKubernetesResourceQuota(), - "kubernetes_secret": resourceKubernetesSecret(), - "kubernetes_service": resourceKubernetesService(), - }, - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - // Config file loading - cfg, err := tryLoadingConfigFile(d) - if err != nil { - return nil, err - } - if cfg == nil { - cfg = &restclient.Config{} - } - - // Overriding with static configuration - cfg.UserAgent = fmt.Sprintf("HashiCorp/1.0 Terraform/%s", terraform.VersionString()) - - if v, ok := d.GetOk("host"); ok { - cfg.Host = v.(string) - } - if v, ok := d.GetOk("username"); ok { - cfg.Username = v.(string) - } - if v, ok := d.GetOk("password"); ok { - cfg.Password = v.(string) - } - if v, ok := d.GetOk("insecure"); ok { - cfg.Insecure = v.(bool) - } - if v, ok := d.GetOk("cluster_ca_certificate"); ok { - 
cfg.CAData = bytes.NewBufferString(v.(string)).Bytes() - } - if v, ok := d.GetOk("client_certificate"); ok { - cfg.CertData = bytes.NewBufferString(v.(string)).Bytes() - } - if v, ok := d.GetOk("client_key"); ok { - cfg.KeyData = bytes.NewBufferString(v.(string)).Bytes() - } - - k, err := kubernetes.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("Failed to configure: %s", err) - } - - return k, nil -} - -func tryLoadingConfigFile(d *schema.ResourceData) (*restclient.Config, error) { - path, err := homedir.Expand(d.Get("config_path").(string)) - if err != nil { - return nil, err - } - - loader := &clientcmd.ClientConfigLoadingRules{ - ExplicitPath: path, - } - - overrides := &clientcmd.ConfigOverrides{} - ctxSuffix := "; default context" - - ctx, ctxOk := d.GetOk("config_context") - authInfo, authInfoOk := d.GetOk("config_context_auth_info") - cluster, clusterOk := d.GetOk("config_context_cluster") - if ctxOk || authInfoOk || clusterOk { - ctxSuffix = "; overriden context" - if ctxOk { - overrides.CurrentContext = ctx.(string) - ctxSuffix += fmt.Sprintf("; config ctx: %s", overrides.CurrentContext) - log.Printf("[DEBUG] Using custom current context: %q", overrides.CurrentContext) - } - - overrides.Context = clientcmdapi.Context{} - if authInfoOk { - overrides.Context.AuthInfo = authInfo.(string) - ctxSuffix += fmt.Sprintf("; auth_info: %s", overrides.Context.AuthInfo) - } - if clusterOk { - overrides.Context.Cluster = cluster.(string) - ctxSuffix += fmt.Sprintf("; cluster: %s", overrides.Context.Cluster) - } - log.Printf("[DEBUG] Using overidden context: %#v", overrides.Context) - } - - cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides) - cfg, err := cc.ClientConfig() - if err != nil { - if pathErr, ok := err.(*os.PathError); ok && os.IsNotExist(pathErr.Err) { - log.Printf("[INFO] Unable to load config file as it doesn't exist at %q", path) - return nil, nil - } - return nil, fmt.Errorf("Failed to load config (%s%s): %s", 
path, ctxSuffix, err) - } - - log.Printf("[INFO] Successfully loaded config file (%s%s)", path, ctxSuffix) - return cfg, nil -} diff --git a/builtin/providers/kubernetes/provider_test.go b/builtin/providers/kubernetes/provider_test.go deleted file mode 100644 index d985927da..000000000 --- a/builtin/providers/kubernetes/provider_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package kubernetes - -import ( - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/builtin/providers/google" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "kubernetes": testAccProvider, - "google": google.Provider(), - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - hasFileCfg := (os.Getenv("KUBE_CTX_AUTH_INFO") != "" && os.Getenv("KUBE_CTX_CLUSTER") != "") - hasStaticCfg := (os.Getenv("KUBE_HOST") != "" && - os.Getenv("KUBE_USER") != "" && - os.Getenv("KUBE_PASSWORD") != "" && - os.Getenv("KUBE_CLIENT_CERT_DATA") != "" && - os.Getenv("KUBE_CLIENT_KEY_DATA") != "" && - os.Getenv("KUBE_CLUSTER_CA_CERT_DATA") != "") - - if !hasFileCfg && !hasStaticCfg { - t.Fatalf("File config (KUBE_CTX_AUTH_INFO and KUBE_CTX_CLUSTER) or static configuration"+ - " (%s) must be set for acceptance tests", - strings.Join([]string{ - "KUBE_HOST", - "KUBE_USER", - "KUBE_PASSWORD", - "KUBE_CLIENT_CERT_DATA", - "KUBE_CLIENT_KEY_DATA", - "KUBE_CLUSTER_CA_CERT_DATA", - }, ", ")) - } - - if os.Getenv("GOOGLE_PROJECT") == "" || os.Getenv("GOOGLE_REGION") == "" || os.Getenv("GOOGLE_ZONE") == "" { - 
t.Fatal("GOOGLE_PROJECT, GOOGLE_REGION and GOOGLE_ZONE must be set for acceptance tests") - } -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_config_map.go b/builtin/providers/kubernetes/resource_kubernetes_config_map.go deleted file mode 100644 index a0f679c10..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_config_map.go +++ /dev/null @@ -1,131 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesConfigMap() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesConfigMapCreate, - Read: resourceKubernetesConfigMapRead, - Exists: resourceKubernetesConfigMapExists, - Update: resourceKubernetesConfigMapUpdate, - Delete: resourceKubernetesConfigMapDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("config map", true), - "data": { - Type: schema.TypeMap, - Description: "A map of the configuration data.", - Optional: true, - }, - }, - } -} - -func resourceKubernetesConfigMapCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - cfgMap := api.ConfigMap{ - ObjectMeta: metadata, - Data: expandStringMap(d.Get("data").(map[string]interface{})), - } - log.Printf("[INFO] Creating new config map: %#v", cfgMap) - out, err := conn.CoreV1().ConfigMaps(metadata.Namespace).Create(&cfgMap) - if err != nil { - return err - } - log.Printf("[INFO] Submitted new config map: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesConfigMapRead(d, meta) -} - -func 
resourceKubernetesConfigMapRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading config map %s", name) - cfgMap, err := conn.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received config map: %#v", cfgMap) - err = d.Set("metadata", flattenMetadata(cfgMap.ObjectMeta)) - if err != nil { - return err - } - d.Set("data", cfgMap.Data) - - return nil -} - -func resourceKubernetesConfigMapUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("data") { - oldV, newV := d.GetChange("data") - diffOps := diffStringMap("/data/", oldV.(map[string]interface{}), newV.(map[string]interface{})) - ops = append(ops, diffOps...) - } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - log.Printf("[INFO] Updating config map %q: %v", name, string(data)) - out, err := conn.CoreV1().ConfigMaps(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update Config Map: %s", err) - } - log.Printf("[INFO] Submitted updated config map: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesConfigMapRead(d, meta) -} - -func resourceKubernetesConfigMapDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting config map: %#v", name) - err := conn.CoreV1().ConfigMaps(namespace).Delete(name, &metav1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Config map %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesConfigMapExists(d 
*schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking config map %s", name) - _, err := conn.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_config_map_test.go b/builtin/providers/kubernetes/resource_kubernetes_config_map_test.go deleted file mode 100644 index b873b5571..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_config_map_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package kubernetes - -import ( - "fmt" - "reflect" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesConfigMap_basic(t *testing.T) { - var conf api.ConfigMap - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_config_map.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesConfigMapDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesConfigMapConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesConfigMapExists("kubernetes_config_map.test", &conf), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.TestAnnotationOne", "one"), - 
resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.%", "2"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.one", "first"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.two", "second"), - testAccCheckConfigMapData(&conf, map[string]string{"one": "first", "two": "second"}), - ), - }, - { - Config: testAccKubernetesConfigMapConfig_modified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesConfigMapExists("kubernetes_config_map.test", &conf), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.TestAnnotationOne", "one"), - 
resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.Different", "1234"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "Different": "1234"}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.%", "2"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.%", "3"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.one", "first"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.two", "second"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.nine", "ninth"), - testAccCheckConfigMapData(&conf, map[string]string{"one": "first", "two": "second", "nine": "ninth"}), - ), - }, - { - Config: testAccKubernetesConfigMapConfig_noData(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesConfigMapExists("kubernetes_config_map.test", &conf), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.%", "0"), - 
testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "data.%", "0"), - testAccCheckConfigMapData(&conf, map[string]string{}), - ), - }, - }, - }) -} - -func TestAccKubernetesConfigMap_importBasic(t *testing.T) { - resourceName := "kubernetes_config_map.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesConfigMapDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesConfigMapConfig_basic(name), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesConfigMap_generatedName(t *testing.T) { - var conf api.ConfigMap - prefix := "tf-acc-test-gen-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_config_map.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesConfigMapDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesConfigMapConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesConfigMapExists("kubernetes_config_map.test", &conf), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - 
resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_config_map.test", "metadata.0.generate_name", prefix), - resource.TestMatchResourceAttr("kubernetes_config_map.test", "metadata.0.name", regexp.MustCompile("^"+prefix)), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_config_map.test", "metadata.0.uid"), - ), - }, - }, - }) -} - -func TestAccKubernetesConfigMap_importGeneratedName(t *testing.T) { - resourceName := "kubernetes_config_map.test" - prefix := "tf-acc-test-gen-import-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesConfigMapDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesConfigMapConfig_generatedName(prefix), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckConfigMapData(m *api.ConfigMap, expected map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(expected) == 0 && len(m.Data) == 0 { - return nil - } - if !reflect.DeepEqual(m.Data, expected) { - return fmt.Errorf("%s data don't match.\nExpected: %q\nGiven: %q", - m.Name, expected, m.Data) - } - return nil - } -} - -func testAccCheckKubernetesConfigMapDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_config_map" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := 
conn.CoreV1().ConfigMaps(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Name == rs.Primary.ID { - return fmt.Errorf("Config Map still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesConfigMapExists(n string, obj *api.ConfigMap) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().ConfigMaps(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesConfigMapConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_config_map" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - data { - one = "first" - two = "second" - } -}`, name) -} - -func testAccKubernetesConfigMapConfig_modified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_config_map" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - Different = "1234" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - } - name = "%s" - } - data { - one = "first" - two = "second" - nine = "ninth" - } -}`, name) -} - -func testAccKubernetesConfigMapConfig_noData(name string) string { - return fmt.Sprintf(` -resource "kubernetes_config_map" "test" { - metadata { - name = "%s" - } -}`, name) -} - -func testAccKubernetesConfigMapConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_config_map" "test" { - metadata { - generate_name = "%s" - } - data { - one = "first" - two = "second" - } -}`, prefix) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler.go 
b/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler.go deleted file mode 100644 index f734c43c9..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler.go +++ /dev/null @@ -1,184 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/apis/autoscaling/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesHorizontalPodAutoscaler() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesHorizontalPodAutoscalerCreate, - Read: resourceKubernetesHorizontalPodAutoscalerRead, - Exists: resourceKubernetesHorizontalPodAutoscalerExists, - Update: resourceKubernetesHorizontalPodAutoscalerUpdate, - Delete: resourceKubernetesHorizontalPodAutoscalerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("horizontal pod autoscaler", true), - "spec": { - Type: schema.TypeList, - Description: "Behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_replicas": { - Type: schema.TypeInt, - Description: "Upper limit for the number of pods that can be set by the autoscaler.", - Required: true, - }, - "min_replicas": { - Type: schema.TypeInt, - Description: "Lower limit for the number of pods that can be set by the autoscaler, defaults to `1`.", - Optional: true, - Default: 1, - }, - "scale_target_ref": { - Type: schema.TypeList, - Description: "Reference to scaled resource. e.g. 
Replication Controller", - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_version": { - Type: schema.TypeString, - Description: "API version of the referent", - Optional: true, - }, - "kind": { - Type: schema.TypeString, - Description: "Kind of the referent. e.g. `ReplicationController`. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - Required: true, - }, - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Required: true, - }, - }, - }, - }, - "target_cpu_utilization_percentage": { - Type: schema.TypeInt, - Description: "Target average CPU utilization (represented as a percentage of requested CPU) over all the pods. If not specified the default autoscaling policy will be used.", - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func resourceKubernetesHorizontalPodAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - svc := api.HorizontalPodAutoscaler{ - ObjectMeta: metadata, - Spec: expandHorizontalPodAutoscalerSpec(d.Get("spec").([]interface{})), - } - log.Printf("[INFO] Creating new horizontal pod autoscaler: %#v", svc) - out, err := conn.AutoscalingV1().HorizontalPodAutoscalers(metadata.Namespace).Create(&svc) - if err != nil { - return err - } - - log.Printf("[INFO] Submitted new horizontal pod autoscaler: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesHorizontalPodAutoscalerRead(d, meta) -} - -func resourceKubernetesHorizontalPodAutoscalerRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading horizontal pod autoscaler %s", name) - svc, err := 
conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received horizontal pod autoscaler: %#v", svc) - err = d.Set("metadata", flattenMetadata(svc.ObjectMeta)) - if err != nil { - return err - } - - flattened := flattenHorizontalPodAutoscalerSpec(svc.Spec) - log.Printf("[DEBUG] Flattened horizontal pod autoscaler spec: %#v", flattened) - err = d.Set("spec", flattened) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesHorizontalPodAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("spec") { - diffOps := patchHorizontalPodAutoscalerSpec("spec.0.", "/spec", d) - ops = append(ops, diffOps...) - } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - log.Printf("[INFO] Updating horizontal pod autoscaler %q: %v", name, string(data)) - out, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update horizontal pod autoscaler: %s", err) - } - log.Printf("[INFO] Submitted updated horizontal pod autoscaler: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesHorizontalPodAutoscalerRead(d, meta) -} - -func resourceKubernetesHorizontalPodAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting horizontal pod autoscaler: %#v", name) - err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Horizontal Pod Autoscaler %s deleted", name) - - 
d.SetId("") - return nil -} - -func resourceKubernetesHorizontalPodAutoscalerExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking horizontal pod autoscaler %s", name) - _, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler_test.go b/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler_test.go deleted file mode 100644 index 13ab68bfe..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_horizontal_pod_autoscaler_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/apis/autoscaling/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesHorizontalPodAutoscaler_basic(t *testing.T) { - var conf api.HorizontalPodAutoscaler - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_horizontal_pod_autoscaler.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesHorizontalPodAutoscalerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesHorizontalPodAutoscalerConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesHorizontalPodAutoscalerExists("kubernetes_horizontal_pod_autoscaler.test", 
&conf), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.%", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.TestAnnotationOne", "one"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one"}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelThree", "three"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelFour", "four"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three", "TestLabelFour": "four"}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.max_replicas", "10"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.min_replicas", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", 
"spec.0.scale_target_ref.0.kind", "ReplicationController"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.name", "TerraformAccTest"), - ), - }, - { - Config: testAccKubernetesHorizontalPodAutoscalerConfig_metaModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesHorizontalPodAutoscalerExists("kubernetes_horizontal_pod_autoscaler.test", &conf), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.self_link"), - 
resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.max_replicas", "10"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.min_replicas", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.kind", "ReplicationController"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.name", "TerraformAccTest"), - ), - }, - { - Config: testAccKubernetesHorizontalPodAutoscalerConfig_specModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesHorizontalPodAutoscalerExists("kubernetes_horizontal_pod_autoscaler.test", &conf), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", 
"spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.max_replicas", "8"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.min_replicas", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.kind", "ReplicationController"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.name", "TerraformAccTestModified"), - ), - }, - }, - }) -} - -func TestAccKubernetesHorizontalPodAutoscaler_generatedName(t *testing.T) { - var conf api.HorizontalPodAutoscaler - prefix := "tf-acc-test-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_horizontal_pod_autoscaler.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesHorizontalPodAutoscalerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesHorizontalPodAutoscalerConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesHorizontalPodAutoscalerExists("kubernetes_horizontal_pod_autoscaler.test", &conf), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.generate_name", prefix), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.resource_version"), - 
resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_horizontal_pod_autoscaler.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.max_replicas", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.min_replicas", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.#", "1"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.kind", "ReplicationController"), - resource.TestCheckResourceAttr("kubernetes_horizontal_pod_autoscaler.test", "spec.0.scale_target_ref.0.name", "TerraformAccTestGeneratedName"), - ), - }, - }, - }) -} - -func TestAccKubernetesHorizontalPodAutoscaler_importBasic(t *testing.T) { - resourceName := "kubernetes_horizontal_pod_autoscaler.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesHorizontalPodAutoscalerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesHorizontalPodAutoscalerConfig_basic(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckKubernetesHorizontalPodAutoscalerDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_horizontal_pod_autoscaler" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Namespace == namespace && 
resp.Name == name { - return fmt.Errorf("Horizontal Pod Autoscaler still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesHorizontalPodAutoscalerExists(n string, obj *api.HorizontalPodAutoscaler) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesHorizontalPodAutoscalerConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_horizontal_pod_autoscaler" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - TestLabelFour = "four" - } - name = "%s" - } - spec { - max_replicas = 10 - scale_target_ref { - kind = "ReplicationController" - name = "TerraformAccTest" - } - } -} -`, name) -} - -func testAccKubernetesHorizontalPodAutoscalerConfig_metaModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_horizontal_pod_autoscaler" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - max_replicas = 10 - scale_target_ref { - kind = "ReplicationController" - name = "TerraformAccTest" - } - } -} -`, name) -} - -func testAccKubernetesHorizontalPodAutoscalerConfig_specModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_horizontal_pod_autoscaler" "test" { - metadata { - name = "%s" - } - spec { - max_replicas = 8 - scale_target_ref { - kind = "ReplicationController" - name = "TerraformAccTestModified" - } - } -} -`, name) -} - -func 
testAccKubernetesHorizontalPodAutoscalerConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_horizontal_pod_autoscaler" "test" { - metadata { - generate_name = "%s" - } - spec { - max_replicas = 1 - scale_target_ref { - kind = "ReplicationController" - name = "TerraformAccTestGeneratedName" - } - } -} -`, prefix) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_limit_range.go b/builtin/providers/kubernetes/resource_kubernetes_limit_range.go deleted file mode 100644 index 2b1f5f6e9..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_limit_range.go +++ /dev/null @@ -1,189 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesLimitRange() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesLimitRangeCreate, - Read: resourceKubernetesLimitRangeRead, - Exists: resourceKubernetesLimitRangeExists, - Update: resourceKubernetesLimitRangeUpdate, - Delete: resourceKubernetesLimitRangeDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("limit range", true), - "spec": { - Type: schema.TypeList, - Description: "Spec defines the limits enforced. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeList, - Description: "Limits is the list of objects that are enforced.", - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "default": { - Type: schema.TypeMap, - Description: "Default resource requirement limit value by resource name if resource limit is omitted.", - Optional: true, - }, - "default_request": { - Type: schema.TypeMap, - Description: "The default resource requirement request value by resource name if resource request is omitted.", - Optional: true, - Computed: true, - }, - "max": { - Type: schema.TypeMap, - Description: "Max usage constraints on this kind by resource name.", - Optional: true, - }, - "max_limit_request_ratio": { - Type: schema.TypeMap, - Description: "The named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", - Optional: true, - }, - "min": { - Type: schema.TypeMap, - Description: "Min usage constraints on this kind by resource name.", - Optional: true, - }, - "type": { - Type: schema.TypeString, - Description: "Type of resource that this limit applies to.", - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func resourceKubernetesLimitRangeCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - spec, err := expandLimitRangeSpec(d.Get("spec").([]interface{}), d.IsNewResource()) - if err != nil { - return err - } - limitRange := api.LimitRange{ - ObjectMeta: metadata, - Spec: spec, - } - log.Printf("[INFO] Creating new limit range: %#v", limitRange) - out, err := 
conn.CoreV1().LimitRanges(metadata.Namespace).Create(&limitRange) - if err != nil { - return fmt.Errorf("Failed to create limit range: %s", err) - } - log.Printf("[INFO] Submitted new limit range: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesLimitRangeRead(d, meta) -} - -func resourceKubernetesLimitRangeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading limit range %s", name) - limitRange, err := conn.CoreV1().LimitRanges(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received limit range: %#v", limitRange) - - err = d.Set("metadata", flattenMetadata(limitRange.ObjectMeta)) - if err != nil { - return err - } - err = d.Set("spec", flattenLimitRangeSpec(limitRange.Spec)) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesLimitRangeUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("spec") { - spec, err := expandLimitRangeSpec(d.Get("spec").([]interface{}), d.IsNewResource()) - if err != nil { - return err - } - ops = append(ops, &ReplaceOperation{ - Path: "/spec", - Value: spec, - }) - } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - log.Printf("[INFO] Updating limit range %q: %v", name, string(data)) - out, err := conn.CoreV1().LimitRanges(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update limit range: %s", err) - } - log.Printf("[INFO] Submitted updated limit range: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesLimitRangeRead(d, meta) -} - -func resourceKubernetesLimitRangeDelete(d 
*schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting limit range: %#v", name) - err := conn.CoreV1().LimitRanges(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Limit range %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesLimitRangeExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking limit range %s", name) - _, err := conn.CoreV1().LimitRanges(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_limit_range_test.go b/builtin/providers/kubernetes/resource_kubernetes_limit_range_test.go deleted file mode 100644 index c60330ed7..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_limit_range_test.go +++ /dev/null @@ -1,476 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesLimitRange_basic(t *testing.T) { - var conf api.LimitRange - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_limit_range.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesLimitRangeDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccKubernetesLimitRangeConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.TestAnnotationOne", "one"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one"}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelThree", "three"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelFour", "four"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three", "TestLabelFour": "four"}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.cpu", "200m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.memory", "512M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.%", 
"2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.cpu", "100m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.memory", "256M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Container"), - ), - }, - { - Config: testAccKubernetesLimitRangeConfig_metaModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", 
"metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.cpu", "200m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.memory", "512M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.cpu", "100m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.memory", "256M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Container"), - ), - }, - { - Config: testAccKubernetesLimitRangeConfig_specModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", 
"spec.0.limit.0.default.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.cpu", "200m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.memory", "1024M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.cpu", "100m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default_request.memory", "256M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.max.%", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.max.cpu", "500m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.cpu", "10m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.memory", "10M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Container"), - ), - }, - }, - }) -} - -func TestAccKubernetesLimitRange_generatedName(t *testing.T) { - var conf api.LimitRange - prefix := "tf-acc-test-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_limit_range.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesLimitRangeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesLimitRangeConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", 
"metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.generate_name", prefix), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Pod"), - ), - }, - }, - }) -} - -func TestAccKubernetesLimitRange_typeChange(t *testing.T) { - var conf api.LimitRange - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_limit_range.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesLimitRangeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesLimitRangeConfig_typeChange(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - 
resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.cpu", "200m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.default.memory", "1024M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Container"), - ), - }, - { - Config: testAccKubernetesLimitRangeConfig_typeChangeModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.cpu", "200m"), - 
resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.min.memory", "1024M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Pod"), - ), - }, - }, - }) -} - -func TestAccKubernetesLimitRange_multipleLimits(t *testing.T) { - var conf api.LimitRange - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_limit_range.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesLimitRangeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesLimitRangeConfig_multipleLimits(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesLimitRangeExists("kubernetes_limit_range.test", &conf), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_limit_range.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.#", "3"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.max.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.max.cpu", "200m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.max.memory", "1024M"), - 
resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.0.type", "Pod"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.1.min.%", "1"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.1.min.storage", "24M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.1.type", "PersistentVolumeClaim"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.2.default.%", "2"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.2.default.cpu", "50m"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.2.default.memory", "24M"), - resource.TestCheckResourceAttr("kubernetes_limit_range.test", "spec.0.limit.2.type", "Container"), - ), - }, - }, - }) -} - -func TestAccKubernetesLimitRange_importBasic(t *testing.T) { - resourceName := "kubernetes_limit_range.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesLimitRangeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesLimitRangeConfig_basic(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckKubernetesLimitRangeDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_limit_range" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().LimitRanges(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Namespace == namespace && resp.Name == name { - return fmt.Errorf("Limit Range still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesLimitRangeExists(n string, obj *api.LimitRange) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().LimitRanges(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesLimitRangeConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - TestLabelFour = "four" - } - name = "%s" - } - spec { - limit { - type = "Container" - - default { - cpu = "200m" - memory = "512M" - } - - default_request { - cpu = "100m" - memory = "256M" - } - } - } -} -`, name) -} - -func testAccKubernetesLimitRangeConfig_metaModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - limit { - type = "Container" - - default { - cpu = "200m" - memory = "512M" - } - - default_request { - cpu = "100m" - memory = "256M" - } - } - } -} -`, name) -} - -func testAccKubernetesLimitRangeConfig_specModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - name = "%s" - } - spec { - limit { - type = "Container" - - default { - cpu = "200m" - memory = "1024M" - } - - max { - cpu = "500m" - } - - min { - cpu = "10m" - memory = "10M" - } - } - } -} -`, name) -} - -func testAccKubernetesLimitRangeConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - generate_name = "%s" - } - spec { - limit { - type = "Pod" - 
} - } -} -`, prefix) -} - -func testAccKubernetesLimitRangeConfig_typeChange(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - name = "%s" - } - spec { - limit { - type = "Container" - default { - cpu = "200m" - memory = "1024M" - } - } - } -} -`, name) -} - -func testAccKubernetesLimitRangeConfig_typeChangeModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - name = "%s" - } - spec { - limit { - type = "Pod" - min { - cpu = "200m" - memory = "1024M" - } - } - } -} -`, name) -} - -func testAccKubernetesLimitRangeConfig_multipleLimits(name string) string { - return fmt.Sprintf(` -resource "kubernetes_limit_range" "test" { - metadata { - name = "%s" - } - spec { - limit { - type = "Pod" - max { - cpu = "200m" - memory = "1024M" - } - } - limit { - type = "PersistentVolumeClaim" - min { - storage = "24M" - } - } - limit { - type = "Container" - default { - cpu = "50m" - memory = "24M" - } - } - } -} -`, name) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_namespace.go b/builtin/providers/kubernetes/resource_kubernetes_namespace.go deleted file mode 100644 index a2e29843e..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_namespace.go +++ /dev/null @@ -1,144 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesNamespace() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesNamespaceCreate, - Read: resourceKubernetesNamespaceRead, - Exists: resourceKubernetesNamespaceExists, - Update: resourceKubernetesNamespaceUpdate, - Delete: 
resourceKubernetesNamespaceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": metadataSchema("namespace", true), - }, - } -} - -func resourceKubernetesNamespaceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - namespace := api.Namespace{ - ObjectMeta: metadata, - } - log.Printf("[INFO] Creating new namespace: %#v", namespace) - out, err := conn.CoreV1().Namespaces().Create(&namespace) - if err != nil { - return err - } - log.Printf("[INFO] Submitted new namespace: %#v", out) - d.SetId(out.Name) - - return resourceKubernetesNamespaceRead(d, meta) -} - -func resourceKubernetesNamespaceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Reading namespace %s", name) - namespace, err := conn.CoreV1().Namespaces().Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received namespace: %#v", namespace) - err = d.Set("metadata", flattenMetadata(namespace.ObjectMeta)) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesNamespaceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - - log.Printf("[INFO] Updating namespace: %s", ops) - out, err := conn.CoreV1().Namespaces().Patch(d.Id(), pkgApi.JSONPatchType, data) - if err != nil { - return err - } - log.Printf("[INFO] Submitted updated namespace: %#v", out) - d.SetId(out.Name) - - return resourceKubernetesNamespaceRead(d, meta) -} - -func resourceKubernetesNamespaceDelete(d *schema.ResourceData, meta 
interface{}) error { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Deleting namespace: %#v", name) - err := conn.CoreV1().Namespaces().Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Target: []string{}, - Pending: []string{"Terminating"}, - Timeout: 5 * time.Minute, - Refresh: func() (interface{}, string, error) { - out, err := conn.CoreV1().Namespaces().Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return nil, "", nil - } - log.Printf("[ERROR] Received error: %#v", err) - return out, "Error", err - } - - statusPhase := fmt.Sprintf("%v", out.Status.Phase) - log.Printf("[DEBUG] Namespace %s status received: %#v", out.Name, statusPhase) - return out, statusPhase, nil - }, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - log.Printf("[INFO] Namespace %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesNamespaceExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Checking namespace %s", name) - _, err := conn.CoreV1().Namespaces().Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - log.Printf("[INFO] Namespace %s exists", name) - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_namespace_test.go b/builtin/providers/kubernetes/resource_kubernetes_namespace_test.go deleted file mode 100644 index 8092b4864..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_namespace_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package kubernetes - -import ( - "fmt" - "reflect" - "regexp" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesNamespace_basic(t *testing.T) { - var conf api.Namespace - nsName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_namespace.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesNamespaceConfig_basic(nsName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesNamespaceExists("kubernetes_namespace.test", &conf), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.name", 
nsName), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.uid"), - ), - }, - { - Config: testAccKubernetesNamespaceConfig_smallerLists(nsName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesNamespaceExists("kubernetes_namespace.test", &conf), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.Different", "1234"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "Different": "1234"}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.%", "2"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.name", nsName), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.uid"), - ), - }, - { - Config: testAccKubernetesNamespaceConfig_noLists(nsName), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesNamespaceExists("kubernetes_namespace.test", &conf), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.name", nsName), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.uid"), - ), - }, - }, - }) -} - -func TestAccKubernetesNamespace_importBasic(t *testing.T) { - resourceName := "kubernetes_namespace.test" - nsName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesNamespaceConfig_basic(nsName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesNamespace_generatedName(t *testing.T) { - var conf api.Namespace - prefix := "tf-acc-test-gen-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_namespace.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesNamespaceConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckKubernetesNamespaceExists("kubernetes_namespace.test", &conf), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_namespace.test", "metadata.0.generate_name", prefix), - resource.TestMatchResourceAttr("kubernetes_namespace.test", "metadata.0.name", regexp.MustCompile("^"+prefix)), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_namespace.test", "metadata.0.uid"), - ), - }, - }, - }) -} - -func TestAccKubernetesNamespace_importGeneratedName(t *testing.T) { - resourceName := "kubernetes_namespace.test" - prefix := "tf-acc-test-gen-import-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesNamespaceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesNamespaceConfig_generatedName(prefix), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckMetaAnnotations(om *meta_v1.ObjectMeta, expected map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(expected) == 0 && len(om.Annotations) == 0 { - return nil - } - if !reflect.DeepEqual(om.Annotations, expected) { - return fmt.Errorf("%s annotations don't match.\nExpected: %q\nGiven: %q", - om.Name, expected, om.Annotations) - } - return nil - } -} - -func testAccCheckMetaLabels(om *meta_v1.ObjectMeta, 
expected map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(expected) == 0 && len(om.Labels) == 0 { - return nil - } - if !reflect.DeepEqual(om.Labels, expected) { - return fmt.Errorf("%s labels don't match.\nExpected: %q\nGiven: %q", - om.Name, expected, om.Labels) - } - return nil - } -} - -func testAccCheckKubernetesNamespaceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_namespace" { - continue - } - - resp, err := conn.CoreV1().Namespaces().Get(rs.Primary.ID, meta_v1.GetOptions{}) - if err == nil { - if resp.Name == rs.Primary.ID { - return fmt.Errorf("Namespace still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesNamespaceExists(n string, obj *api.Namespace) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - out, err := conn.CoreV1().Namespaces().Get(rs.Primary.ID, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesNamespaceConfig_basic(nsName string) string { - return fmt.Sprintf(` -resource "kubernetes_namespace" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } -}`, nsName) -} - -func testAccKubernetesNamespaceConfig_smallerLists(nsName string) string { - return fmt.Sprintf(` -resource "kubernetes_namespace" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - Different = "1234" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - } - name = "%s" - } -}`, nsName) -} - -func testAccKubernetesNamespaceConfig_noLists(nsName string) string { 
- return fmt.Sprintf(` -resource "kubernetes_namespace" "test" { - metadata { - name = "%s" - } -}`, nsName) -} - -func testAccKubernetesNamespaceConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_namespace" "test" { - metadata { - generate_name = "%s" - } -}`, prefix) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go deleted file mode 100644 index 36c72f4ae..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume.go +++ /dev/null @@ -1,196 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesPersistentVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesPersistentVolumeCreate, - Read: resourceKubernetesPersistentVolumeRead, - Exists: resourceKubernetesPersistentVolumeExists, - Update: resourceKubernetesPersistentVolumeUpdate, - Delete: resourceKubernetesPersistentVolumeDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": metadataSchema("persistent volume", false), - "spec": { - Type: schema.TypeList, - Description: "Spec of the persistent volume owned by the cluster", - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_modes": { - Type: schema.TypeSet, - Description: "Contains all ways the volume can be mounted. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes", - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "capacity": { - Type: schema.TypeMap, - Description: "A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity", - Required: true, - Elem: schema.TypeString, - ValidateFunc: validateResourceList, - }, - "persistent_volume_reclaim_policy": { - Type: schema.TypeString, - Description: "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy", - Optional: true, - Default: "Retain", - }, - "persistent_volume_source": { - Type: schema.TypeList, - Description: "The specification of a persistent volume.", - Required: true, - MaxItems: 1, - Elem: persistentVolumeSourceSchema(), - }, - }, - }, - }, - }, - } -} - -func resourceKubernetesPersistentVolumeCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - spec, err := expandPersistentVolumeSpec(d.Get("spec").([]interface{})) - if err != nil { - return err - } - volume := api.PersistentVolume{ - ObjectMeta: metadata, - Spec: spec, - } - - log.Printf("[INFO] Creating new persistent volume: %#v", volume) - out, err := conn.CoreV1().PersistentVolumes().Create(&volume) - if err != nil { - return err - } - log.Printf("[INFO] Submitted new persistent volume: %#v", out) - - stateConf := &resource.StateChangeConf{ - Target: []string{"Available", "Bound"}, - Pending: []string{"Pending"}, - Timeout: 5 * time.Minute, - Refresh: func() (interface{}, string, error) { - out, err := conn.CoreV1().PersistentVolumes().Get(metadata.Name, 
meta_v1.GetOptions{}) - if err != nil { - log.Printf("[ERROR] Received error: %#v", err) - return out, "Error", err - } - - statusPhase := fmt.Sprintf("%v", out.Status.Phase) - log.Printf("[DEBUG] Persistent volume %s status received: %#v", out.Name, statusPhase) - return out, statusPhase, nil - }, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - log.Printf("[INFO] Persistent volume %s created", out.Name) - - d.SetId(out.Name) - - return resourceKubernetesPersistentVolumeRead(d, meta) -} - -func resourceKubernetesPersistentVolumeRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Reading persistent volume %s", name) - volume, err := conn.CoreV1().PersistentVolumes().Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received persistent volume: %#v", volume) - err = d.Set("metadata", flattenMetadata(volume.ObjectMeta)) - if err != nil { - return err - } - err = d.Set("spec", flattenPersistentVolumeSpec(volume.Spec)) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesPersistentVolumeUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("spec") { - specOps, err := patchPersistentVolumeSpec("/spec", "spec", d) - if err != nil { - return err - } - ops = append(ops, specOps...) 
- } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - - log.Printf("[INFO] Updating persistent volume %s: %s", d.Id(), ops) - out, err := conn.CoreV1().PersistentVolumes().Patch(d.Id(), pkgApi.JSONPatchType, data) - if err != nil { - return err - } - log.Printf("[INFO] Submitted updated persistent volume: %#v", out) - d.SetId(out.Name) - - return resourceKubernetesPersistentVolumeRead(d, meta) -} - -func resourceKubernetesPersistentVolumeDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Deleting persistent volume: %#v", name) - err := conn.CoreV1().PersistentVolumes().Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Persistent volume %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesPersistentVolumeExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - name := d.Id() - log.Printf("[INFO] Checking persistent volume %s", name) - _, err := conn.CoreV1().PersistentVolumes().Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go deleted file mode 100644 index 6c7168509..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim.go +++ /dev/null @@ -1,287 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/apimachinery/pkg/fields" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesPersistentVolumeClaim() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesPersistentVolumeClaimCreate, - Read: resourceKubernetesPersistentVolumeClaimRead, - Exists: resourceKubernetesPersistentVolumeClaimExists, - Update: resourceKubernetesPersistentVolumeClaimUpdate, - Delete: resourceKubernetesPersistentVolumeClaimDelete, - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("wait_until_bound", true) - return []*schema.ResourceData{d}, nil - }, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(5 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("persistent volume claim", true), - "spec": { - Type: schema.TypeList, - Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_modes": { - Type: schema.TypeSet, - Description: "A set of the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "resources": { - Type: schema.TypeList, - Description: "A list of the minimum resources the volume should have. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "limits": { - Type: schema.TypeMap, - Description: "Map describing the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/", - Optional: true, - ForceNew: true, - }, - "requests": { - Type: schema.TypeMap, - Description: "Map describing the minimum amount of compute resources required. If this is omitted for a container, it defaults to `limits` if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/", - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "selector": { - Type: schema.TypeList, - Description: "A label query over volumes to consider for binding.", - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "match_expressions": { - Type: schema.TypeList, - Description: "A list of label selector requirements. The requirements are ANDed.", - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Description: "The label key that the selector applies to.", - Optional: true, - ForceNew: true, - }, - "operator": { - Type: schema.TypeString, - Description: "A key's relationship to a set of values. Valid operators ard `In`, `NotIn`, `Exists` and `DoesNotExist`.", - Optional: true, - ForceNew: true, - }, - "values": { - Type: schema.TypeSet, - Description: "An array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. 
This array is replaced during a strategic merge patch.", - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - "match_labels": { - Type: schema.TypeMap, - Description: "A map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of `match_expressions`, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "volume_name": { - Type: schema.TypeString, - Description: "The binding reference to the PersistentVolume backing this claim.", - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - }, - }, - "wait_until_bound": { - Type: schema.TypeBool, - Description: "Whether to wait for the claim to reach `Bound` state (to find volume in which to claim the space)", - Optional: true, - Default: true, - }, - }, - } -} - -func resourceKubernetesPersistentVolumeClaimCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - spec, err := expandPersistentVolumeClaimSpec(d.Get("spec").([]interface{})) - if err != nil { - return err - } - - claim := api.PersistentVolumeClaim{ - ObjectMeta: metadata, - Spec: spec, - } - - log.Printf("[INFO] Creating new persistent volume claim: %#v", claim) - out, err := conn.CoreV1().PersistentVolumeClaims(metadata.Namespace).Create(&claim) - if err != nil { - return err - } - log.Printf("[INFO] Submitted new persistent volume claim: %#v", out) - - d.SetId(buildId(out.ObjectMeta)) - name := out.ObjectMeta.Name - - if d.Get("wait_until_bound").(bool) { - var lastEvent api.Event - stateConf := &resource.StateChangeConf{ - Target: []string{"Bound"}, - Pending: []string{"Pending"}, - Timeout: d.Timeout(schema.TimeoutCreate), - Refresh: func() (interface{}, string, error) { - out, err := 
conn.CoreV1().PersistentVolumeClaims(metadata.Namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[ERROR] Received error: %#v", err) - return out, "", err - } - - events, err := conn.CoreV1().Events(metadata.Namespace).List(meta_v1.ListOptions{ - FieldSelector: fields.Set(map[string]string{ - "involvedObject.name": metadata.Name, - "involvedObject.namespace": metadata.Namespace, - "involvedObject.kind": "PersistentVolumeClaim", - }).String(), - }) - if err != nil { - return out, "", err - } - if len(events.Items) > 0 { - lastEvent = events.Items[0] - } - - statusPhase := fmt.Sprintf("%v", out.Status.Phase) - log.Printf("[DEBUG] Persistent volume claim %s status received: %#v", out.Name, statusPhase) - return out, statusPhase, nil - }, - } - _, err = stateConf.WaitForState() - if err != nil { - reason := "" - if lastEvent.Reason != "" { - reason = fmt.Sprintf(". Reason: %s: %s", lastEvent.Reason, lastEvent.Message) - } - return fmt.Errorf("%s%s", err, reason) - } - } - log.Printf("[INFO] Persistent volume claim %s created", out.Name) - - return resourceKubernetesPersistentVolumeClaimRead(d, meta) -} - -func resourceKubernetesPersistentVolumeClaimRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading persistent volume claim %s", name) - claim, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received persistent volume claim: %#v", claim) - err = d.Set("metadata", flattenMetadata(claim.ObjectMeta)) - if err != nil { - return err - } - err = d.Set("spec", flattenPersistentVolumeClaimSpec(claim.Spec)) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesPersistentVolumeClaimUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - 
namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - // The whole spec is ForceNew = nothing to update there - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - - log.Printf("[INFO] Updating persistent volume claim: %s", ops) - out, err := conn.CoreV1().PersistentVolumeClaims(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return err - } - log.Printf("[INFO] Submitted updated persistent volume claim: %#v", out) - - return resourceKubernetesPersistentVolumeClaimRead(d, meta) -} - -func resourceKubernetesPersistentVolumeClaimDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting persistent volume claim: %#v", name) - err := conn.CoreV1().PersistentVolumeClaims(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Persistent volume claim %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesPersistentVolumeClaimExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking persistent volume claim %s", name) - _, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go deleted file mode 100644 index 05893ef66..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_claim_test.go +++ /dev/null @@ -1,717 +0,0 @@ 
-package kubernetes - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesPersistentVolumeClaim_basic(t *testing.T) { - var conf api.PersistentVolumeClaim - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume_claim.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeClaimConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationOne", "one"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{ - "TestAnnotationOne": "one", - "volume.beta.kubernetes.io/storage-provisioner": "kubernetes.io/gce-pd", - }), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelThree", "three"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelFour", "four"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": 
"one", "TestLabelThree": "three", "TestLabelFour": "four"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - ), - }, - { - Config: testAccKubernetesPersistentVolumeClaimConfig_metaModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{ - "TestAnnotationOne": "one", - "TestAnnotationTwo": "two", - "volume.beta.kubernetes.io/storage-provisioner": "kubernetes.io/gce-pd", - }), - 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - ), - }, - }, - }) -} - -func TestAccKubernetesPersistentVolumeClaim_importBasic(t *testing.T) { - resourceName := "kubernetes_persistent_volume_claim.test" - volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - 
diskName := fmt.Sprintf("tf-acc-test-disk-%s", acctest.RandString(10)) - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeClaimConfig_import(volumeName, claimName, diskName, zone), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesPersistentVolumeClaim_volumeMatch(t *testing.T) { - var pvcConf api.PersistentVolumeClaim - var pvConf api.PersistentVolume - - claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - volumeNameModified := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - diskName := fmt.Sprintf("tf-acc-test-disk-%s", acctest.RandString(10)) - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume_claim.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeClaimConfig_volumeMatch(volumeName, claimName, diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), - 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), - testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), - ), - }, - { - Config: testAccKubernetesPersistentVolumeClaimConfig_volumeMatch_modified(volumeNameModified, claimName, diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), - 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeNameModified), - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test2", &pvConf), - testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), - ), - }, - }, - }) -} - -// Label matching isn't supported on GCE -// TODO: Re-enable when we build test env for K8S that supports it - -// func TestAccKubernetesPersistentVolumeClaim_labelsMatch(t *testing.T) { -// var conf api.PersistentVolumeClaim -// claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) -// volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) 
- -// resource.Test(t, resource.TestCase{ -// PreCheck: func() { testAccPreCheck(t) }, -// IDRefreshName: "kubernetes_persistent_volume_claim.test", -// Providers: testAccProviders, -// CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, -// Steps: []resource.TestStep{ -// { -// Config: testAccKubernetesPersistentVolumeClaimConfig_labelsMatch(volumeName, claimName), -// Check: resource.ComposeAggregateTestCheckFunc( -// testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), -// testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes", "pv.kubernetes.io/bound-by-controller": "yes"}), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), -// testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), -// 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_labels.%", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_labels.TfAccTestEnvironment", "blablah"), -// ), -// }, -// }, -// }) -// } - -// func TestAccKubernetesPersistentVolumeClaim_labelsMatchExpression(t *testing.T) { -// var conf api.PersistentVolumeClaim -// claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) -// volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - -// resource.Test(t, resource.TestCase{ -// PreCheck: func() { testAccPreCheck(t) }, -// IDRefreshName: "kubernetes_persistent_volume_claim.test", -// Providers: testAccProviders, -// CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, -// Steps: []resource.TestStep{ -// { -// Config: testAccKubernetesPersistentVolumeClaimConfig_labelsMatchExpression(volumeName, claimName), -// Check: resource.ComposeAggregateTestCheckFunc( -// testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &conf), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), -// testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes", "pv.kubernetes.io/bound-by-controller": "yes"}), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), -// testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", 
"metadata.0.generation"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), -// resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.#", "1"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.key", "TfAccTestEnvironment"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.operator", "In"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.#", "3"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.1187371253", "three"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.selector.0.match_expressions.0.values.2053932785", "one"), -// resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", 
"spec.0.selector.0.match_expressions.0.values.298486374", "two"), -// ), -// }, -// }, -// }) -// } - -func TestAccKubernetesPersistentVolumeClaim_volumeUpdate(t *testing.T) { - var pvcConf api.PersistentVolumeClaim - var pvConf api.PersistentVolume - - claimName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - volumeName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - diskName := fmt.Sprintf("tf-acc-test-disk-%s", acctest.RandString(10)) - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume_claim.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeClaimDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, "5Gi", diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - 
resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), - testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), - testAccCheckClaimRef(&pvConf, &ObjectRefStatic{Namespace: "default", Name: claimName}), - ), - }, - { - Config: testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, "10Gi", diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeClaimExists("kubernetes_persistent_volume_claim.test", &pvcConf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&pvcConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bind-completed": "yes"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&pvcConf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "metadata.0.name", claimName), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.resource_version"), - 
resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume_claim.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.resources.0.requests.storage", "5Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume_claim.test", "spec.0.volume_name", volumeName), - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &pvConf), - testAccCheckMetaAnnotations(&pvConf.ObjectMeta, map[string]string{"pv.kubernetes.io/bound-by-controller": "yes"}), - testAccCheckClaimRef(&pvConf, &ObjectRefStatic{Namespace: "default", Name: claimName}), - ), - }, - }, - }) -} - -func testAccCheckKubernetesPersistentVolumeClaimDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_persistent_volume_claim" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Namespace == namespace && resp.Name == name { - return fmt.Errorf("Persistent Volume still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesPersistentVolumeClaimExists(n string, obj *api.PersistentVolumeClaim) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] 
- if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccCheckClaimRef(pv *api.PersistentVolume, expected *ObjectRefStatic) resource.TestCheckFunc { - return func(s *terraform.State) error { - or := pv.Spec.ClaimRef - if or == nil { - return fmt.Errorf("Expected ClaimRef to be not-nil, specifically %#v", *expected) - } - if or.Namespace != expected.Namespace { - return fmt.Errorf("Expected object reference %q, given: %q", expected.Namespace, or.Namespace) - } - if or.Name != expected.Name { - return fmt.Errorf("Expected object reference %q, given: %q", expected.Name, or.Name) - } - return nil - } -} - -type ObjectRefStatic struct { - Namespace string - Name string -} - -func testAccKubernetesPersistentVolumeClaimConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - TestLabelFour = "four" - } - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } - } - selector { - match_expressions { - key = "environment" - operator = "In" - values = ["non-exists-12345"] - } - } - } - wait_until_bound = false -} -`, name) -} - -func testAccKubernetesPersistentVolumeClaimConfig_metaModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } 
- } - selector { - match_expressions { - key = "environment" - operator = "In" - values = ["non-exists-12345"] - } - } - } - wait_until_bound = false -} -`, name) -} - -func testAccKubernetesPersistentVolumeClaimConfig_import(volumeName, claimName, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "10Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = "${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} - -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } - } - volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" - } -} -`, volumeName, diskName, zone, claimName) -} - -func testAccKubernetesPersistentVolumeClaimConfig_volumeMatch(volumeName, claimName, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "10Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = "${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} - -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } - } - volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" - } -} -`, volumeName, diskName, zone, claimName) -} - -func testAccKubernetesPersistentVolumeClaimConfig_volumeMatch_modified(volumeName, claimName, 
diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test2" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "10Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = "${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} - -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } - } - volume_name = "${kubernetes_persistent_volume.test2.metadata.0.name}" - } -} -`, volumeName, diskName, zone, claimName) -} - -// func testAccKubernetesPersistentVolumeClaimConfig_labelsMatch(volumeName, claimName string) string { -// return fmt.Sprintf(` -// resource "kubernetes_persistent_volume" "test" { -// metadata { -// labels { -// TfAccTestEnvironment = "blablah" -// } -// name = "%s" -// } -// spec { -// capacity { -// storage = "10Gi" -// } -// access_modes = ["ReadWriteMany"] -// persistent_volume_source { -// gce_persistent_disk { -// pd_name = "test123" -// } -// } -// } -// } - -// resource "kubernetes_persistent_volume_claim" "test" { -// metadata { -// name = "%s" -// } -// spec { -// access_modes = ["ReadWriteMany"] -// resources { -// requests { -// storage = "5Gi" -// } -// } -// selector { -// match_labels { -// TfAccTestEnvironment = "blablah" -// } -// } -// } -// } -// `, volumeName, claimName) -// } - -// func testAccKubernetesPersistentVolumeClaimConfig_labelsMatchExpression(volumeName, claimName string) string { -// return fmt.Sprintf(` -// resource "kubernetes_persistent_volume" "test" { -// metadata { -// labels { -// TfAccTestEnvironment = "two" -// } -// name = "%s" -// } -// spec { -// capacity { -// storage = "10Gi" -// } -// access_modes = ["ReadWriteMany"] -// 
persistent_volume_source { -// gce_persistent_disk { -// pd_name = "test123" -// } -// } -// } -// } - -// resource "kubernetes_persistent_volume_claim" "test" { -// metadata { -// name = "%s" -// } -// spec { -// access_modes = ["ReadWriteMany"] -// resources { -// requests { -// storage = "5Gi" -// } -// } -// selector { -// match_expressions { -// key = "TfAccTestEnvironment" -// operator = "In" -// values = ["one", "three", "two"] -// } -// } -// } -// } -// `, volumeName, claimName) -// } - -func testAccKubernetesPersistentVolumeClaimConfig_volumeUpdate(volumeName, claimName, storage, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "%s" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = "${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} - -resource "kubernetes_persistent_volume_claim" "test" { - metadata { - name = "%s" - } - spec { - access_modes = ["ReadWriteMany"] - resources { - requests { - storage = "5Gi" - } - } - volume_name = "${kubernetes_persistent_volume.test.metadata.0.name}" - } -} -`, volumeName, storage, diskName, zone, claimName) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go b/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go deleted file mode 100644 index ec5e0237d..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_persistent_volume_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package kubernetes - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api 
"k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesPersistentVolume_basic(t *testing.T) { - var conf api.PersistentVolume - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-%s", randString) - diskName := fmt.Sprintf("tf-acc-test-disk-%s", randString) - - region := os.Getenv("GOOGLE_REGION") - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeConfig_basic(name, diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{ - "TestLabelOne": "one", - "TestLabelTwo": "two", - "TestLabelThree": 
"three", - "failure-domain.beta.kubernetes.io/region": region, - "failure-domain.beta.kubernetes.io/zone": zone, - }), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.pd_name", diskName), - ), - }, - { - Config: testAccKubernetesPersistentVolumeConfig_modified(name, diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", 
"TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{ - "TestLabelOne": "one", - "TestLabelTwo": "two", - "TestLabelThree": "three", - "failure-domain.beta.kubernetes.io/region": region, - "failure-domain.beta.kubernetes.io/zone": zone, - }), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "42Mi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "2"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1245328686", "ReadWriteOnce"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", 
"spec.0.persistent_volume_source.0.gce_persistent_disk.0.fs_type", "ntfs"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.pd_name", diskName), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.read_only", "true"), - ), - }, - }, - }) -} - -func TestAccKubernetesPersistentVolume_importBasic(t *testing.T) { - resourceName := "kubernetes_persistent_volume.test" - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-import-%s", randString) - diskName := fmt.Sprintf("tf-acc-test-disk-%s", randString) - - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeConfig_basic(name, diskName, zone), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesPersistentVolume_volumeSource(t *testing.T) { - var conf api.PersistentVolume - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-%s", randString) - diskName := fmt.Sprintf("tf-acc-test-disk-%s", randString) - - region := os.Getenv("GOOGLE_REGION") - zone := os.Getenv("GOOGLE_ZONE") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPersistentVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeConfig_volumeSource(name, diskName, zone), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", 
&conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{ - "failure-domain.beta.kubernetes.io/region": region, - "failure-domain.beta.kubernetes.io/zone": zone, - }), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.gce_persistent_disk.0.pd_name"), - ), - }, - { - Config: testAccKubernetesPersistentVolumeConfig_volumeSource_modified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), - 
testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{ - "failure-domain.beta.kubernetes.io/region": region, - "failure-domain.beta.kubernetes.io/zone": zone, - }), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "123Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.host_path.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.host_path.0.path", "/custom/testing/path"), - ), - }, - }, - }) -} - -func TestAccKubernetesPersistentVolume_cephFsSecretRef(t *testing.T) { - var conf api.PersistentVolume - randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - name := fmt.Sprintf("tf-acc-test-%s", randString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_persistent_volume.test", - Providers: testAccProviders, - CheckDestroy: 
testAccCheckKubernetesPersistentVolumeDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPersistentVolumeConfig_cephFsSecretRef(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPersistentVolumeExists("kubernetes_persistent_volume.test", &conf), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_persistent_volume.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.%", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.capacity.storage", "2Gi"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.access_modes.1254135962", "ReadWriteMany"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.#", "2"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.2848821021", "10.16.154.78:6789"), - 
resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.monitors.4263435410", "10.16.154.82:6789"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.secret_ref.#", "1"), - resource.TestCheckResourceAttr("kubernetes_persistent_volume.test", "spec.0.persistent_volume_source.0.ceph_fs.0.secret_ref.0.name", "ceph-secret"), - ), - }, - }, - }) -} - -func testAccCheckKubernetesPersistentVolumeDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_persistent_volume" { - continue - } - name := rs.Primary.ID - resp, err := conn.CoreV1().PersistentVolumes().Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Name == rs.Primary.ID { - return fmt.Errorf("Persistent Volume still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesPersistentVolumeExists(n string, obj *api.PersistentVolume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - name := rs.Primary.ID - out, err := conn.CoreV1().PersistentVolumes().Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesPersistentVolumeConfig_basic(name, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - capacity { - storage = "123Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = 
"${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} -`, name, diskName, zone) -} - -func testAccKubernetesPersistentVolumeConfig_modified(name, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - capacity { - storage = "42Mi" - } - access_modes = ["ReadWriteMany", "ReadWriteOnce"] - persistent_volume_source { - gce_persistent_disk { - fs_type = "ntfs" - pd_name = "${google_compute_disk.test.name}" - read_only = true - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 10 -} -`, name, diskName, zone) -} - -func testAccKubernetesPersistentVolumeConfig_volumeSource(name, diskName, zone string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "123Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - gce_persistent_disk { - pd_name = "${google_compute_disk.test.name}" - } - } - } -} - -resource "google_compute_disk" "test" { - name = "%s" - type = "pd-ssd" - zone = "%s" - image = "debian-8-jessie-v20170523" - size = 12 -} -`, name, diskName, zone) -} - -func testAccKubernetesPersistentVolumeConfig_volumeSource_modified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "123Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - host_path { - path = "/custom/testing/path" - } - } - } -}`, name) -} - -func 
testAccKubernetesPersistentVolumeConfig_cephFsSecretRef(name string) string { - return fmt.Sprintf(` -resource "kubernetes_persistent_volume" "test" { - metadata { - name = "%s" - } - spec { - capacity { - storage = "2Gi" - } - access_modes = ["ReadWriteMany"] - persistent_volume_source { - ceph_fs { - monitors = ["10.16.154.78:6789", "10.16.154.82:6789"] - secret_ref { - name = "ceph-secret" - } - } - } - } -}`, name) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_pod.go b/builtin/providers/kubernetes/resource_kubernetes_pod.go deleted file mode 100644 index 68bd6e7b6..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_pod.go +++ /dev/null @@ -1,195 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesPod() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesPodCreate, - Read: resourceKubernetesPodRead, - Update: resourceKubernetesPodUpdate, - Delete: resourceKubernetesPodDelete, - Exists: resourceKubernetesPodExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("pod", true), - "spec": { - Type: schema.TypeList, - Description: "Spec of the pod owned by the cluster", - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: podSpecFields(), - }, - }, - }, - } -} -func resourceKubernetesPodCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - spec, err := expandPodSpec(d.Get("spec").([]interface{})) - if err 
!= nil { - return err - } - - spec.AutomountServiceAccountToken = ptrToBool(false) - - pod := api.Pod{ - ObjectMeta: metadata, - Spec: spec, - } - - log.Printf("[INFO] Creating new pod: %#v", pod) - out, err := conn.CoreV1().Pods(metadata.Namespace).Create(&pod) - - if err != nil { - return err - } - log.Printf("[INFO] Submitted new pod: %#v", out) - - d.SetId(buildId(out.ObjectMeta)) - - stateConf := &resource.StateChangeConf{ - Target: []string{"Running"}, - Pending: []string{"Pending"}, - Timeout: 5 * time.Minute, - Refresh: func() (interface{}, string, error) { - out, err := conn.CoreV1().Pods(metadata.Namespace).Get(metadata.Name, metav1.GetOptions{}) - if err != nil { - log.Printf("[ERROR] Received error: %#v", err) - return out, "Error", err - } - - statusPhase := fmt.Sprintf("%v", out.Status.Phase) - log.Printf("[DEBUG] Pods %s status received: %#v", out.Name, statusPhase) - return out, statusPhase, nil - }, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - log.Printf("[INFO] Pod %s created", out.Name) - - return resourceKubernetesPodRead(d, meta) -} - -func resourceKubernetesPodUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - namespace, name := idParts(d.Id()) - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("spec") { - specOps, err := patchPodSpec("/spec", "spec.0.", d) - if err != nil { - return err - } - ops = append(ops, specOps...) 
- } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - - log.Printf("[INFO] Updating pod %s: %s", d.Id(), ops) - - out, err := conn.CoreV1().Pods(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return err - } - log.Printf("[INFO] Submitted updated pod: %#v", out) - - d.SetId(buildId(out.ObjectMeta)) - return resourceKubernetesPodRead(d, meta) -} - -func resourceKubernetesPodRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - namespace, name := idParts(d.Id()) - - log.Printf("[INFO] Reading pod %s", name) - pod, err := conn.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received pod: %#v", pod) - - err = d.Set("metadata", flattenMetadata(pod.ObjectMeta)) - if err != nil { - return err - } - - podSpec, err := flattenPodSpec(pod.Spec) - if err != nil { - return err - } - - err = d.Set("spec", podSpec) - if err != nil { - return err - } - return nil - -} - -func resourceKubernetesPodDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting pod: %#v", name) - err := conn.CoreV1().Pods(namespace).Delete(name, nil) - if err != nil { - return err - } - - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err := conn.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return nil - } - return resource.NonRetryableError(err) - } - - log.Printf("[DEBUG] Current state of pod: %#v", out.Status.Phase) - e := fmt.Errorf("Pod %s still exists (%s)", name, out.Status.Phase) - return resource.RetryableError(e) - }) - if err != nil { - return err - } - - log.Printf("[INFO] Pod %s deleted", name) - - 
d.SetId("") - return nil -} - -func resourceKubernetesPodExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking pod %s", name) - _, err := conn.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_pod_test.go b/builtin/providers/kubernetes/resource_kubernetes_pod_test.go deleted file mode 100644 index d4a048a91..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_pod_test.go +++ /dev/null @@ -1,724 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccKubernetesPod_basic(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - secretName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - configMapName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - imageName1 := "nginx:1.7.9" - imageName2 := "nginx:1.11" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigBasic(secretName, configMapName, podName, imageName1), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "metadata.0.annotations.%", "0"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "metadata.0.labels.%", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "metadata.0.labels.app", "pod_label"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "metadata.0.name", podName), - resource.TestCheckResourceAttrSet("kubernetes_pod.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_pod.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_pod.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_pod.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.env.0.value_from.0.secret_key_ref.0.name", secretName), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.env.1.value_from.0.config_map_key_ref.0.name", configMapName), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.image", imageName1), - ), - }, - { - Config: testAccKubernetesPodConfigBasic(secretName, configMapName, podName, imageName2), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.image", imageName2), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_importBasic(t *testing.T) { - resourceName := "kubernetes_pod.test" - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithSecurityContext(podName, imageName), - }, - { - ResourceName: 
resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"metadata.0.resource_version"}, - }, - }, - }) -} - -func TestAccKubernetesPod_with_pod_security_context(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithSecurityContext(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.security_context.0.run_as_non_root", "true"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.security_context.0.supplemental_groups.#", "1"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_container_liveness_probe_using_exec(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "gcr.io/google_containers/busybox" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithLivenessProbeUsingExec(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.args.#", "3"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.exec.#", "1"), - 
resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.exec.0.command.#", "2"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.exec.0.command.0", "cat"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.exec.0.command.1", "/tmp/healthy"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.failure_threshold", "3"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.initial_delay_seconds", "5"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_container_liveness_probe_using_http_get(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "gcr.io/google_containers/liveness" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithLivenessProbeUsingHTTPGet(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.args.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.0.path", "/healthz"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.0.port", "8080"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.0.http_header.#", "1"), - 
resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.0.http_header.0.name", "X-Custom-Header"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.http_get.0.http_header.0.value", "Awesome"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.initial_delay_seconds", "3"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_container_liveness_probe_using_tcp(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "gcr.io/google_containers/liveness" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithLivenessProbeUsingTCP(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.args.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.tcp_socket.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.liveness_probe.0.tcp_socket.0.port", "8080"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_container_lifecycle(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "gcr.io/google_containers/liveness" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccKubernetesPodConfigWithLifeCycle(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.post_start.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.post_start.0.exec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.post_start.0.exec.0.command.#", "2"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.post_start.0.exec.0.command.0", "ls"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.post_start.0.exec.0.command.1", "-al"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.pre_stop.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.pre_stop.0.exec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.pre_stop.0.exec.0.command.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.lifecycle.0.pre_stop.0.exec.0.command.0", "date"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_container_security_context(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithContainerSecurityContext(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), 
- resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.security_context.#", "1"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_volume_mount(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - secretName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithVolumeMounts(secretName, podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.image", imageName), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.mount_path", "/tmp/my_path"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.name", "db"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.read_only", "false"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.sub_path", ""), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_resource_requirements(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccKubernetesPodConfigWithResourceRequirements(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.image", imageName), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.resources.0.requests.0.memory", "50Mi"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.resources.0.requests.0.cpu", "250m"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.resources.0.limits.0.memory", "512Mi"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.resources.0.limits.0.cpu", "500m"), - ), - }, - }, - }) -} - -func TestAccKubernetesPod_with_empty_dir_volume(t *testing.T) { - var conf api.Pod - - podName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - imageName := "nginx:1.7.9" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesPodDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesPodConfigWithEmptyDirVolumes(podName, imageName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesPodExists("kubernetes_pod.test", &conf), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.image", imageName), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.#", "1"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.mount_path", "/cache"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.container.0.volume_mount.0.name", "cache-volume"), - resource.TestCheckResourceAttr("kubernetes_pod.test", "spec.0.volume.0.empty_dir.0.medium", "Memory"), - ), - }, - }, - }) -} - -func testAccCheckKubernetesPodDestroy(s *terraform.State) error { - 
conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_pod" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err == nil { - if resp.Namespace == namespace && resp.Name == name { - return fmt.Errorf("Pod still exists: %s: %#v", rs.Primary.ID, resp.Status.Phase) - } - } - } - - return nil -} - -func testAccCheckKubernetesPodExists(n string, obj *api.Pod) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - *obj = *out - return nil - } -} - -func testAccKubernetesPodConfigBasic(secretName, configMapName, podName, imageName string) string { - return fmt.Sprintf(` - -resource "kubernetes_secret" "test" { - metadata { - name = "%s" - } - - data { - one = "first" - } -} - -resource "kubernetes_config_map" "test" { - metadata { - name = "%s" - } - - data { - one = "ONE" - } -} - -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - - env = [{ - name = "EXPORTED_VARIBALE_FROM_SECRET" - - value_from { - secret_key_ref { - name = "${kubernetes_secret.test.metadata.0.name}" - key = "one" - } - } - }, - { - name = "EXPORTED_VARIBALE_FROM_CONFIG_MAP" - - value_from { - config_map_key_ref { - name = "${kubernetes_config_map.test.metadata.0.name}" - key = "one" - } - } - }, - ] - } - volume { - name = "db" - secret = { - secret_name = "${kubernetes_secret.test.metadata.0.name}" - } - } - } -} - `, secretName, configMapName, podName, imageName) -} - -func 
testAccKubernetesPodConfigWithSecurityContext(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - spec { - security_context { - run_as_non_root = true - run_as_user = 101 - supplemental_groups = [101] - } - container { - image = "%s" - name = "containername" - } - } -} - `, podName, imageName) -} - -func testAccKubernetesPodConfigWithLivenessProbeUsingExec(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - args = ["/bin/sh", "-c", "touch /tmp/healthy; sleep 300; rm -rf /tmp/healthy; sleep 600"] - - liveness_probe { - exec { - command = ["cat", "/tmp/healthy"] - } - - initial_delay_seconds = 5 - period_seconds = 5 - } - } - } -} - `, podName, imageName) -} - -func testAccKubernetesPodConfigWithLivenessProbeUsingHTTPGet(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - args = ["/server"] - - liveness_probe { - http_get { - path = "/healthz" - port = 8080 - - http_header { - name = "X-Custom-Header" - value = "Awesome" - } - } - initial_delay_seconds = 3 - period_seconds = 3 - } - } - } -} - `, podName, imageName) -} - -func testAccKubernetesPodConfigWithLivenessProbeUsingTCP(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - spec { - container { - image = "%s" - name = "containername" - args = ["/server"] - - liveness_probe { - tcp_socket { - port = 8080 - } - - initial_delay_seconds = 3 - period_seconds = 3 - } - } - } -} - `, podName, imageName) -} - -func 
testAccKubernetesPodConfigWithLifeCycle(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - spec { - container { - image = "%s" - name = "containername" - args = ["/server"] - - lifecycle { - post_start { - exec { - command = ["ls", "-al"] - } - } - pre_stop { - exec { - command = ["date"] - } - } - } - } - } -} - - `, podName, imageName) -} - -func testAccKubernetesPodConfigWithContainerSecurityContext(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - spec { - container { - image = "%s" - name = "containername" - - security_context { - privileged = true - run_as_user = 1 - se_linux_options { - level = "s0:c123,c456" - } - } - } - } -} - - - `, podName, imageName) -} - -func testAccKubernetesPodConfigWithVolumeMounts(secretName, podName, imageName string) string { - return fmt.Sprintf(` - -resource "kubernetes_secret" "test" { - metadata { - name = "%s" - } - - data { - one = "first" - } -} - -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - volume_mount { - mount_path = "/tmp/my_path" - name = "db" - } - } - volume { - name = "db" - secret = { - secret_name = "${kubernetes_secret.test.metadata.0.name}" - } - } - } -} - `, secretName, podName, imageName) -} - -func testAccKubernetesPodConfigWithResourceRequirements(podName, imageName string) string { - return fmt.Sprintf(` - -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - - resources{ - limits{ - cpu = "0.5" - memory = "512Mi" - } - requests{ - cpu = "250m" - memory = "50Mi" - } - } - - } - } -} - `, podName, imageName) -} - -func 
testAccKubernetesPodConfigWithEmptyDirVolumes(podName, imageName string) string { - return fmt.Sprintf(` -resource "kubernetes_pod" "test" { - metadata { - labels { - app = "pod_label" - } - - name = "%s" - } - - spec { - container { - image = "%s" - name = "containername" - volume_mount { - mount_path = "/cache" - name = "cache-volume" - } - } - volume { - name = "cache-volume" - empty_dir = { - medium = "Memory" - } - } - } -} -`, podName, imageName) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_resource_quota.go b/builtin/providers/kubernetes/resource_kubernetes_resource_quota.go deleted file mode 100644 index 78e0710ad..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_resource_quota.go +++ /dev/null @@ -1,212 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesResourceQuota() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesResourceQuotaCreate, - Read: resourceKubernetesResourceQuotaRead, - Exists: resourceKubernetesResourceQuotaExists, - Update: resourceKubernetesResourceQuotaUpdate, - Delete: resourceKubernetesResourceQuotaDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("resource quota", true), - "spec": { - Type: schema.TypeList, - Description: "Spec defines the desired quota. 
http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hard": { - Type: schema.TypeMap, - Description: "The set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", - Optional: true, - Elem: schema.TypeString, - ValidateFunc: validateResourceList, - }, - "scopes": { - Type: schema.TypeSet, - Description: "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - }, - } -} - -func resourceKubernetesResourceQuotaCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - spec, err := expandResourceQuotaSpec(d.Get("spec").([]interface{})) - if err != nil { - return err - } - resQuota := api.ResourceQuota{ - ObjectMeta: metadata, - Spec: spec, - } - log.Printf("[INFO] Creating new resource quota: %#v", resQuota) - out, err := conn.CoreV1().ResourceQuotas(metadata.Namespace).Create(&resQuota) - if err != nil { - return fmt.Errorf("Failed to create resource quota: %s", err) - } - log.Printf("[INFO] Submitted new resource quota: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - quota, err := conn.CoreV1().ResourceQuotas(out.Namespace).Get(out.Name, meta_v1.GetOptions{}) - if err != nil { - return resource.NonRetryableError(err) - } - if resourceListEquals(spec.Hard, quota.Status.Hard) { - return nil - } - err = fmt.Errorf("Quotas don't match after creation.\nExpected: %#v\nGiven: %#v", - spec.Hard, quota.Status.Hard) - return resource.RetryableError(err) - }) - if err != 
nil { - return err - } - - return resourceKubernetesResourceQuotaRead(d, meta) -} - -func resourceKubernetesResourceQuotaRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading resource quota %s", name) - resQuota, err := conn.CoreV1().ResourceQuotas(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received resource quota: %#v", resQuota) - - // This is to work around K8S bug - // See https://github.com/kubernetes/kubernetes/issues/44539 - if resQuota.ObjectMeta.GenerateName == "" { - if v, ok := d.GetOk("metadata.0.generate_name"); ok { - resQuota.ObjectMeta.GenerateName = v.(string) - } - } - - err = d.Set("metadata", flattenMetadata(resQuota.ObjectMeta)) - if err != nil { - return err - } - err = d.Set("spec", flattenResourceQuotaSpec(resQuota.Spec)) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesResourceQuotaUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - var spec api.ResourceQuotaSpec - waitForChangedSpec := false - if d.HasChange("spec") { - var err error - spec, err = expandResourceQuotaSpec(d.Get("spec").([]interface{})) - if err != nil { - return err - } - ops = append(ops, &ReplaceOperation{ - Path: "/spec", - Value: spec, - }) - waitForChangedSpec = true - } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - log.Printf("[INFO] Updating resource quota %q: %v", name, string(data)) - out, err := conn.CoreV1().ResourceQuotas(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update resource quota: %s", err) - } - log.Printf("[INFO] Submitted updated resource 
quota: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - if waitForChangedSpec { - err = resource.Retry(1*time.Minute, func() *resource.RetryError { - quota, err := conn.CoreV1().ResourceQuotas(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return resource.NonRetryableError(err) - } - if resourceListEquals(spec.Hard, quota.Status.Hard) { - return nil - } - err = fmt.Errorf("Quotas don't match after update.\nExpected: %#v\nGiven: %#v", - spec.Hard, quota.Status.Hard) - return resource.RetryableError(err) - }) - if err != nil { - return err - } - } - - return resourceKubernetesResourceQuotaRead(d, meta) -} - -func resourceKubernetesResourceQuotaDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting resource quota: %#v", name) - err := conn.CoreV1().ResourceQuotas(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Resource quota %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesResourceQuotaExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking resource quota %s", name) - _, err := conn.CoreV1().ResourceQuotas(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_resource_quota_test.go b/builtin/providers/kubernetes/resource_kubernetes_resource_quota_test.go deleted file mode 100644 index c92a41734..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_resource_quota_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" - - 
"github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesResourceQuota_basic(t *testing.T) { - var conf api.ResourceQuota - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_resource_quota.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesResourceQuotaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesResourceQuotaConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.TestAnnotationOne", "one"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one"}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelThree", "three"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelFour", "four"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three", "TestLabelFour": "four"}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - 
resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "3"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.cpu", "2"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.memory", "2Gi"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "4"), - ), - }, - { - Config: testAccKubernetesResourceQuotaConfig_metaModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.name", 
name), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "3"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.cpu", "2"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.memory", "2Gi"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "4"), - ), - }, - { - Config: testAccKubernetesResourceQuotaConfig_specModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "4"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.cpu", "4"), - 
resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.requests.cpu", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.limits.memory", "4Gi"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "10"), - ), - }, - }, - }) -} - -func TestAccKubernetesResourceQuota_generatedName(t *testing.T) { - var conf api.ResourceQuota - prefix := "tf-acc-test-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_resource_quota.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesResourceQuotaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesResourceQuotaConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.generate_name", prefix), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "10"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.scopes.#", 
"0"), - ), - }, - }, - }) -} - -func TestAccKubernetesResourceQuota_withScopes(t *testing.T) { - var conf api.ResourceQuota - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_resource_quota.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesResourceQuotaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesResourceQuotaConfig_withScopes(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "10"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.scopes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.scopes.193563370", "BestEffort"), - ), - }, - { - Config: testAccKubernetesResourceQuotaConfig_withScopesModified(name), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckKubernetesResourceQuotaExists("kubernetes_resource_quota.test", &conf), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_resource_quota.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.%", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.hard.pods", "10"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.scopes.#", "1"), - resource.TestCheckResourceAttr("kubernetes_resource_quota.test", "spec.0.scopes.3022121741", "NotBestEffort"), - ), - }, - }, - }) -} - -func TestAccKubernetesResourceQuota_importBasic(t *testing.T) { - resourceName := "kubernetes_resource_quota.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesResourceQuotaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesResourceQuotaConfig_basic(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckKubernetesResourceQuotaDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - 
for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_resource_quota" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().ResourceQuotas(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Namespace == namespace && resp.Name == name { - return fmt.Errorf("Resource Quota still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesResourceQuotaExists(n string, obj *api.ResourceQuota) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().ResourceQuotas(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesResourceQuotaConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - TestLabelFour = "four" - } - name = "%s" - } - spec { - hard { - "limits.cpu" = 2 - "limits.memory" = "2Gi" - pods = 4 - } - } -} -`, name) -} - -func testAccKubernetesResourceQuotaConfig_metaModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - hard { - "limits.cpu" = 2 - "limits.memory" = "2Gi" - pods = 4 - } - } -} -`, name) -} - -func testAccKubernetesResourceQuotaConfig_specModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - name = "%s" - } - spec { - hard { - "limits.cpu" = 4 - 
"requests.cpu" = 1 - "limits.memory" = "4Gi" - pods = 10 - } - } -} -`, name) -} - -func testAccKubernetesResourceQuotaConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - generate_name = "%s" - } - spec { - hard { - pods = 10 - } - } -} -`, prefix) -} - -func testAccKubernetesResourceQuotaConfig_withScopes(name string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - name = "%s" - } - spec { - hard { - pods = 10 - } - scopes = ["BestEffort"] - } -} -`, name) -} - -func testAccKubernetesResourceQuotaConfig_withScopesModified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_resource_quota" "test" { - metadata { - name = "%s" - } - spec { - hard { - pods = 10 - } - scopes = ["NotBestEffort"] - } -} -`, name) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_secret.go b/builtin/providers/kubernetes/resource_kubernetes_secret.go deleted file mode 100644 index d31416514..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_secret.go +++ /dev/null @@ -1,160 +0,0 @@ -package kubernetes - -import ( - "log" - - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesSecret() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesSecretCreate, - Read: resourceKubernetesSecretRead, - Exists: resourceKubernetesSecretExists, - Update: resourceKubernetesSecretUpdate, - Delete: resourceKubernetesSecretDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("secret", true), - "data": { - Type: schema.TypeMap, - Description: 
"A map of the secret data.", - Optional: true, - Sensitive: true, - }, - "type": { - Type: schema.TypeString, - Description: "Type of secret", - Default: "Opaque", - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceKubernetesSecretCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - secret := api.Secret{ - ObjectMeta: metadata, - StringData: expandStringMap(d.Get("data").(map[string]interface{})), - } - - if v, ok := d.GetOk("type"); ok { - secret.Type = api.SecretType(v.(string)) - } - - log.Printf("[INFO] Creating new secret: %#v", secret) - out, err := conn.CoreV1().Secrets(metadata.Namespace).Create(&secret) - if err != nil { - return err - } - - log.Printf("[INFO] Submitting new secret: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesSecretRead(d, meta) -} - -func resourceKubernetesSecretRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - log.Printf("[INFO] Reading secret %s", name) - secret, err := conn.CoreV1().Secrets(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Received secret: %#v", secret) - err = d.Set("metadata", flattenMetadata(secret.ObjectMeta)) - if err != nil { - return err - } - - d.Set("data", byteMapToStringMap(secret.Data)) - d.Set("type", secret.Type) - - return nil -} - -func resourceKubernetesSecretUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("data") { - oldV, newV := d.GetChange("data") - - oldV = base64EncodeStringMap(oldV.(map[string]interface{})) - newV = base64EncodeStringMap(newV.(map[string]interface{})) - - diffOps := diffStringMap("/data/", oldV.(map[string]interface{}), 
newV.(map[string]interface{})) - - ops = append(ops, diffOps...) - } - - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - - log.Printf("[INFO] Updating secret %q: %v", name, data) - out, err := conn.CoreV1().Secrets(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update secret: %s", err) - } - - log.Printf("[INFO] Submitting updated secret: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesSecretRead(d, meta) -} - -func resourceKubernetesSecretDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - log.Printf("[INFO] Deleting secret: %q", name) - err := conn.CoreV1().Secrets(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Secret %s deleted", name) - - d.SetId("") - - return nil -} - -func resourceKubernetesSecretExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - log.Printf("[INFO] Checking secret %s", name) - _, err := conn.CoreV1().Secrets(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - - return true, err -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_secret_test.go b/builtin/providers/kubernetes/resource_kubernetes_secret_test.go deleted file mode 100644 index adbb0d9e5..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_secret_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package kubernetes - -import ( - "fmt" - "reflect" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesSecret_basic(t *testing.T) { - var conf api.Secret - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_secret.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesSecretDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesSecretConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationTwo", "two"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "3"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelTwo", "two"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"), - 
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "2"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.one", "first"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.two", "second"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "Opaque"), - testAccCheckSecretData(&conf, map[string]string{"one": "first", "two": "second"}), - ), - }, - { - Config: testAccKubernetesSecretConfig_modified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "2"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationOne", "one"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.Different", "1234"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "Different": "1234"}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "2"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelOne", "one"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelThree", "three"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three"}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"), - 
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "3"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.one", "first"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.two", "second"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.nine", "ninth"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "Opaque"), - testAccCheckSecretData(&conf, map[string]string{"one": "first", "two": "second", "nine": "ninth"}), - ), - }, - { - Config: testAccKubernetesSecretConfig_noData(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "0"), - testAccCheckSecretData(&conf, map[string]string{}), - ), - }, - { - Config: testAccKubernetesSecretConfig_typeSpecified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf), - resource.TestCheckResourceAttr("kubernetes_secret.test", 
"metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "2"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.username", "admin"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "data.password", "password"), - resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "kubernetes.io/basic-auth"), - testAccCheckSecretData(&conf, map[string]string{"username": "admin", "password": "password"}), - ), - }, - }, - }) -} - -func TestAccKubernetesSecret_importBasic(t *testing.T) { - resourceName := "kubernetes_secret.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesSecretDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesSecretConfig_basic(name), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesSecret_generatedName(t *testing.T) { - var conf api.Secret - prefix := "tf-acc-test-gen-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_secret.test", - Providers: testAccProviders, - 
CheckDestroy: testAccCheckKubernetesSecretDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesSecretConfig_generatedName(prefix), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.generate_name", prefix), - resource.TestMatchResourceAttr("kubernetes_secret.test", "metadata.0.name", regexp.MustCompile("^"+prefix)), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"), - ), - }, - }, - }) -} - -func TestAccKubernetesSecret_importGeneratedName(t *testing.T) { - resourceName := "kubernetes_secret.test" - prefix := "tf-acc-test-gen-import-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesSecretDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesSecretConfig_generatedName(prefix), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckSecretData(m *api.Secret, expected map[string]string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(expected) == 0 && len(m.Data) == 0 { - return nil - } - if !reflect.DeepEqual(byteMapToStringMap(m.Data), expected) { - return fmt.Errorf("%s data don't 
match.\nExpected: %q\nGiven: %q", - m.Name, expected, m.Data) - } - return nil - } -} - -func testAccCheckKubernetesSecretDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_secret" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().Secrets(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Name == rs.Primary.ID { - return fmt.Errorf("Secret still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesSecretExists(n string, obj *api.Secret) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().Secrets(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesSecretConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_secret" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - data { - one = "first" - two = "second" - } -}`, name) -} - -func testAccKubernetesSecretConfig_modified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_secret" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - Different = "1234" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - } - name = "%s" - } - data { - one = "first" - two = "second" - nine = "ninth" - } -}`, name) -} - -func testAccKubernetesSecretConfig_noData(name string) string { - return fmt.Sprintf(` -resource "kubernetes_secret" "test" { - metadata { - name = "%s" - } -}`, name) 
-} - -func testAccKubernetesSecretConfig_typeSpecified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_secret" "test" { - metadata { - name = "%s" - } - data { - username = "admin" - password = "password" - } - type = "kubernetes.io/basic-auth" -}`, name) -} - -func testAccKubernetesSecretConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_secret" "test" { - metadata { - generate_name = "%s" - } - data { - one = "first" - two = "second" - } -}`, prefix) -} diff --git a/builtin/providers/kubernetes/resource_kubernetes_service.go b/builtin/providers/kubernetes/resource_kubernetes_service.go deleted file mode 100644 index 27f6b6052..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_service.go +++ /dev/null @@ -1,226 +0,0 @@ -package kubernetes - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgApi "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func resourceKubernetesService() *schema.Resource { - return &schema.Resource{ - Create: resourceKubernetesServiceCreate, - Read: resourceKubernetesServiceRead, - Exists: resourceKubernetesServiceExists, - Update: resourceKubernetesServiceUpdate, - Delete: resourceKubernetesServiceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "metadata": namespacedMetadataSchema("service", true), - "spec": { - Type: schema.TypeList, - Description: "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_ip": { - Type: schema.TypeString, - Description: "The IP address of the service. 
It is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. `None` can be specified for headless services when proxying is not required. Ignored if type is `ExternalName`. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", - Optional: true, - ForceNew: true, - Computed: true, - }, - "external_ips": { - Type: schema.TypeSet, - Description: "A list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "external_name": { - Type: schema.TypeString, - Description: "The external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires `type` to be `ExternalName`.", - Optional: true, - }, - "load_balancer_ip": { - Type: schema.TypeString, - Description: "Only applies to `type = LoadBalancer`. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying this field when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - Optional: true, - }, - "load_balancer_source_ranges": { - Type: schema.TypeSet, - Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. 
More info: http://kubernetes.io/docs/user-guide/services-firewalls", - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "port": { - Type: schema.TypeList, - Description: "The list of ports that are exposed by this service. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", - Required: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "The name of this port within the service. All ports within the service must have unique names. Optional if only one ServicePort is defined on this service.", - Optional: true, - }, - "node_port": { - Type: schema.TypeInt, - Description: "The port on each node on which this service is exposed when `type` is `NodePort` or `LoadBalancer`. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the `type` of this service requires one. More info: http://kubernetes.io/docs/user-guide/services#type--nodeport", - Computed: true, - Optional: true, - }, - "port": { - Type: schema.TypeInt, - Description: "The port that will be exposed by this service.", - Required: true, - }, - "protocol": { - Type: schema.TypeString, - Description: "The IP protocol for this port. Supports `TCP` and `UDP`. Default is `TCP`.", - Optional: true, - Default: "TCP", - }, - "target_port": { - Type: schema.TypeInt, - Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. This field is ignored for services with `cluster_ip = \"None\"`. More info: http://kubernetes.io/docs/user-guide/services#defining-a-service", - Required: true, - }, - }, - }, - }, - "selector": { - Type: schema.TypeMap, - Description: "Route service traffic to pods with label keys and values matching this selector. 
Only applies to types `ClusterIP`, `NodePort`, and `LoadBalancer`. More info: http://kubernetes.io/docs/user-guide/services#overview", - Optional: true, - }, - "session_affinity": { - Type: schema.TypeString, - Description: "Used to maintain session affinity. Supports `ClientIP` and `None`. Defaults to `None`. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", - Optional: true, - Default: "None", - }, - "type": { - Type: schema.TypeString, - Description: "Determines how the service is exposed. Defaults to `ClusterIP`. Valid options are `ExternalName`, `ClusterIP`, `NodePort`, and `LoadBalancer`. `ExternalName` maps to the specified `external_name`. More info: http://kubernetes.io/docs/user-guide/services#overview", - Optional: true, - Default: "ClusterIP", - }, - }, - }, - }, - }, - } -} - -func resourceKubernetesServiceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - metadata := expandMetadata(d.Get("metadata").([]interface{})) - svc := api.Service{ - ObjectMeta: metadata, - Spec: expandServiceSpec(d.Get("spec").([]interface{})), - } - log.Printf("[INFO] Creating new service: %#v", svc) - out, err := conn.CoreV1().Services(metadata.Namespace).Create(&svc) - if err != nil { - return err - } - log.Printf("[INFO] Submitted new service: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesServiceRead(d, meta) -} - -func resourceKubernetesServiceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Reading service %s", name) - svc, err := conn.CoreV1().Services(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - log.Printf("[DEBUG] Received error: %#v", err) - return err - } - log.Printf("[INFO] Received service: %#v", svc) - err = d.Set("metadata", flattenMetadata(svc.ObjectMeta)) - if err != nil { - return err - } - - flattened := 
flattenServiceSpec(svc.Spec) - log.Printf("[DEBUG] Flattened service spec: %#v", flattened) - err = d.Set("spec", flattened) - if err != nil { - return err - } - - return nil -} - -func resourceKubernetesServiceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - - ops := patchMetadata("metadata.0.", "/metadata/", d) - if d.HasChange("spec") { - diffOps := patchServiceSpec("spec.0.", "/spec/", d) - ops = append(ops, diffOps...) - } - data, err := ops.MarshalJSON() - if err != nil { - return fmt.Errorf("Failed to marshal update operations: %s", err) - } - log.Printf("[INFO] Updating service %q: %v", name, string(data)) - out, err := conn.CoreV1().Services(namespace).Patch(name, pkgApi.JSONPatchType, data) - if err != nil { - return fmt.Errorf("Failed to update service: %s", err) - } - log.Printf("[INFO] Submitted updated service: %#v", out) - d.SetId(buildId(out.ObjectMeta)) - - return resourceKubernetesServiceRead(d, meta) -} - -func resourceKubernetesServiceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Deleting service: %#v", name) - err := conn.CoreV1().Services(namespace).Delete(name, &meta_v1.DeleteOptions{}) - if err != nil { - return err - } - - log.Printf("[INFO] Service %s deleted", name) - - d.SetId("") - return nil -} - -func resourceKubernetesServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { - conn := meta.(*kubernetes.Clientset) - - namespace, name := idParts(d.Id()) - log.Printf("[INFO] Checking service %s", name) - _, err := conn.CoreV1().Services(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 { - return false, nil - } - log.Printf("[DEBUG] Received error: %#v", err) - } - return true, err -} diff --git 
a/builtin/providers/kubernetes/resource_kubernetes_service_test.go b/builtin/providers/kubernetes/resource_kubernetes_service_test.go deleted file mode 100644 index 0cca38927..000000000 --- a/builtin/providers/kubernetes/resource_kubernetes_service_test.go +++ /dev/null @@ -1,500 +0,0 @@ -package kubernetes - -import ( - "fmt" - "reflect" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/kubernetes/pkg/api/v1" - kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" -) - -func TestAccKubernetesService_basic(t *testing.T) { - var conf api.Service - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_service.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_basic(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.cluster_ip"), 
- resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.name", ""), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.node_port", "0"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.port", "8080"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.target_port", "80"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.session_affinity", "None"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.type", "ClusterIP"), - testAccCheckServicePorts(&conf, []api.ServicePort{ - { - Port: int32(8080), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(80), - }, - }), - ), - }, - { - Config: testAccKubernetesServiceConfig_modified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.name", name), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.uid"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.cluster_ip"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.name", ""), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.node_port", "0"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.port", "8081"), - 
resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.target_port", "80"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.session_affinity", "None"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.type", "ClusterIP"), - testAccCheckServicePorts(&conf, []api.ServicePort{ - { - Port: int32(8081), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(80), - }, - }), - ), - }, - }, - }) -} - -func TestAccKubernetesService_loadBalancer(t *testing.T) { - var conf api.Service - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_service.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_loadBalancer(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.name", name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.port.0.node_port"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.port", "8888"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.target_port", "80"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.cluster_ip"), - 
resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.1452553500", "10.0.0.4"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.3371212991", "10.0.0.3"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_name", "ext-name-"+name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_ip", "12.0.0.120"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.138364083", "10.0.0.5/32"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.445311837", "10.0.0.6/32"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.%", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.App", "MyApp"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.session_affinity", "ClientIP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.type", "LoadBalancer"), - testAccCheckServicePorts(&conf, []api.ServicePort{ - { - Port: int32(8888), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(80), - }, - }), - ), - }, - { - Config: testAccKubernetesServiceConfig_loadBalancer_modified(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.name", name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.cluster_ip"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", 
"spec.0.external_ips.1452553500", "10.0.0.4"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.563283338", "10.0.0.5"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_name", "ext-name-modified-"+name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_ip", "12.0.0.125"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.2271073252", "10.0.0.1/32"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_source_ranges.2515041290", "10.0.0.2/32"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.port.0.node_port"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.port", "9999"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.target_port", "81"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.%", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.App", "MyModifiedApp"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.NewSelector", "NewValue"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.session_affinity", "ClientIP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.type", "LoadBalancer"), - testAccCheckServicePorts(&conf, []api.ServicePort{ - { - Port: int32(9999), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(81), - }, - }), - ), - }, - }, - }) -} - -func TestAccKubernetesService_nodePort(t *testing.T) { - var conf api.Service - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_service.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_nodePort(name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.name", name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.#", "1"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.cluster_ip"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.1452553500", "10.0.0.4"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_ips.563283338", "10.0.0.5"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.external_name", "ext-name-"+name), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.load_balancer_ip", "12.0.0.125"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.#", "2"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.name", "first"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "spec.0.port.0.node_port"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.port", "10222"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.0.target_port", "22"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.1.name", "second"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", 
"spec.0.port.1.node_port"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.1.port", "10333"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.1.protocol", "TCP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.port.1.target_port", "33"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.%", "1"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.selector.App", "MyApp"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.session_affinity", "ClientIP"), - resource.TestCheckResourceAttr("kubernetes_service.test", "spec.0.type", "NodePort"), - testAccCheckServicePorts(&conf, []api.ServicePort{ - { - Name: "first", - Port: int32(10222), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(22), - }, - { - Name: "second", - Port: int32(10333), - Protocol: api.ProtocolTCP, - TargetPort: intstr.FromInt(33), - }, - }), - ), - }, - }, - }) -} - -func TestAccKubernetesService_importBasic(t *testing.T) { - resourceName := "kubernetes_service.test" - name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_basic(name), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccKubernetesService_generatedName(t *testing.T) { - var conf api.Service - prefix := "tf-acc-test-gen-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "kubernetes_service.test", - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_generatedName(prefix), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckKubernetesServiceExists("kubernetes_service.test", &conf), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.annotations.%", "0"), - testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.labels.%", "0"), - testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}), - resource.TestCheckResourceAttr("kubernetes_service.test", "metadata.0.generate_name", prefix), - resource.TestMatchResourceAttr("kubernetes_service.test", "metadata.0.name", regexp.MustCompile("^"+prefix)), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.generation"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.resource_version"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.self_link"), - resource.TestCheckResourceAttrSet("kubernetes_service.test", "metadata.0.uid"), - ), - }, - }, - }) -} - -func TestAccKubernetesService_importGeneratedName(t *testing.T) { - resourceName := "kubernetes_service.test" - prefix := "tf-acc-test-gen-import-" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckKubernetesServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccKubernetesServiceConfig_generatedName(prefix), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckServicePorts(svc *api.Service, expected []api.ServicePort) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(expected) == 0 && len(svc.Spec.Ports) == 0 { - return nil - } - - ports := svc.Spec.Ports - - // Ignore NodePorts as these are assigned randomly - for k, _ := range ports { - ports[k].NodePort = 0 - } - - if !reflect.DeepEqual(ports, expected) { - return fmt.Errorf("Service ports don't 
match.\nExpected: %#v\nGiven: %#v", - expected, svc.Spec.Ports) - } - - return nil - } -} - -func testAccCheckKubernetesServiceDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*kubernetes.Clientset) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "kubernetes_service" { - continue - } - namespace, name := idParts(rs.Primary.ID) - resp, err := conn.CoreV1().Services(namespace).Get(name, meta_v1.GetOptions{}) - if err == nil { - if resp.Name == rs.Primary.ID { - return fmt.Errorf("Service still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCheckKubernetesServiceExists(n string, obj *api.Service) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*kubernetes.Clientset) - namespace, name := idParts(rs.Primary.ID) - out, err := conn.CoreV1().Services(namespace).Get(name, meta_v1.GetOptions{}) - if err != nil { - return err - } - - *obj = *out - return nil - } -} - -func testAccKubernetesServiceConfig_basic(name string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - TestAnnotationTwo = "two" - } - labels { - TestLabelOne = "one" - TestLabelTwo = "two" - TestLabelThree = "three" - } - name = "%s" - } - spec { - port { - port = 8080 - target_port = 80 - } - } -}`, name) -} - -func testAccKubernetesServiceConfig_modified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata { - annotations { - TestAnnotationOne = "one" - Different = "1234" - } - labels { - TestLabelOne = "one" - TestLabelThree = "three" - } - name = "%s" - } - spec { - port { - port = 8081 - target_port = 80 - } - } -}`, name) -} - -func testAccKubernetesServiceConfig_loadBalancer(name string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata 
{ - name = "%s" - } - spec { - external_name = "ext-name-%s" - external_ips = ["10.0.0.3", "10.0.0.4"] - load_balancer_ip = "12.0.0.120" - load_balancer_source_ranges = ["10.0.0.5/32", "10.0.0.6/32"] - selector { - App = "MyApp" - } - session_affinity = "ClientIP" - port { - port = 8888 - target_port = 80 - } - type = "LoadBalancer" - } -}`, name, name) -} - -func testAccKubernetesServiceConfig_loadBalancer_modified(name string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata { - name = "%s" - } - spec { - external_name = "ext-name-modified-%s" - external_ips = ["10.0.0.4", "10.0.0.5"] - load_balancer_ip = "12.0.0.125" - load_balancer_source_ranges = ["10.0.0.1/32", "10.0.0.2/32"] - selector { - App = "MyModifiedApp" - NewSelector = "NewValue" - } - session_affinity = "ClientIP" - port { - port = 9999 - target_port = 81 - } - type = "LoadBalancer" - } -}`, name, name) -} - -func testAccKubernetesServiceConfig_nodePort(name string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata { - name = "%s" - } - spec { - external_name = "ext-name-%s" - external_ips = ["10.0.0.4", "10.0.0.5"] - load_balancer_ip = "12.0.0.125" - selector { - App = "MyApp" - } - session_affinity = "ClientIP" - port { - name = "first" - port = 10222 - target_port = 22 - } - port { - name = "second" - port = 10333 - target_port = 33 - } - type = "NodePort" - } -}`, name, name) -} - -func testAccKubernetesServiceConfig_generatedName(prefix string) string { - return fmt.Sprintf(` -resource "kubernetes_service" "test" { - metadata { - generate_name = "%s" - } - spec { - port { - port = 8080 - target_port = 80 - } - } -}`, prefix) -} diff --git a/builtin/providers/kubernetes/schema_container.go b/builtin/providers/kubernetes/schema_container.go deleted file mode 100644 index 915690031..000000000 --- a/builtin/providers/kubernetes/schema_container.go +++ /dev/null @@ -1,580 +0,0 @@ -package kubernetes - -import 
"github.com/hashicorp/terraform/helper/schema" - -func handlerFields() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "exec": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "exec specifies the action to take.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "command": { - Type: schema.TypeList, - Description: `Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.`, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "http_get": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Specifies the http request to perform.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": { - Type: schema.TypeString, - Optional: true, - Description: `Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.`, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path to access on the HTTP server.`, - }, - "scheme": { - Type: schema.TypeString, - Optional: true, - Default: "HTTP", - Description: `Scheme to use for connecting to the host.`, - }, - "port": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validatePortNumOrName, - Description: `Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME.`, - }, - "http_header": { - Type: schema.TypeList, - Optional: true, - Description: `Scheme to use for connecting to the host.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "The header field name", - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: "The header field value", - }, - }, - }, - }, - }, - }, - }, - "tcp_socket": { - Type: schema.TypeList, - Optional: true, - Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validatePortNumOrName, - Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", - }, - }, - }, - }, - } -} - -func resourcesField() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "limits": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: "Describes the maximum amount of compute resources allowed. 
More info: http://kubernetes.io/docs/user-guide/compute-resources/", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateResourceQuantity, - DiffSuppressFunc: suppressEquivalentResourceQuantity, - }, - "memory": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateResourceQuantity, - DiffSuppressFunc: suppressEquivalentResourceQuantity, - }, - }, - }, - }, - "requests": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateResourceQuantity, - DiffSuppressFunc: suppressEquivalentResourceQuantity, - }, - "memory": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateResourceQuantity, - DiffSuppressFunc: suppressEquivalentResourceQuantity, - }, - }, - }, - }, - } -} - -func seLinuxOptionsField() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "level": { - Type: schema.TypeString, - Optional: true, - Description: "Level is SELinux level label that applies to the container.", - }, - "role": { - Type: schema.TypeString, - Optional: true, - Description: "Role is a SELinux role label that applies to the container.", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "Type is a SELinux type label that applies to the container.", - }, - "user": { - Type: schema.TypeString, - Optional: true, - Description: "User is a SELinux user label that applies to the container.", - }, - } -} - -func volumeMountFields() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "mount_path": { - Type: schema.TypeString, - Required: true, - Description: "Path within the container at which the volume should be mounted. 
Must not contain ':'.", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "This must match the Name of a Volume.", - }, - "read_only": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - }, - "sub_path": { - Type: schema.TypeString, - Optional: true, - Description: `Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).`, - }, - } -} - -func containerFields() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "args": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", - }, - "command": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", - }, - "env": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "List of environment variables to set in the container. Cannot be updated.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the environment variable. Must be a C_IDENTIFIER", - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".`, - }, - "value_from": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Source for the environment variable's value", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config_map_key_ref": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Selects a key of a ConfigMap.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - Description: "The key to select.", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", - }, - }, - }, - }, - "field_ref": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_version": { - Type: schema.TypeString, - Optional: true, - Default: "v1", - Description: `Version of the schema the FieldPath is written in terms of, defaults to "v1".`, - }, - "field_path": { - Type: schema.TypeString, - Optional: true, - Description: "Path of the field to select in the specified API version", - }, - }, - }, - }, - "resource_field_ref": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "container_name": { - Type: schema.TypeString, - Optional: true, - }, - "resource": { - Type: schema.TypeString, - Required: true, - Description: "Resource to select", - }, - }, - }, - }, - "secret_key_ref": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - Description: "The key of the secret to select from. Must be a valid secret key.", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "image": { - Type: schema.TypeString, - Optional: true, - Description: "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", - }, - "image_pull_policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", - }, - "lifecycle": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Description: "Actions that the management system should take in response to container lifecycle events", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "post_start": { - Type: schema.TypeList, - Description: `post_start is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details`, - Optional: true, - Elem: &schema.Resource{ - Schema: handlerFields(), - }, - }, - "pre_stop": { - Type: schema.TypeList, - Description: `pre_stop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. 
More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details`, - Optional: true, - Elem: &schema.Resource{ - Schema: handlerFields(), - }, - }, - }, - }, - }, - "liveness_probe": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - Elem: probeSchema(), - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - }, - "port": { - Type: schema.TypeList, - Optional: true, - Description: `List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "container_port": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validatePortNumOrName, - Description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", - }, - "host_ip": { - Type: schema.TypeString, - Optional: true, - Description: "What host IP to bind the external port to.", - }, - "host_port": { - Type: schema.TypeInt, - Optional: true, - Description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. 
Most containers do not need this.", - }, - "name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validatePortNumOrName, - Description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services", - }, - "protocol": { - Type: schema.TypeString, - Optional: true, - Description: `Protocol for port. Must be UDP or TCP. Defaults to "TCP".`, - Default: "TCP", - }, - }, - }, - }, - "readiness_probe": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - Elem: probeSchema(), - }, - "resources": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Computed: true, - Description: "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", - Elem: &schema.Resource{ - Schema: resourcesField(), - }, - }, - - "security_context": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Description: "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", - Elem: securityContextSchema(), - }, - "stdin": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. ", - }, - "stdin_once": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. 
When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF.", - }, - "termination_message_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "/dev/termination-log", - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.", - }, - "tty": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this container should allocate a TTY for itself", - }, - "volume_mount": { - Type: schema.TypeList, - Optional: true, - Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", - Elem: &schema.Resource{ - Schema: volumeMountFields(), - }, - }, - "working_dir": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", - }, - } -} - -func probeSchema() *schema.Resource { - h := handlerFields() - h["failure_threshold"] = &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.", - Default: 3, - ValidateFunc: validatePositiveInteger, - } - h["initial_delay_seconds"] = &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Description: "Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - } - h["period_seconds"] = &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 10, - ValidateFunc: validatePositiveInteger, - Description: "How often (in seconds) to perform the probe", - } - h["success_threshold"] = &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validatePositiveInteger, - Description: "Minimum consecutive successes for the probe to be considered successful after having failed.", - } - - h["timeout_seconds"] = &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validatePositiveInteger, - Description: "Number of seconds after which the probe times out. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", - } - return &schema.Resource{ - Schema: h, - } - -} - -func securityContextSchema() *schema.Resource { - m := map[string]*schema.Schema{ - "privileged": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Run container in privileged mode. 
Processes in privileged containers are essentially equivalent to root on the host.`, - }, - "read_only_root_filesystem": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this container has a read-only root filesystem.", - }, - "run_as_non_root": { - Type: schema.TypeBool, - Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does.", - Optional: true, - }, - "run_as_user": { - Type: schema.TypeInt, - Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified", - Optional: true, - }, - "se_linux_options": { - Type: schema.TypeList, - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: seLinuxOptionsField(), - }, - }, - "capabilities": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "add": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Added capabilities", - }, - "drop": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Removed capabilities", - }, - }, - }, - }, - } - - return &schema.Resource{ - Schema: m, - } -} diff --git a/builtin/providers/kubernetes/schema_metadata.go b/builtin/providers/kubernetes/schema_metadata.go deleted file mode 100644 index 684acd6ae..000000000 --- a/builtin/providers/kubernetes/schema_metadata.go +++ /dev/null @@ -1,110 +0,0 @@ -package kubernetes - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func metadataFields(objectName string) map[string]*schema.Schema { - return map[string]*schema.Schema{ - "annotations": { - Type: schema.TypeMap, - Description: fmt.Sprintf("An unstructured key value map stored with the %s that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations", objectName), - Optional: true, - ValidateFunc: validateAnnotations, - }, - "generation": { - Type: schema.TypeInt, - Description: "A sequence number representing a specific generation of the desired state.", - Computed: true, - }, - "labels": { - Type: schema.TypeMap, - Description: fmt.Sprintf("Map of string keys and values that can be used to organize and categorize (scope and select) the %s. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", objectName), - Optional: true, - ValidateFunc: validateLabels, - }, - "name": { - Type: schema.TypeString, - Description: fmt.Sprintf("Name of the %s, must be unique. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", objectName), - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validateName, - }, - "resource_version": { - Type: schema.TypeString, - Description: fmt.Sprintf("An opaque value that represents the internal version of this %s that can be used by clients to determine when %s has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency", objectName, objectName), - Computed: true, - }, - "self_link": { - Type: schema.TypeString, - Description: fmt.Sprintf("A URL representing this %s.", objectName), - Computed: true, - }, - "uid": { - Type: schema.TypeString, - Description: fmt.Sprintf("The unique in time and space value for this %s. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", objectName), - Computed: true, - }, - } -} - -func metadataSchema(objectName string, generatableName bool) *schema.Schema { - fields := metadataFields(objectName) - - if generatableName { - fields["generate_name"] = &schema.Schema{ - Type: schema.TypeString, - Description: "Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency", - Optional: true, - ForceNew: true, - ValidateFunc: validateGenerateName, - ConflictsWith: []string{"metadata.name"}, - } - fields["name"].ConflictsWith = []string{"metadata.generate_name"} - } - - return &schema.Schema{ - Type: schema.TypeList, - Description: fmt.Sprintf("Standard %s's metadata. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata", objectName), - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: fields, - }, - } -} - -func namespacedMetadataSchema(objectName string, generatableName bool) *schema.Schema { - fields := metadataFields(objectName) - fields["namespace"] = &schema.Schema{ - Type: schema.TypeString, - Description: fmt.Sprintf("Namespace defines the space within which name of the %s must be unique.", objectName), - Optional: true, - ForceNew: true, - Default: "default", - } - if generatableName { - fields["generate_name"] = &schema.Schema{ - Type: schema.TypeString, - Description: "Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency", - Optional: true, - ForceNew: true, - ValidateFunc: validateGenerateName, - ConflictsWith: []string{"metadata.name"}, - } - fields["name"].ConflictsWith = []string{"metadata.generate_name"} - } - - return &schema.Schema{ - Type: schema.TypeList, - Description: fmt.Sprintf("Standard %s's metadata. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata", objectName), - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: fields, - }, - } -} diff --git a/builtin/providers/kubernetes/schema_pod_spec.go b/builtin/providers/kubernetes/schema_pod_spec.go deleted file mode 100644 index 5bec115d0..000000000 --- a/builtin/providers/kubernetes/schema_pod_spec.go +++ /dev/null @@ -1,373 +0,0 @@ -package kubernetes - -import "github.com/hashicorp/terraform/helper/schema" - -func podSpecFields() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "active_deadline_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validatePositiveInteger, - Description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - }, - "container": { - Type: schema.TypeList, - Optional: true, - Description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", - Elem: &schema.Resource{ - Schema: containerFields(), - }, - }, - "dns_policy": { - Type: schema.TypeString, - Optional: true, - Default: "ClusterFirst", - Description: "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to 'ClusterFirst'.", - }, - "host_ipc": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Use the host's ipc namespace. Optional: Default to false.", - }, - "host_network": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Host networking requested for this pod. Use the host's network namespace. 
If this option is set, the ports that will be used must be specified.", - }, - - "host_pid": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Use the host's pid namespace.", - }, - - "hostname": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", - }, - "image_pull_secrets": { - Type: schema.TypeList, - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Required: true, - }, - }, - }, - }, - "node_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", - }, - "node_selector": { - Type: schema.TypeMap, - Optional: true, - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection.", - }, - "restart_policy": { - Type: schema.TypeString, - Optional: true, - Default: "Always", - Description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. 
More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy.", - }, - "security_context": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_group": { - Type: schema.TypeInt, - Description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume.", - Optional: true, - }, - "run_as_non_root": { - Type: schema.TypeBool, - Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does.", - Optional: true, - }, - "run_as_user": { - Type: schema.TypeInt, - Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified", - Optional: true, - }, - "supplemental_groups": { - Type: schema.TypeSet, - Description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "se_linux_options": { - Type: schema.TypeList, - Description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: seLinuxOptionsField(), - }, - }, - }, - }, - }, - "service_account_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md.", - }, - "subdomain": { - Type: schema.TypeString, - Optional: true, - Description: `If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all..`, - }, - "termination_grace_period_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 30, - ValidateFunc: validateTerminationGracePeriodSeconds, - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process.", - }, - - "volume": { - Type: schema.TypeList, - Optional: true, - Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: http://kubernetes.io/docs/user-guide/volumes", - Elem: volumeSchema(), - }, - } -} - -func volumeSchema() *schema.Resource { - v := commonVolumeSources() - - v["config_map"] = &schema.Schema{ - Type: schema.TypeList, - Description: "ConfigMap represents a configMap that should populate this volume", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "items": { - Type: schema.TypeList, - Description: `If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.`, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - Description: "The key to project.", - }, - "mode": { - Type: schema.TypeInt, - Optional: true, - Description: `Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.`, - }, - "path": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateAttributeValueDoesNotContain(".."), - Description: `The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.`, - }, - }, - }, - }, - "default_mode": { - Type: schema.TypeInt, - Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Optional: true, - }, - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Optional: true, - }, - }, - }, - } - - v["git_repo"] = &schema.Schema{ - Type: schema.TypeList, - Description: "GitRepo represents a git repository at a particular revision.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "directory": { - Type: schema.TypeString, - Description: "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", - Optional: true, - ValidateFunc: validateAttributeValueDoesNotContain(".."), - }, - "repository": { - Type: schema.TypeString, - Description: "Repository URL", - Optional: true, - }, - "revision": { - Type: schema.TypeString, - Description: "Commit hash for the specified revision.", - Optional: true, - }, - }, - }, - } - v["downward_api"] = &schema.Schema{ - Type: schema.TypeList, - Description: "DownwardAPI represents downward API about the pod that should populate this volume", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "default_mode": { - Type: schema.TypeInt, - Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Optional: true, - }, - "items": { - Type: schema.TypeList, - Description: `If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.`, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_ref": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_version": { - Type: schema.TypeString, - Optional: true, - Default: "v1", - Description: `Version of the schema the FieldPath is written in terms of, defaults to "v1".`, - }, - "field_path": { - Type: schema.TypeString, - Optional: true, - Description: "Path of the field to select in the specified API version", - }, - }, - }, - }, - "mode": { - Type: schema.TypeInt, - Optional: true, - Description: `Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.`, - }, - "path": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateAttributeValueDoesNotContain(".."), - Description: `Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'`, - }, - "resource_field_ref": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "container_name": { - Type: schema.TypeString, - Required: true, - }, - "quantity": { - Type: schema.TypeString, - Optional: true, - }, - "resource": { - Type: schema.TypeString, - Required: true, - Description: "Resource to select", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - v["empty_dir"] = &schema.Schema{ - Type: schema.TypeList, - Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "medium": { - Type: schema.TypeString, - Description: `What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. 
More info: http://kubernetes.io/docs/user-guide/volumes#emptydir`, - Optional: true, - Default: "", - ValidateFunc: validateAttributeValueIsIn([]string{"", "Memory"}), - }, - }, - }, - } - - v["persistent_volume_claim"] = &schema.Schema{ - Type: schema.TypeList, - Description: "The specification of a persistent volume.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "claim_name": { - Type: schema.TypeString, - Description: "ClaimName is the name of a PersistentVolumeClaim in the same ", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Will force the ReadOnly setting in VolumeMounts.", - Optional: true, - Default: false, - }, - }, - }, - } - - v["secret"] = &schema.Schema{ - Type: schema.TypeList, - Description: "Secret represents a secret that should populate this volume. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_name": { - Type: schema.TypeString, - Description: "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", - Optional: true, - }, - }, - }, - } - v["name"] = &schema.Schema{ - Type: schema.TypeString, - Description: "Volume's name. Must be a DNS_LABEL and unique within the pod. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Optional: true, - } - return &schema.Resource{ - Schema: v, - } -} diff --git a/builtin/providers/kubernetes/schema_volume_source.go b/builtin/providers/kubernetes/schema_volume_source.go deleted file mode 100644 index 008960ba3..000000000 --- a/builtin/providers/kubernetes/schema_volume_source.go +++ /dev/null @@ -1,559 +0,0 @@ -package kubernetes - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func persistentVolumeSourceSchema() *schema.Resource { - return &schema.Resource{ - Schema: commonVolumeSources(), - } -} - -// Common volume sources between Persistent Volumes and Pod Volumes -func commonVolumeSources() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "host_path": { - Type: schema.TypeList, - Description: "Represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Description: "Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", - Optional: true, - }, - }, - }, - }, - "aws_elastic_block_store": { - Type: schema.TypeList, - Description: "Represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - Optional: true, - }, - "partition": { - Type: schema.TypeInt, - Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to set the read-only property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - Optional: true, - }, - "volume_id": { - Type: schema.TypeString, - Description: "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", - Required: true, - }, - }, - }, - }, - "azure_disk": { - Type: schema.TypeList, - Description: "Represents an Azure Data Disk mount on the host and bind mount to the pod.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "caching_mode": { - Type: schema.TypeString, - Description: "Host Caching mode: None, Read Only, Read Write.", - Required: true, - }, - "data_disk_uri": { - Type: schema.TypeString, - Description: "The URI the data disk in the blob storage", - Required: true, - }, - "disk_name": { - Type: schema.TypeString, - Description: "The Name of the data disk in the blob storage", - Required: true, - }, - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", - Optional: true, - Default: false, - }, - }, - }, - }, - "azure_file": { - Type: schema.TypeList, - Description: "Represents an Azure File Service mount on the host and bind mount to the pod.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", - Optional: true, - }, - "secret_name": { - Type: schema.TypeString, - Description: "The name of secret that contains Azure Storage Account Name and Key", - Required: true, - }, - "share_name": { - Type: schema.TypeString, - Description: "Share Name", - Required: true, - }, - }, - }, - }, - "ceph_fs": { - Type: schema.TypeList, - Description: "Represents a Ceph FS mount on the host that shares a pod's lifetime", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "monitors": { - Type: schema.TypeSet, - Description: "Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "path": { - Type: schema.TypeString, - Description: "Used as the mounted root, rather than the full Ceph tree, default is /", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to `false` (read/write). 
More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Optional: true, - }, - "secret_file": { - Type: schema.TypeString, - Description: "The path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Optional: true, - }, - "secret_ref": { - Type: schema.TypeList, - Description: "Reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Optional: true, - }, - }, - }, - }, - "user": { - Type: schema.TypeString, - Description: "User is the rados user name, default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Optional: true, - }, - }, - }, - }, - "cinder": { - Type: schema.TypeList, - Description: "Represents a cinder volume attached and mounted on kubelets host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). 
More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Optional: true, - }, - "volume_id": { - Type: schema.TypeString, - Description: "Volume ID used to identify the volume in Cinder. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Required: true, - }, - }, - }, - }, - "fc": { - Type: schema.TypeList, - Description: "Represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Optional: true, - }, - "lun": { - Type: schema.TypeInt, - Description: "FC target lun number", - Required: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).", - Optional: true, - }, - "target_ww_ns": { - Type: schema.TypeSet, - Description: "FC target worldwide names (WWNs)", - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - "flex_volume": { - Type: schema.TypeList, - Description: "Represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "driver": { - Type: schema.TypeString, - Description: "Driver is the name of the driver to use for this volume.", - Required: true, - }, - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.", - Optional: true, - }, - "options": { - Type: schema.TypeMap, - Description: "Extra command options if any.", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the ReadOnly setting in VolumeMounts. Defaults to false (read/write).", - Optional: true, - }, - "secret_ref": { - Type: schema.TypeList, - Description: "Reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "flocker": { - Type: schema.TypeList, - Description: "Represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_name": { - Type: schema.TypeString, - Description: "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", - Optional: true, - }, - "dataset_uuid": { - Type: schema.TypeString, - Description: "UUID of the dataset. This is unique identifier of a Flocker dataset", - Optional: true, - }, - }, - }, - }, - "gce_persistent_disk": { - Type: schema.TypeList, - Description: "Represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - Optional: true, - }, - "partition": { - Type: schema.TypeInt, - Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - Optional: true, - }, - "pd_name": { - Type: schema.TypeString, - Description: "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - Required: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", - Optional: true, - }, - }, - }, - }, - "glusterfs": { - Type: schema.TypeList, - Description: "Represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "endpoints_name": { - Type: schema.TypeString, - Description: "The endpoint name that details Glusterfs topology. 
More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Required: true, - }, - "path": { - Type: schema.TypeString, - Description: "The Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Required: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Optional: true, - }, - }, - }, - }, - "iscsi": { - Type: schema.TypeList, - Description: "Represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", - Optional: true, - }, - "iqn": { - Type: schema.TypeString, - Description: "Target iSCSI Qualified Name.", - Required: true, - }, - "iscsi_interface": { - Type: schema.TypeString, - Description: "iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp).", - Optional: true, - Default: "default", - }, - "lun": { - Type: schema.TypeInt, - Description: "iSCSI target lun number.", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false.", - Optional: true, - }, - "target_portal": { - Type: schema.TypeString, - Description: "iSCSI target portal. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - Required: true, - }, - }, - }, - }, - "nfs": { - Type: schema.TypeList, - Description: "Represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Description: "Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - Required: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - Optional: true, - }, - "server": { - Type: schema.TypeString, - Description: "Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", - Required: true, - }, - }, - }, - }, - "photon_persistent_disk": { - Type: schema.TypeList, - Description: "Represents a PhotonController persistent disk attached and mounted on kubelets host machine", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", - Optional: true, - }, - "pd_id": { - Type: schema.TypeString, - Description: "ID that identifies Photon Controller persistent disk", - Required: true, - }, - }, - }, - }, - "quobyte": { - Type: schema.TypeList, - Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group": { - Type: schema.TypeString, - Description: "Group to map volume access to Default is no group", - Optional: true, - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", - Optional: true, - }, - "registry": { - Type: schema.TypeString, - Description: "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", - Required: true, - }, - "user": { - Type: schema.TypeString, - Description: "User to map volume access to Defaults to serivceaccount user", - Optional: true, - }, - "volume": { - Type: schema.TypeString, - Description: "Volume is a string that references an already created Quobyte volume by name.", - Required: true, - }, - }, - }, - }, - "rbd": { - Type: schema.TypeList, - Description: "Represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ceph_monitors": { - Type: schema.TypeSet, - Description: "A collection of Ceph monitors. 
More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd", - Optional: true, - }, - "keyring": { - Type: schema.TypeString, - Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Optional: true, - Computed: true, - }, - "rados_user": { - Type: schema.TypeString, - Description: "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Optional: true, - Default: "admin", - }, - "rbd_image": { - Type: schema.TypeString, - Description: "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Required: true, - }, - "rbd_pool": { - Type: schema.TypeString, - Description: "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.", - Optional: true, - Default: "rbd", - }, - "read_only": { - Type: schema.TypeBool, - Description: "Whether to force the read-only setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Optional: true, - Default: false, - }, - "secret_ref": { - Type: schema.TypeList, - Description: "Name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "vsphere_volume": { - Type: schema.TypeList, - Description: "Represents a vSphere volume attached and mounted on kubelets host machine", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fs_type": { - Type: schema.TypeString, - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Optional: true, - }, - "volume_path": { - Type: schema.TypeString, - Description: "Path that identifies vSphere volume vmdk", - Required: true, - }, - }, - }, - }, - } -} diff --git a/builtin/providers/kubernetes/structure_horizontal_pod_autoscaler.go b/builtin/providers/kubernetes/structure_horizontal_pod_autoscaler.go deleted file mode 100644 index 44a56ea7c..000000000 --- a/builtin/providers/kubernetes/structure_horizontal_pod_autoscaler.go +++ /dev/null @@ -1,105 +0,0 @@ -package kubernetes - -import ( - "github.com/hashicorp/terraform/helper/schema" - api "k8s.io/kubernetes/pkg/apis/autoscaling/v1" -) - -func expandHorizontalPodAutoscalerSpec(in []interface{}) api.HorizontalPodAutoscalerSpec { - if len(in) == 0 || in[0] == nil { - return api.HorizontalPodAutoscalerSpec{} - } - spec := api.HorizontalPodAutoscalerSpec{} - m := in[0].(map[string]interface{}) - if v, ok := m["max_replicas"]; ok { - spec.MaxReplicas = int32(v.(int)) - } - if v, ok := m["min_replicas"].(int); ok && v > 0 { - spec.MinReplicas = ptrToInt32(int32(v)) - } - if v, ok := m["scale_target_ref"]; ok { - spec.ScaleTargetRef = 
expandCrossVersionObjectReference(v.([]interface{})) - } - if v, ok := m["target_cpu_utilization_percentage"].(int); ok && v > 0 { - spec.TargetCPUUtilizationPercentage = ptrToInt32(int32(v)) - } - - return spec -} - -func expandCrossVersionObjectReference(in []interface{}) api.CrossVersionObjectReference { - if len(in) == 0 || in[0] == nil { - return api.CrossVersionObjectReference{} - } - ref := api.CrossVersionObjectReference{} - m := in[0].(map[string]interface{}) - - if v, ok := m["api_version"]; ok { - ref.APIVersion = v.(string) - } - if v, ok := m["kind"]; ok { - ref.Kind = v.(string) - } - if v, ok := m["name"]; ok { - ref.Name = v.(string) - } - return ref -} - -func flattenHorizontalPodAutoscalerSpec(spec api.HorizontalPodAutoscalerSpec) []interface{} { - m := make(map[string]interface{}, 0) - m["max_replicas"] = spec.MaxReplicas - if spec.MinReplicas != nil { - m["min_replicas"] = *spec.MinReplicas - } - m["scale_target_ref"] = flattenCrossVersionObjectReference(spec.ScaleTargetRef) - if spec.TargetCPUUtilizationPercentage != nil { - m["target_cpu_utilization_percentage"] = *spec.TargetCPUUtilizationPercentage - } - return []interface{}{m} -} - -func flattenCrossVersionObjectReference(ref api.CrossVersionObjectReference) []interface{} { - m := make(map[string]interface{}, 0) - if ref.APIVersion != "" { - m["api_version"] = ref.APIVersion - } - if ref.Kind != "" { - m["kind"] = ref.Kind - } - if ref.Name != "" { - m["name"] = ref.Name - } - return []interface{}{m} -} - -func patchHorizontalPodAutoscalerSpec(prefix string, pathPrefix string, d *schema.ResourceData) []PatchOperation { - ops := make([]PatchOperation, 0) - - if d.HasChange(prefix + "max_replicas") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/maxReplicas", - Value: d.Get(prefix + "max_replicas").(int), - }) - } - if d.HasChange(prefix + "min_replicas") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/minReplicas", - Value: d.Get(prefix + 
"min_replicas").(int), - }) - } - if d.HasChange(prefix + "scale_target_ref") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/scaleTargetRef", - Value: expandCrossVersionObjectReference(d.Get(prefix + "scale_target_ref").([]interface{})), - }) - } - if d.HasChange(prefix + "target_cpu_utilization_percentage") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/targetCPUUtilizationPercentage", - Value: d.Get(prefix + "target_cpu_utilization_percentage").(int), - }) - } - - return ops -} diff --git a/builtin/providers/kubernetes/structure_persistent_volume_claim.go b/builtin/providers/kubernetes/structure_persistent_volume_claim.go deleted file mode 100644 index 08b4a1ae2..000000000 --- a/builtin/providers/kubernetes/structure_persistent_volume_claim.go +++ /dev/null @@ -1,134 +0,0 @@ -package kubernetes - -import ( - "github.com/hashicorp/terraform/helper/schema" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api/v1" -) - -// Flatteners - -func flattenLabelSelector(in *metav1.LabelSelector) []interface{} { - att := make(map[string]interface{}) - if len(in.MatchLabels) > 0 { - att["match_labels"] = in.MatchLabels - } - if len(in.MatchExpressions) > 0 { - att["match_expressions"] = flattenLabelSelectorRequirement(in.MatchExpressions) - } - return []interface{}{att} -} - -func flattenLabelSelectorRequirement(in []metav1.LabelSelectorRequirement) []interface{} { - att := make([]interface{}, len(in), len(in)) - for i, n := range in { - m := make(map[string]interface{}) - m["key"] = n.Key - m["operator"] = n.Operator - m["values"] = newStringSet(schema.HashString, n.Values) - att[i] = m - } - return att -} - -func flattenPersistentVolumeClaimSpec(in v1.PersistentVolumeClaimSpec) []interface{} { - att := make(map[string]interface{}) - att["access_modes"] = flattenPersistentVolumeAccessModes(in.AccessModes) - att["resources"] = flattenResourceRequirements(in.Resources) - if in.Selector != nil { - att["selector"] = 
flattenLabelSelector(in.Selector) - } - if in.VolumeName != "" { - att["volume_name"] = in.VolumeName - } - return []interface{}{att} -} - -func flattenResourceRequirements(in v1.ResourceRequirements) []interface{} { - att := make(map[string]interface{}) - if len(in.Limits) > 0 { - att["limits"] = flattenResourceList(in.Limits) - } - if len(in.Requests) > 0 { - att["requests"] = flattenResourceList(in.Requests) - } - return []interface{}{att} -} - -// Expanders - -func expandLabelSelector(l []interface{}) *metav1.LabelSelector { - if len(l) == 0 || l[0] == nil { - return &metav1.LabelSelector{} - } - in := l[0].(map[string]interface{}) - obj := &metav1.LabelSelector{} - if v, ok := in["match_labels"].(map[string]interface{}); ok && len(v) > 0 { - obj.MatchLabels = expandStringMap(v) - } - if v, ok := in["match_expressions"].([]interface{}); ok && len(v) > 0 { - obj.MatchExpressions = expandLabelSelectorRequirement(v) - } - return obj -} - -func expandLabelSelectorRequirement(l []interface{}) []metav1.LabelSelectorRequirement { - if len(l) == 0 || l[0] == nil { - return []metav1.LabelSelectorRequirement{} - } - obj := make([]metav1.LabelSelectorRequirement, len(l), len(l)) - for i, n := range l { - in := n.(map[string]interface{}) - obj[i] = metav1.LabelSelectorRequirement{ - Key: in["key"].(string), - Operator: metav1.LabelSelectorOperator(in["operator"].(string)), - Values: sliceOfString(in["values"].(*schema.Set).List()), - } - } - return obj -} - -func expandPersistentVolumeClaimSpec(l []interface{}) (v1.PersistentVolumeClaimSpec, error) { - if len(l) == 0 || l[0] == nil { - return v1.PersistentVolumeClaimSpec{}, nil - } - in := l[0].(map[string]interface{}) - resourceRequirements, err := expandResourceRequirements(in["resources"].([]interface{})) - if err != nil { - return v1.PersistentVolumeClaimSpec{}, err - } - obj := v1.PersistentVolumeClaimSpec{ - AccessModes: expandPersistentVolumeAccessModes(in["access_modes"].(*schema.Set).List()), - Resources: 
resourceRequirements, - } - if v, ok := in["selector"].([]interface{}); ok && len(v) > 0 { - obj.Selector = expandLabelSelector(v) - } - if v, ok := in["volume_name"].(string); ok { - obj.VolumeName = v - } - return obj, nil -} - -func expandResourceRequirements(l []interface{}) (v1.ResourceRequirements, error) { - if len(l) == 0 || l[0] == nil { - return v1.ResourceRequirements{}, nil - } - in := l[0].(map[string]interface{}) - obj := v1.ResourceRequirements{} - if v, ok := in["limits"].(map[string]interface{}); ok && len(v) > 0 { - var err error - obj.Limits, err = expandMapToResourceList(v) - if err != nil { - return obj, err - } - } - if v, ok := in["requests"].(map[string]interface{}); ok && len(v) > 0 { - var err error - obj.Requests, err = expandMapToResourceList(v) - if err != nil { - return obj, err - } - } - return obj, nil -} diff --git a/builtin/providers/kubernetes/structure_persistent_volume_spec.go b/builtin/providers/kubernetes/structure_persistent_volume_spec.go deleted file mode 100644 index f5463d2ac..000000000 --- a/builtin/providers/kubernetes/structure_persistent_volume_spec.go +++ /dev/null @@ -1,988 +0,0 @@ -package kubernetes - -import ( - "k8s.io/kubernetes/pkg/api/v1" - - "github.com/hashicorp/terraform/helper/schema" -) - -// Flatteners - -func flattenAWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["volume_id"] = in.VolumeID - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.Partition != 0 { - att["partition"] = in.Partition - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenAzureDiskVolumeSource(in *v1.AzureDiskVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["disk_name"] = in.DiskName - att["data_disk_uri"] = in.DataDiskURI - att["caching_mode"] = string(*in.CachingMode) - if in.FSType != nil { - att["fs_type"] = *in.FSType - } - if in.ReadOnly != nil 
{ - att["read_only"] = *in.ReadOnly - } - return []interface{}{att} -} - -func flattenAzureFileVolumeSource(in *v1.AzureFileVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["secret_name"] = in.SecretName - att["share_name"] = in.ShareName - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenCephFSVolumeSource(in *v1.CephFSVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["monitors"] = newStringSet(schema.HashString, in.Monitors) - if in.Path != "" { - att["path"] = in.Path - } - if in.User != "" { - att["user"] = in.User - } - if in.SecretFile != "" { - att["secret_file"] = in.SecretFile - } - if in.SecretRef != nil { - att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenCinderVolumeSource(in *v1.CinderVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["volume_id"] = in.VolumeID - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenFCVolumeSource(in *v1.FCVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["target_ww_ns"] = newStringSet(schema.HashString, in.TargetWWNs) - att["lun"] = *in.Lun - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenFlexVolumeSource(in *v1.FlexVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["driver"] = in.Driver - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.SecretRef != nil { - att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - if len(in.Options) > 0 { - att["options"] = in.Options - } - return []interface{}{att} -} - 
-func flattenFlockerVolumeSource(in *v1.FlockerVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["dataset_name"] = in.DatasetName - att["dataset_uuid"] = in.DatasetUUID - return []interface{}{att} -} - -func flattenGCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["pd_name"] = in.PDName - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.Partition != 0 { - att["partition"] = in.Partition - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenGlusterfsVolumeSource(in *v1.GlusterfsVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["endpoints_name"] = in.EndpointsName - att["path"] = in.Path - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenHostPathVolumeSource(in *v1.HostPathVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["path"] = in.Path - return []interface{}{att} -} - -func flattenISCSIVolumeSource(in *v1.ISCSIVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.TargetPortal != "" { - att["target_portal"] = in.TargetPortal - } - if in.IQN != "" { - att["iqn"] = in.IQN - } - if in.Lun != 0 { - att["lun"] = in.Lun - } - if in.ISCSIInterface != "" { - att["iscsi_interface"] = in.ISCSIInterface - } - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenLocalObjectReference(in *v1.LocalObjectReference) []interface{} { - att := make(map[string]interface{}) - if in.Name != "" { - att["name"] = in.Name - } - return []interface{}{att} -} - -func flattenNFSVolumeSource(in *v1.NFSVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["server"] = in.Server - att["path"] = in.Path - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - 
return []interface{}{att} -} - -func flattenPersistentVolumeSource(in v1.PersistentVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.GCEPersistentDisk != nil { - att["gce_persistent_disk"] = flattenGCEPersistentDiskVolumeSource(in.GCEPersistentDisk) - } - if in.AWSElasticBlockStore != nil { - att["aws_elastic_block_store"] = flattenAWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore) - } - if in.HostPath != nil { - att["host_path"] = flattenHostPathVolumeSource(in.HostPath) - } - if in.Glusterfs != nil { - att["glusterfs"] = flattenGlusterfsVolumeSource(in.Glusterfs) - } - if in.NFS != nil { - att["nfs"] = flattenNFSVolumeSource(in.NFS) - } - if in.RBD != nil { - att["rbd"] = flattenRBDVolumeSource(in.RBD) - } - if in.ISCSI != nil { - att["iscsi"] = flattenISCSIVolumeSource(in.ISCSI) - } - if in.Cinder != nil { - att["cinder"] = flattenCinderVolumeSource(in.Cinder) - } - if in.CephFS != nil { - att["ceph_fs"] = flattenCephFSVolumeSource(in.CephFS) - } - if in.FC != nil { - att["fc"] = flattenFCVolumeSource(in.FC) - } - if in.Flocker != nil { - att["flocker"] = flattenFlockerVolumeSource(in.Flocker) - } - if in.FlexVolume != nil { - att["flex_volume"] = flattenFlexVolumeSource(in.FlexVolume) - } - if in.AzureFile != nil { - att["azure_file"] = flattenAzureFileVolumeSource(in.AzureFile) - } - if in.VsphereVolume != nil { - att["vsphere_volume"] = flattenVsphereVirtualDiskVolumeSource(in.VsphereVolume) - } - if in.Quobyte != nil { - att["quobyte"] = flattenQuobyteVolumeSource(in.Quobyte) - } - if in.AzureDisk != nil { - att["azure_disk"] = flattenAzureDiskVolumeSource(in.AzureDisk) - } - if in.PhotonPersistentDisk != nil { - att["photon_persistent_disk"] = flattenPhotonPersistentDiskVolumeSource(in.PhotonPersistentDisk) - } - return []interface{}{att} -} - -func flattenPersistentVolumeSpec(in v1.PersistentVolumeSpec) []interface{} { - att := make(map[string]interface{}) - if len(in.Capacity) > 0 { - att["capacity"] = 
flattenResourceList(in.Capacity) - } - - att["persistent_volume_source"] = flattenPersistentVolumeSource(in.PersistentVolumeSource) - if len(in.AccessModes) > 0 { - att["access_modes"] = flattenPersistentVolumeAccessModes(in.AccessModes) - } - if in.PersistentVolumeReclaimPolicy != "" { - att["persistent_volume_reclaim_policy"] = in.PersistentVolumeReclaimPolicy - } - return []interface{}{att} -} - -func flattenPhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["pd_id"] = in.PdID - if in.FSType != "" { - att["fs_type"] = in.FSType - } - return []interface{}{att} -} - -func flattenQuobyteVolumeSource(in *v1.QuobyteVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["registry"] = in.Registry - att["volume"] = in.Volume - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - if in.User != "" { - att["user"] = in.User - } - if in.Group != "" { - att["group"] = in.Group - } - return []interface{}{att} -} - -func flattenRBDVolumeSource(in *v1.RBDVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["ceph_monitors"] = newStringSet(schema.HashString, in.CephMonitors) - att["rbd_image"] = in.RBDImage - if in.FSType != "" { - att["fs_type"] = in.FSType - } - if in.RBDPool != "" { - att["rbd_pool"] = in.RBDPool - } - if in.RadosUser != "" { - att["rados_user"] = in.RadosUser - } - if in.Keyring != "" { - att["keyring"] = in.Keyring - } - if in.SecretRef != nil { - att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) - } - if in.ReadOnly != false { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - -func flattenVsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["volume_path"] = in.VolumePath - if in.FSType != "" { - att["fs_type"] = in.FSType - } - return []interface{}{att} -} - -// Expanders - -func 
expandAWSElasticBlockStoreVolumeSource(l []interface{}) *v1.AWSElasticBlockStoreVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.AWSElasticBlockStoreVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.AWSElasticBlockStoreVolumeSource{ - VolumeID: in["volume_id"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["partition"].(int); ok { - obj.Partition = int32(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandAzureDiskVolumeSource(l []interface{}) *v1.AzureDiskVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.AzureDiskVolumeSource{} - } - in := l[0].(map[string]interface{}) - cachingMode := v1.AzureDataDiskCachingMode(in["caching_mode"].(string)) - obj := &v1.AzureDiskVolumeSource{ - CachingMode: &cachingMode, - DiskName: in["disk_name"].(string), - DataDiskURI: in["data_disk_uri"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = ptrToString(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = ptrToBool(v) - } - return obj -} - -func expandAzureFileVolumeSource(l []interface{}) *v1.AzureFileVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.AzureFileVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.AzureFileVolumeSource{ - SecretName: in["secret_name"].(string), - ShareName: in["share_name"].(string), - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandCephFSVolumeSource(l []interface{}) *v1.CephFSVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.CephFSVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.CephFSVolumeSource{ - Monitors: sliceOfString(in["monitors"].(*schema.Set).List()), - } - if v, ok := in["path"].(string); ok { - obj.Path = v - } - if v, ok := in["user"].(string); ok { - obj.User = v - } - if v, ok := in["secret_file"].(string); ok { - obj.SecretFile = v - } - if v, 
ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { - obj.SecretRef = expandLocalObjectReference(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandCinderVolumeSource(l []interface{}) *v1.CinderVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.CinderVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.CinderVolumeSource{ - VolumeID: in["volume_id"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandFCVolumeSource(l []interface{}) *v1.FCVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.FCVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.FCVolumeSource{ - TargetWWNs: sliceOfString(in["target_ww_ns"].(*schema.Set).List()), - Lun: ptrToInt32(int32(in["lun"].(int))), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandFlexVolumeSource(l []interface{}) *v1.FlexVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.FlexVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.FlexVolumeSource{ - Driver: in["driver"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { - obj.SecretRef = expandLocalObjectReference(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - if v, ok := in["options"].(map[string]interface{}); ok && len(v) > 0 { - obj.Options = expandStringMap(v) - } - return obj -} - -func expandFlockerVolumeSource(l []interface{}) *v1.FlockerVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.FlockerVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.FlockerVolumeSource{ - DatasetName: in["dataset_name"].(string), - DatasetUUID: in["dataset_uuid"].(string), - 
} - return obj -} - -func expandGCEPersistentDiskVolumeSource(l []interface{}) *v1.GCEPersistentDiskVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.GCEPersistentDiskVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.GCEPersistentDiskVolumeSource{ - PDName: in["pd_name"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["partition"].(int); ok { - obj.Partition = int32(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandGlusterfsVolumeSource(l []interface{}) *v1.GlusterfsVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.GlusterfsVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.GlusterfsVolumeSource{ - EndpointsName: in["endpoints_name"].(string), - Path: in["path"].(string), - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandHostPathVolumeSource(l []interface{}) *v1.HostPathVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.HostPathVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.HostPathVolumeSource{ - Path: in["path"].(string), - } - return obj -} - -func expandISCSIVolumeSource(l []interface{}) *v1.ISCSIVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.ISCSIVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.ISCSIVolumeSource{ - TargetPortal: in["target_portal"].(string), - IQN: in["iqn"].(string), - } - if v, ok := in["lun"].(int); ok { - obj.Lun = int32(v) - } - if v, ok := in["iscsi_interface"].(string); ok { - obj.ISCSIInterface = v - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandLocalObjectReference(l []interface{}) *v1.LocalObjectReference { - if len(l) == 0 || l[0] == nil { - return &v1.LocalObjectReference{} - } - in := l[0].(map[string]interface{}) - obj := 
&v1.LocalObjectReference{} - if v, ok := in["name"].(string); ok { - obj.Name = v - } - return obj -} - -func expandNFSVolumeSource(l []interface{}) *v1.NFSVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.NFSVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.NFSVolumeSource{ - Server: in["server"].(string), - Path: in["path"].(string), - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandPersistentVolumeSource(l []interface{}) v1.PersistentVolumeSource { - if len(l) == 0 || l[0] == nil { - return v1.PersistentVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := v1.PersistentVolumeSource{} - if v, ok := in["gce_persistent_disk"].([]interface{}); ok && len(v) > 0 { - obj.GCEPersistentDisk = expandGCEPersistentDiskVolumeSource(v) - } - if v, ok := in["aws_elastic_block_store"].([]interface{}); ok && len(v) > 0 { - obj.AWSElasticBlockStore = expandAWSElasticBlockStoreVolumeSource(v) - } - if v, ok := in["host_path"].([]interface{}); ok && len(v) > 0 { - obj.HostPath = expandHostPathVolumeSource(v) - } - if v, ok := in["glusterfs"].([]interface{}); ok && len(v) > 0 { - obj.Glusterfs = expandGlusterfsVolumeSource(v) - } - if v, ok := in["nfs"].([]interface{}); ok && len(v) > 0 { - obj.NFS = expandNFSVolumeSource(v) - } - if v, ok := in["rbd"].([]interface{}); ok && len(v) > 0 { - obj.RBD = expandRBDVolumeSource(v) - } - if v, ok := in["iscsi"].([]interface{}); ok && len(v) > 0 { - obj.ISCSI = expandISCSIVolumeSource(v) - } - if v, ok := in["cinder"].([]interface{}); ok && len(v) > 0 { - obj.Cinder = expandCinderVolumeSource(v) - } - if v, ok := in["ceph_fs"].([]interface{}); ok && len(v) > 0 { - obj.CephFS = expandCephFSVolumeSource(v) - } - if v, ok := in["fc"].([]interface{}); ok && len(v) > 0 { - obj.FC = expandFCVolumeSource(v) - } - if v, ok := in["flocker"].([]interface{}); ok && len(v) > 0 { - obj.Flocker = expandFlockerVolumeSource(v) - } - if v, ok := 
in["flex_volume"].([]interface{}); ok && len(v) > 0 { - obj.FlexVolume = expandFlexVolumeSource(v) - } - if v, ok := in["azure_file"].([]interface{}); ok && len(v) > 0 { - obj.AzureFile = expandAzureFileVolumeSource(v) - } - if v, ok := in["vsphere_volume"].([]interface{}); ok && len(v) > 0 { - obj.VsphereVolume = expandVsphereVirtualDiskVolumeSource(v) - } - if v, ok := in["quobyte"].([]interface{}); ok && len(v) > 0 { - obj.Quobyte = expandQuobyteVolumeSource(v) - } - if v, ok := in["azure_disk"].([]interface{}); ok && len(v) > 0 { - obj.AzureDisk = expandAzureDiskVolumeSource(v) - } - if v, ok := in["photon_persistent_disk"].([]interface{}); ok && len(v) > 0 { - obj.PhotonPersistentDisk = expandPhotonPersistentDiskVolumeSource(v) - } - return obj -} - -func expandPersistentVolumeSpec(l []interface{}) (v1.PersistentVolumeSpec, error) { - if len(l) == 0 || l[0] == nil { - return v1.PersistentVolumeSpec{}, nil - } - in := l[0].(map[string]interface{}) - obj := v1.PersistentVolumeSpec{} - if v, ok := in["capacity"].(map[string]interface{}); ok && len(v) > 0 { - var err error - obj.Capacity, err = expandMapToResourceList(v) - if err != nil { - return obj, err - } - } - if v, ok := in["persistent_volume_source"].([]interface{}); ok && len(v) > 0 { - obj.PersistentVolumeSource = expandPersistentVolumeSource(v) - } - if v, ok := in["access_modes"].(*schema.Set); ok && v.Len() > 0 { - obj.AccessModes = expandPersistentVolumeAccessModes(v.List()) - } - if v, ok := in["persistent_volume_reclaim_policy"].(string); ok { - obj.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(v) - } - return obj, nil -} - -func expandPhotonPersistentDiskVolumeSource(l []interface{}) *v1.PhotonPersistentDiskVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.PhotonPersistentDiskVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.PhotonPersistentDiskVolumeSource{ - PdID: in["pd_id"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType 
= v - } - return obj -} - -func expandQuobyteVolumeSource(l []interface{}) *v1.QuobyteVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.QuobyteVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.QuobyteVolumeSource{ - Registry: in["registry"].(string), - Volume: in["volume"].(string), - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - if v, ok := in["user"].(string); ok { - obj.User = v - } - if v, ok := in["group"].(string); ok { - obj.Group = v - } - return obj -} - -func expandRBDVolumeSource(l []interface{}) *v1.RBDVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.RBDVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.RBDVolumeSource{ - CephMonitors: expandStringSlice(in["ceph_monitors"].(*schema.Set).List()), - RBDImage: in["rbd_image"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - if v, ok := in["rbd_pool"].(string); ok { - obj.RBDPool = v - } - if v, ok := in["rados_user"].(string); ok { - obj.RadosUser = v - } - if v, ok := in["keyring"].(string); ok { - obj.Keyring = v - } - if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { - obj.SecretRef = expandLocalObjectReference(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - -func expandVsphereVirtualDiskVolumeSource(l []interface{}) *v1.VsphereVirtualDiskVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.VsphereVirtualDiskVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.VsphereVirtualDiskVolumeSource{ - VolumePath: in["volume_path"].(string), - } - if v, ok := in["fs_type"].(string); ok { - obj.FSType = v - } - return obj -} - -func patchPersistentVolumeSpec(pathPrefix, prefix string, d *schema.ResourceData) (PatchOperations, error) { - ops := make([]PatchOperation, 0) - prefix += ".0." 
- - if d.HasChange(prefix + "capacity") { - v := d.Get(prefix + "capacity").(map[string]interface{}) - capacity, err := expandMapToResourceList(v) - if err != nil { - return ops, err - } - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/capacity", - Value: capacity, - }) - } - - if d.HasChange(prefix + "persistent_volume_source") { - ops = append(ops, patchPersistentVolumeSource( - pathPrefix, - prefix+"persistent_volume_source.0.", - d, - )...) - } - - if d.HasChange(prefix + "access_modes") { - v := d.Get(prefix + "access_modes").(*schema.Set) - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/accessModes", - Value: expandPersistentVolumeAccessModes(v.List()), - }) - } - if d.HasChange(prefix + "access_modes") { - v := d.Get(prefix + "persistent_volume_reclaim_policy").(string) - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/persistentVolumeReclaimPolicy", - Value: v1.PersistentVolumeReclaimPolicy(v), - }) - } - - return ops, nil -} - -func patchPersistentVolumeSource(pathPrefix, prefix string, d *schema.ResourceData) []PatchOperation { - ops := make([]PatchOperation, 0) - - if d.HasChange(prefix + "gce_persistent_disk") { - oldIn, newIn := d.GetChange(prefix + "gce_persistent_disk") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/gcePersistentDisk", - Value: expandGCEPersistentDiskVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/gcePersistentDisk"}) - } - } - - if d.HasChange(prefix + "aws_elastic_block_store") { - oldIn, newIn := d.GetChange(prefix + "aws_elastic_block_store") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/awsElasticBlockStore", - Value: expandAWSElasticBlockStoreVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, 
&RemoveOperation{Path: pathPrefix + "/awsElasticBlockStore"}) - } - } - - if d.HasChange(prefix + "host_path") { - oldIn, newIn := d.GetChange(prefix + "host_path") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/hostPath", - Value: expandHostPathVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/hostPath"}) - } - } - - if d.HasChange(prefix + "glusterfs") { - oldIn, newIn := d.GetChange(prefix + "glusterfs") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/glusterfs", - Value: expandGlusterfsVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/glusterfs"}) - } - } - - if d.HasChange(prefix + "nfs") { - oldIn, newIn := d.GetChange(prefix + "nfs") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/nfs", - Value: expandNFSVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/nfs"}) - } - } - - if d.HasChange(prefix + "rbd") { - oldIn, newIn := d.GetChange(prefix + "rbd") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/rbd", - Value: expandRBDVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/rbd"}) - } - } - - if d.HasChange(prefix + "iscsi") { - oldIn, newIn := d.GetChange(prefix + "iscsi") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/iscsi", - Value: expandISCSIVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, 
&RemoveOperation{Path: pathPrefix + "/iscsi"}) - } - } - - if d.HasChange(prefix + "cinder") { - oldIn, newIn := d.GetChange(prefix + "cinder") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/cinder", - Value: expandCinderVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/cinder"}) - } - } - - if d.HasChange(prefix + "ceph_fs") { - oldIn, newIn := d.GetChange(prefix + "ceph_fs") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/cephfs", - Value: expandCephFSVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/cephfs"}) - } - } - - if d.HasChange(prefix + "fc") { - oldIn, newIn := d.GetChange(prefix + "fc") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/fc", - Value: expandFCVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/fc"}) - } - } - - if d.HasChange(prefix + "flocker") { - oldIn, newIn := d.GetChange(prefix + "flocker") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/flocker", - Value: expandFlockerVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/flocker"}) - } - } - - if d.HasChange(prefix + "flex_volume") { - oldIn, newIn := d.GetChange(prefix + "flex_volume") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/flexVolume", - Value: expandFlexVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, 
&RemoveOperation{Path: pathPrefix + "/flexVolume"}) - } - } - - if d.HasChange(prefix + "azure_file") { - oldIn, newIn := d.GetChange(prefix + "azure_file") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/azureFile", - Value: expandAzureFileVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/azureFile"}) - } - } - - if d.HasChange(prefix + "vsphere_volume") { - oldIn, newIn := d.GetChange(prefix + "vsphere_volume") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/vsphereVolume", - Value: expandVsphereVirtualDiskVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/vsphereVolume"}) - } - } - - if d.HasChange(prefix + "quobyte") { - oldIn, newIn := d.GetChange(prefix + "quobyte") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/quobyte", - Value: expandQuobyteVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/quobyte"}) - } - } - - if d.HasChange(prefix + "azure_disk") { - oldIn, newIn := d.GetChange(prefix + "azure_disk") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/azureDisk", - Value: expandAzureDiskVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/azureDisk"}) - } - } - - if d.HasChange(prefix + "photon_persistent_disk") { - oldIn, newIn := d.GetChange(prefix + "photon_persistent_disk") - if v, ok := newIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/photonPersistentDisk", - 
Value: expandPhotonPersistentDiskVolumeSource(v), - }) - } else if v, ok := oldIn.([]interface{}); ok && len(v) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/photonPersistentDisk"}) - } - } - - return ops -} diff --git a/builtin/providers/kubernetes/structure_service_spec.go b/builtin/providers/kubernetes/structure_service_spec.go deleted file mode 100644 index 9fe4f3986..000000000 --- a/builtin/providers/kubernetes/structure_service_spec.go +++ /dev/null @@ -1,188 +0,0 @@ -package kubernetes - -import ( - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api/v1" -) - -// Flatteners - -func flattenIntOrString(in intstr.IntOrString) int { - return in.IntValue() -} - -func flattenServicePort(in []v1.ServicePort) []interface{} { - att := make([]interface{}, len(in), len(in)) - for i, n := range in { - m := make(map[string]interface{}) - m["name"] = n.Name - m["protocol"] = string(n.Protocol) - m["port"] = int(n.Port) - m["target_port"] = flattenIntOrString(n.TargetPort) - m["node_port"] = int(n.NodePort) - - att[i] = m - } - return att -} - -func flattenServiceSpec(in v1.ServiceSpec) []interface{} { - att := make(map[string]interface{}) - if len(in.Ports) > 0 { - att["port"] = flattenServicePort(in.Ports) - } - if len(in.Selector) > 0 { - att["selector"] = in.Selector - } - if in.ClusterIP != "" { - att["cluster_ip"] = in.ClusterIP - } - if in.Type != "" { - att["type"] = string(in.Type) - } - if len(in.ExternalIPs) > 0 { - att["external_ips"] = newStringSet(schema.HashString, in.ExternalIPs) - } - if in.SessionAffinity != "" { - att["session_affinity"] = string(in.SessionAffinity) - } - if in.LoadBalancerIP != "" { - att["load_balancer_ip"] = in.LoadBalancerIP - } - if len(in.LoadBalancerSourceRanges) > 0 { - att["load_balancer_source_ranges"] = newStringSet(schema.HashString, in.LoadBalancerSourceRanges) - } - if in.ExternalName != "" { - att["external_name"] = in.ExternalName - } - 
return []interface{}{att} -} - -// Expanders - -func expandIntOrString(in int) intstr.IntOrString { - return intstr.FromInt(in) -} - -func expandServicePort(l []interface{}) []v1.ServicePort { - if len(l) == 0 || l[0] == nil { - return []v1.ServicePort{} - } - obj := make([]v1.ServicePort, len(l), len(l)) - for i, n := range l { - cfg := n.(map[string]interface{}) - obj[i] = v1.ServicePort{ - Port: int32(cfg["port"].(int)), - TargetPort: expandIntOrString(cfg["target_port"].(int)), - } - if v, ok := cfg["name"].(string); ok { - obj[i].Name = v - } - if v, ok := cfg["protocol"].(string); ok { - obj[i].Protocol = v1.Protocol(v) - } - if v, ok := cfg["node_port"].(int); ok { - obj[i].NodePort = int32(v) - } - } - return obj -} - -func expandServiceSpec(l []interface{}) v1.ServiceSpec { - if len(l) == 0 || l[0] == nil { - return v1.ServiceSpec{} - } - in := l[0].(map[string]interface{}) - obj := v1.ServiceSpec{} - - if v, ok := in["port"].([]interface{}); ok && len(v) > 0 { - obj.Ports = expandServicePort(v) - } - if v, ok := in["selector"].(map[string]interface{}); ok && len(v) > 0 { - obj.Selector = expandStringMap(v) - } - if v, ok := in["cluster_ip"].(string); ok { - obj.ClusterIP = v - } - if v, ok := in["type"].(string); ok { - obj.Type = v1.ServiceType(v) - } - if v, ok := in["external_ips"].(*schema.Set); ok && v.Len() > 0 { - obj.ExternalIPs = sliceOfString(v.List()) - } - if v, ok := in["session_affinity"].(string); ok { - obj.SessionAffinity = v1.ServiceAffinity(v) - } - if v, ok := in["load_balancer_ip"].(string); ok { - obj.LoadBalancerIP = v - } - if v, ok := in["load_balancer_source_ranges"].(*schema.Set); ok && v.Len() > 0 { - obj.LoadBalancerSourceRanges = sliceOfString(v.List()) - } - if v, ok := in["external_name"].(string); ok { - obj.ExternalName = v - } - return obj -} - -// Patch Ops - -func patchServiceSpec(keyPrefix, pathPrefix string, d *schema.ResourceData) PatchOperations { - ops := make([]PatchOperation, 0, 0) - if d.HasChange(keyPrefix + 
"selector") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "selector", - Value: d.Get(keyPrefix + "selector").(map[string]interface{}), - }) - } - if d.HasChange(keyPrefix + "type") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "type", - Value: d.Get(keyPrefix + "type").(string), - }) - } - if d.HasChange(keyPrefix + "session_affinity") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "sessionAffinity", - Value: d.Get(keyPrefix + "session_affinity").(string), - }) - } - if d.HasChange(keyPrefix + "load_balancer_ip") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "loadBalancerIP", - Value: d.Get(keyPrefix + "load_balancer_ip").(string), - }) - } - if d.HasChange(keyPrefix + "load_balancer_source_ranges") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "loadBalancerSourceRanges", - Value: d.Get(keyPrefix + "load_balancer_source_ranges").(*schema.Set).List(), - }) - } - if d.HasChange(keyPrefix + "port") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "ports", - Value: expandServicePort(d.Get(keyPrefix + "port").([]interface{})), - }) - } - if d.HasChange(keyPrefix + "external_ips") { - // If we haven't done this the deprecated field would have priority - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "deprecatedPublicIPs", - Value: nil, - }) - - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "externalIPs", - Value: d.Get(keyPrefix + "external_ips").(*schema.Set).List(), - }) - } - if d.HasChange(keyPrefix + "external_name") { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "externalName", - Value: d.Get(keyPrefix + "external_name").(string), - }) - } - return ops -} diff --git a/builtin/providers/kubernetes/structures.go b/builtin/providers/kubernetes/structures.go deleted file mode 100644 index 8c1855176..000000000 --- a/builtin/providers/kubernetes/structures.go +++ /dev/null @@ -1,442 +0,0 @@ -package kubernetes - -import ( - "encoding/base64" - 
"fmt" - "net/url" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "k8s.io/kubernetes/pkg/api/v1" -) - -func idParts(id string) (string, string) { - parts := strings.Split(id, "/") - return parts[0], parts[1] -} - -func buildId(meta metav1.ObjectMeta) string { - return meta.Namespace + "/" + meta.Name -} - -func expandMetadata(in []interface{}) metav1.ObjectMeta { - meta := metav1.ObjectMeta{} - if len(in) < 1 { - return meta - } - m := in[0].(map[string]interface{}) - - meta.Annotations = expandStringMap(m["annotations"].(map[string]interface{})) - meta.Labels = expandStringMap(m["labels"].(map[string]interface{})) - - if v, ok := m["generate_name"]; ok { - meta.GenerateName = v.(string) - } - if v, ok := m["name"]; ok { - meta.Name = v.(string) - } - if v, ok := m["namespace"]; ok { - meta.Namespace = v.(string) - } - - return meta -} - -func patchMetadata(keyPrefix, pathPrefix string, d *schema.ResourceData) PatchOperations { - ops := make([]PatchOperation, 0, 0) - if d.HasChange(keyPrefix + "annotations") { - oldV, newV := d.GetChange(keyPrefix + "annotations") - diffOps := diffStringMap(pathPrefix+"annotations", oldV.(map[string]interface{}), newV.(map[string]interface{})) - ops = append(ops, diffOps...) - } - if d.HasChange(keyPrefix + "labels") { - oldV, newV := d.GetChange(keyPrefix + "labels") - diffOps := diffStringMap(pathPrefix+"labels", oldV.(map[string]interface{}), newV.(map[string]interface{})) - ops = append(ops, diffOps...) 
- } - return ops -} - -func expandStringMap(m map[string]interface{}) map[string]string { - result := make(map[string]string) - for k, v := range m { - result[k] = v.(string) - } - return result -} - -func expandStringSlice(s []interface{}) []string { - result := make([]string, len(s), len(s)) - for k, v := range s { - result[k] = v.(string) - } - return result -} - -func flattenMetadata(meta metav1.ObjectMeta) []map[string]interface{} { - m := make(map[string]interface{}) - m["annotations"] = removeInternalKeys(meta.Annotations) - if meta.GenerateName != "" { - m["generate_name"] = meta.GenerateName - } - m["labels"] = removeInternalKeys(meta.Labels) - m["name"] = meta.Name - m["resource_version"] = meta.ResourceVersion - m["self_link"] = meta.SelfLink - m["uid"] = fmt.Sprintf("%v", meta.UID) - m["generation"] = meta.Generation - - if meta.Namespace != "" { - m["namespace"] = meta.Namespace - } - - return []map[string]interface{}{m} -} - -func removeInternalKeys(m map[string]string) map[string]string { - for k, _ := range m { - if isInternalKey(k) { - delete(m, k) - } - } - return m -} - -func isInternalKey(annotationKey string) bool { - u, err := url.Parse("//" + annotationKey) - if err == nil && strings.HasSuffix(u.Hostname(), "kubernetes.io") { - return true - } - - return false -} - -func byteMapToStringMap(m map[string][]byte) map[string]string { - result := make(map[string]string) - for k, v := range m { - result[k] = string(v) - } - return result -} - -func ptrToString(s string) *string { - return &s -} - -func ptrToInt(i int) *int { - return &i -} - -func ptrToBool(b bool) *bool { - return &b -} - -func ptrToInt32(i int32) *int32 { - return &i -} - -func ptrToInt64(i int64) *int64 { - return &i -} - -func sliceOfString(slice []interface{}) []string { - result := make([]string, len(slice), len(slice)) - for i, s := range slice { - result[i] = s.(string) - } - return result -} - -func base64EncodeStringMap(m map[string]interface{}) map[string]interface{} { - 
result := make(map[string]interface{}) - for k, v := range m { - value := v.(string) - result[k] = (base64.StdEncoding.EncodeToString([]byte(value))) - } - return result -} - -func flattenResourceList(l api.ResourceList) map[string]string { - m := make(map[string]string) - for k, v := range l { - m[string(k)] = v.String() - } - return m -} - -func expandMapToResourceList(m map[string]interface{}) (api.ResourceList, error) { - out := make(map[api.ResourceName]resource.Quantity) - for stringKey, origValue := range m { - key := api.ResourceName(stringKey) - var value resource.Quantity - - if v, ok := origValue.(int); ok { - q := resource.NewQuantity(int64(v), resource.DecimalExponent) - value = *q - } else if v, ok := origValue.(string); ok { - var err error - value, err = resource.ParseQuantity(v) - if err != nil { - return out, err - } - } else { - return out, fmt.Errorf("Unexpected value type: %#v", origValue) - } - - out[key] = value - } - return out, nil -} - -func flattenPersistentVolumeAccessModes(in []api.PersistentVolumeAccessMode) *schema.Set { - var out = make([]interface{}, len(in), len(in)) - for i, v := range in { - out[i] = string(v) - } - return schema.NewSet(schema.HashString, out) -} - -func expandPersistentVolumeAccessModes(s []interface{}) []api.PersistentVolumeAccessMode { - out := make([]api.PersistentVolumeAccessMode, len(s), len(s)) - for i, v := range s { - out[i] = api.PersistentVolumeAccessMode(v.(string)) - } - return out -} - -func flattenResourceQuotaSpec(in api.ResourceQuotaSpec) []interface{} { - out := make([]interface{}, 1) - - m := make(map[string]interface{}, 0) - m["hard"] = flattenResourceList(in.Hard) - m["scopes"] = flattenResourceQuotaScopes(in.Scopes) - - out[0] = m - return out -} - -func expandResourceQuotaSpec(s []interface{}) (api.ResourceQuotaSpec, error) { - out := api.ResourceQuotaSpec{} - if len(s) < 1 { - return out, nil - } - m := s[0].(map[string]interface{}) - - if v, ok := m["hard"]; ok { - list, err := 
expandMapToResourceList(v.(map[string]interface{})) - if err != nil { - return out, err - } - out.Hard = list - } - - if v, ok := m["scopes"]; ok { - out.Scopes = expandResourceQuotaScopes(v.(*schema.Set).List()) - } - - return out, nil -} - -func flattenResourceQuotaScopes(in []api.ResourceQuotaScope) *schema.Set { - out := make([]string, len(in), len(in)) - for i, scope := range in { - out[i] = string(scope) - } - return newStringSet(schema.HashString, out) -} - -func expandResourceQuotaScopes(s []interface{}) []api.ResourceQuotaScope { - out := make([]api.ResourceQuotaScope, len(s), len(s)) - for i, scope := range s { - out[i] = api.ResourceQuotaScope(scope.(string)) - } - return out -} - -func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set { - var out = make([]interface{}, len(in), len(in)) - for i, v := range in { - out[i] = v - } - return schema.NewSet(f, out) -} -func newInt64Set(f schema.SchemaSetFunc, in []int64) *schema.Set { - var out = make([]interface{}, len(in), len(in)) - for i, v := range in { - out[i] = int(v) - } - return schema.NewSet(f, out) -} - -func resourceListEquals(x, y api.ResourceList) bool { - for k, v := range x { - yValue, ok := y[k] - if !ok { - return false - } - if v.Cmp(yValue) != 0 { - return false - } - } - for k, v := range y { - xValue, ok := x[k] - if !ok { - return false - } - if v.Cmp(xValue) != 0 { - return false - } - } - return true -} - -func expandLimitRangeSpec(s []interface{}, isNew bool) (api.LimitRangeSpec, error) { - out := api.LimitRangeSpec{} - if len(s) < 1 || s[0] == nil { - return out, nil - } - m := s[0].(map[string]interface{}) - - if limits, ok := m["limit"].([]interface{}); ok { - newLimits := make([]api.LimitRangeItem, len(limits), len(limits)) - - for i, l := range limits { - lrItem := api.LimitRangeItem{} - limit := l.(map[string]interface{}) - - if v, ok := limit["type"]; ok { - lrItem.Type = api.LimitType(v.(string)) - } - - // defaultRequest is forbidden for Pod limits, even though 
it's set & returned by API - // this is how we avoid sending it back - if v, ok := limit["default_request"]; ok { - drm := v.(map[string]interface{}) - if lrItem.Type == api.LimitTypePod && len(drm) > 0 { - if isNew { - return out, fmt.Errorf("limit.%d.default_request cannot be set for Pod limit", i) - } - } else { - el, err := expandMapToResourceList(drm) - if err != nil { - return out, err - } - lrItem.DefaultRequest = el - } - } - - if v, ok := limit["default"]; ok { - el, err := expandMapToResourceList(v.(map[string]interface{})) - if err != nil { - return out, err - } - lrItem.Default = el - } - if v, ok := limit["max"]; ok { - el, err := expandMapToResourceList(v.(map[string]interface{})) - if err != nil { - return out, err - } - lrItem.Max = el - } - if v, ok := limit["max_limit_request_ratio"]; ok { - el, err := expandMapToResourceList(v.(map[string]interface{})) - if err != nil { - return out, err - } - lrItem.MaxLimitRequestRatio = el - } - if v, ok := limit["min"]; ok { - el, err := expandMapToResourceList(v.(map[string]interface{})) - if err != nil { - return out, err - } - lrItem.Min = el - } - - newLimits[i] = lrItem - } - - out.Limits = newLimits - } - - return out, nil -} - -func flattenLimitRangeSpec(in api.LimitRangeSpec) []interface{} { - out := make([]interface{}, 1) - limits := make([]interface{}, len(in.Limits), len(in.Limits)) - - for i, l := range in.Limits { - m := make(map[string]interface{}, 0) - m["default"] = flattenResourceList(l.Default) - m["default_request"] = flattenResourceList(l.DefaultRequest) - m["max"] = flattenResourceList(l.Max) - m["max_limit_request_ratio"] = flattenResourceList(l.MaxLimitRequestRatio) - m["min"] = flattenResourceList(l.Min) - m["type"] = string(l.Type) - - limits[i] = m - } - out[0] = map[string]interface{}{ - "limit": limits, - } - return out -} - -func schemaSetToStringArray(set *schema.Set) []string { - array := make([]string, 0, set.Len()) - for _, elem := range set.List() { - e := elem.(string) - 
array = append(array, e) - } - return array -} - -func schemaSetToInt64Array(set *schema.Set) []int64 { - array := make([]int64, 0, set.Len()) - for _, elem := range set.List() { - e := elem.(int) - array = append(array, int64(e)) - } - return array -} -func flattenLabelSelectorRequirementList(l []metav1.LabelSelectorRequirement) []interface{} { - att := make([]map[string]interface{}, len(l)) - for i, v := range l { - m := map[string]interface{}{} - m["key"] = v.Key - m["values"] = newStringSet(schema.HashString, v.Values) - m["operator"] = string(v.Operator) - att[i] = m - } - return []interface{}{att} -} - -func flattenLocalObjectReferenceArray(in []api.LocalObjectReference) []interface{} { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - if v.Name != "" { - m["name"] = v.Name - } - att[i] = m - } - return att -} -func expandLocalObjectReferenceArray(in []interface{}) []api.LocalObjectReference { - att := []api.LocalObjectReference{} - if len(in) < 1 { - return att - } - att = make([]api.LocalObjectReference, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if name, ok := p["name"]; ok { - att[i].Name = name.(string) - } - } - return att -} diff --git a/builtin/providers/kubernetes/structures_container.go b/builtin/providers/kubernetes/structures_container.go deleted file mode 100644 index b01ba3683..000000000 --- a/builtin/providers/kubernetes/structures_container.go +++ /dev/null @@ -1,846 +0,0 @@ -package kubernetes - -import ( - "strconv" - - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api/v1" -) - -func flattenCapability(in []v1.Capability) []string { - att := make([]string, 0, len(in)) - for i, v := range in { - att[i] = string(v) - } - return att -} - -func flattenContainerSecurityContext(in *v1.SecurityContext) []interface{} { - att := make(map[string]interface{}) - - if in.Privileged != nil { - att["privileged"] = *in.Privileged - } - if in.ReadOnlyRootFilesystem != 
nil { - att["read_only_root_filesystem"] = *in.ReadOnlyRootFilesystem - } - - if in.RunAsNonRoot != nil { - att["run_as_non_root"] = *in.RunAsNonRoot - } - if in.RunAsUser != nil { - att["run_as_user"] = *in.RunAsUser - } - - if in.SELinuxOptions != nil { - att["se_linux_options"] = flattenSeLinuxOptions(in.SELinuxOptions) - } - if in.Capabilities != nil { - att["capabilities"] = flattenSecurityCapabilities(in.Capabilities) - } - return []interface{}{att} - -} - -func flattenSecurityCapabilities(in *v1.Capabilities) []interface{} { - att := make(map[string]interface{}) - - if in.Add != nil { - att["add"] = flattenCapability(in.Add) - } - if in.Drop != nil { - att["drop"] = flattenCapability(in.Drop) - } - - return []interface{}{att} -} - -func flattenHandler(in *v1.Handler) []interface{} { - att := make(map[string]interface{}) - - if in.Exec != nil { - att["exec"] = flattenExec(in.Exec) - } - if in.HTTPGet != nil { - att["http_get"] = flattenHTTPGet(in.HTTPGet) - } - if in.TCPSocket != nil { - att["tcp_socket"] = flattenTCPSocket(in.TCPSocket) - } - - return []interface{}{att} -} - -func flattenHTTPHeader(in []v1.HTTPHeader) []interface{} { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - - if v.Name != "" { - m["name"] = v.Name - } - - if v.Value != "" { - m["value"] = v.Value - } - att[i] = m - } - return att -} - -func expandPort(v string) intstr.IntOrString { - i, err := strconv.Atoi(v) - if err != nil { - return intstr.IntOrString{ - Type: intstr.String, - StrVal: v, - } - } - return intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(i), - } -} - -func flattenHTTPGet(in *v1.HTTPGetAction) []interface{} { - att := make(map[string]interface{}) - - if in.Host != "" { - att["host"] = in.Host - } - if in.Path != "" { - att["path"] = in.Path - } - att["port"] = in.Port.String() - att["scheme"] = in.Scheme - if len(in.HTTPHeaders) > 0 { - att["http_header"] = flattenHTTPHeader(in.HTTPHeaders) - } - - return 
[]interface{}{att} -} - -func flattenTCPSocket(in *v1.TCPSocketAction) []interface{} { - att := make(map[string]interface{}) - att["port"] = in.Port.String() - return []interface{}{att} -} - -func flattenExec(in *v1.ExecAction) []interface{} { - att := make(map[string]interface{}) - if len(in.Command) > 0 { - att["command"] = in.Command - } - return []interface{}{att} -} - -func flattenLifeCycle(in *v1.Lifecycle) []interface{} { - att := make(map[string]interface{}) - - if in.PostStart != nil { - att["post_start"] = flattenHandler(in.PostStart) - } - if in.PreStop != nil { - att["pre_stop"] = flattenHandler(in.PreStop) - } - - return []interface{}{att} -} - -func flattenProbe(in *v1.Probe) []interface{} { - att := make(map[string]interface{}) - - att["failure_threshold"] = in.FailureThreshold - att["initial_delay_seconds"] = in.InitialDelaySeconds - att["period_seconds"] = in.PeriodSeconds - att["success_threshold"] = in.SuccessThreshold - att["timeout_seconds"] = in.TimeoutSeconds - - if in.Exec != nil { - att["exec"] = flattenExec(in.Exec) - } - if in.HTTPGet != nil { - att["http_get"] = flattenHTTPGet(in.HTTPGet) - } - if in.TCPSocket != nil { - att["tcp_socket"] = flattenTCPSocket(in.TCPSocket) - } - - return []interface{}{att} -} - -func flattenConfigMapKeyRef(in *v1.ConfigMapKeySelector) []interface{} { - att := make(map[string]interface{}) - - if in.Key != "" { - att["key"] = in.Key - } - if in.Name != "" { - att["name"] = in.Name - } - return []interface{}{att} -} - -func flattenObjectFieldSelector(in *v1.ObjectFieldSelector) []interface{} { - att := make(map[string]interface{}) - - if in.APIVersion != "" { - att["api_version"] = in.APIVersion - } - if in.FieldPath != "" { - att["field_path"] = in.FieldPath - } - return []interface{}{att} -} - -func flattenResourceFieldSelector(in *v1.ResourceFieldSelector) []interface{} { - att := make(map[string]interface{}) - - if in.ContainerName != "" { - att["container_name"] = in.ContainerName - } - if in.Resource != 
"" { - att["resource"] = in.Resource - } - return []interface{}{att} -} - -func flattenSecretKeyRef(in *v1.SecretKeySelector) []interface{} { - att := make(map[string]interface{}) - - if in.Key != "" { - att["key"] = in.Key - } - if in.Name != "" { - att["name"] = in.Name - } - return []interface{}{att} -} - -func flattenValueFrom(in *v1.EnvVarSource) []interface{} { - att := make(map[string]interface{}) - - if in.ConfigMapKeyRef != nil { - att["config_map_key_ref"] = flattenConfigMapKeyRef(in.ConfigMapKeyRef) - } - if in.ResourceFieldRef != nil { - att["resource_field_ref"] = flattenResourceFieldSelector(in.ResourceFieldRef) - } - if in.SecretKeyRef != nil { - att["secret_key_ref"] = flattenSecretKeyRef(in.SecretKeyRef) - } - if in.FieldRef != nil { - att["field_ref"] = flattenObjectFieldSelector(in.FieldRef) - } - return []interface{}{att} -} - -func flattenContainerVolumeMounts(in []v1.VolumeMount) ([]interface{}, error) { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - m["read_only"] = v.ReadOnly - - if v.MountPath != "" { - m["mount_path"] = v.MountPath - - } - if v.Name != "" { - m["name"] = v.Name - - } - if v.SubPath != "" { - m["sub_path"] = v.SubPath - } - att[i] = m - } - return att, nil -} - -func flattenContainerEnvs(in []v1.EnvVar) []interface{} { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - if v.Name != "" { - m["name"] = v.Name - } - if v.Value != "" { - m["value"] = v.Value - } - if v.ValueFrom != nil { - m["value_from"] = flattenValueFrom(v.ValueFrom) - } - - att[i] = m - } - return att -} - -func flattenContainerPorts(in []v1.ContainerPort) []interface{} { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - m["container_port"] = v.ContainerPort - if v.HostIP != "" { - m["host_ip"] = v.HostIP - } - m["host_port"] = v.HostPort - if v.Name != "" { - m["name"] = v.Name - } - if v.Protocol != "" { - 
m["protocol"] = v.Protocol - } - att[i] = m - } - return att -} - -func flattenContainerResourceRequirements(in v1.ResourceRequirements) ([]interface{}, error) { - att := make(map[string]interface{}) - if len(in.Limits) > 0 { - att["limits"] = []interface{}{flattenResourceList(in.Limits)} - } - if len(in.Requests) > 0 { - att["requests"] = []interface{}{flattenResourceList(in.Requests)} - } - return []interface{}{att}, nil -} - -func flattenContainers(in []v1.Container) ([]interface{}, error) { - att := make([]interface{}, len(in)) - for i, v := range in { - c := make(map[string]interface{}) - c["image"] = v.Image - c["name"] = v.Name - if len(v.Command) > 0 { - c["command"] = v.Command - } - if len(v.Args) > 0 { - c["args"] = v.Args - } - - c["image_pull_policy"] = v.ImagePullPolicy - c["termination_message_path"] = v.TerminationMessagePath - c["stdin"] = v.Stdin - c["stdin_once"] = v.StdinOnce - c["tty"] = v.TTY - c["working_dir"] = v.WorkingDir - res, err := flattenContainerResourceRequirements(v.Resources) - if err != nil { - return nil, err - } - - c["resources"] = res - if v.LivenessProbe != nil { - c["liveness_probe"] = flattenProbe(v.LivenessProbe) - } - if v.ReadinessProbe != nil { - c["readiness_probe"] = flattenProbe(v.ReadinessProbe) - } - if v.Lifecycle != nil { - c["lifecycle"] = flattenLifeCycle(v.Lifecycle) - } - - if v.SecurityContext != nil { - c["security_context"] = flattenContainerSecurityContext(v.SecurityContext) - } - if len(v.Ports) > 0 { - c["port"] = flattenContainerPorts(v.Ports) - } - if len(v.Env) > 0 { - c["env"] = flattenContainerEnvs(v.Env) - } - - if len(v.VolumeMounts) > 0 { - volumeMounts, err := flattenContainerVolumeMounts(v.VolumeMounts) - if err != nil { - return nil, err - } - c["volume_mount"] = volumeMounts - } - att[i] = c - } - return att, nil -} - -func expandContainers(ctrs []interface{}) ([]v1.Container, error) { - if len(ctrs) == 0 { - return []v1.Container{}, nil - } - cs := make([]v1.Container, len(ctrs)) - for i, 
c := range ctrs { - ctr := c.(map[string]interface{}) - - if image, ok := ctr["image"]; ok { - cs[i].Image = image.(string) - } - if name, ok := ctr["name"]; ok { - cs[i].Name = name.(string) - } - if command, ok := ctr["command"].([]interface{}); ok { - cs[i].Command = expandStringSlice(command) - } - if args, ok := ctr["args"].([]interface{}); ok { - cs[i].Args = expandStringSlice(args) - } - - if v, ok := ctr["resources"].([]interface{}); ok && len(v) > 0 { - - var err error - cs[i].Resources, err = expandContainerResourceRequirements(v) - if err != nil { - return cs, err - } - } - - if v, ok := ctr["port"].([]interface{}); ok && len(v) > 0 { - var err error - cs[i].Ports, err = expandContainerPort(v) - if err != nil { - return cs, err - } - } - if v, ok := ctr["env"].([]interface{}); ok && len(v) > 0 { - var err error - cs[i].Env, err = expandContainerEnv(v) - if err != nil { - return cs, err - } - } - - if policy, ok := ctr["image_pull_policy"]; ok { - cs[i].ImagePullPolicy = v1.PullPolicy(policy.(string)) - } - - if v, ok := ctr["lifecycle"].([]interface{}); ok && len(v) > 0 { - cs[i].Lifecycle = expandLifeCycle(v) - } - - if v, ok := ctr["liveness_probe"].([]interface{}); ok && len(v) > 0 { - cs[i].LivenessProbe = expandProbe(v) - } - - if v, ok := ctr["readiness_probe"].([]interface{}); ok && len(v) > 0 { - cs[i].ReadinessProbe = expandProbe(v) - } - if v, ok := ctr["stdin"]; ok { - cs[i].Stdin = v.(bool) - } - if v, ok := ctr["stdin_once"]; ok { - cs[i].StdinOnce = v.(bool) - } - if v, ok := ctr["termination_message_path"]; ok { - cs[i].TerminationMessagePath = v.(string) - } - if v, ok := ctr["tty"]; ok { - cs[i].TTY = v.(bool) - } - if v, ok := ctr["security_context"].([]interface{}); ok && len(v) > 0 { - cs[i].SecurityContext = expandContainerSecurityContext(v) - } - - if v, ok := ctr["volume_mount"].([]interface{}); ok && len(v) > 0 { - var err error - cs[i].VolumeMounts, err = expandContainerVolumeMounts(v) - if err != nil { - return cs, err - } - } - 
} - return cs, nil -} - -func expandExec(l []interface{}) *v1.ExecAction { - if len(l) == 0 || l[0] == nil { - return &v1.ExecAction{} - } - in := l[0].(map[string]interface{}) - obj := v1.ExecAction{} - if v, ok := in["command"].([]interface{}); ok && len(v) > 0 { - obj.Command = expandStringSlice(v) - } - return &obj -} - -func expandHTTPHeaders(l []interface{}) []v1.HTTPHeader { - if len(l) == 0 { - return []v1.HTTPHeader{} - } - headers := make([]v1.HTTPHeader, len(l)) - for i, c := range l { - m := c.(map[string]interface{}) - if v, ok := m["name"]; ok { - headers[i].Name = v.(string) - } - if v, ok := m["value"]; ok { - headers[i].Value = v.(string) - } - } - return headers -} -func expandContainerSecurityContext(l []interface{}) *v1.SecurityContext { - if len(l) == 0 || l[0] == nil { - return &v1.SecurityContext{} - } - in := l[0].(map[string]interface{}) - obj := v1.SecurityContext{} - if v, ok := in["privileged"]; ok { - obj.Privileged = ptrToBool(v.(bool)) - } - if v, ok := in["read_only_root_filesystem"]; ok { - obj.ReadOnlyRootFilesystem = ptrToBool(v.(bool)) - } - if v, ok := in["run_as_non_root"]; ok { - obj.RunAsNonRoot = ptrToBool(v.(bool)) - } - if v, ok := in["run_as_user"]; ok { - obj.RunAsUser = ptrToInt64(int64(v.(int))) - } - if v, ok := in["se_linux_options"].([]interface{}); ok && len(v) > 0 { - obj.SELinuxOptions = expandSeLinuxOptions(v) - } - if v, ok := in["capabilities"].([]interface{}); ok && len(v) > 0 { - obj.Capabilities = expandSecurityCapabilities(v) - } - - return &obj -} - -func expandCapabilitySlice(s []interface{}) []v1.Capability { - result := make([]v1.Capability, len(s), len(s)) - for k, v := range s { - result[k] = v.(v1.Capability) - } - return result -} - -func expandSecurityCapabilities(l []interface{}) *v1.Capabilities { - if len(l) == 0 || l[0] == nil { - return &v1.Capabilities{} - } - in := l[0].(map[string]interface{}) - obj := v1.Capabilities{} - if v, ok := in["add"].([]interface{}); ok { - obj.Add = 
expandCapabilitySlice(v) - } - if v, ok := in["drop"].([]interface{}); ok { - obj.Drop = expandCapabilitySlice(v) - } - return &obj -} - -func expandTCPSocket(l []interface{}) *v1.TCPSocketAction { - if len(l) == 0 || l[0] == nil { - return &v1.TCPSocketAction{} - } - in := l[0].(map[string]interface{}) - obj := v1.TCPSocketAction{} - if v, ok := in["port"].(string); ok && len(v) > 0 { - obj.Port = expandPort(v) - } - return &obj -} - -func expandHTTPGet(l []interface{}) *v1.HTTPGetAction { - if len(l) == 0 || l[0] == nil { - return &v1.HTTPGetAction{} - } - in := l[0].(map[string]interface{}) - obj := v1.HTTPGetAction{} - if v, ok := in["host"].(string); ok && len(v) > 0 { - obj.Host = v - } - if v, ok := in["path"].(string); ok && len(v) > 0 { - obj.Path = v - } - if v, ok := in["scheme"].(string); ok && len(v) > 0 { - obj.Scheme = v1.URIScheme(v) - } - - if v, ok := in["port"].(string); ok && len(v) > 0 { - obj.Port = expandPort(v) - } - - if v, ok := in["http_header"].([]interface{}); ok && len(v) > 0 { - obj.HTTPHeaders = expandHTTPHeaders(v) - } - return &obj -} - -func expandProbe(l []interface{}) *v1.Probe { - if len(l) == 0 || l[0] == nil { - return &v1.Probe{} - } - in := l[0].(map[string]interface{}) - obj := v1.Probe{} - if v, ok := in["exec"].([]interface{}); ok && len(v) > 0 { - obj.Exec = expandExec(v) - } - if v, ok := in["http_get"].([]interface{}); ok && len(v) > 0 { - obj.HTTPGet = expandHTTPGet(v) - } - if v, ok := in["tcp_socket"].([]interface{}); ok && len(v) > 0 { - obj.TCPSocket = expandTCPSocket(v) - } - if v, ok := in["failure_threshold"].(int); ok { - obj.FailureThreshold = int32(v) - } - if v, ok := in["initial_delay_seconds"].(int); ok { - obj.InitialDelaySeconds = int32(v) - } - if v, ok := in["period_seconds"].(int); ok { - obj.PeriodSeconds = int32(v) - } - if v, ok := in["success_threshold"].(int); ok { - obj.SuccessThreshold = int32(v) - } - if v, ok := in["timeout_seconds"].(int); ok { - obj.TimeoutSeconds = int32(v) - } - - 
return &obj -} - -func expandHandlers(l []interface{}) *v1.Handler { - if len(l) == 0 || l[0] == nil { - return &v1.Handler{} - } - in := l[0].(map[string]interface{}) - obj := v1.Handler{} - if v, ok := in["exec"].([]interface{}); ok && len(v) > 0 { - obj.Exec = expandExec(v) - } - if v, ok := in["http_get"].([]interface{}); ok && len(v) > 0 { - obj.HTTPGet = expandHTTPGet(v) - } - if v, ok := in["tcp_socket"].([]interface{}); ok && len(v) > 0 { - obj.TCPSocket = expandTCPSocket(v) - } - return &obj - -} -func expandLifeCycle(l []interface{}) *v1.Lifecycle { - if len(l) == 0 || l[0] == nil { - return &v1.Lifecycle{} - } - in := l[0].(map[string]interface{}) - obj := &v1.Lifecycle{} - if v, ok := in["post_start"].([]interface{}); ok && len(v) > 0 { - obj.PostStart = expandHandlers(v) - } - if v, ok := in["pre_stop"].([]interface{}); ok && len(v) > 0 { - obj.PreStop = expandHandlers(v) - } - return obj -} - -func expandContainerVolumeMounts(in []interface{}) ([]v1.VolumeMount, error) { - if len(in) == 0 { - return []v1.VolumeMount{}, nil - } - vmp := make([]v1.VolumeMount, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if mountPath, ok := p["mount_path"]; ok { - vmp[i].MountPath = mountPath.(string) - } - if name, ok := p["name"]; ok { - vmp[i].Name = name.(string) - } - if readOnly, ok := p["read_only"]; ok { - vmp[i].ReadOnly = readOnly.(bool) - } - if subPath, ok := p["sub_path"]; ok { - vmp[i].SubPath = subPath.(string) - } - } - return vmp, nil -} - -func expandContainerEnv(in []interface{}) ([]v1.EnvVar, error) { - if len(in) == 0 { - return []v1.EnvVar{}, nil - } - envs := make([]v1.EnvVar, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if name, ok := p["name"]; ok { - envs[i].Name = name.(string) - } - if value, ok := p["value"]; ok { - envs[i].Value = value.(string) - } - if v, ok := p["value_from"].([]interface{}); ok && len(v) > 0 { - var err error - envs[i].ValueFrom, err = expandEnvValueFrom(v) - if err != 
nil { - return envs, err - } - } - } - return envs, nil -} - -func expandContainerPort(in []interface{}) ([]v1.ContainerPort, error) { - if len(in) == 0 { - return []v1.ContainerPort{}, nil - } - ports := make([]v1.ContainerPort, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if containerPort, ok := p["container_port"]; ok { - ports[i].ContainerPort = int32(containerPort.(int)) - } - if hostIP, ok := p["host_ip"]; ok { - ports[i].HostIP = hostIP.(string) - } - if hostPort, ok := p["host_port"]; ok { - ports[i].HostPort = int32(hostPort.(int)) - } - if name, ok := p["name"]; ok { - ports[i].Name = name.(string) - } - if protocol, ok := p["protocol"]; ok { - ports[i].Protocol = v1.Protocol(protocol.(string)) - } - } - return ports, nil -} - -func expandConfigMapKeyRef(r []interface{}) (*v1.ConfigMapKeySelector, error) { - if len(r) == 0 || r[0] == nil { - return &v1.ConfigMapKeySelector{}, nil - } - in := r[0].(map[string]interface{}) - obj := &v1.ConfigMapKeySelector{} - - if v, ok := in["key"].(string); ok { - obj.Key = v - } - if v, ok := in["name"].(string); ok { - obj.Name = v - } - return obj, nil - -} -func expandFieldRef(r []interface{}) (*v1.ObjectFieldSelector, error) { - if len(r) == 0 || r[0] == nil { - return &v1.ObjectFieldSelector{}, nil - } - in := r[0].(map[string]interface{}) - obj := &v1.ObjectFieldSelector{} - - if v, ok := in["api_version"].(string); ok { - obj.APIVersion = v - } - if v, ok := in["field_path"].(string); ok { - obj.FieldPath = v - } - return obj, nil -} -func expandResourceFieldRef(r []interface{}) (*v1.ResourceFieldSelector, error) { - if len(r) == 0 || r[0] == nil { - return &v1.ResourceFieldSelector{}, nil - } - in := r[0].(map[string]interface{}) - obj := &v1.ResourceFieldSelector{} - - if v, ok := in["container_name"].(string); ok { - obj.ContainerName = v - } - if v, ok := in["resource"].(string); ok { - obj.Resource = v - } - return obj, nil -} -func expandSecretKeyRef(r []interface{}) 
(*v1.SecretKeySelector, error) { - if len(r) == 0 || r[0] == nil { - return &v1.SecretKeySelector{}, nil - } - in := r[0].(map[string]interface{}) - obj := &v1.SecretKeySelector{} - - if v, ok := in["key"].(string); ok { - obj.Key = v - } - if v, ok := in["name"].(string); ok { - obj.Name = v - } - return obj, nil -} - -func expandEnvValueFrom(r []interface{}) (*v1.EnvVarSource, error) { - if len(r) == 0 || r[0] == nil { - return &v1.EnvVarSource{}, nil - } - in := r[0].(map[string]interface{}) - obj := &v1.EnvVarSource{} - - var err error - if v, ok := in["config_map_key_ref"].([]interface{}); ok && len(v) > 0 { - obj.ConfigMapKeyRef, err = expandConfigMapKeyRef(v) - if err != nil { - return obj, err - } - } - if v, ok := in["field_ref"].([]interface{}); ok && len(v) > 0 { - obj.FieldRef, err = expandFieldRef(v) - if err != nil { - return obj, err - } - } - if v, ok := in["secret_key_ref"].([]interface{}); ok && len(v) > 0 { - obj.SecretKeyRef, err = expandSecretKeyRef(v) - if err != nil { - return obj, err - } - } - if v, ok := in["resource_field_ref"].([]interface{}); ok && len(v) > 0 { - obj.ResourceFieldRef, err = expandResourceFieldRef(v) - if err != nil { - return obj, err - } - } - return obj, nil - -} - -func expandContainerResourceRequirements(l []interface{}) (v1.ResourceRequirements, error) { - if len(l) == 0 || l[0] == nil { - return v1.ResourceRequirements{}, nil - } - in := l[0].(map[string]interface{}) - obj := v1.ResourceRequirements{} - - fn := func(in []interface{}) (v1.ResourceList, error) { - for _, c := range in { - p := c.(map[string]interface{}) - if p["cpu"] == "" { - delete(p, "cpu") - } - if p["memory"] == "" { - delete(p, "memory") - } - return expandMapToResourceList(p) - } - return nil, nil - } - - var err error - if v, ok := in["limits"].([]interface{}); ok && len(v) > 0 { - obj.Limits, err = fn(v) - if err != nil { - return obj, err - } - } - - if v, ok := in["requests"].([]interface{}); ok && len(v) > 0 { - obj.Requests, err = fn(v) 
- if err != nil { - return obj, err - } - } - - return obj, nil -} diff --git a/builtin/providers/kubernetes/structures_pod.go b/builtin/providers/kubernetes/structures_pod.go deleted file mode 100644 index 3cd69f005..000000000 --- a/builtin/providers/kubernetes/structures_pod.go +++ /dev/null @@ -1,684 +0,0 @@ -package kubernetes - -import ( - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "k8s.io/kubernetes/pkg/api/v1" -) - -// Flatteners - -func flattenPodSpec(in v1.PodSpec) ([]interface{}, error) { - att := make(map[string]interface{}) - if in.ActiveDeadlineSeconds != nil { - att["active_deadline_seconds"] = *in.ActiveDeadlineSeconds - } - containers, err := flattenContainers(in.Containers) - if err != nil { - return nil, err - } - att["container"] = containers - - att["dns_policy"] = in.DNSPolicy - - att["host_ipc"] = in.HostIPC - att["host_network"] = in.HostNetwork - att["host_pid"] = in.HostPID - - if in.Hostname != "" { - att["hostname"] = in.Hostname - } - att["image_pull_secrets"] = flattenLocalObjectReferenceArray(in.ImagePullSecrets) - - if in.NodeName != "" { - att["node_name"] = in.NodeName - } - if len(in.NodeSelector) > 0 { - att["node_selector"] = in.NodeSelector - } - if in.RestartPolicy != "" { - att["restart_policy"] = in.RestartPolicy - } - - if in.SecurityContext != nil { - att["security_context"] = flattenPodSecurityContext(in.SecurityContext) - } - if in.ServiceAccountName != "" { - att["service_account_name"] = in.ServiceAccountName - } - if in.Subdomain != "" { - att["subdomain"] = in.Subdomain - } - - if in.TerminationGracePeriodSeconds != nil { - att["termination_grace_period_seconds"] = *in.TerminationGracePeriodSeconds - } - - if len(in.Volumes) > 0 { - v, err := flattenVolumes(in.Volumes) - if err != nil { - return []interface{}{att}, err - } - att["volume"] = v - } - return []interface{}{att}, nil -} - -func flattenPodSecurityContext(in *v1.PodSecurityContext) []interface{} { - att := make(map[string]interface{}) - 
if in.FSGroup != nil { - att["fs_group"] = *in.FSGroup - } - - if in.RunAsNonRoot != nil { - att["run_as_non_root"] = *in.RunAsNonRoot - } - - if in.RunAsUser != nil { - att["run_as_user"] = *in.RunAsUser - } - - if len(in.SupplementalGroups) > 0 { - att["supplemental_groups"] = newInt64Set(schema.HashSchema(&schema.Schema{ - Type: schema.TypeInt, - }), in.SupplementalGroups) - } - if in.SELinuxOptions != nil { - att["se_linux_options"] = flattenSeLinuxOptions(in.SELinuxOptions) - } - - if len(att) > 0 { - return []interface{}{att} - } - return []interface{}{} -} - -func flattenSeLinuxOptions(in *v1.SELinuxOptions) []interface{} { - att := make(map[string]interface{}) - if in.User != "" { - att["user"] = in.User - } - if in.Role != "" { - att["role"] = in.Role - } - if in.User != "" { - att["type"] = in.Type - } - if in.Level != "" { - att["level"] = in.Level - } - return []interface{}{att} -} - -func flattenVolumes(volumes []v1.Volume) ([]interface{}, error) { - att := make([]interface{}, len(volumes)) - for i, v := range volumes { - obj := map[string]interface{}{} - - if v.Name != "" { - obj["name"] = v.Name - } - if v.ConfigMap != nil { - obj["config_map"] = flattenConfigMapVolumeSource(v.ConfigMap) - } - if v.GitRepo != nil { - obj["git_repo"] = flattenGitRepoVolumeSource(v.GitRepo) - } - if v.EmptyDir != nil { - obj["empty_dir"] = flattenEmptyDirVolumeSource(v.EmptyDir) - } - if v.DownwardAPI != nil { - obj["downward_api"] = flattenDownwardAPIVolumeSource(v.DownwardAPI) - } - if v.PersistentVolumeClaim != nil { - obj["persistent_volume_claim"] = flattenPersistentVolumeClaimVolumeSource(v.PersistentVolumeClaim) - } - if v.Secret != nil { - obj["secret"] = flattenSecretVolumeSource(v.Secret) - } - if v.GCEPersistentDisk != nil { - obj["gce_persistent_disk"] = flattenGCEPersistentDiskVolumeSource(v.GCEPersistentDisk) - } - if v.AWSElasticBlockStore != nil { - obj["aws_elastic_block_store"] = flattenAWSElasticBlockStoreVolumeSource(v.AWSElasticBlockStore) - } - if 
v.HostPath != nil { - obj["host_path"] = flattenHostPathVolumeSource(v.HostPath) - } - if v.Glusterfs != nil { - obj["glusterfs"] = flattenGlusterfsVolumeSource(v.Glusterfs) - } - if v.NFS != nil { - obj["nfs"] = flattenNFSVolumeSource(v.NFS) - } - if v.RBD != nil { - obj["rbd"] = flattenRBDVolumeSource(v.RBD) - } - if v.ISCSI != nil { - obj["iscsi"] = flattenISCSIVolumeSource(v.ISCSI) - } - if v.Cinder != nil { - obj["cinder"] = flattenCinderVolumeSource(v.Cinder) - } - if v.CephFS != nil { - obj["ceph_fs"] = flattenCephFSVolumeSource(v.CephFS) - } - if v.FC != nil { - obj["fc"] = flattenFCVolumeSource(v.FC) - } - if v.Flocker != nil { - obj["flocker"] = flattenFlockerVolumeSource(v.Flocker) - } - if v.FlexVolume != nil { - obj["flex_volume"] = flattenFlexVolumeSource(v.FlexVolume) - } - if v.AzureFile != nil { - obj["azure_file"] = flattenAzureFileVolumeSource(v.AzureFile) - } - if v.VsphereVolume != nil { - obj["vsphere_volume"] = flattenVsphereVirtualDiskVolumeSource(v.VsphereVolume) - } - if v.Quobyte != nil { - obj["quobyte"] = flattenQuobyteVolumeSource(v.Quobyte) - } - if v.AzureDisk != nil { - obj["azure_disk"] = flattenAzureDiskVolumeSource(v.AzureDisk) - } - if v.PhotonPersistentDisk != nil { - obj["photon_persistent_disk"] = flattenPhotonPersistentDiskVolumeSource(v.PhotonPersistentDisk) - } - att[i] = obj - } - return att, nil -} - -func flattenPersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.ClaimName != "" { - att["claim_name"] = in.ClaimName - } - if in.ReadOnly { - att["read_only"] = in.ReadOnly - } - - return []interface{}{att} -} -func flattenGitRepoVolumeSource(in *v1.GitRepoVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.Directory != "" { - att["directory"] = in.Directory - } - - att["repository"] = in.Repository - - if in.Revision != "" { - att["revision"] = in.Revision - } - return []interface{}{att} -} - -func 
flattenDownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.DefaultMode != nil { - att["default_mode"] = in.DefaultMode - } - if len(in.Items) > 0 { - att["items"] = flattenDownwardAPIVolumeFile(in.Items) - } - return []interface{}{att} -} - -func flattenDownwardAPIVolumeFile(in []v1.DownwardAPIVolumeFile) []interface{} { - att := make([]interface{}, len(in)) - for i, v := range in { - m := map[string]interface{}{} - if v.FieldRef != nil { - m["field_ref"] = flattenObjectFieldSelector(v.FieldRef) - } - if v.Mode != nil { - m["mode"] = *v.Mode - } - if v.Path != "" { - m["path"] = v.Path - } - if v.ResourceFieldRef != nil { - m["resource_field_ref"] = flattenResourceFieldSelector(v.ResourceFieldRef) - } - att[i] = m - } - return att -} - -func flattenConfigMapVolumeSource(in *v1.ConfigMapVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.DefaultMode != nil { - att["default_mode"] = *in.DefaultMode - } - att["name"] = in.Name - if len(in.Items) > 0 { - items := make([]interface{}, len(in.Items)) - for i, v := range in.Items { - m := map[string]interface{}{} - m["key"] = v.Key - m["mode"] = v.Mode - m["path"] = v.Path - items[i] = m - } - att["items"] = items - } - - return []interface{}{att} -} - -func flattenEmptyDirVolumeSource(in *v1.EmptyDirVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["medium"] = in.Medium - return []interface{}{att} -} - -func flattenSecretVolumeSource(in *v1.SecretVolumeSource) []interface{} { - att := make(map[string]interface{}) - if in.SecretName != "" { - att["secret_name"] = in.SecretName - } - return []interface{}{att} -} - -// Expanders - -func expandPodSpec(p []interface{}) (v1.PodSpec, error) { - obj := v1.PodSpec{} - if len(p) == 0 || p[0] == nil { - return obj, nil - } - in := p[0].(map[string]interface{}) - - if v, ok := in["active_deadline_seconds"].(int); ok && v > 0 { - obj.ActiveDeadlineSeconds = 
ptrToInt64(int64(v)) - } - - if v, ok := in["container"].([]interface{}); ok && len(v) > 0 { - cs, err := expandContainers(v) - if err != nil { - return obj, err - } - obj.Containers = cs - } - - if v, ok := in["dns_policy"].(string); ok { - obj.DNSPolicy = v1.DNSPolicy(v) - } - - if v, ok := in["host_ipc"]; ok { - obj.HostIPC = v.(bool) - } - - if v, ok := in["host_network"]; ok { - obj.HostNetwork = v.(bool) - } - - if v, ok := in["host_pid"]; ok { - obj.HostPID = v.(bool) - } - - if v, ok := in["hostname"]; ok { - obj.Hostname = v.(string) - } - - if v, ok := in["image_pull_secrets"].([]interface{}); ok { - cs := expandLocalObjectReferenceArray(v) - obj.ImagePullSecrets = cs - } - - if v, ok := in["node_name"]; ok { - obj.NodeName = v.(string) - } - - if v, ok := in["node_selector"].(map[string]string); ok { - obj.NodeSelector = v - } - - if v, ok := in["restart_policy"].(string); ok { - obj.RestartPolicy = v1.RestartPolicy(v) - } - - if v, ok := in["security_context"].([]interface{}); ok && len(v) > 0 { - obj.SecurityContext = expandPodSecurityContext(v) - } - - if v, ok := in["service_account_name"].(string); ok { - obj.ServiceAccountName = v - } - - if v, ok := in["subdomain"].(string); ok { - obj.Subdomain = v - } - - if v, ok := in["termination_grace_period_seconds"].(int); ok { - obj.TerminationGracePeriodSeconds = ptrToInt64(int64(v)) - } - - if v, ok := in["volume"].([]interface{}); ok && len(v) > 0 { - cs, err := expandVolumes(v) - if err != nil { - return obj, err - } - obj.Volumes = cs - } - return obj, nil -} - -func expandPodSecurityContext(l []interface{}) *v1.PodSecurityContext { - if len(l) == 0 || l[0] == nil { - return &v1.PodSecurityContext{} - } - in := l[0].(map[string]interface{}) - obj := &v1.PodSecurityContext{} - if v, ok := in["fs_group"].(int); ok { - obj.FSGroup = ptrToInt64(int64(v)) - } - if v, ok := in["run_as_non_root"].(bool); ok { - obj.RunAsNonRoot = ptrToBool(v) - } - if v, ok := in["run_as_user"].(int); ok { - obj.RunAsUser = 
ptrToInt64(int64(v)) - } - if v, ok := in["supplemental_groups"].(*schema.Set); ok { - obj.SupplementalGroups = schemaSetToInt64Array(v) - } - - if v, ok := in["se_linux_options"].([]interface{}); ok && len(v) > 0 { - obj.SELinuxOptions = expandSeLinuxOptions(v) - } - - return obj -} - -func expandSeLinuxOptions(l []interface{}) *v1.SELinuxOptions { - if len(l) == 0 || l[0] == nil { - return &v1.SELinuxOptions{} - } - in := l[0].(map[string]interface{}) - obj := &v1.SELinuxOptions{} - if v, ok := in["level"]; ok { - obj.Level = v.(string) - } - if v, ok := in["role"]; ok { - obj.Role = v.(string) - } - if v, ok := in["type"]; ok { - obj.Type = v.(string) - } - if v, ok := in["user"]; ok { - obj.User = v.(string) - } - return obj -} - -func expandKeyPath(in []interface{}) []v1.KeyToPath { - if len(in) == 0 { - return []v1.KeyToPath{} - } - keyPaths := make([]v1.KeyToPath, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if v, ok := p["key"].(string); ok { - keyPaths[i].Key = v - } - if v, ok := p["mode"].(int); ok { - keyPaths[i].Mode = ptrToInt32(int32(v)) - } - if v, ok := p["path"].(string); ok { - keyPaths[i].Path = v - } - - } - return keyPaths -} - -func expandDownwardAPIVolumeFile(in []interface{}) ([]v1.DownwardAPIVolumeFile, error) { - var err error - if len(in) == 0 { - return []v1.DownwardAPIVolumeFile{}, nil - } - dapivf := make([]v1.DownwardAPIVolumeFile, len(in)) - for i, c := range in { - p := c.(map[string]interface{}) - if v, ok := p["mode"].(int); ok { - dapivf[i].Mode = ptrToInt32(int32(v)) - } - if v, ok := p["path"].(string); ok { - dapivf[i].Path = v - } - if v, ok := p["field_ref"].([]interface{}); ok && len(v) > 0 { - dapivf[i].FieldRef, err = expandFieldRef(v) - if err != nil { - return dapivf, err - } - } - if v, ok := p["resource_field_ref"].([]interface{}); ok && len(v) > 0 { - dapivf[i].ResourceFieldRef, err = expandResourceFieldRef(v) - if err != nil { - return dapivf, err - } - } - } - return dapivf, nil -} - -func 
expandConfigMapVolumeSource(l []interface{}) *v1.ConfigMapVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.ConfigMapVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.ConfigMapVolumeSource{ - DefaultMode: ptrToInt32(int32(in["default_mode "].(int))), - } - - if v, ok := in["name"].(string); ok { - obj.Name = v - } - - if v, ok := in["items"].([]interface{}); ok && len(v) > 0 { - obj.Items = expandKeyPath(v) - } - - return obj -} - -func expandDownwardAPIVolumeSource(l []interface{}) (*v1.DownwardAPIVolumeSource, error) { - if len(l) == 0 || l[0] == nil { - return &v1.DownwardAPIVolumeSource{}, nil - } - in := l[0].(map[string]interface{}) - obj := &v1.DownwardAPIVolumeSource{ - DefaultMode: ptrToInt32(int32(in["default_mode "].(int))), - } - if v, ok := in["items"].([]interface{}); ok && len(v) > 0 { - var err error - obj.Items, err = expandDownwardAPIVolumeFile(v) - if err != nil { - return obj, err - } - } - return obj, nil -} - -func expandGitRepoVolumeSource(l []interface{}) *v1.GitRepoVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.GitRepoVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.GitRepoVolumeSource{} - - if v, ok := in["directory"].(string); ok { - obj.Directory = v - } - - if v, ok := in["repository"].(string); ok { - obj.Repository = v - } - if v, ok := in["revision"].(string); ok { - obj.Revision = v - } - return obj -} - -func expandEmptyDirVolumeSource(l []interface{}) *v1.EmptyDirVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.EmptyDirVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.EmptyDirVolumeSource{ - Medium: v1.StorageMedium(in["medium"].(string)), - } - return obj -} - -func expandPersistentVolumeClaimVolumeSource(l []interface{}) *v1.PersistentVolumeClaimVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.PersistentVolumeClaimVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.PersistentVolumeClaimVolumeSource{ 
- ClaimName: in["claim_name"].(string), - ReadOnly: in["read_only"].(bool), - } - return obj -} - -func expandSecretVolumeSource(l []interface{}) *v1.SecretVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.SecretVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.SecretVolumeSource{ - SecretName: in["secret_name"].(string), - } - return obj -} - -func expandVolumes(volumes []interface{}) ([]v1.Volume, error) { - if len(volumes) == 0 { - return []v1.Volume{}, nil - } - vl := make([]v1.Volume, len(volumes)) - for i, c := range volumes { - m := c.(map[string]interface{}) - - if value, ok := m["name"]; ok { - vl[i].Name = value.(string) - } - - if value, ok := m["config_map"].([]interface{}); ok && len(value) > 0 { - vl[i].ConfigMap = expandConfigMapVolumeSource(value) - } - if value, ok := m["git_repo"].([]interface{}); ok && len(value) > 0 { - vl[i].GitRepo = expandGitRepoVolumeSource(value) - } - - if value, ok := m["empty_dir"].([]interface{}); ok && len(value) > 0 { - vl[i].EmptyDir = expandEmptyDirVolumeSource(value) - } - if value, ok := m["downward_api"].([]interface{}); ok && len(value) > 0 { - var err error - vl[i].DownwardAPI, err = expandDownwardAPIVolumeSource(value) - if err != nil { - return vl, err - } - } - - if value, ok := m["persistent_volume_claim"].([]interface{}); ok && len(value) > 0 { - vl[i].PersistentVolumeClaim = expandPersistentVolumeClaimVolumeSource(value) - } - if value, ok := m["secret"].([]interface{}); ok && len(value) > 0 { - vl[i].Secret = expandSecretVolumeSource(value) - } - if v, ok := m["gce_persistent_disk"].([]interface{}); ok && len(v) > 0 { - vl[i].GCEPersistentDisk = expandGCEPersistentDiskVolumeSource(v) - } - if v, ok := m["aws_elastic_block_store"].([]interface{}); ok && len(v) > 0 { - vl[i].AWSElasticBlockStore = expandAWSElasticBlockStoreVolumeSource(v) - } - if v, ok := m["host_path"].([]interface{}); ok && len(v) > 0 { - vl[i].HostPath = expandHostPathVolumeSource(v) - } - if v, ok := 
m["glusterfs"].([]interface{}); ok && len(v) > 0 { - vl[i].Glusterfs = expandGlusterfsVolumeSource(v) - } - if v, ok := m["nfs"].([]interface{}); ok && len(v) > 0 { - vl[i].NFS = expandNFSVolumeSource(v) - } - if v, ok := m["rbd"].([]interface{}); ok && len(v) > 0 { - vl[i].RBD = expandRBDVolumeSource(v) - } - if v, ok := m["iscsi"].([]interface{}); ok && len(v) > 0 { - vl[i].ISCSI = expandISCSIVolumeSource(v) - } - if v, ok := m["cinder"].([]interface{}); ok && len(v) > 0 { - vl[i].Cinder = expandCinderVolumeSource(v) - } - if v, ok := m["ceph_fs"].([]interface{}); ok && len(v) > 0 { - vl[i].CephFS = expandCephFSVolumeSource(v) - } - if v, ok := m["fc"].([]interface{}); ok && len(v) > 0 { - vl[i].FC = expandFCVolumeSource(v) - } - if v, ok := m["flocker"].([]interface{}); ok && len(v) > 0 { - vl[i].Flocker = expandFlockerVolumeSource(v) - } - if v, ok := m["flex_volume"].([]interface{}); ok && len(v) > 0 { - vl[i].FlexVolume = expandFlexVolumeSource(v) - } - if v, ok := m["azure_file"].([]interface{}); ok && len(v) > 0 { - vl[i].AzureFile = expandAzureFileVolumeSource(v) - } - if v, ok := m["vsphere_volume"].([]interface{}); ok && len(v) > 0 { - vl[i].VsphereVolume = expandVsphereVirtualDiskVolumeSource(v) - } - if v, ok := m["quobyte"].([]interface{}); ok && len(v) > 0 { - vl[i].Quobyte = expandQuobyteVolumeSource(v) - } - if v, ok := m["azure_disk"].([]interface{}); ok && len(v) > 0 { - vl[i].AzureDisk = expandAzureDiskVolumeSource(v) - } - if v, ok := m["photon_persistent_disk"].([]interface{}); ok && len(v) > 0 { - vl[i].PhotonPersistentDisk = expandPhotonPersistentDiskVolumeSource(v) - } - } - return vl, nil -} - -func patchPodSpec(pathPrefix, prefix string, d *schema.ResourceData) (PatchOperations, error) { - ops := make([]PatchOperation, 0) - - if d.HasChange(prefix + "active_deadline_seconds") { - - v := d.Get(prefix + "active_deadline_seconds").(int) - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/activeDeadlineSeconds", - Value: v, - }) - } 
- - if d.HasChange(prefix + "container") { - containers := d.Get(prefix + "container").([]interface{}) - value, _ := expandContainers(containers) - - for i, v := range value { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/containers/" + strconv.Itoa(i) + "/image", - Value: v.Image, - }) - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/containers/" + strconv.Itoa(i) + "/name", - Value: v.Name, - }) - - } - - } - - return ops, nil -} diff --git a/builtin/providers/kubernetes/structures_test.go b/builtin/providers/kubernetes/structures_test.go deleted file mode 100644 index a423c3f9a..000000000 --- a/builtin/providers/kubernetes/structures_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package kubernetes - -import ( - "fmt" - "testing" -) - -func TestIsInternalKey(t *testing.T) { - testCases := []struct { - Key string - Expected bool - }{ - {"", false}, - {"anyKey", false}, - {"any.hostname.io", false}, - {"any.hostname.com/with/path", false}, - {"any.kubernetes.io", true}, - {"kubernetes.io", true}, - {"pv.kubernetes.io/any/path", true}, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - isInternal := isInternalKey(tc.Key) - if tc.Expected && isInternal != tc.Expected { - t.Fatalf("Expected %q to be internal", tc.Key) - } - if !tc.Expected && isInternal != tc.Expected { - t.Fatalf("Expected %q not to be internal", tc.Key) - } - }) - } -} diff --git a/builtin/providers/kubernetes/test-infra/main.tf b/builtin/providers/kubernetes/test-infra/main.tf deleted file mode 100644 index bbc62492b..000000000 --- a/builtin/providers/kubernetes/test-infra/main.tf +++ /dev/null @@ -1,67 +0,0 @@ -provider "google" { - // Provider settings to be provided via ENV variables -} - -data "google_compute_zones" "available" {} - -resource "random_id" "cluster_name" { - byte_length = 10 -} -resource "random_id" "username" { - byte_length = 14 -} -resource "random_id" "password" { - byte_length = 16 -} - -resource 
"google_container_cluster" "primary" { - name = "tf-acc-test-${random_id.cluster_name.hex}" - zone = "${data.google_compute_zones.available.names[0]}" - initial_node_count = 3 - - additional_zones = [ - "${data.google_compute_zones.available.names[1]}" - ] - - master_auth { - username = "${random_id.username.hex}" - password = "${random_id.password.hex}" - } - - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring" - ] - } -} - -output "zone" { - value = "${data.google_compute_zones.available.names[0]}" -} - -output "endpoint" { - value = "${google_container_cluster.primary.endpoint}" -} - -output "username" { - value = "${google_container_cluster.primary.master_auth.0.username}" -} - -output "password" { - value = "${google_container_cluster.primary.master_auth.0.password}" -} - -output "client_certificate_b64" { - value = "${google_container_cluster.primary.master_auth.0.client_certificate}" -} - -output "client_key_b64" { - value = "${google_container_cluster.primary.master_auth.0.client_key}" -} - -output "cluster_ca_certificate_b64" { - value = "${google_container_cluster.primary.master_auth.0.cluster_ca_certificate}" -} diff --git a/builtin/providers/kubernetes/validators.go b/builtin/providers/kubernetes/validators.go deleted file mode 100644 index 03dd2cf55..000000000 --- a/builtin/providers/kubernetes/validators.go +++ /dev/null @@ -1,198 +0,0 @@ -package kubernetes - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "k8s.io/apimachinery/pkg/api/resource" - apiValidation "k8s.io/apimachinery/pkg/api/validation" - utilValidation "k8s.io/apimachinery/pkg/util/validation" -) - -func validateAnnotations(value interface{}, key string) (ws []string, es []error) { - m := value.(map[string]interface{}) - for k, _ := range m { - errors := 
utilValidation.IsQualifiedName(strings.ToLower(k)) - if len(errors) > 0 { - for _, e := range errors { - es = append(es, fmt.Errorf("%s (%q) %s", key, k, e)) - } - } - } - return -} - -func validateName(value interface{}, key string) (ws []string, es []error) { - v := value.(string) - - errors := apiValidation.NameIsDNSLabel(v, false) - if len(errors) > 0 { - for _, err := range errors { - es = append(es, fmt.Errorf("%s %s", key, err)) - } - } - return -} - -func validateGenerateName(value interface{}, key string) (ws []string, es []error) { - v := value.(string) - - errors := apiValidation.NameIsDNSLabel(v, true) - if len(errors) > 0 { - for _, err := range errors { - es = append(es, fmt.Errorf("%s %s", key, err)) - } - } - return -} - -func validateLabels(value interface{}, key string) (ws []string, es []error) { - m := value.(map[string]interface{}) - for k, v := range m { - for _, msg := range utilValidation.IsQualifiedName(k) { - es = append(es, fmt.Errorf("%s (%q) %s", key, k, msg)) - } - val := v.(string) - for _, msg := range utilValidation.IsValidLabelValue(val) { - es = append(es, fmt.Errorf("%s (%q) %s", key, val, msg)) - } - } - return -} - -func validatePortNum(value interface{}, key string) (ws []string, es []error) { - errors := utilValidation.IsValidPortNum(value.(int)) - if len(errors) > 0 { - for _, err := range errors { - es = append(es, fmt.Errorf("%s %s", key, err)) - } - } - return -} - -func validatePortName(value interface{}, key string) (ws []string, es []error) { - errors := utilValidation.IsValidPortName(value.(string)) - if len(errors) > 0 { - for _, err := range errors { - es = append(es, fmt.Errorf("%s %s", key, err)) - } - } - return -} -func validatePortNumOrName(value interface{}, key string) (ws []string, es []error) { - switch value.(type) { - case string: - intVal, err := strconv.Atoi(value.(string)) - if err != nil { - return validatePortName(value, key) - } - return validatePortNum(intVal, key) - case int: - return 
validatePortNum(value, key) - - default: - es = append(es, fmt.Errorf("%s must be defined of type string or int on the schema", key)) - return - } -} - -func validateResourceList(value interface{}, key string) (ws []string, es []error) { - m := value.(map[string]interface{}) - for k, value := range m { - if _, ok := value.(int); ok { - continue - } - - if v, ok := value.(string); ok { - _, err := resource.ParseQuantity(v) - if err != nil { - es = append(es, fmt.Errorf("%s.%s (%q): %s", key, k, v, err)) - } - continue - } - - err := "Value can be either string or int" - es = append(es, fmt.Errorf("%s.%s (%#v): %s", key, k, value, err)) - } - return -} - -func validateResourceQuantity(value interface{}, key string) (ws []string, es []error) { - if v, ok := value.(string); ok { - _, err := resource.ParseQuantity(v) - if err != nil { - es = append(es, fmt.Errorf("%s.%s : %s", key, v, err)) - } - } - return -} - -func validatePositiveInteger(value interface{}, key string) (ws []string, es []error) { - v := value.(int) - if v <= 0 { - es = append(es, fmt.Errorf("%s must be greater than 0", key)) - } - return -} - -func validateDNSPolicy(value interface{}, key string) (ws []string, es []error) { - v := value.(string) - if v != "ClusterFirst" && v != "Default" { - es = append(es, fmt.Errorf("%s must be either ClusterFirst or Default", key)) - } - return -} - -func validateRestartPolicy(value interface{}, key string) (ws []string, es []error) { - v := value.(string) - switch v { - case "Always", "OnFailure", "Never": - return - default: - es = append(es, fmt.Errorf("%s must be one of Always, OnFailure or Never ", key)) - } - return -} - -func validateTerminationGracePeriodSeconds(value interface{}, key string) (ws []string, es []error) { - v := value.(int) - if v < 0 { - es = append(es, fmt.Errorf("%s must be greater than or equal to 0", key)) - } - return -} - -func validateAttributeValueDoesNotContain(searchString string) schema.SchemaValidateFunc { - return func(v 
interface{}, k string) (ws []string, errors []error) { - input := v.(string) - if !strings.Contains(input, searchString) { - errors = append(errors, fmt.Errorf( - "%q must not contain %q", - k, searchString)) - } - return - } -} - -func validateAttributeValueIsIn(validValues []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - input := v.(string) - isValid := false - for _, s := range validValues { - if s == input { - isValid = true - break - } - } - if !isValid { - errors = append(errors, fmt.Errorf( - "%q must contain a value from %#v, got %q", - k, validValues, input)) - } - return - - } -} diff --git a/builtin/providers/librato/common_helpers_test.go b/builtin/providers/librato/common_helpers_test.go deleted file mode 100644 index dde279781..000000000 --- a/builtin/providers/librato/common_helpers_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package librato - -import ( - "log" - "testing" - "time" -) - -func sleep(t *testing.T, amount time.Duration) func() { - return func() { - log.Printf("[INFO] Sleeping for %d seconds...", amount) - time.Sleep(amount * time.Second) - } -} diff --git a/builtin/providers/librato/provider.go b/builtin/providers/librato/provider.go deleted file mode 100644 index 203e06fac..000000000 --- a/builtin/providers/librato/provider.go +++ /dev/null @@ -1,44 +0,0 @@ -package librato - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/henrikhodne/go-librato/librato" -) - -// Provider returns a schema.Provider for Librato. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("LIBRATO_EMAIL", nil), - Description: "The email address for the Librato account.", - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("LIBRATO_TOKEN", nil), - Description: "The auth token for the Librato account.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "librato_space": resourceLibratoSpace(), - "librato_space_chart": resourceLibratoSpaceChart(), - "librato_metric": resourceLibratoMetric(), - "librato_alert": resourceLibratoAlert(), - "librato_service": resourceLibratoService(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - client := librato.NewClient(d.Get("email").(string), d.Get("token").(string)) - - return client, nil -} diff --git a/builtin/providers/librato/provider_test.go b/builtin/providers/librato/provider_test.go deleted file mode 100644 index f25f17fe2..000000000 --- a/builtin/providers/librato/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package librato - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "librato": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("LIBRATO_EMAIL"); v == "" { - 
t.Fatal("LIBRATO_EMAIL must be set for acceptance tests") - } - - if v := os.Getenv("LIBRATO_TOKEN"); v == "" { - t.Fatal("LIBRATO_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/librato/resource_librato_alert.go b/builtin/providers/librato/resource_librato_alert.go deleted file mode 100644 index 26c1b0a56..000000000 --- a/builtin/providers/librato/resource_librato_alert.go +++ /dev/null @@ -1,480 +0,0 @@ -package librato - -import ( - "bytes" - "fmt" - "log" - "math" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/henrikhodne/go-librato/librato" -) - -func resourceLibratoAlert() *schema.Resource { - return &schema.Resource{ - Create: resourceLibratoAlertCreate, - Read: resourceLibratoAlertRead, - Update: resourceLibratoAlertUpdate, - Delete: resourceLibratoAlertDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "active": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "rearm_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 600, - }, - "services": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "condition": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "metric_name": { - Type: schema.TypeString, - Required: true, - }, - "source": { - Type: schema.TypeString, - Optional: true, - }, - "detect_reset": { - Type: schema.TypeBool, - Optional: true, - }, - "duration": { - Type: schema.TypeInt, - Optional: true, - }, - "threshold": { - Type: schema.TypeFloat, - Optional: true, - }, - "summary_function": { - Type: 
schema.TypeString, - Optional: true, - }, - }, - }, - Set: resourceLibratoAlertConditionsHash, - }, - "attributes": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "runbook_url": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceLibratoAlertConditionsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["metric_name"].(string))) - - source, present := m["source"] - if present { - buf.WriteString(fmt.Sprintf("%s-", source.(string))) - } - - detectReset, present := m["detect_reset"] - if present { - buf.WriteString(fmt.Sprintf("%t-", detectReset.(bool))) - } - - duration, present := m["duration"] - if present { - buf.WriteString(fmt.Sprintf("%d-", duration.(int))) - } - - threshold, present := m["threshold"] - if present { - buf.WriteString(fmt.Sprintf("%f-", threshold.(float64))) - } - - summaryFunction, present := m["summary_function"] - if present { - buf.WriteString(fmt.Sprintf("%s-", summaryFunction.(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceLibratoAlertCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*librato.Client) - - alert := librato.Alert{ - Name: librato.String(d.Get("name").(string)), - } - if v, ok := d.GetOk("description"); ok { - alert.Description = librato.String(v.(string)) - } - // GetOK returns not OK for false boolean values, use Get - alert.Active = librato.Bool(d.Get("active").(bool)) - if v, ok := d.GetOk("rearm_seconds"); ok { - alert.RearmSeconds = librato.Uint(uint(v.(int))) - } - if v, ok := d.GetOk("services"); ok { - vs := v.(*schema.Set) - services := make([]*string, vs.Len()) - for i, serviceData := range vs.List() { - services[i] = librato.String(serviceData.(string)) - } - alert.Services = services - } - if v, ok := 
d.GetOk("condition"); ok { - vs := v.(*schema.Set) - conditions := make([]librato.AlertCondition, vs.Len()) - for i, conditionDataM := range vs.List() { - conditionData := conditionDataM.(map[string]interface{}) - var condition librato.AlertCondition - if v, ok := conditionData["type"].(string); ok && v != "" { - condition.Type = librato.String(v) - } - if v, ok := conditionData["threshold"].(float64); ok && !math.IsNaN(v) { - condition.Threshold = librato.Float(v) - } - if v, ok := conditionData["metric_name"].(string); ok && v != "" { - condition.MetricName = librato.String(v) - } - if v, ok := conditionData["source"].(string); ok && v != "" { - condition.Source = librato.String(v) - } - if v, ok := conditionData["detect_reset"].(bool); ok { - condition.DetectReset = librato.Bool(v) - } - if v, ok := conditionData["duration"].(int); ok { - condition.Duration = librato.Uint(uint(v)) - } - if v, ok := conditionData["summary_function"].(string); ok && v != "" { - condition.SummaryFunction = librato.String(v) - } - conditions[i] = condition - } - alert.Conditions = conditions - } - if v, ok := d.GetOk("attributes"); ok { - attributeData := v.([]interface{}) - if len(attributeData) > 1 { - return fmt.Errorf("Only one set of attributes per alert is supported") - } else if len(attributeData) == 1 { - if attributeData[0] == nil { - return fmt.Errorf("No attributes found in attributes block") - } - attributeDataMap := attributeData[0].(map[string]interface{}) - attributes := new(librato.AlertAttributes) - if v, ok := attributeDataMap["runbook_url"].(string); ok && v != "" { - attributes.RunbookURL = librato.String(v) - } - alert.Attributes = attributes - } - } - - alertResult, _, err := client.Alerts.Create(&alert) - - if err != nil { - return fmt.Errorf("Error creating Librato alert %s: %s", *alert.Name, err) - } - log.Printf("[INFO] Created Librato alert: %s", *alertResult) - - retryErr := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, _, err := 
client.Alerts.Get(*alertResult.ID) - if err != nil { - if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if retryErr != nil { - return fmt.Errorf("Error creating librato alert: %s", err) - } - - d.SetId(strconv.FormatUint(uint64(*alertResult.ID), 10)) - - return resourceLibratoAlertRead(d, meta) -} - -func resourceLibratoAlertRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*librato.Client) - id, err := strconv.ParseUint(d.Id(), 10, 0) - if err != nil { - return err - } - - log.Printf("[INFO] Reading Librato Alert: %d", id) - alert, _, err := client.Alerts.Get(uint(id)) - if err != nil { - if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("Error reading Librato Alert %s: %s", d.Id(), err) - } - log.Printf("[INFO] Received Librato Alert: %s", *alert) - - d.Set("name", alert.Name) - - if alert.Description != nil { - if err := d.Set("description", alert.Description); err != nil { - return err - } - } - if alert.Active != nil { - if err := d.Set("active", alert.Active); err != nil { - return err - } - } - if alert.RearmSeconds != nil { - if err := d.Set("rearm_seconds", alert.RearmSeconds); err != nil { - return err - } - } - - // Since the following aren't simple terraform types (TypeList), it's best to - // catch the error returned from the d.Set() function, and handle accordingly. 
- services := resourceLibratoAlertServicesGather(d, alert.Services.([]interface{})) - if err := d.Set("services", schema.NewSet(schema.HashString, services)); err != nil { - return err - } - - conditions := resourceLibratoAlertConditionsGather(d, alert.Conditions) - if err := d.Set("condition", schema.NewSet(resourceLibratoAlertConditionsHash, conditions)); err != nil { - return err - } - - attributes := resourceLibratoAlertAttributesGather(d, alert.Attributes) - if err := d.Set("attributes", attributes); err != nil { - return err - } - - return nil -} - -func resourceLibratoAlertServicesGather(d *schema.ResourceData, services []interface{}) []interface{} { - retServices := make([]interface{}, 0, len(services)) - - for _, s := range services { - serviceData := s.(map[string]interface{}) - // ID field is returned as float64, for whatever reason - retServices = append(retServices, fmt.Sprintf("%.f", serviceData["id"])) - } - - return retServices -} - -func resourceLibratoAlertConditionsGather(d *schema.ResourceData, conditions []librato.AlertCondition) []interface{} { - retConditions := make([]interface{}, 0, len(conditions)) - for _, c := range conditions { - condition := make(map[string]interface{}) - if c.Type != nil { - condition["type"] = *c.Type - } - if c.Threshold != nil { - condition["threshold"] = *c.Threshold - } - if c.MetricName != nil { - condition["metric_name"] = *c.MetricName - } - if c.Source != nil { - condition["source"] = *c.Source - } - if c.DetectReset != nil { - condition["detect_reset"] = *c.MetricName - } - if c.Duration != nil { - condition["duration"] = int(*c.Duration) - } - if c.SummaryFunction != nil { - condition["summary_function"] = *c.SummaryFunction - } - retConditions = append(retConditions, condition) - } - - return retConditions -} - -// Flattens an attributes hash into something that flatmap.Flatten() can handle -func resourceLibratoAlertAttributesGather(d *schema.ResourceData, attributes *librato.AlertAttributes) 
[]map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if attributes != nil { - retAttributes := make(map[string]interface{}) - if attributes.RunbookURL != nil { - retAttributes["runbook_url"] = *attributes.RunbookURL - } - result = append(result, retAttributes) - } - - return result -} - -func resourceLibratoAlertUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*librato.Client) - - id, err := strconv.ParseUint(d.Id(), 10, 0) - if err != nil { - return err - } - - alert := new(librato.Alert) - alert.Name = librato.String(d.Get("name").(string)) - - if d.HasChange("description") { - alert.Description = librato.String(d.Get("description").(string)) - } - if d.HasChange("active") { - alert.Active = librato.Bool(d.Get("active").(bool)) - } - if d.HasChange("rearm_seconds") { - alert.RearmSeconds = librato.Uint(uint(d.Get("rearm_seconds").(int))) - } - if d.HasChange("services") { - vs := d.Get("services").(*schema.Set) - services := make([]*string, vs.Len()) - for i, serviceData := range vs.List() { - services[i] = librato.String(serviceData.(string)) - } - alert.Services = services - } - - vs := d.Get("condition").(*schema.Set) - conditions := make([]librato.AlertCondition, vs.Len()) - for i, conditionDataM := range vs.List() { - conditionData := conditionDataM.(map[string]interface{}) - var condition librato.AlertCondition - if v, ok := conditionData["type"].(string); ok && v != "" { - condition.Type = librato.String(v) - } - if v, ok := conditionData["threshold"].(float64); ok && !math.IsNaN(v) { - condition.Threshold = librato.Float(v) - } - if v, ok := conditionData["metric_name"].(string); ok && v != "" { - condition.MetricName = librato.String(v) - } - if v, ok := conditionData["source"].(string); ok && v != "" { - condition.Source = librato.String(v) - } - if v, ok := conditionData["detect_reset"].(bool); ok { - condition.DetectReset = librato.Bool(v) - } - if v, ok := conditionData["duration"].(int); ok { - 
condition.Duration = librato.Uint(uint(v)) - } - if v, ok := conditionData["summary_function"].(string); ok && v != "" { - condition.SummaryFunction = librato.String(v) - } - conditions[i] = condition - alert.Conditions = conditions - } - if d.HasChange("attributes") { - attributeData := d.Get("attributes").([]interface{}) - if attributeData[0] == nil { - return fmt.Errorf("No attributes found in attributes block") - } - attributeDataMap := attributeData[0].(map[string]interface{}) - attributes := new(librato.AlertAttributes) - if v, ok := attributeDataMap["runbook_url"].(string); ok && v != "" { - attributes.RunbookURL = librato.String(v) - } - alert.Attributes = attributes - } - - log.Printf("[INFO] Updating Librato alert: %s", alert) - _, updErr := client.Alerts.Update(uint(id), alert) - if updErr != nil { - return fmt.Errorf("Error updating Librato alert: %s", updErr) - } - - log.Printf("[INFO] Updated Librato alert %d", id) - - // Wait for propagation since Librato updates are eventually consistent - wait := resource.StateChangeConf{ - Pending: []string{fmt.Sprintf("%t", false)}, - Target: []string{fmt.Sprintf("%t", true)}, - Timeout: 5 * time.Minute, - MinTimeout: 2 * time.Second, - ContinuousTargetOccurence: 5, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if Librato Alert %d was updated yet", id) - changedAlert, _, getErr := client.Alerts.Get(uint(id)) - if getErr != nil { - return changedAlert, "", getErr - } - return changedAlert, "true", nil - }, - } - - _, err = wait.WaitForState() - if err != nil { - return fmt.Errorf("Failed updating Librato Alert %d: %s", id, err) - } - - return resourceLibratoAlertRead(d, meta) -} - -func resourceLibratoAlertDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*librato.Client) - id, err := strconv.ParseUint(d.Id(), 10, 0) - if err != nil { - return err - } - - log.Printf("[INFO] Deleting Alert: %d", id) - _, err = client.Alerts.Delete(uint(id)) - if err != nil 
{ - return fmt.Errorf("Error deleting Alert: %s", err) - } - - retryErr := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, _, err := client.Alerts.Get(uint(id)) - if err != nil { - if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { - return nil - } - return resource.NonRetryableError(err) - } - return resource.RetryableError(fmt.Errorf("alert still exists")) - }) - if retryErr != nil { - return fmt.Errorf("Error deleting librato alert: %s", err) - } - - return nil -} diff --git a/builtin/providers/librato/resource_librato_alert_test.go b/builtin/providers/librato/resource_librato_alert_test.go deleted file mode 100644 index 3d2a76c80..000000000 --- a/builtin/providers/librato/resource_librato_alert_test.go +++ /dev/null @@ -1,345 +0,0 @@ -package librato - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/henrikhodne/go-librato/librato" -) - -func TestAccLibratoAlert_Minimal(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckLibratoAlertConfig_minimal(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertName(&alert, name), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - ), - }, - }, - }) -} - -func TestAccLibratoAlert_Basic(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccCheckLibratoAlertConfig_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertName(&alert, name), - testAccCheckLibratoAlertDescription(&alert, "A Test Alert"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - ), - }, - }, - }) -} - -func TestAccLibratoAlert_Full(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckLibratoAlertConfig_full(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertName(&alert, name), - testAccCheckLibratoAlertDescription(&alert, "A Test Alert"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.836525194.metric_name", "librato.cpu.percent.idle"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.836525194.type", "above"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.836525194.threshold", "10"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.836525194.duration", "600"), - ), - }, - }, - }) -} - -func TestAccLibratoAlert_Updated(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckLibratoAlertConfig_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertDescription(&alert, "A Test Alert"), - 
resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - ), - }, - { - Config: testAccCheckLibratoAlertConfig_new_value(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertDescription(&alert, "A modified Test Alert"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "description", "A modified Test Alert"), - ), - }, - }, - }) -} - -func TestAccLibratoAlert_Rename(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - newName := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckLibratoAlertConfig_basic(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - ), - }, - { - Config: testAccCheckLibratoAlertConfig_basic(newName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", newName), - ), - }, - }, - }) -} - -func TestAccLibratoAlert_FullUpdate(t *testing.T) { - var alert librato.Alert - name := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLibratoAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckLibratoAlertConfig_full_update(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), - testAccCheckLibratoAlertName(&alert, name), - testAccCheckLibratoAlertDescription(&alert, "A Test Alert"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "name", name), - resource.TestCheckResourceAttr( - 
"librato_alert.foobar", "rearm_seconds", "1200"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.2524844643.metric_name", "librato.cpu.percent.idle"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.2524844643.type", "above"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.2524844643.threshold", "10"), - resource.TestCheckResourceAttr( - "librato_alert.foobar", "condition.2524844643.duration", "60"), - ), - }, - }, - }) -} - -func testAccCheckLibratoAlertDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*librato.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "librato_alert" { - continue - } - - id, err := strconv.ParseUint(rs.Primary.ID, 10, 0) - if err != nil { - return fmt.Errorf("ID not a number") - } - - _, _, err = client.Alerts.Get(uint(id)) - - if err == nil { - return fmt.Errorf("Alert still exists") - } - } - - return nil -} - -func testAccCheckLibratoAlertName(alert *librato.Alert, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if alert.Name == nil || *alert.Name != name { - return fmt.Errorf("Bad name: %s", *alert.Name) - } - - return nil - } -} - -func testAccCheckLibratoAlertDescription(alert *librato.Alert, description string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if alert.Description == nil || *alert.Description != description { - return fmt.Errorf("Bad description: %s", *alert.Description) - } - - return nil - } -} - -func testAccCheckLibratoAlertExists(n string, alert *librato.Alert) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Alert ID is set") - } - - client := testAccProvider.Meta().(*librato.Client) - - id, err := strconv.ParseUint(rs.Primary.ID, 10, 0) - if err != nil { - return 
fmt.Errorf("ID not a number") - } - - foundAlert, _, err := client.Alerts.Get(uint(id)) - - if err != nil { - return err - } - - if foundAlert.ID == nil || *foundAlert.ID != uint(id) { - return fmt.Errorf("Alert not found") - } - - *alert = *foundAlert - - return nil - } -} - -func testAccCheckLibratoAlertConfig_minimal(name string) string { - return fmt.Sprintf(` -resource "librato_alert" "foobar" { - name = "%s" -}`, name) -} - -func testAccCheckLibratoAlertConfig_basic(name string) string { - return fmt.Sprintf(` -resource "librato_alert" "foobar" { - name = "%s" - description = "A Test Alert" -}`, name) -} - -func testAccCheckLibratoAlertConfig_new_value(name string) string { - return fmt.Sprintf(` -resource "librato_alert" "foobar" { - name = "%s" - description = "A modified Test Alert" -}`, name) -} - -func testAccCheckLibratoAlertConfig_full(name string) string { - return fmt.Sprintf(` -resource "librato_service" "foobar" { - title = "Foo Bar" - type = "mail" - settings = < 0 && endpoint[0] == '/' { - proto = "unix" - } - - // mysqlts is the thread-safe implementation of mymysql, so we can - // safely re-use the same connection between multiple parallel - // operations. 
- conn := mysqlts.New(proto, "", endpoint, username, password) - - err := conn.Connect() - if err != nil { - return nil, err - } - - ver, err := serverVersion(conn) - if err != nil { - return nil, err - } - - return &providerConfiguration{ - Conn: conn, - ServerVersion: ver, - }, nil -} - -var identQuoteReplacer = strings.NewReplacer("`", "``") - -func quoteIdentifier(in string) string { - return fmt.Sprintf("`%s`", identQuoteReplacer.Replace(in)) -} - -func serverVersion(conn mysqlc.Conn) (*version.Version, error) { - rows, _, err := conn.Query("SELECT VERSION()") - if err != nil { - return nil, err - } - if len(rows) == 0 { - return nil, fmt.Errorf("SELECT VERSION() returned an empty set") - } - - return version.NewVersion(rows[0].Str(0)) -} diff --git a/builtin/providers/mysql/provider_test.go b/builtin/providers/mysql/provider_test.go deleted file mode 100644 index 824e2b2be..000000000 --- a/builtin/providers/mysql/provider_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package mysql - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need access to a MySQL server. -// Amazon RDS is one way to get a MySQL server. If you use RDS, you can -// use the root account credentials you specified when creating an RDS -// instance to get the access necessary to run these tests. (the tests -// assume full access to the server.) -// -// Set the MYSQL_ENDPOINT and MYSQL_USERNAME environment variables before -// running the tests. If the given user has a password then you will also need -// to set MYSQL_PASSWORD. -// -// The tests assume a reasonably-vanilla MySQL configuration. In particular, -// they assume that the "utf8" character set is available and that -// "utf8_bin" is a valid collation that isn't the default for that character -// set. 
-// -// You can run the tests like this: -// make testacc TEST=./builtin/providers/mysql - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "mysql": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - for _, name := range []string{"MYSQL_ENDPOINT", "MYSQL_USERNAME"} { - if v := os.Getenv(name); v == "" { - t.Fatal("MYSQL_ENDPOINT, MYSQL_USERNAME and optionally MYSQL_PASSWORD must be set for acceptance tests") - } - } -} diff --git a/builtin/providers/mysql/resource_database.go b/builtin/providers/mysql/resource_database.go deleted file mode 100644 index 77060c3bf..000000000 --- a/builtin/providers/mysql/resource_database.go +++ /dev/null @@ -1,174 +0,0 @@ -package mysql - -import ( - "fmt" - "log" - "strings" - - mysqlc "github.com/ziutek/mymysql/mysql" - - "github.com/hashicorp/terraform/helper/schema" -) - -const defaultCharacterSetKeyword = "CHARACTER SET " -const defaultCollateKeyword = "COLLATE " - -func resourceDatabase() *schema.Resource { - return &schema.Resource{ - Create: CreateDatabase, - Update: UpdateDatabase, - Read: ReadDatabase, - Delete: DeleteDatabase, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "default_character_set": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "utf8", - }, - - "default_collation": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "utf8_general_ci", - }, - }, - } -} - -func CreateDatabase(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*providerConfiguration).Conn - - stmtSQL := databaseConfigSQL("CREATE", d) - log.Println("Executing statement:", stmtSQL) - - _, _, err := conn.Query(stmtSQL) - if err != nil { - return err - } - - d.SetId(d.Get("name").(string)) - - return nil -} - -func UpdateDatabase(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := databaseConfigSQL("ALTER", d) - log.Println("Executing statement:", stmtSQL) - - _, _, err := conn.Query(stmtSQL) - if err != nil { - return err - } - - return nil -} - -func ReadDatabase(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - // This is kinda flimsy-feeling, since it depends on the formatting - // of the SHOW CREATE DATABASE output... but this data doesn't seem - // to be available any other way, so hopefully MySQL keeps this - // compatible in future releases. - - name := d.Id() - stmtSQL := "SHOW CREATE DATABASE " + quoteIdentifier(name) - - log.Println("Executing query:", stmtSQL) - rows, _, err := conn.Query(stmtSQL) - if err != nil { - if mysqlErr, ok := err.(*mysqlc.Error); ok { - if mysqlErr.Code == mysqlc.ER_BAD_DB_ERROR { - d.SetId("") - return nil - } - } - return err - } - - row := rows[0] - createSQL := string(row[1].([]byte)) - - defaultCharset := extractIdentAfter(createSQL, defaultCharacterSetKeyword) - defaultCollation := extractIdentAfter(createSQL, defaultCollateKeyword) - - if defaultCollation == "" && defaultCharset != "" { - // MySQL doesn't return the collation if it's the default one for - // the charset, so if we don't have a collation we need to go - // hunt for the default. 
- stmtSQL := "SHOW COLLATION WHERE `Charset` = '%s' AND `Default` = 'Yes'" - rows, _, err := conn.Query(stmtSQL, defaultCharset) - if err != nil { - return fmt.Errorf("Error getting default charset: %s", err) - } - if len(rows) == 0 { - return fmt.Errorf("Charset %s has no default collation", defaultCharset) - } - row := rows[0] - defaultCollation = string(row[0].([]byte)) - } - - d.Set("default_character_set", defaultCharset) - d.Set("default_collation", defaultCollation) - - return nil -} - -func DeleteDatabase(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - name := d.Id() - stmtSQL := "DROP DATABASE " + quoteIdentifier(name) - log.Println("Executing statement:", stmtSQL) - - _, _, err := conn.Query(stmtSQL) - if err == nil { - d.SetId("") - } - return err -} - -func databaseConfigSQL(verb string, d *schema.ResourceData) string { - name := d.Get("name").(string) - defaultCharset := d.Get("default_character_set").(string) - defaultCollation := d.Get("default_collation").(string) - - var defaultCharsetClause string - var defaultCollationClause string - - if defaultCharset != "" { - defaultCharsetClause = defaultCharacterSetKeyword + quoteIdentifier(defaultCharset) - } - if defaultCollation != "" { - defaultCollationClause = defaultCollateKeyword + quoteIdentifier(defaultCollation) - } - - return fmt.Sprintf( - "%s DATABASE %s %s %s", - verb, - quoteIdentifier(name), - defaultCharsetClause, - defaultCollationClause, - ) -} - -func extractIdentAfter(sql string, keyword string) string { - charsetIndex := strings.Index(sql, keyword) - if charsetIndex != -1 { - charsetIndex += len(keyword) - remain := sql[charsetIndex:] - spaceIndex := strings.IndexRune(remain, ' ') - return remain[:spaceIndex] - } - - return "" -} diff --git a/builtin/providers/mysql/resource_database_test.go b/builtin/providers/mysql/resource_database_test.go deleted file mode 100644 index cc8f01f5e..000000000 --- 
a/builtin/providers/mysql/resource_database_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package mysql - -import ( - "fmt" - "strings" - "testing" - - mysqlc "github.com/ziutek/mymysql/mysql" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDatabase(t *testing.T) { - var dbName string - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDatabaseCheckDestroy(dbName), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDatabaseConfig_basic, - Check: testAccDatabaseCheck( - "mysql_database.test", &dbName, - ), - }, - }, - }) -} - -func testAccDatabaseCheck(rn string, name *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("database id not set") - } - - conn := testAccProvider.Meta().(*providerConfiguration).Conn - rows, _, err := conn.Query("SHOW CREATE DATABASE terraform_acceptance_test") - if err != nil { - return fmt.Errorf("error reading database: %s", err) - } - if len(rows) != 1 { - return fmt.Errorf("expected 1 row reading database but got %d", len(rows)) - } - - row := rows[0] - createSQL := string(row[1].([]byte)) - - if strings.Index(createSQL, "CHARACTER SET utf8") == -1 { - return fmt.Errorf("database default charset isn't utf8") - } - if strings.Index(createSQL, "COLLATE utf8_bin") == -1 { - return fmt.Errorf("database default collation isn't utf8_bin") - } - - *name = rs.Primary.ID - - return nil - } -} - -func testAccDatabaseCheckDestroy(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*providerConfiguration).Conn - - _, _, err := conn.Query("SHOW CREATE DATABASE terraform_acceptance_test") - if err == nil { - return fmt.Errorf("database still exists after 
destroy") - } - if mysqlErr, ok := err.(*mysqlc.Error); ok { - if mysqlErr.Code == mysqlc.ER_BAD_DB_ERROR { - return nil - } - } - - return fmt.Errorf("got unexpected error: %s", err) - } -} - -const testAccDatabaseConfig_basic = ` -resource "mysql_database" "test" { - name = "terraform_acceptance_test" - default_character_set = "utf8" - default_collation = "utf8_bin" -} -` diff --git a/builtin/providers/mysql/resource_grant.go b/builtin/providers/mysql/resource_grant.go deleted file mode 100644 index 0414fe441..000000000 --- a/builtin/providers/mysql/resource_grant.go +++ /dev/null @@ -1,132 +0,0 @@ -package mysql - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceGrant() *schema.Resource { - return &schema.Resource{ - Create: CreateGrant, - Update: nil, - Read: ReadGrant, - Delete: DeleteGrant, - - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "localhost", - }, - - "database": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "privileges": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "grant": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - }, - } -} - -func CreateGrant(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - // create a comma-delimited string of privileges - var privileges string - var privilegesList []string - vL := d.Get("privileges").(*schema.Set).List() - for _, v := range vL { - privilegesList = append(privilegesList, v.(string)) - } - privileges = strings.Join(privilegesList, ",") - - stmtSQL := fmt.Sprintf("GRANT %s on %s.* TO '%s'@'%s'", - privileges, - 
d.Get("database").(string), - d.Get("user").(string), - d.Get("host").(string)) - - if d.Get("grant").(bool) { - stmtSQL = " WITH GRANT OPTION" - } - - log.Println("Executing statement:", stmtSQL) - _, _, err := conn.Query(stmtSQL) - if err != nil { - return err - } - - user := fmt.Sprintf("%s@%s:%s", d.Get("user").(string), d.Get("host").(string), d.Get("database")) - d.SetId(user) - - return ReadGrant(d, meta) -} - -func ReadGrant(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := fmt.Sprintf("SHOW GRANTS FOR '%s'@'%s'", - d.Get("user").(string), - d.Get("host").(string)) - - log.Println("Executing statement:", stmtSQL) - - _, _, err := conn.Query(stmtSQL) - if err != nil { - d.SetId("") - } - return nil -} - -func DeleteGrant(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := fmt.Sprintf("REVOKE GRANT OPTION ON %s.* FROM '%s'@'%s'", - d.Get("database").(string), - d.Get("user").(string), - d.Get("host").(string)) - - log.Println("Executing statement:", stmtSQL) - _, _, err := conn.Query(stmtSQL) - if err != nil { - return err - } - - stmtSQL = fmt.Sprintf("REVOKE ALL ON %s.* FROM '%s'@'%s'", - d.Get("database").(string), - d.Get("user").(string), - d.Get("host").(string)) - - log.Println("Executing statement:", stmtSQL) - _, _, err = conn.Query(stmtSQL) - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/mysql/resource_grant_test.go b/builtin/providers/mysql/resource_grant_test.go deleted file mode 100644 index af6890efa..000000000 --- a/builtin/providers/mysql/resource_grant_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package mysql - -import ( - "fmt" - "log" - "strings" - "testing" - - mysqlc "github.com/ziutek/mymysql/mysql" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccGrant(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGrantCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccGrantConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccPrivilegeExists("mysql_grant.test", "SELECT"), - resource.TestCheckResourceAttr("mysql_grant.test", "user", "jdoe"), - resource.TestCheckResourceAttr("mysql_grant.test", "host", "example.com"), - resource.TestCheckResourceAttr("mysql_grant.test", "database", "foo"), - ), - }, - }, - }) -} - -func testAccPrivilegeExists(rn string, privilege string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("grant id not set") - } - - id := strings.Split(rs.Primary.ID, ":") - userhost := strings.Split(id[0], "@") - user := userhost[0] - host := userhost[1] - - conn := testAccProvider.Meta().(*providerConfiguration).Conn - stmtSQL := fmt.Sprintf("SHOW GRANTS for '%s'@'%s'", user, host) - log.Println("Executing statement:", stmtSQL) - rows, _, err := conn.Query(stmtSQL) - if err != nil { - return fmt.Errorf("error reading grant: %s", err) - } - - if len(rows) == 0 { - return fmt.Errorf("grant not found for '%s'@'%s'", user, host) - } - - privilegeFound := false - for _, row := range rows { - log.Printf("Result Row: %s", row[0]) - privIndex := strings.Index(string(row[0].([]byte)), privilege) - if privIndex != -1 { - privilegeFound = true - } - } - - if !privilegeFound { - return fmt.Errorf("grant no found for '%s'@'%s'", user, host) - } - - return nil - } -} - -func testAccGrantCheckDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*providerConfiguration).Conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "mysql_grant" { - continue - } - - id := strings.Split(rs.Primary.ID, ":") - userhost := strings.Split(id[0], "@") - user := 
userhost[0] - host := userhost[1] - - stmtSQL := fmt.Sprintf("SHOW GRANTS for '%s'@'%s'", user, host) - log.Println("Executing statement:", stmtSQL) - rows, _, err := conn.Query(stmtSQL) - if err != nil { - if mysqlErr, ok := err.(*mysqlc.Error); ok { - if mysqlErr.Code == mysqlc.ER_NONEXISTING_GRANT { - return nil - } - } - - return fmt.Errorf("error reading grant: %s", err) - } - - if len(rows) != 0 { - return fmt.Errorf("grant still exists for'%s'@'%s'", user, host) - } - } - return nil -} - -const testAccGrantConfig_basic = ` -resource "mysql_user" "test" { - user = "jdoe" - host = "example.com" - password = "password" -} - -resource "mysql_grant" "test" { - user = "${mysql_user.test.user}" - host = "${mysql_user.test.host}" - database = "foo" - privileges = ["UPDATE", "SELECT"] -} -` diff --git a/builtin/providers/mysql/resource_user.go b/builtin/providers/mysql/resource_user.go deleted file mode 100644 index ce9bec118..000000000 --- a/builtin/providers/mysql/resource_user.go +++ /dev/null @@ -1,130 +0,0 @@ -package mysql - -import ( - "fmt" - "log" - - "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUser() *schema.Resource { - return &schema.Resource{ - Create: CreateUser, - Update: UpdateUser, - Read: ReadUser, - Delete: DeleteUser, - - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "host": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "localhost", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - }, - } -} - -func CreateUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := fmt.Sprintf("CREATE USER '%s'@'%s'", - d.Get("user").(string), - d.Get("host").(string)) - - password := d.Get("password").(string) - if password != "" { - stmtSQL = stmtSQL + 
fmt.Sprintf(" IDENTIFIED BY '%s'", password) - } - - log.Println("Executing statement:", stmtSQL) - _, _, err := conn.Query(stmtSQL) - if err != nil { - return err - } - - user := fmt.Sprintf("%s@%s", d.Get("user").(string), d.Get("host").(string)) - d.SetId(user) - - return nil -} - -func UpdateUser(d *schema.ResourceData, meta interface{}) error { - conf := meta.(*providerConfiguration) - - if d.HasChange("password") { - _, newpw := d.GetChange("password") - var stmtSQL string - - /* ALTER USER syntax introduced in MySQL 5.7.6 deprecates SET PASSWORD (GH-8230) */ - ver, _ := version.NewVersion("5.7.6") - if conf.ServerVersion.LessThan(ver) { - stmtSQL = fmt.Sprintf("SET PASSWORD FOR '%s'@'%s' = PASSWORD('%s')", - d.Get("user").(string), - d.Get("host").(string), - newpw.(string)) - } else { - stmtSQL = fmt.Sprintf("ALTER USER '%s'@'%s' IDENTIFIED BY '%s'", - d.Get("user").(string), - d.Get("host").(string), - newpw.(string)) - } - - log.Println("Executing query:", stmtSQL) - _, _, err := conf.Conn.Query(stmtSQL) - if err != nil { - return err - } - } - - return nil -} - -func ReadUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := fmt.Sprintf("SELECT USER FROM mysql.user WHERE USER='%s'", - d.Get("user").(string)) - - log.Println("Executing statement:", stmtSQL) - - rows, _, err := conn.Query(stmtSQL) - log.Println("Returned rows:", len(rows)) - if err != nil { - return err - } - if len(rows) == 0 { - d.SetId("") - } - return nil -} - -func DeleteUser(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*providerConfiguration).Conn - - stmtSQL := fmt.Sprintf("DROP USER '%s'@'%s'", - d.Get("user").(string), - d.Get("host").(string)) - - log.Println("Executing statement:", stmtSQL) - - _, _, err := conn.Query(stmtSQL) - if err == nil { - d.SetId("") - } - return err -} diff --git a/builtin/providers/mysql/resource_user_test.go b/builtin/providers/mysql/resource_user_test.go deleted file 
mode 100644 index a9cdd3543..000000000 --- a/builtin/providers/mysql/resource_user_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package mysql - -import ( - "fmt" - "log" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccUser(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccUserCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccUserExists("mysql_user.test"), - resource.TestCheckResourceAttr("mysql_user.test", "user", "jdoe"), - resource.TestCheckResourceAttr("mysql_user.test", "host", "example.com"), - resource.TestCheckResourceAttr("mysql_user.test", "password", "password"), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_newPass, - Check: resource.ComposeTestCheckFunc( - testAccUserExists("mysql_user.test"), - resource.TestCheckResourceAttr("mysql_user.test", "user", "jdoe"), - resource.TestCheckResourceAttr("mysql_user.test", "host", "example.com"), - resource.TestCheckResourceAttr("mysql_user.test", "password", "password2"), - ), - }, - }, - }) -} - -func testAccUserExists(rn string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("user id not set") - } - - conn := testAccProvider.Meta().(*providerConfiguration).Conn - stmtSQL := fmt.Sprintf("SELECT count(*) from mysql.user where CONCAT(user, '@', host) = '%s'", rs.Primary.ID) - log.Println("Executing statement:", stmtSQL) - rows, _, err := conn.Query(stmtSQL) - if err != nil { - return fmt.Errorf("error reading user: %s", err) - } - if len(rows) != 1 { - return fmt.Errorf("expected 1 row reading user but got %d", len(rows)) - } - - return nil - } -} - -func 
testAccUserCheckDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*providerConfiguration).Conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "mysql_user" { - continue - } - - stmtSQL := fmt.Sprintf("SELECT user from mysql.user where CONCAT(user, '@', host) = '%s'", rs.Primary.ID) - log.Println("Executing statement:", stmtSQL) - rows, _, err := conn.Query(stmtSQL) - if err != nil { - return fmt.Errorf("error issuing query: %s", err) - } - if len(rows) != 0 { - return fmt.Errorf("user still exists after destroy") - } - } - return nil -} - -const testAccUserConfig_basic = ` -resource "mysql_user" "test" { - user = "jdoe" - host = "example.com" - password = "password" -} -` - -const testAccUserConfig_newPass = ` -resource "mysql_user" "test" { - user = "jdoe" - host = "example.com" - password = "password2" -} -` diff --git a/builtin/providers/newrelic/config.go b/builtin/providers/newrelic/config.go deleted file mode 100644 index da96c6447..000000000 --- a/builtin/providers/newrelic/config.go +++ /dev/null @@ -1,29 +0,0 @@ -package newrelic - -import ( - "log" - - "github.com/hashicorp/terraform/helper/logging" - newrelic "github.com/paultyng/go-newrelic/api" -) - -// Config contains New Relic provider settings -type Config struct { - APIKey string - APIURL string -} - -// Client returns a new client for accessing New Relic -func (c *Config) Client() (*newrelic.Client, error) { - nrConfig := newrelic.Config{ - APIKey: c.APIKey, - Debug: logging.IsDebugOrHigher(), - BaseURL: c.APIURL, - } - - client := newrelic.New(nrConfig) - - log.Printf("[INFO] New Relic client configured") - - return &client, nil -} diff --git a/builtin/providers/newrelic/data_source_newrelic_application.go b/builtin/providers/newrelic/data_source_newrelic_application.go deleted file mode 100644 index e76a78782..000000000 --- a/builtin/providers/newrelic/data_source_newrelic_application.go +++ /dev/null @@ -1,65 +0,0 @@ -package newrelic - -import ( - "fmt" - 
"log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func dataSourceNewRelicApplication() *schema.Resource { - return &schema.Resource{ - Read: dataSourceNewRelicApplicationRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "instance_ids": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeInt}, - Computed: true, - }, - "host_ids": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeInt}, - Computed: true, - }, - }, - } -} - -func dataSourceNewRelicApplicationRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - log.Printf("[INFO] Reading New Relic applications") - - applications, err := client.ListApplications() - if err != nil { - return err - } - - var application *newrelic.Application - name := d.Get("name").(string) - - for _, a := range applications { - if a.Name == name { - application = &a - break - } - } - - if application == nil { - return fmt.Errorf("The name '%s' does not match any New Relic applications.", name) - } - - d.SetId(strconv.Itoa(application.ID)) - d.Set("name", application.Name) - d.Set("instance_ids", application.Links.InstanceIDs) - d.Set("host_ids", application.Links.HostIDs) - - return nil -} diff --git a/builtin/providers/newrelic/data_source_newrelic_application_test.go b/builtin/providers/newrelic/data_source_newrelic_application_test.go deleted file mode 100644 index 21a85a35b..000000000 --- a/builtin/providers/newrelic/data_source_newrelic_application_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package newrelic - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccNewRelicApplication_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccNewRelicApplicationConfig(), - Check: resource.ComposeTestCheckFunc( - testAccNewRelicApplication("data.newrelic_application.app"), - ), - }, - }, - }) -} - -func testAccNewRelicApplication(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return fmt.Errorf("Expected to get an application from New Relic") - } - - if a["name"] != testAccExpectedApplicationName { - return fmt.Errorf("Expected the application name to be: %s, but got: %s", testAccExpectedApplicationName, a["name"]) - } - - return nil - } -} - -// The test application for this data source is created in provider_test.go -func testAccNewRelicApplicationConfig() string { - return fmt.Sprintf(` -data "newrelic_application" "app" { - name = "%s" -} -`, testAccExpectedApplicationName) -} diff --git a/builtin/providers/newrelic/helpers.go b/builtin/providers/newrelic/helpers.go deleted file mode 100644 index 18f49135b..000000000 --- a/builtin/providers/newrelic/helpers.go +++ /dev/null @@ -1,37 +0,0 @@ -package newrelic - -import ( - "fmt" - "strconv" - "strings" -) - -func parseIDs(serializedID string, count int) ([]int, error) { - rawIDs := strings.SplitN(serializedID, ":", count) - if len(rawIDs) != count { - return []int{}, fmt.Errorf("Unable to parse ID %v", serializedID) - } - - ids := make([]int, count) - - for i, rawID := range rawIDs { - id, err := strconv.ParseInt(rawID, 10, 32) - if err != nil { - return ids, err - } - - ids[i] = int(id) - } - - return ids, nil -} - -func serializeIDs(ids []int) string { - idStrings := make([]string, len(ids)) - - for i, id := range ids { - idStrings[i] = strconv.Itoa(id) - } - - return strings.Join(idStrings, ":") -} diff --git a/builtin/providers/newrelic/helpers_test.go b/builtin/providers/newrelic/helpers_test.go deleted file mode 100644 index 837434f6e..000000000 --- a/builtin/providers/newrelic/helpers_test.go 
+++ /dev/null @@ -1,26 +0,0 @@ -package newrelic - -import "testing" - -func TestParseIDs_Basic(t *testing.T) { - ids, err := parseIDs("1:2", 2) - if err != nil { - t.Fatal(err) - } - - if len(ids) != 2 { - t.Fatal(len(ids)) - } - - if ids[0] != 1 || ids[1] != 2 { - t.Fatal(ids) - } -} - -func TestSerializeIDs_Basic(t *testing.T) { - id := serializeIDs([]int{1, 2}) - - if id != "1:2" { - t.Fatal(id) - } -} diff --git a/builtin/providers/newrelic/import_newrelic_alert_channel_test.go b/builtin/providers/newrelic/import_newrelic_alert_channel_test.go deleted file mode 100644 index ac85062aa..000000000 --- a/builtin/providers/newrelic/import_newrelic_alert_channel_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package newrelic - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNewRelicAlertChannel_import(t *testing.T) { - resourceName := "newrelic_alert_channel.foo" - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertChannelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertChannelConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/newrelic/import_newrelic_alert_condition_test.go b/builtin/providers/newrelic/import_newrelic_alert_condition_test.go deleted file mode 100644 index e030dfdee..000000000 --- a/builtin/providers/newrelic/import_newrelic_alert_condition_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package newrelic - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNewRelicAlertCondition_import(t *testing.T) { - resourceName := "newrelic_alert_condition.foo" - rName := acctest.RandString(5) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertConditionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertConditionConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/newrelic/import_newrelic_alert_policy_test.go b/builtin/providers/newrelic/import_newrelic_alert_policy_test.go deleted file mode 100644 index a1048a786..000000000 --- a/builtin/providers/newrelic/import_newrelic_alert_policy_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package newrelic - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNewRelicAlertPolicy_import(t *testing.T) { - resourceName := "newrelic_alert_policy.foo" - rName := acctest.RandString(5) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertPolicyConfig(rName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/newrelic/provider.go b/builtin/providers/newrelic/provider.go deleted file mode 100644 index ac3a2e749..000000000 --- a/builtin/providers/newrelic/provider.go +++ /dev/null @@ -1,49 +0,0 @@ -package newrelic - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider represents a resource provider in Terraform -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_key": { - Type: schema.TypeString, - Required: true, - DefaultFunc: 
schema.EnvDefaultFunc("NEWRELIC_API_KEY", nil), - Sensitive: true, - }, - "api_url": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NEWRELIC_API_URL", "https://api.newrelic.com/v2"), - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "newrelic_application": dataSourceNewRelicApplication(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "newrelic_alert_channel": resourceNewRelicAlertChannel(), - "newrelic_alert_condition": resourceNewRelicAlertCondition(), - "newrelic_alert_policy": resourceNewRelicAlertPolicy(), - "newrelic_alert_policy_channel": resourceNewRelicAlertPolicyChannel(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(data *schema.ResourceData) (interface{}, error) { - config := Config{ - APIKey: data.Get("api_key").(string), - APIURL: data.Get("api_url").(string), - } - log.Println("[INFO] Initializing New Relic client") - return config.Client() -} diff --git a/builtin/providers/newrelic/provider_test.go b/builtin/providers/newrelic/provider_test.go deleted file mode 100644 index 7d36419b8..000000000 --- a/builtin/providers/newrelic/provider_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package newrelic - -import ( - "fmt" - "os" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - newrelic "github.com/newrelic/go-agent" -) - -var ( - testAccExpectedApplicationName string - testAccProviders map[string]terraform.ResourceProvider - testAccProvider *schema.Provider -) - -func init() { - testAccExpectedApplicationName = fmt.Sprintf("tf_test_%s", acctest.RandString(10)) - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "newrelic": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func 
TestProviderImpl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("NEWRELIC_API_KEY"); v == "" { - t.Log(v) - t.Fatal("NEWRELIC_API_KEY must be set for acceptance tests") - } - - // setup fake application by logging some metrics - if v := os.Getenv("NEWRELIC_LICENSE_KEY"); len(v) > 0 { - config := newrelic.NewConfig(testAccExpectedApplicationName, v) - app, err := newrelic.NewApplication(config) - if err != nil { - t.Log(err) - t.Fatal("Error setting up New Relic application") - } - - if err := app.WaitForConnection(30 * time.Second); err != nil { - t.Log(err) - t.Fatal("Unable to setup New Relic application connection") - } - - if err := app.RecordCustomEvent("terraform test", nil); err != nil { - t.Log(err) - t.Fatal("Unable to record custom event in New Relic") - } - - app.Shutdown(30 * time.Second) - } else { - t.Log(v) - t.Fatal("NEWRELIC_LICENSE_KEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_channel.go b/builtin/providers/newrelic/resource_newrelic_alert_channel.go deleted file mode 100644 index e8a642d2d..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_channel.go +++ /dev/null @@ -1,168 +0,0 @@ -package newrelic - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - newrelic "github.com/paultyng/go-newrelic/api" -) - -var alertChannelTypes = map[string][]string{ - "campfire": []string{ - "room", - "subdomain", - "token", - }, - "email": []string{ - "include_json_attachment", - "recipients", - }, - "hipchat": []string{ - "auth_token", - "base_url", - "room_id", - }, - "opsgenie": []string{ - "api_key", - "recipients", - "tags", - "teams", - }, - "pagerduty": []string{ - "service_key", - }, - "slack": []string{ - "channel", - "url", - }, - "user": []string{ - "user_id", - }, - "victorops": []string{ - "key", - 
"route_key", - }, - "webhook": []string{ - "auth_password", - "auth_type", - "auth_username", - "base_url", - "headers", - "payload_type", - "payload", - }, -} - -func resourceNewRelicAlertChannel() *schema.Resource { - validAlertChannelTypes := make([]string, 0, len(alertChannelTypes)) - for k := range alertChannelTypes { - validAlertChannelTypes = append(validAlertChannelTypes, k) - } - - return &schema.Resource{ - Create: resourceNewRelicAlertChannelCreate, - Read: resourceNewRelicAlertChannelRead, - // Update: Not currently supported in API - Delete: resourceNewRelicAlertChannelDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(validAlertChannelTypes, false), - }, - "configuration": { - Type: schema.TypeMap, - Required: true, - ForceNew: true, - //TODO: ValidateFunc: (use list of keys from map above) - Sensitive: true, - }, - }, - } -} - -func buildAlertChannelStruct(d *schema.ResourceData) *newrelic.AlertChannel { - channel := newrelic.AlertChannel{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Configuration: d.Get("configuration").(map[string]interface{}), - } - - return &channel -} - -func resourceNewRelicAlertChannelCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - channel := buildAlertChannelStruct(d) - - log.Printf("[INFO] Creating New Relic alert channel %s", channel.Name) - - channel, err := client.CreateAlertChannel(*channel) - if err != nil { - return err - } - - d.SetId(strconv.Itoa(channel.ID)) - - return nil -} - -func resourceNewRelicAlertChannelRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - id, err := strconv.ParseInt(d.Id(), 10, 32) - if err != nil { - return err - } 
- - log.Printf("[INFO] Reading New Relic alert channel %v", id) - - channel, err := client.GetAlertChannel(int(id)) - if err != nil { - if err == newrelic.ErrNotFound { - d.SetId("") - return nil - } - - return err - } - - d.Set("name", channel.Name) - d.Set("type", channel.Type) - if err := d.Set("configuration", channel.Configuration); err != nil { - return fmt.Errorf("[DEBUG] Error setting Alert Channel Configuration: %#v", err) - } - - return nil -} - -func resourceNewRelicAlertChannelDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - id, err := strconv.ParseInt(d.Id(), 10, 32) - if err != nil { - return err - } - - log.Printf("[INFO] Deleting New Relic alert channel %v", id) - - if err := client.DeleteAlertChannel(int(id)); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go b/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go deleted file mode 100644 index a062e26ca..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package newrelic - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func TestAccNewRelicAlertChannel_Basic(t *testing.T) { - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertChannelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertChannelConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-%s", 
rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "type", "email"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "configuration.recipients", "foo@example.com"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "configuration.include_json_attachment", "1"), - ), - }, - resource.TestStep{ - Config: testAccCheckNewRelicAlertChannelConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "type", "email"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "configuration.recipients", "bar@example.com"), - resource.TestCheckResourceAttr( - "newrelic_alert_channel.foo", "configuration.include_json_attachment", "0"), - ), - }, - }, - }) -} - -func testAccCheckNewRelicAlertChannelDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*newrelic.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "newrelic_alert_channel" { - continue - } - - id, err := strconv.ParseInt(r.Primary.ID, 10, 32) - if err != nil { - return err - } - - _, err = client.GetAlertChannel(int(id)) - - if err == nil { - return fmt.Errorf("Alert channel still exists") - } - - } - return nil -} - -func testAccCheckNewRelicAlertChannelExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No channel ID is set") - } - - client := testAccProvider.Meta().(*newrelic.Client) - - id, err := strconv.ParseInt(rs.Primary.ID, 10, 32) - if err != nil { - return err - } - - found, err := client.GetAlertChannel(int(id)) - if err != nil { - return err - } - - if strconv.Itoa(found.ID) != 
rs.Primary.ID { - return fmt.Errorf("Channel not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckNewRelicAlertChannelConfig(rName string) string { - return fmt.Sprintf(` -resource "newrelic_alert_channel" "foo" { - name = "tf-test-%s" - type = "email" - - configuration = { - recipients = "foo@example.com" - include_json_attachment = "1" - } -} -`, rName) -} - -func testAccCheckNewRelicAlertChannelConfigUpdated(rName string) string { - return fmt.Sprintf(` -resource "newrelic_alert_channel" "foo" { - name = "tf-test-updated-%s" - type = "email" - - configuration = { - recipients = "bar@example.com" - include_json_attachment = "0" - } -} -`, rName) -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition.go b/builtin/providers/newrelic/resource_newrelic_alert_condition.go deleted file mode 100644 index 1021d9af2..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_condition.go +++ /dev/null @@ -1,348 +0,0 @@ -package newrelic - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - newrelic "github.com/paultyng/go-newrelic/api" -) - -var alertConditionTypes = map[string][]string{ - "apm_app_metric": []string{ - "apdex", - "error_percentage", - "response_time_background", - "response_time_web", - "throughput_background", - "throughput_web", - "user_defined", - }, - "apm_kt_metric": []string{ - "apdex", - "error_count", - "error_percentage", - "response_time", - "throughput", - }, - "browser_metric": []string{ - "ajax_response_time", - "ajax_throughput", - "dom_processing", - "end_user_apdex", - "network", - "page_rendering", - "page_view_throughput", - "page_views_with_js_errors", - "request_queuing", - "total_page_load", - "user_defined", - "web_application", - }, - "mobile_metric": []string{ - "database", - "images", - "json", - "mobile_crash_rate", - "network_error_percentage", - "network", - 
"status_error_percentage", - "user_defined", - "view_loading", - }, - "servers_metric": []string{ - "cpu_percentage", - "disk_io_percentage", - "fullest_disk_percentage", - "load_average_one_minute", - "memory_percentage", - "user_defined", - }, -} - -func resourceNewRelicAlertCondition() *schema.Resource { - validAlertConditionTypes := make([]string, 0, len(alertConditionTypes)) - for k := range alertConditionTypes { - validAlertConditionTypes = append(validAlertConditionTypes, k) - } - - return &schema.Resource{ - Create: resourceNewRelicAlertConditionCreate, - Read: resourceNewRelicAlertConditionRead, - Update: resourceNewRelicAlertConditionUpdate, - Delete: resourceNewRelicAlertConditionDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "policy_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(validAlertConditionTypes, false), - }, - "entities": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeInt}, - Required: true, - MinItems: 1, - }, - "metric": { - Type: schema.TypeString, - Required: true, - //TODO: ValidateFunc from map - }, - "runbook_url": { - Type: schema.TypeString, - Optional: true, - }, - "condition_scope": { - Type: schema.TypeString, - Optional: true, - }, - "term": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: intInSlice([]int{5, 10, 15, 30, 60, 120}), - }, - "operator": { - Type: schema.TypeString, - Optional: true, - Default: "equal", - ValidateFunc: validation.StringInSlice([]string{"above", "below", "equal"}, false), - }, - "priority": { - Type: schema.TypeString, - Optional: true, - Default: "critical", - ValidateFunc: 
validation.StringInSlice([]string{"critical", "warning"}, false), - }, - "threshold": { - Type: schema.TypeFloat, - Required: true, - ValidateFunc: float64Gte(0.0), - }, - "time_function": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"all", "any"}, false), - }, - }, - }, - Required: true, - MinItems: 1, - }, - "user_defined_metric": { - Type: schema.TypeString, - Optional: true, - }, - "user_defined_value_function": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"average", "min", "max", "total", "sample_size"}, false), - }, - }, - } -} - -func buildAlertConditionStruct(d *schema.ResourceData) *newrelic.AlertCondition { - entitySet := d.Get("entities").([]interface{}) - entities := make([]string, len(entitySet)) - - for i, entity := range entitySet { - entities[i] = strconv.Itoa(entity.(int)) - } - - termSet := d.Get("term").([]interface{}) - terms := make([]newrelic.AlertConditionTerm, len(termSet)) - - for i, termI := range termSet { - termM := termI.(map[string]interface{}) - - terms[i] = newrelic.AlertConditionTerm{ - Duration: termM["duration"].(int), - Operator: termM["operator"].(string), - Priority: termM["priority"].(string), - Threshold: termM["threshold"].(float64), - TimeFunction: termM["time_function"].(string), - } - } - - condition := newrelic.AlertCondition{ - Type: d.Get("type").(string), - Name: d.Get("name").(string), - Enabled: true, - Entities: entities, - Metric: d.Get("metric").(string), - Terms: terms, - PolicyID: d.Get("policy_id").(int), - Scope: d.Get("condition_scope").(string), - } - - if attr, ok := d.GetOk("runbook_url"); ok { - condition.RunbookURL = attr.(string) - } - - if attrM, ok := d.GetOk("user_defined_metric"); ok { - if attrVF, ok := d.GetOk("user_defined_value_function"); ok { - condition.UserDefined = newrelic.AlertConditionUserDefined{ - Metric: attrM.(string), - ValueFunction: attrVF.(string), - } - } - } - - return 
&condition -} - -func readAlertConditionStruct(condition *newrelic.AlertCondition, d *schema.ResourceData) error { - ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - - entities := make([]int, len(condition.Entities)) - for i, entity := range condition.Entities { - v, err := strconv.ParseInt(entity, 10, 32) - if err != nil { - return err - } - entities[i] = int(v) - } - - d.Set("policy_id", policyID) - d.Set("name", condition.Name) - d.Set("type", condition.Type) - d.Set("metric", condition.Metric) - d.Set("runbook_url", condition.RunbookURL) - d.Set("condition_scope", condition.Scope) - d.Set("user_defined_metric", condition.UserDefined.Metric) - d.Set("user_defined_value_function", condition.UserDefined.ValueFunction) - if err := d.Set("entities", entities); err != nil { - return fmt.Errorf("[DEBUG] Error setting alert condition entities: %#v", err) - } - - var terms []map[string]interface{} - - for _, src := range condition.Terms { - dst := map[string]interface{}{ - "duration": src.Duration, - "operator": src.Operator, - "priority": src.Priority, - "threshold": src.Threshold, - "time_function": src.TimeFunction, - } - terms = append(terms, dst) - } - - if err := d.Set("term", terms); err != nil { - return fmt.Errorf("[DEBUG] Error setting alert condition terms: %#v", err) - } - - return nil -} - -func resourceNewRelicAlertConditionCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - condition := buildAlertConditionStruct(d) - - log.Printf("[INFO] Creating New Relic alert condition %s", condition.Name) - - condition, err := client.CreateAlertCondition(*condition) - if err != nil { - return err - } - - d.SetId(serializeIDs([]int{condition.PolicyID, condition.ID})) - - return nil -} - -func resourceNewRelicAlertConditionRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - log.Printf("[INFO] Reading New Relic alert condition %s", d.Id()) - - 
ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - id := ids[1] - - condition, err := client.GetAlertCondition(policyID, id) - if err != nil { - if err == newrelic.ErrNotFound { - d.SetId("") - return nil - } - - return err - } - - return readAlertConditionStruct(condition, d) -} - -func resourceNewRelicAlertConditionUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - condition := buildAlertConditionStruct(d) - - ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - id := ids[1] - - condition.PolicyID = policyID - condition.ID = id - - log.Printf("[INFO] Updating New Relic alert condition %d", id) - - updatedCondition, err := client.UpdateAlertCondition(*condition) - if err != nil { - return err - } - - return readAlertConditionStruct(updatedCondition, d) -} - -func resourceNewRelicAlertConditionDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - id := ids[1] - - log.Printf("[INFO] Deleting New Relic alert condition %d", id) - - if err := client.DeleteAlertCondition(policyID, id); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go b/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go deleted file mode 100644 index a46938d82..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package newrelic - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func TestAccNewRelicAlertCondition_Basic(t *testing.T) { - rName := acctest.RandString(5) - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertConditionDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertConditionConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-%s", rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "type", "apm_app_metric"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "runbook_url", "https://foo.example.com"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "entities.#", "1"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.#", "1"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.duration", "5"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.operator", "below"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.priority", "critical"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.threshold", "0.75"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.time_function", "all"), - ), - }, - resource.TestStep{ - Config: testAccCheckNewRelicAlertConditionConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "runbook_url", "https://bar.example.com"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "entities.#", "1"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.#", "1"), - 
resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.duration", "10"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.operator", "below"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.priority", "critical"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.threshold", "0.65"), - resource.TestCheckResourceAttr( - "newrelic_alert_condition.foo", "term.0.time_function", "all"), - ), - }, - }, - }) -} - -// TODO: func TestAccNewRelicAlertCondition_Multi(t *testing.T) { - -func testAccCheckNewRelicAlertConditionDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*newrelic.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "newrelic_alert_condition" { - continue - } - - ids, err := parseIDs(r.Primary.ID, 2) - if err != nil { - return err - } - - policyID := ids[0] - id := ids[1] - - _, err = client.GetAlertCondition(policyID, id) - if err == nil { - return fmt.Errorf("Alert condition still exists") - } - - } - return nil -} - -func testAccCheckNewRelicAlertConditionExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No alert condition ID is set") - } - - client := testAccProvider.Meta().(*newrelic.Client) - - ids, err := parseIDs(rs.Primary.ID, 2) - if err != nil { - return err - } - - policyID := ids[0] - id := ids[1] - - found, err := client.GetAlertCondition(policyID, id) - if err != nil { - return err - } - - if found.ID != id { - return fmt.Errorf("Alert condition not found: %v - %v", id, found) - } - - return nil - } -} - -func testAccCheckNewRelicAlertConditionConfig(rName string) string { - return fmt.Sprintf(` -data "newrelic_application" "app" { - name = "%[2]s" -} - -resource "newrelic_alert_policy" "foo" { - name = "tf-test-%[1]s" -} - 
-resource "newrelic_alert_condition" "foo" { - policy_id = "${newrelic_alert_policy.foo.id}" - - name = "tf-test-%[1]s" - type = "apm_app_metric" - entities = ["${data.newrelic_application.app.id}"] - metric = "apdex" - runbook_url = "https://foo.example.com" - condition_scope = "application" - - term { - duration = 5 - operator = "below" - priority = "critical" - threshold = "0.75" - time_function = "all" - } -} -`, rName, testAccExpectedApplicationName) -} - -func testAccCheckNewRelicAlertConditionConfigUpdated(rName string) string { - return fmt.Sprintf(` -data "newrelic_application" "app" { - name = "%[2]s" -} - -resource "newrelic_alert_policy" "foo" { - name = "tf-test-updated-%[1]s" -} - -resource "newrelic_alert_condition" "foo" { - policy_id = "${newrelic_alert_policy.foo.id}" - - name = "tf-test-updated-%[1]s" - type = "apm_app_metric" - entities = ["${data.newrelic_application.app.id}"] - metric = "apdex" - runbook_url = "https://bar.example.com" - condition_scope = "application" - - term { - duration = 10 - operator = "below" - priority = "critical" - threshold = "0.65" - time_function = "all" - } -} -`, rName, testAccExpectedApplicationName) -} - -// TODO: const testAccCheckNewRelicAlertConditionConfigMulti = ` diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy.go b/builtin/providers/newrelic/resource_newrelic_alert_policy.go deleted file mode 100644 index befc04cea..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_policy.go +++ /dev/null @@ -1,119 +0,0 @@ -package newrelic - -import ( - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func resourceNewRelicAlertPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceNewRelicAlertPolicyCreate, - Read: resourceNewRelicAlertPolicyRead, - // Update: Not currently supported in API - Delete: resourceNewRelicAlertPolicyDelete, - 
Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "incident_preference": { - Type: schema.TypeString, - Optional: true, - Default: "PER_POLICY", - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"PER_POLICY", "PER_CONDITION", "PER_CONDITION_AND_TARGET"}, false), - }, - "created_at": { - Type: schema.TypeInt, - Computed: true, - }, - "updated_at": { - Type: schema.TypeInt, - Computed: true, - }, - }, - } -} - -func buildAlertPolicyStruct(d *schema.ResourceData) *newrelic.AlertPolicy { - policy := newrelic.AlertPolicy{ - Name: d.Get("name").(string), - } - - if attr, ok := d.GetOk("incident_preference"); ok { - policy.IncidentPreference = attr.(string) - } - - return &policy -} - -func resourceNewRelicAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - policy := buildAlertPolicyStruct(d) - - log.Printf("[INFO] Creating New Relic alert policy %s", policy.Name) - - policy, err := client.CreateAlertPolicy(*policy) - if err != nil { - return err - } - - d.SetId(strconv.Itoa(policy.ID)) - - return nil -} - -func resourceNewRelicAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - id, err := strconv.ParseInt(d.Id(), 10, 32) - if err != nil { - return err - } - - log.Printf("[INFO] Reading New Relic alert policy %v", id) - - policy, err := client.GetAlertPolicy(int(id)) - if err != nil { - if err == newrelic.ErrNotFound { - d.SetId("") - return nil - } - - return err - } - - d.Set("name", policy.Name) - d.Set("incident_preference", policy.IncidentPreference) - d.Set("created_at", policy.CreatedAt) - d.Set("updated_at", policy.UpdatedAt) - - return nil -} - -func resourceNewRelicAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - id, err := 
strconv.ParseInt(d.Id(), 10, 32) - if err != nil { - return err - } - - log.Printf("[INFO] Deleting New Relic alert policy %v", id) - - if err := client.DeleteAlertPolicy(int(id)); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go deleted file mode 100644 index df3eee640..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go +++ /dev/null @@ -1,137 +0,0 @@ -package newrelic - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func policyChannelExists(client *newrelic.Client, policyID int, channelID int) (bool, error) { - channel, err := client.GetAlertChannel(channelID) - if err != nil { - if err == newrelic.ErrNotFound { - return false, nil - } - - return false, err - } - - for _, id := range channel.Links.PolicyIDs { - if id == policyID { - return true, nil - } - } - - return false, nil -} - -func resourceNewRelicAlertPolicyChannel() *schema.Resource { - return &schema.Resource{ - Create: resourceNewRelicAlertPolicyChannelCreate, - Read: resourceNewRelicAlertPolicyChannelRead, - // Update: Not currently supported in API - Delete: resourceNewRelicAlertPolicyChannelDelete, - Schema: map[string]*schema.Schema{ - "policy_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "channel_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceNewRelicAlertPolicyChannelCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - policyID := d.Get("policy_id").(int) - channelID := d.Get("channel_id").(int) - - serializedID := serializeIDs([]int{policyID, channelID}) - - log.Printf("[INFO] Creating New Relic alert policy channel %s", serializedID) - - exists, err := policyChannelExists(client, policyID, 
channelID) - if err != nil { - return err - } - - if !exists { - err = client.UpdateAlertPolicyChannels(policyID, []int{channelID}) - if err != nil { - return err - } - } - - d.SetId(serializedID) - - return nil -} - -func resourceNewRelicAlertPolicyChannelRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - channelID := ids[1] - - log.Printf("[INFO] Reading New Relic alert policy channel %s", d.Id()) - - exists, err := policyChannelExists(client, policyID, channelID) - if err != nil { - return err - } - - if !exists { - d.SetId("") - return nil - } - - d.Set("policy_id", policyID) - d.Set("channel_id", channelID) - - return nil -} - -func resourceNewRelicAlertPolicyChannelDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*newrelic.Client) - - ids, err := parseIDs(d.Id(), 2) - if err != nil { - return err - } - - policyID := ids[0] - channelID := ids[1] - - log.Printf("[INFO] Deleting New Relic alert policy channel %s", d.Id()) - - exists, err := policyChannelExists(client, policyID, channelID) - if err != nil { - return err - } - - if exists { - if err := client.DeleteAlertPolicyChannel(policyID, channelID); err != nil { - switch err { - case newrelic.ErrNotFound: - return nil - } - return err - } - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go deleted file mode 100644 index 7caef10df..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package newrelic - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func 
TestAccNewRelicAlertPolicyChannel_Basic(t *testing.T) { - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertPolicyChannelDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertPolicyChannelConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"), - ), - }, - resource.TestStep{ - Config: testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"), - ), - }, - }, - }) -} - -func testAccCheckNewRelicAlertPolicyChannelDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*newrelic.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "newrelic_alert_policy_channel" { - continue - } - - ids, err := parseIDs(r.Primary.ID, 2) - if err != nil { - return err - } - - policyID := ids[0] - channelID := ids[1] - - exists, err := policyChannelExists(client, policyID, channelID) - if err != nil { - return err - } - - if exists { - return fmt.Errorf("Resource still exists") - } - } - return nil -} - -func testAccCheckNewRelicAlertPolicyChannelExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No resource ID is set") - } - - client := testAccProvider.Meta().(*newrelic.Client) - - ids, err := parseIDs(rs.Primary.ID, 2) - if err != nil { - return err - } - - policyID := ids[0] - channelID := ids[1] - - exists, err := policyChannelExists(client, policyID, channelID) - if err != nil { - return err - } - if !exists { - return fmt.Errorf("Resource not found: %v", rs.Primary.ID) - } - - return nil - } -} - 
-func testAccCheckNewRelicAlertPolicyChannelConfig(rName string) string { - return fmt.Sprintf(` -resource "newrelic_alert_policy" "foo" { - name = "tf-test-%[1]s" -} - -resource "newrelic_alert_channel" "foo" { - name = "tf-test-%[1]s" - type = "email" - - configuration = { - recipients = "foo@example.com" - include_json_attachment = "1" - } -} - -resource "newrelic_alert_policy_channel" "foo" { - policy_id = "${newrelic_alert_policy.foo.id}" - channel_id = "${newrelic_alert_channel.foo.id}" -} -`, rName) -} - -func testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName string) string { - return fmt.Sprintf(` -resource "newrelic_alert_policy" "bar" { - name = "tf-test-updated-%[1]s" -} - -resource "newrelic_alert_channel" "foo" { - name = "tf-test-updated-%[1]s" - type = "email" - - configuration = { - recipients = "bar@example.com" - include_json_attachment = "0" - } -} - -resource "newrelic_alert_policy_channel" "foo" { - policy_id = "${newrelic_alert_policy.bar.id}" - channel_id = "${newrelic_alert_channel.foo.id}" -} -`, rName) -} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go b/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go deleted file mode 100644 index a76b452eb..000000000 --- a/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package newrelic - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - newrelic "github.com/paultyng/go-newrelic/api" -) - -func TestAccNewRelicAlertPolicy_Basic(t *testing.T) { - rName := acctest.RandString(5) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckNewRelicAlertPolicyConfig(rName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-%s", rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_policy.foo", "incident_preference", "PER_POLICY"), - ), - }, - resource.TestStep{ - Config: testAccCheckNewRelicAlertPolicyConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"), - resource.TestCheckResourceAttr( - "newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), - resource.TestCheckResourceAttr( - "newrelic_alert_policy.foo", "incident_preference", "PER_CONDITION"), - ), - }, - }, - }) -} - -func testAccCheckNewRelicAlertPolicyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*newrelic.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "newrelic_alert_policy" { - continue - } - - id, err := strconv.ParseInt(r.Primary.ID, 10, 32) - if err != nil { - return err - } - - _, err = client.GetAlertPolicy(int(id)) - - if err == nil { - return fmt.Errorf("Policy still exists") - } - - } - return nil -} - -func testAccCheckNewRelicAlertPolicyExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No policy ID is set") - } - - client := testAccProvider.Meta().(*newrelic.Client) - - id, err := strconv.ParseInt(rs.Primary.ID, 10, 32) - if err != nil { - return err - } - - found, err := client.GetAlertPolicy(int(id)) - if err != nil { - return err - } - - if strconv.Itoa(found.ID) != rs.Primary.ID { - return fmt.Errorf("Policy not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckNewRelicAlertPolicyConfig(rName string) string { - return fmt.Sprintf(` -resource 
"newrelic_alert_policy" "foo" { - name = "tf-test-%s" -} -`, rName) -} - -func testAccCheckNewRelicAlertPolicyConfigUpdated(rName string) string { - return fmt.Sprintf(` -resource "newrelic_alert_policy" "foo" { - name = "tf-test-updated-%s" - incident_preference = "PER_CONDITION" -} -`, rName) -} diff --git a/builtin/providers/newrelic/validation.go b/builtin/providers/newrelic/validation.go deleted file mode 100644 index 11815b5b5..000000000 --- a/builtin/providers/newrelic/validation.go +++ /dev/null @@ -1,43 +0,0 @@ -package newrelic - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func float64Gte(gte float64) schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(float64) - if !ok { - es = append(es, fmt.Errorf("expected type of %s to be float64", k)) - return - } - - if v >= gte { - return - } - - es = append(es, fmt.Errorf("expected %s to be greater than or equal to %v, got %v", k, gte, v)) - return - } -} - -func intInSlice(valid []int) schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(int) - if !ok { - es = append(es, fmt.Errorf("expected type of %s to be int", k)) - return - } - - for _, p := range valid { - if v == p { - return - } - } - - es = append(es, fmt.Errorf("expected %s to be one of %v, got %v", k, valid, v)) - return - } -} diff --git a/builtin/providers/newrelic/validation_test.go b/builtin/providers/newrelic/validation_test.go deleted file mode 100644 index 03552823b..000000000 --- a/builtin/providers/newrelic/validation_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package newrelic - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/schema" -) - -type testCase struct { - val interface{} - f schema.SchemaValidateFunc - expectedErr *regexp.Regexp -} - -func TestValidationIntInInSlice(t *testing.T) { - runTestCases(t, []testCase{ - { - val: 2, - f: intInSlice([]int{1, 2, 3}), - }, - { - 
val: 4, - f: intInSlice([]int{1, 2, 3}), - expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[1 2 3\\], got 4"), - }, - { - val: "foo", - f: intInSlice([]int{1, 2, 3}), - expectedErr: regexp.MustCompile("expected type of [\\w]+ to be int"), - }, - }) -} - -func TestValidationFloat64Gte(t *testing.T) { - runTestCases(t, []testCase{ - { - val: 1.1, - f: float64Gte(1.1), - }, - { - val: 1.2, - f: float64Gte(1.1), - }, - { - val: "foo", - f: float64Gte(1.1), - expectedErr: regexp.MustCompile("expected type of [\\w]+ to be float64"), - }, - { - val: 0.1, - f: float64Gte(1.1), - expectedErr: regexp.MustCompile("expected [\\w]+ to be greater than or equal to 1.1, got 0.1"), - }, - }) -} - -func runTestCases(t *testing.T, cases []testCase) { - matchErr := func(errs []error, r *regexp.Regexp) bool { - // err must match one provided - for _, err := range errs { - if r.MatchString(err.Error()) { - return true - } - } - - return false - } - - for i, tc := range cases { - _, errs := tc.f(tc.val, "test_property") - - if len(errs) == 0 && tc.expectedErr == nil { - continue - } - - if !matchErr(errs, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) - } - } -} diff --git a/builtin/providers/nomad/provider.go b/builtin/providers/nomad/provider.go deleted file mode 100644 index 61f8603bc..000000000 --- a/builtin/providers/nomad/provider.go +++ /dev/null @@ -1,69 +0,0 @@ -package nomad - -import ( - "fmt" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("NOMAD_ADDR", nil), - Description: "URL of the root of the target Nomad agent.", - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NOMAD_REGION", ""), - Description: "Region of the target Nomad agent.", - }, - "ca_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NOMAD_CACERT", ""), - Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", - }, - "cert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_CERT", ""), - Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", - }, - "key_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_KEY", ""), - Description: "A path to a PEM-encoded private key, required if cert_file is specified.", - }, - }, - - ConfigureFunc: providerConfigure, - - ResourcesMap: map[string]*schema.Resource{ - "nomad_job": resourceJob(), - }, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := api.DefaultConfig() - config.Address = d.Get("address").(string) - config.Region = d.Get("region").(string) - config.TLSConfig.CACert = d.Get("ca_file").(string) - config.TLSConfig.ClientCert = d.Get("cert_file").(string) - config.TLSConfig.ClientKey = d.Get("key_file").(string) - - client, err := api.NewClient(config) - if err != nil { - return nil, fmt.Errorf("failed to configure Nomad API: %s", err) - } - - return client, nil -} diff --git a/builtin/providers/nomad/provider_test.go b/builtin/providers/nomad/provider_test.go deleted file mode 100644 index edbf9abe0..000000000 --- a/builtin/providers/nomad/provider_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package nomad - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// How to run the acceptance tests for this provider: -// -// - Obtain an official Nomad release from 
https://nomadproject.io -// and extract the "nomad" binary -// -// - Run the following to start the Nomad agent in development mode: -// nomad agent -dev -// -// - Run the Terraform acceptance tests as usual: -// make testacc TEST=./builtin/providers/nomad -// -// The tests expect to be run in a fresh, empty Nomad server. - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -var testProvider *schema.Provider -var testProviders map[string]terraform.ResourceProvider - -func init() { - testProvider = Provider().(*schema.Provider) - testProviders = map[string]terraform.ResourceProvider{ - "nomad": testProvider, - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("NOMAD_ADDR"); v == "" { - os.Setenv("NOMAD_ADDR", "http://127.0.0.1:4646") - } -} diff --git a/builtin/providers/nomad/resource_job.go b/builtin/providers/nomad/resource_job.go deleted file mode 100644 index 36998db7f..000000000 --- a/builtin/providers/nomad/resource_job.go +++ /dev/null @@ -1,196 +0,0 @@ -package nomad - -import ( - "bytes" - "encoding/gob" - "fmt" - "log" - "reflect" - "strings" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/jobspec" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceJob() *schema.Resource { - return &schema.Resource{ - Create: resourceJobRegister, - Update: resourceJobRegister, - Delete: resourceJobDeregister, - Read: resourceJobRead, - Exists: resourceJobExists, - - Schema: map[string]*schema.Schema{ - "jobspec": { - Description: "Job specification. 
If you want to point to a file use the file() function.", - Required: true, - Type: schema.TypeString, - DiffSuppressFunc: jobspecDiffSuppress, - }, - - "deregister_on_destroy": { - Description: "If true, the job will be deregistered on destroy.", - Optional: true, - Default: true, - Type: schema.TypeBool, - }, - - "deregister_on_id_change": { - Description: "If true, the job will be deregistered when the job ID changes.", - Optional: true, - Default: true, - Type: schema.TypeBool, - }, - }, - } -} - -func resourceJobRegister(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - // Get the jobspec itself - jobspecRaw := d.Get("jobspec").(string) - - // Parse it - jobspecStruct, err := jobspec.Parse(strings.NewReader(jobspecRaw)) - if err != nil { - return fmt.Errorf("error parsing jobspec: %s", err) - } - - // Initialize and validate - jobspecStruct.Canonicalize() - if err := jobspecStruct.Validate(); err != nil { - return fmt.Errorf("Error validating job: %v", err) - } - - // If we have an ID and its not equal to this jobspec, then we - // have to deregister the old job before we register the new job. - prevId := d.Id() - if !d.Get("deregister_on_id_change").(bool) { - // If we aren't deregistering on ID change, just pretend we - // don't have a prior ID. - prevId = "" - } - if prevId != "" && prevId != jobspecStruct.ID { - log.Printf( - "[INFO] Deregistering %q before registering %q", - prevId, jobspecStruct.ID) - - log.Printf("[DEBUG] Deregistering job: %q", prevId) - _, _, err := client.Jobs().Deregister(prevId, nil) - if err != nil { - return fmt.Errorf( - "error deregistering previous job %q "+ - "before registering new job %q: %s", - prevId, jobspecStruct.ID, err) - } - - // Success! Clear our state. 
- d.SetId("") - } - - // Convert it so that we can use it with the API - jobspecAPI, err := convertStructJob(jobspecStruct) - if err != nil { - return fmt.Errorf("error converting jobspec: %s", err) - } - - // Register the job - _, _, err = client.Jobs().Register(jobspecAPI, nil) - if err != nil { - return fmt.Errorf("error applying jobspec: %s", err) - } - - d.SetId(jobspecAPI.ID) - - return nil -} - -func resourceJobDeregister(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - // If deregistration is disabled, then do nothing - if !d.Get("deregister_on_destroy").(bool) { - log.Printf( - "[WARN] Job %q will not deregister since 'deregister_on_destroy'"+ - " is false", d.Id()) - return nil - } - - id := d.Id() - log.Printf("[DEBUG] Deregistering job: %q", id) - _, _, err := client.Jobs().Deregister(id, nil) - if err != nil { - return fmt.Errorf("error deregistering job: %s", err) - } - - return nil -} - -func resourceJobExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*api.Client) - - id := d.Id() - log.Printf("[DEBUG] Checking if job exists: %q", id) - _, _, err := client.Jobs().Info(id, nil) - if err != nil { - // As of Nomad 0.4.1, the API client returns an error for 404 - // rather than a nil result, so we must check this way. - if strings.Contains(err.Error(), "404") { - return false, nil - } - - return true, fmt.Errorf("error checking for job: %#v", err) - } - - return true, nil -} - -func resourceJobRead(d *schema.ResourceData, meta interface{}) error { - // We don't do anything at the moment. Exists is used to - // remove non-existent jobs but read doesn't have to do anything. - return nil -} - -// convertStructJob is used to take a *structs.Job and convert it to an *api.Job. -// -// This is unfortunate but it is how Nomad itself does it (this is copied -// line for line from Nomad). We'll mimic them exactly to get this done. 
-func convertStructJob(in *structs.Job) (*api.Job, error) { - gob.Register([]map[string]interface{}{}) - gob.Register([]interface{}{}) - var apiJob *api.Job - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(in); err != nil { - return nil, err - } - if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil { - return nil, err - } - return apiJob, nil -} - -// jobspecDiffSuppress is the DiffSuppressFunc used by the schema to -// check if two jobspecs are equal. -func jobspecDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Parse the old job - oldJob, err := jobspec.Parse(strings.NewReader(old)) - if err != nil { - return false - } - - // Parse the new job - newJob, err := jobspec.Parse(strings.NewReader(new)) - if err != nil { - return false - } - - // Init - oldJob.Canonicalize() - newJob.Canonicalize() - - // Check for jobspec equality - return reflect.DeepEqual(oldJob, newJob) -} diff --git a/builtin/providers/nomad/resource_job_test.go b/builtin/providers/nomad/resource_job_test.go deleted file mode 100644 index 7e71ce6b9..000000000 --- a/builtin/providers/nomad/resource_job_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package nomad - -import ( - "errors" - "fmt" - "strings" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/hashicorp/nomad/api" -) - -func TestResourceJob_basic(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - { - Config: testResourceJob_initialConfig, - Check: testResourceJob_initialCheck, - }, - }, - - CheckDestroy: testResourceJob_checkDestroy("foo"), - }) -} - -func TestResourceJob_refresh(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - { - Config: testResourceJob_initialConfig, - Check: testResourceJob_initialCheck, - }, - - // This should successfully cause the 
job to be recreated, - // testing the Exists function. - { - PreConfig: testResourceJob_deregister(t, "foo"), - Config: testResourceJob_initialConfig, - }, - }, - }) -} - -func TestResourceJob_disableDestroyDeregister(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - { - Config: testResourceJob_noDestroy, - Check: testResourceJob_initialCheck, - }, - - // Destroy with our setting set - { - Destroy: true, - Config: testResourceJob_noDestroy, - Check: testResourceJob_checkExists, - }, - - // Re-apply without the setting set - { - Config: testResourceJob_initialConfig, - Check: testResourceJob_checkExists, - }, - }, - }) -} - -func TestResourceJob_idChange(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - { - Config: testResourceJob_initialConfig, - Check: testResourceJob_initialCheck, - }, - - // Change our ID - { - Config: testResourceJob_updateConfig, - Check: testResourceJob_updateCheck, - }, - }, - }) -} - -func TestResourceJob_parameterizedJob(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - { - Config: testResourceJob_parameterizedJob, - Check: testResourceJob_initialCheck, - }, - }, - }) -} - -var testResourceJob_initialConfig = ` -resource "nomad_job" "test" { - jobspec = < 1 { - panic(fmt.Sprintf("list too long %#v", l)) - } - if len(l) == 0 { - return - } - if l[0] == nil { - return - } - - d := l[0].(map[string]interface{}) - - mr := reflect.ValueOf(m).Elem() - for _, f := range metaFields { - val, present := d[f.NameInDynamic] - if present { - fr := mr.Field(f.StructIndex) - fr.Set(reflect.ValueOf(val)) - } - - feed, present := d[f.NameInDynamicForFeed] - if present && feed != "" { - if feed == nil { - panic("unexpected nil") - } - fr := mr.Field(f.StructIndex) - 
fr.Set(reflect.ValueOf(map[string]interface{}{"feed": feed.(string)})) - } - } -} diff --git a/builtin/providers/ns1/permissions.go b/builtin/providers/ns1/permissions.go deleted file mode 100644 index 5ef4ac8f0..000000000 --- a/builtin/providers/ns1/permissions.go +++ /dev/null @@ -1,184 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func addPermsSchema(s map[string]*schema.Schema) map[string]*schema.Schema { - s["dns_view_zones"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["dns_manage_zones"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["dns_zones_allow_by_default"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["dns_zones_deny"] = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - } - s["dns_zones_allow"] = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - } - s["data_push_to_datafeeds"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["data_manage_datasources"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["data_manage_datafeeds"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_users"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_payment_methods"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_plan"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_teams"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_apikeys"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_manage_account_settings"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["account_view_activity_log"] = &schema.Schema{ - Type: schema.TypeBool, - 
Optional: true, - } - s["account_view_invoices"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["monitoring_manage_lists"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["monitoring_manage_jobs"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - s["monitoring_view_jobs"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - return s -} - -func permissionsToResourceData(d *schema.ResourceData, permissions account.PermissionsMap) { - d.Set("dns_view_zones", permissions.DNS.ViewZones) - d.Set("dns_manage_zones", permissions.DNS.ManageZones) - d.Set("dns_zones_allow_by_default", permissions.DNS.ZonesAllowByDefault) - d.Set("dns_zones_deny", permissions.DNS.ZonesDeny) - d.Set("dns_zones_allow", permissions.DNS.ZonesAllow) - d.Set("data_push_to_datafeeds", permissions.Data.PushToDatafeeds) - d.Set("data_manage_datasources", permissions.Data.ManageDatasources) - d.Set("data_manage_datafeeds", permissions.Data.ManageDatafeeds) - d.Set("account_manage_users", permissions.Account.ManageUsers) - d.Set("account_manage_payment_methods", permissions.Account.ManagePaymentMethods) - d.Set("account_manage_plan", permissions.Account.ManagePlan) - d.Set("account_manage_teams", permissions.Account.ManageTeams) - d.Set("account_manage_apikeys", permissions.Account.ManageApikeys) - d.Set("account_manage_account_settings", permissions.Account.ManageAccountSettings) - d.Set("account_view_activity_log", permissions.Account.ViewActivityLog) - d.Set("account_view_invoices", permissions.Account.ViewInvoices) - d.Set("monitoring_manage_lists", permissions.Monitoring.ManageLists) - d.Set("monitoring_manage_jobs", permissions.Monitoring.ManageJobs) - d.Set("monitoring_view_jobs", permissions.Monitoring.ViewJobs) -} - -func resourceDataToPermissions(d *schema.ResourceData) account.PermissionsMap { - var p account.PermissionsMap - if v, ok := d.GetOk("dns_view_zones"); ok { - p.DNS.ViewZones = v.(bool) - } - if v, ok := 
d.GetOk("dns_manage_zones"); ok { - p.DNS.ManageZones = v.(bool) - } - if v, ok := d.GetOk("dns_zones_allow_by_default"); ok { - p.DNS.ZonesAllowByDefault = v.(bool) - } - if v, ok := d.GetOk("dns_zones_deny"); ok { - denyRaw := v.([]interface{}) - p.DNS.ZonesDeny = make([]string, len(denyRaw)) - for i, deny := range denyRaw { - p.DNS.ZonesDeny[i] = deny.(string) - } - } else { - p.DNS.ZonesDeny = make([]string, 0) - } - if v, ok := d.GetOk("dns_zones_allow"); ok { - allowRaw := v.([]interface{}) - p.DNS.ZonesAllow = make([]string, len(allowRaw)) - for i, allow := range allowRaw { - p.DNS.ZonesAllow[i] = allow.(string) - } - } else { - p.DNS.ZonesAllow = make([]string, 0) - } - if v, ok := d.GetOk("data_push_to_datafeeds"); ok { - p.Data.PushToDatafeeds = v.(bool) - } - if v, ok := d.GetOk("data_manage_datasources"); ok { - p.Data.ManageDatasources = v.(bool) - } - if v, ok := d.GetOk("data_manage_datafeeds"); ok { - p.Data.ManageDatafeeds = v.(bool) - } - if v, ok := d.GetOk("account_manage_users"); ok { - p.Account.ManageUsers = v.(bool) - } - if v, ok := d.GetOk("account_manage_payment_methods"); ok { - p.Account.ManagePaymentMethods = v.(bool) - } - if v, ok := d.GetOk("account_manage_plan"); ok { - p.Account.ManagePlan = v.(bool) - } - if v, ok := d.GetOk("account_manage_teams"); ok { - p.Account.ManageTeams = v.(bool) - } - if v, ok := d.GetOk("account_manage_apikeys"); ok { - p.Account.ManageApikeys = v.(bool) - } - if v, ok := d.GetOk("account_manage_account_settings"); ok { - p.Account.ManageAccountSettings = v.(bool) - } - if v, ok := d.GetOk("account_view_activity_log"); ok { - p.Account.ViewActivityLog = v.(bool) - } - if v, ok := d.GetOk("account_view_invoices"); ok { - p.Account.ViewInvoices = v.(bool) - } - if v, ok := d.GetOk("monitoring_manage_lists"); ok { - p.Monitoring.ManageLists = v.(bool) - } - if v, ok := d.GetOk("monitoring_manage_jobs"); ok { - p.Monitoring.ManageJobs = v.(bool) - } - if v, ok := d.GetOk("monitoring_view_jobs"); ok { - 
p.Monitoring.ViewJobs = v.(bool) - } - return p -} diff --git a/builtin/providers/ns1/provider.go b/builtin/providers/ns1/provider.go deleted file mode 100644 index ab0f54611..000000000 --- a/builtin/providers/ns1/provider.go +++ /dev/null @@ -1,67 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "apikey": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("NS1_APIKEY", nil), - Description: descriptions["api_key"], - }, - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NS1_ENDPOINT", nil), - Description: descriptions["endpoint"], - }, - "ignore_ssl": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("NS1_IGNORE_SSL", nil), - Description: descriptions["ignore_ssl"], - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "ns1_zone": zoneResource(), - "ns1_record": recordResource(), - "ns1_datasource": dataSourceResource(), - "ns1_datafeed": dataFeedResource(), - "ns1_monitoringjob": monitoringJobResource(), - "ns1_notifylist": notifyListResource(), - "ns1_user": userResource(), - "ns1_apikey": apikeyResource(), - "ns1_team": teamResource(), - }, - ConfigureFunc: ns1Configure, - } -} - -func ns1Configure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Key: d.Get("apikey").(string), - } - - if v, ok := d.GetOk("endpoint"); ok { - config.Endpoint = v.(string) - } - if v, ok := d.GetOk("ignore_ssl"); ok { - config.IgnoreSSL = v.(bool) - } - - return config.Client() -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "api_key": "The ns1 API key, this is required", - } -} diff --git 
a/builtin/providers/ns1/provider_test.go b/builtin/providers/ns1/provider_test.go deleted file mode 100644 index d02ea7b28..000000000 --- a/builtin/providers/ns1/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package ns1 - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "ns1": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("NS1_APIKEY"); v == "" { - t.Fatal("NS1_APIKEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/ns1/resource_apikey.go b/builtin/providers/ns1/resource_apikey.go deleted file mode 100644 index 1ce1d5add..000000000 --- a/builtin/providers/ns1/resource_apikey.go +++ /dev/null @@ -1,109 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func apikeyResource() *schema.Resource { - s := map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "teams": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - } - s = addPermsSchema(s) - return &schema.Resource{ - Schema: s, - Create: ApikeyCreate, - Read: ApikeyRead, - Update: ApikeyUpdate, - Delete: ApikeyDelete, - } -} - -func 
apikeyToResourceData(d *schema.ResourceData, k *account.APIKey) error { - d.SetId(k.ID) - d.Set("name", k.Name) - d.Set("key", k.Key) - d.Set("teams", k.TeamIDs) - permissionsToResourceData(d, k.Permissions) - return nil -} - -func resourceDataToApikey(k *account.APIKey, d *schema.ResourceData) error { - k.ID = d.Id() - k.Name = d.Get("name").(string) - if v, ok := d.GetOk("teams"); ok { - teamsRaw := v.([]interface{}) - k.TeamIDs = make([]string, len(teamsRaw)) - for i, team := range teamsRaw { - k.TeamIDs[i] = team.(string) - } - } else { - k.TeamIDs = make([]string, 0) - } - k.Permissions = resourceDataToPermissions(d) - return nil -} - -// ApikeyCreate creates ns1 API key -func ApikeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - k := account.APIKey{} - if err := resourceDataToApikey(&k, d); err != nil { - return err - } - if _, err := client.APIKeys.Create(&k); err != nil { - return err - } - return apikeyToResourceData(d, &k) -} - -// ApikeyRead reads API key from ns1 -func ApikeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - k, _, err := client.APIKeys.Get(d.Id()) - if err != nil { - return err - } - return apikeyToResourceData(d, k) -} - -//ApikeyDelete deletes the given ns1 api key -func ApikeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.APIKeys.Delete(d.Id()) - d.SetId("") - return err -} - -//ApikeyUpdate updates the given api key in ns1 -func ApikeyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - k := account.APIKey{ - ID: d.Id(), - } - if err := resourceDataToApikey(&k, d); err != nil { - return err - } - if _, err := client.APIKeys.Update(&k); err != nil { - return err - } - return apikeyToResourceData(d, &k) -} diff --git a/builtin/providers/ns1/resource_datafeed.go b/builtin/providers/ns1/resource_datafeed.go deleted file mode 100644 index 98508c9bb..000000000 --- 
a/builtin/providers/ns1/resource_datafeed.go +++ /dev/null @@ -1,92 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/data" -) - -func dataFeedResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "source_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - Create: DataFeedCreate, - Read: DataFeedRead, - Update: DataFeedUpdate, - Delete: DataFeedDelete, - } -} - -func dataFeedToResourceData(d *schema.ResourceData, f *data.Feed) { - d.SetId(f.ID) - d.Set("name", f.Name) - d.Set("config", f.Config) -} - -func resourceDataToDataFeed(d *schema.ResourceData) *data.Feed { - return &data.Feed{ - Name: d.Get("name").(string), - SourceID: d.Get("source_id").(string), - Config: d.Get("config").(map[string]interface{}), - } -} - -// DataFeedCreate creates an ns1 datafeed -func DataFeedCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - f := resourceDataToDataFeed(d) - if _, err := client.DataFeeds.Create(d.Get("source_id").(string), f); err != nil { - return err - } - dataFeedToResourceData(d, f) - return nil -} - -// DataFeedRead reads the datafeed for the given ID from ns1 -func DataFeedRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - f, _, err := client.DataFeeds.Get(d.Get("source_id").(string), d.Id()) - if err != nil { - return err - } - dataFeedToResourceData(d, f) - return nil -} - -// DataFeedDelete delets the given datafeed from ns1 -func DataFeedDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := 
client.DataFeeds.Delete(d.Get("source_id").(string), d.Id()) - d.SetId("") - return err -} - -// DataFeedUpdate updates the given datafeed with modified parameters -func DataFeedUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - f := resourceDataToDataFeed(d) - f.ID = d.Id() - if _, err := client.DataFeeds.Update(d.Get("source_id").(string), f); err != nil { - return err - } - dataFeedToResourceData(d, f) - return nil -} diff --git a/builtin/providers/ns1/resource_datafeed_test.go b/builtin/providers/ns1/resource_datafeed_test.go deleted file mode 100644 index 4410eeb05..000000000 --- a/builtin/providers/ns1/resource_datafeed_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package ns1 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/data" -) - -func TestAccDataFeed_basic(t *testing.T) { - var dataFeed data.Feed - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataFeedDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataFeedBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataFeedExists("ns1_datafeed.foobar", "ns1_datasource.api", &dataFeed), - testAccCheckDataFeedName(&dataFeed, "terraform test"), - testAccCheckDataFeedConfig(&dataFeed, "label", "exampledc2"), - ), - }, - }, - }) -} - -func TestAccDataFeed_updated(t *testing.T) { - var dataFeed data.Feed - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataFeedDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataFeedBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataFeedExists("ns1_datafeed.foobar", "ns1_datasource.api", &dataFeed), - testAccCheckDataFeedName(&dataFeed, 
"terraform test"), - testAccCheckDataFeedConfig(&dataFeed, "label", "exampledc2"), - ), - }, - resource.TestStep{ - Config: testAccDataFeedUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataFeedExists("ns1_datafeed.foobar", "ns1_datasource.api", &dataFeed), - testAccCheckDataFeedName(&dataFeed, "terraform test"), - testAccCheckDataFeedConfig(&dataFeed, "label", "exampledc3"), - ), - }, - }, - }) -} - -func testAccCheckDataFeedExists(n string, dsrc string, dataFeed *data.Feed) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - ds, ok := s.RootModule().Resources[dsrc] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("NoID is set") - } - - if ds.Primary.ID == "" { - return fmt.Errorf("NoID is set for the datasource") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundFeed, _, err := client.DataFeeds.Get(ds.Primary.Attributes["id"], rs.Primary.Attributes["id"]) - - p := rs.Primary - - if err != nil { - return err - } - - if foundFeed.Name != p.Attributes["name"] { - return fmt.Errorf("DataFeed not found") - } - - *dataFeed = *foundFeed - - return nil - } -} - -func testAccCheckDataFeedDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - var dataFeedID string - var dataSourceID string - - for _, rs := range s.RootModule().Resources { - - if rs.Type == "ns1_datasource" { - dataSourceID = rs.Primary.Attributes["id"] - } - - if rs.Type == "ns1_datafeed" { - dataFeedID = rs.Primary.Attributes["id"] - } - } - - df, _, _ := client.DataFeeds.Get(dataSourceID, dataFeedID) - - if df != nil { - return fmt.Errorf("DataFeed still exists: %#v", df) - } - - return nil -} - -func testAccCheckDataFeedName(dataFeed *data.Feed, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if dataFeed.Name != expected { - return fmt.Errorf("Name: got: %#v want: %#v", dataFeed.Name, 
expected) - } - - return nil - } -} - -func testAccCheckDataFeedConfig(dataFeed *data.Feed, key, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if dataFeed.Config[key] != expected { - return fmt.Errorf("Config[%s]: got: %#v, want: %s", key, dataFeed.Config[key], expected) - } - - return nil - } -} - -const testAccDataFeedBasic = ` -resource "ns1_datasource" "api" { - name = "terraform test" - sourcetype = "nsone_v1" -} - -resource "ns1_datafeed" "foobar" { - name = "terraform test" - source_id = "${ns1_datasource.api.id}" - config { - label = "exampledc2" - } -}` - -const testAccDataFeedUpdated = ` -resource "ns1_datasource" "api" { - name = "terraform test" - sourcetype = "nsone_v1" -} - -resource "ns1_datafeed" "foobar" { - name = "terraform test" - source_id = "${ns1_datasource.api.id}" - config { - label = "exampledc3" - } -}` diff --git a/builtin/providers/ns1/resource_datasource.go b/builtin/providers/ns1/resource_datasource.go deleted file mode 100644 index 66e359696..000000000 --- a/builtin/providers/ns1/resource_datasource.go +++ /dev/null @@ -1,86 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/data" -) - -func dataSourceResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "sourcetype": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - Create: DataSourceCreate, - Read: DataSourceRead, - Update: DataSourceUpdate, - Delete: DataSourceDelete, - } -} - -func dataSourceToResourceData(d *schema.ResourceData, s *data.Source) { - d.SetId(s.ID) - d.Set("name", s.Name) - d.Set("sourcetype", s.Type) - 
d.Set("config", s.Config) -} - -// DataSourceCreate creates an ns1 datasource -func DataSourceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - s := data.NewSource(d.Get("name").(string), d.Get("sourcetype").(string)) - s.Config = d.Get("config").(map[string]interface{}) - if _, err := client.DataSources.Create(s); err != nil { - return err - } - dataSourceToResourceData(d, s) - return nil -} - -// DataSourceRead fetches info for the given datasource from ns1 -func DataSourceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - s, _, err := client.DataSources.Get(d.Id()) - if err != nil { - return err - } - dataSourceToResourceData(d, s) - return nil -} - -// DataSourceDelete deteltes the given datasource from ns1 -func DataSourceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.DataSources.Delete(d.Id()) - d.SetId("") - return err -} - -// DataSourceUpdate updates the datasource with given parameters -func DataSourceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - s := data.NewSource(d.Get("name").(string), d.Get("sourcetype").(string)) - s.ID = d.Id() - if _, err := client.DataSources.Update(s); err != nil { - return err - } - dataSourceToResourceData(d, s) - return nil -} diff --git a/builtin/providers/ns1/resource_datasource_test.go b/builtin/providers/ns1/resource_datasource_test.go deleted file mode 100644 index 4c880a359..000000000 --- a/builtin/providers/ns1/resource_datasource_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package ns1 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/data" -) - -func TestAccDataSource_basic(t *testing.T) { - var dataSource data.Source - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataSourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceExists("ns1_datasource.foobar", &dataSource), - testAccCheckDataSourceName(&dataSource, "terraform test"), - testAccCheckDataSourceType(&dataSource, "nsone_v1"), - ), - }, - }, - }) -} - -func TestAccDataSource_updated(t *testing.T) { - var dataSource data.Source - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataSourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDataSourceBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceExists("ns1_datasource.foobar", &dataSource), - testAccCheckDataSourceName(&dataSource, "terraform test"), - testAccCheckDataSourceType(&dataSource, "nsone_v1"), - ), - }, - resource.TestStep{ - Config: testAccDataSourceUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceExists("ns1_datasource.foobar", &dataSource), - testAccCheckDataSourceName(&dataSource, "terraform test"), - testAccCheckDataSourceType(&dataSource, "nsone_monitoring"), - ), - }, - }, - }) -} - -func testAccCheckDataSourceExists(n string, dataSource *data.Source) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("NoID is set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundSource, _, err := client.DataSources.Get(rs.Primary.Attributes["id"]) - - p := rs.Primary - - if err != nil { - return err - } - - if foundSource.Name != p.Attributes["name"] { - return fmt.Errorf("Datasource not found") - } - - *dataSource = *foundSource - - return nil - } -} - -func testAccCheckDataSourceDestroy(s 
*terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_datasource" { - continue - } - - _, _, err := client.DataSources.Get(rs.Primary.Attributes["id"]) - - if err == nil { - return fmt.Errorf("Datasource still exists") - } - } - - return nil -} - -func testAccCheckDataSourceName(dataSource *data.Source, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if dataSource.Name != expected { - return fmt.Errorf("Name: got: %#v want: %#v", dataSource.Name, expected) - } - - return nil - } -} - -func testAccCheckDataSourceType(dataSource *data.Source, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if dataSource.Type != expected { - return fmt.Errorf("Type: got: %#v want: %#v", dataSource.Type, expected) - } - - return nil - } -} - -const testAccDataSourceBasic = ` -resource "ns1_datasource" "foobar" { - name = "terraform test" - sourcetype = "nsone_v1" -}` - -const testAccDataSourceUpdated = ` -resource "ns1_datasource" "foobar" { - name = "terraform test" - sourcetype = "nsone_monitoring" -}` diff --git a/builtin/providers/ns1/resource_monitoringjob.go b/builtin/providers/ns1/resource_monitoringjob.go deleted file mode 100644 index 65d30501c..000000000 --- a/builtin/providers/ns1/resource_monitoringjob.go +++ /dev/null @@ -1,298 +0,0 @@ -package ns1 - -import ( - "fmt" - "regexp" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/monitor" -) - -func monitoringJobResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "job_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "regions": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: 
&schema.Schema{ - Type: schema.TypeString, - }, - }, - "frequency": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Required: true, - }, - // Optional - "active": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "rapid_recheck": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "policy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "quorum", - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^(all|one|quorum)$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only all, one, quorum allowed in %q", k)) - } - return - }, - }, - "notes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "notify_delay": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "notify_repeat": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "notify_failback": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "notify_regional": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "notify_list": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "rules": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "comparison": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - // Computed - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - Create: MonitoringJobCreate, - Read: MonitoringJobRead, - Update: MonitoringJobUpdate, - Delete: MonitoringJobDelete, - } -} - -func monitoringJobToResourceData(d *schema.ResourceData, r *monitor.Job) error { - d.SetId(r.ID) - d.Set("name", r.Name) - 
d.Set("job_type", r.Type) - d.Set("active", r.Active) - d.Set("regions", r.Regions) - d.Set("frequency", r.Frequency) - d.Set("rapid_recheck", r.RapidRecheck) - config := make(map[string]string) - for k, v := range r.Config { - if k == "ssl" { - if v.(bool) { - config[k] = "1" - } else { - config[k] = "0" - } - } else { - switch t := v.(type) { - case string: - config[k] = t - case float64: - config[k] = strconv.FormatFloat(t, 'f', -1, 64) - } - } - } - err := d.Set("config", config) - if err != nil { - panic(fmt.Errorf("[DEBUG] Error setting Config error: %#v %#v", r.Config, err)) - } - d.Set("policy", r.Policy) - d.Set("notes", r.Notes) - d.Set("frequency", r.Frequency) - d.Set("notify_delay", r.NotifyDelay) - d.Set("notify_repeat", r.NotifyRepeat) - d.Set("notify_regional", r.NotifyRegional) - d.Set("notify_failback", r.NotifyFailback) - d.Set("notify_list", r.NotifyListID) - if len(r.Rules) > 0 { - rules := make([]map[string]interface{}, len(r.Rules)) - for i, r := range r.Rules { - m := make(map[string]interface{}) - m["value"] = r.Value - m["comparison"] = r.Comparison - m["key"] = r.Key - rules[i] = m - } - d.Set("rules", rules) - } - return nil -} - -func resourceDataToMonitoringJob(r *monitor.Job, d *schema.ResourceData) error { - r.ID = d.Id() - r.Name = d.Get("name").(string) - r.Type = d.Get("job_type").(string) - r.Active = d.Get("active").(bool) - rawRegions := d.Get("regions").([]interface{}) - r.Regions = make([]string, len(rawRegions)) - for i, v := range rawRegions { - r.Regions[i] = v.(string) - } - r.Frequency = d.Get("frequency").(int) - r.RapidRecheck = d.Get("rapid_recheck").(bool) - var rawRules []interface{} - if rawRules := d.Get("rules"); rawRules != nil { - r.Rules = make([]*monitor.Rule, len(rawRules.([]interface{}))) - for i, v := range rawRules.([]interface{}) { - rule := v.(map[string]interface{}) - r.Rules[i] = &monitor.Rule{ - Value: rule["value"].(string), - Comparison: rule["comparison"].(string), - Key: rule["key"].(string), - } 
- } - } else { - r.Rules = make([]*monitor.Rule, 0) - } - for i, v := range rawRules { - rule := v.(map[string]interface{}) - r.Rules[i] = &monitor.Rule{ - Comparison: rule["comparison"].(string), - Key: rule["key"].(string), - } - value := rule["value"].(string) - if i, err := strconv.Atoi(value); err == nil { - r.Rules[i].Value = i - } else { - r.Rules[i].Value = value - } - } - config := make(map[string]interface{}) - if rawConfig := d.Get("config"); rawConfig != nil { - for k, v := range rawConfig.(map[string]interface{}) { - if k == "ssl" { - if v.(string) == "1" { - config[k] = true - } - } else { - if i, err := strconv.Atoi(v.(string)); err == nil { - config[k] = i - } else { - config[k] = v - } - } - } - } - r.Config = config - r.RegionScope = "fixed" - r.Policy = d.Get("policy").(string) - if v, ok := d.GetOk("notes"); ok { - r.Notes = v.(string) - } - r.Frequency = d.Get("frequency").(int) - if v, ok := d.GetOk("notify_delay"); ok { - r.NotifyDelay = v.(int) - } - if v, ok := d.GetOk("notify_repeat"); ok { - r.NotifyRepeat = v.(int) - } - if v, ok := d.GetOk("notify_regional"); ok { - r.NotifyRegional = v.(bool) - } - if v, ok := d.GetOk("notify_failback"); ok { - r.NotifyFailback = v.(bool) - } - if v, ok := d.GetOk("notify_list"); ok { - r.NotifyListID = v.(string) - } - return nil -} - -// MonitoringJobCreate Creates monitoring job in ns1 -func MonitoringJobCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - j := monitor.Job{} - if err := resourceDataToMonitoringJob(&j, d); err != nil { - return err - } - if _, err := client.Jobs.Create(&j); err != nil { - return err - } - return monitoringJobToResourceData(d, &j) -} - -// MonitoringJobRead reads the given monitoring job from ns1 -func MonitoringJobRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - j, _, err := client.Jobs.Get(d.Id()) - if err != nil { - return err - } - return monitoringJobToResourceData(d, j) -} - -// 
MonitoringJobDelete deteltes the given monitoring job from ns1 -func MonitoringJobDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.Jobs.Delete(d.Id()) - d.SetId("") - return err -} - -// MonitoringJobUpdate updates the given monitoring job -func MonitoringJobUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - j := monitor.Job{ - ID: d.Id(), - } - if err := resourceDataToMonitoringJob(&j, d); err != nil { - return err - } - if _, err := client.Jobs.Update(&j); err != nil { - return err - } - return monitoringJobToResourceData(d, &j) -} diff --git a/builtin/providers/ns1/resource_monitoringjob_test.go b/builtin/providers/ns1/resource_monitoringjob_test.go deleted file mode 100644 index c470e9ffb..000000000 --- a/builtin/providers/ns1/resource_monitoringjob_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package ns1 - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/monitor" -) - -func TestAccMonitoringJob_basic(t *testing.T) { - var mj monitor.Job - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckMonitoringJobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccMonitoringJobBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckMonitoringJobExists("ns1_monitoringjob.it", &mj), - testAccCheckMonitoringJobName(&mj, "terraform test"), - testAccCheckMonitoringJobActive(&mj, true), - testAccCheckMonitoringJobRegions(&mj, []string{"lga"}), - testAccCheckMonitoringJobType(&mj, "tcp"), - testAccCheckMonitoringJobFrequency(&mj, 60), - testAccCheckMonitoringJobRapidRecheck(&mj, false), - testAccCheckMonitoringJobPolicy(&mj, "quorum"), - testAccCheckMonitoringJobConfigSend(&mj, "HEAD / HTTP/1.0\r\n\r\n"), - 
testAccCheckMonitoringJobConfigPort(&mj, 443), - testAccCheckMonitoringJobConfigHost(&mj, "1.2.3.4"), - testAccCheckMonitoringJobRuleValue(&mj, "200 OK"), - testAccCheckMonitoringJobRuleComparison(&mj, "contains"), - testAccCheckMonitoringJobRuleKey(&mj, "output"), - ), - }, - }, - }) -} - -func TestAccMonitoringJob_updated(t *testing.T) { - var mj monitor.Job - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckMonitoringJobDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccMonitoringJobBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckMonitoringJobExists("ns1_monitoringjob.it", &mj), - testAccCheckMonitoringJobName(&mj, "terraform test"), - testAccCheckMonitoringJobActive(&mj, true), - testAccCheckMonitoringJobRegions(&mj, []string{"lga"}), - testAccCheckMonitoringJobType(&mj, "tcp"), - testAccCheckMonitoringJobFrequency(&mj, 60), - testAccCheckMonitoringJobRapidRecheck(&mj, false), - testAccCheckMonitoringJobPolicy(&mj, "quorum"), - testAccCheckMonitoringJobConfigSend(&mj, "HEAD / HTTP/1.0\r\n\r\n"), - testAccCheckMonitoringJobConfigPort(&mj, 443), - testAccCheckMonitoringJobConfigHost(&mj, "1.2.3.4"), - testAccCheckMonitoringJobRuleValue(&mj, "200 OK"), - testAccCheckMonitoringJobRuleComparison(&mj, "contains"), - testAccCheckMonitoringJobRuleKey(&mj, "output"), - ), - }, - resource.TestStep{ - Config: testAccMonitoringJobUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckMonitoringJobExists("ns1_monitoringjob.it", &mj), - testAccCheckMonitoringJobName(&mj, "terraform test"), - testAccCheckMonitoringJobActive(&mj, true), - testAccCheckMonitoringJobRegions(&mj, []string{"lga"}), - testAccCheckMonitoringJobType(&mj, "tcp"), - testAccCheckMonitoringJobFrequency(&mj, 120), - testAccCheckMonitoringJobRapidRecheck(&mj, true), - testAccCheckMonitoringJobPolicy(&mj, "all"), - testAccCheckMonitoringJobConfigSend(&mj, "HEAD / 
HTTP/1.0\r\n\r\n"), - testAccCheckMonitoringJobConfigPort(&mj, 443), - testAccCheckMonitoringJobConfigHost(&mj, "1.1.1.1"), - testAccCheckMonitoringJobRuleValue(&mj, "200"), - testAccCheckMonitoringJobRuleComparison(&mj, "<="), - testAccCheckMonitoringJobRuleKey(&mj, "connect"), - ), - }, - }, - }) -} - -func testAccCheckMonitoringJobState(key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources["ns1_monitoringjob.it"] - if !ok { - return fmt.Errorf("Not found: %s", "ns1_monitoringjob.it") - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - p := rs.Primary - if p.Attributes[key] != value { - return fmt.Errorf( - "%s != %s (actual: %s)", key, value, p.Attributes[key]) - } - - return nil - } -} - -func testAccCheckMonitoringJobExists(n string, monitoringJob *monitor.Job) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Resource not found: %v", n) - } - - id := rs.Primary.ID - if id == "" { - return fmt.Errorf("ID is not set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundMj, _, err := client.Jobs.Get(id) - - if err != nil { - return err - } - - if foundMj.ID != id { - return fmt.Errorf("Monitoring Job not found want: %#v, got %#v", id, foundMj) - } - - *monitoringJob = *foundMj - - return nil - } -} - -func testAccCheckMonitoringJobDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_monitoringjob" { - continue - } - - mj, _, err := client.Jobs.Get(rs.Primary.Attributes["id"]) - - if err == nil { - return fmt.Errorf("Monitoring Job still exists %#v: %#v", err, mj) - } - } - - return nil -} - -func testAccCheckMonitoringJobName(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Name != expected { - return 
fmt.Errorf("Name: got: %#v want: %#v", mj.Name, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobActive(mj *monitor.Job, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Active != expected { - return fmt.Errorf("Active: got: %#v want: %#v", mj.Active, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobRegions(mj *monitor.Job, expected []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !reflect.DeepEqual(mj.Regions, expected) { - return fmt.Errorf("Regions: got: %#v want: %#v", mj.Regions, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobType(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Type != expected { - return fmt.Errorf("Type: got: %#v want: %#v", mj.Type, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobFrequency(mj *monitor.Job, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Frequency != expected { - return fmt.Errorf("Frequency: got: %#v want: %#v", mj.Frequency, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobRapidRecheck(mj *monitor.Job, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.RapidRecheck != expected { - return fmt.Errorf("RapidRecheck: got: %#v want: %#v", mj.RapidRecheck, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobPolicy(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Policy != expected { - return fmt.Errorf("Policy: got: %#v want: %#v", mj.Policy, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobConfigSend(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Config["send"].(string) != expected { - return fmt.Errorf("Config.send: got: %#v want: %#v", mj.Config["send"].(string), 
expected) - } - return nil - } -} - -func testAccCheckMonitoringJobConfigPort(mj *monitor.Job, expected float64) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Config["port"].(float64) != expected { - return fmt.Errorf("Config.port: got: %#v want: %#v", mj.Config["port"].(float64), expected) - } - return nil - } -} - -func testAccCheckMonitoringJobConfigHost(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Config["host"].(string) != expected { - return fmt.Errorf("Config.host: got: %#v want: %#v", mj.Config["host"].(string), expected) - } - return nil - } -} - -func testAccCheckMonitoringJobRuleValue(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Rules[0].Value.(string) != expected { - return fmt.Errorf("Rules[0].Value: got: %#v want: %#v", mj.Rules[0].Value.(string), expected) - } - return nil - } -} - -func testAccCheckMonitoringJobRuleComparison(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Rules[0].Comparison != expected { - return fmt.Errorf("Rules[0].Comparison: got: %#v want: %#v", mj.Rules[0].Comparison, expected) - } - return nil - } -} - -func testAccCheckMonitoringJobRuleKey(mj *monitor.Job, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if mj.Rules[0].Key != expected { - return fmt.Errorf("Rules[0].Key: got: %#v want: %#v", mj.Rules[0].Key, expected) - } - return nil - } -} - -const testAccMonitoringJobBasic = ` -resource "ns1_monitoringjob" "it" { - job_type = "tcp" - name = "terraform test" - - regions = ["lga"] - frequency = 60 - - config = { - ssl = "1", - send = "HEAD / HTTP/1.0\r\n\r\n" - port = 443 - host = "1.2.3.4" - } - rules = { - value = "200 OK" - comparison = "contains" - key = "output" - } -} -` - -const testAccMonitoringJobUpdated = ` -resource "ns1_monitoringjob" "it" { - job_type = "tcp" 
- name = "terraform test" - - active = true - regions = ["lga"] - frequency = 120 - rapid_recheck = true - policy = "all" - - config = { - ssl = "1", - send = "HEAD / HTTP/1.0\r\n\r\n" - port = 443 - host = "1.1.1.1" - } - rules = { - value = 200 - comparison = "<=" - key = "connect" - } -} -` diff --git a/builtin/providers/ns1/resource_notifylist.go b/builtin/providers/ns1/resource_notifylist.go deleted file mode 100644 index c6448d51f..000000000 --- a/builtin/providers/ns1/resource_notifylist.go +++ /dev/null @@ -1,140 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/monitor" -) - -func notifyListResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "notifications": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Required: true, - }, - }, - }, - }, - }, - Create: NotifyListCreate, - Read: NotifyListRead, - Update: NotifyListUpdate, - Delete: NotifyListDelete, - } -} - -func notifyListToResourceData(d *schema.ResourceData, nl *monitor.NotifyList) error { - d.SetId(nl.ID) - d.Set("name", nl.Name) - - if len(nl.Notifications) > 0 { - notifications := make([]map[string]interface{}, len(nl.Notifications)) - for i, n := range nl.Notifications { - ni := make(map[string]interface{}) - ni["type"] = n.Type - if n.Config != nil { - ni["config"] = n.Config - } - notifications[i] = ni - } - d.Set("notifications", notifications) - } - return nil -} - -func resourceDataToNotifyList(nl *monitor.NotifyList, d *schema.ResourceData) error { - nl.ID = d.Id() - - if rawNotifications := 
d.Get("notifications").([]interface{}); len(rawNotifications) > 0 { - ns := make([]*monitor.Notification, len(rawNotifications)) - for i, notificationRaw := range rawNotifications { - ni := notificationRaw.(map[string]interface{}) - config := ni["config"].(map[string]interface{}) - - switch ni["type"].(string) { - case "webhook": - ns[i] = monitor.NewWebNotification(config["url"].(string)) - case "email": - ns[i] = monitor.NewEmailNotification(config["email"].(string)) - case "datafeed": - ns[i] = monitor.NewFeedNotification(config["sourceid"].(string)) - } - } - nl.Notifications = ns - } - return nil -} - -// NotifyListCreate creates an ns1 notifylist -func NotifyListCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - nl := monitor.NewNotifyList(d.Get("name").(string)) - - if err := resourceDataToNotifyList(nl, d); err != nil { - return err - } - - if _, err := client.Notifications.Create(nl); err != nil { - return err - } - - return notifyListToResourceData(d, nl) -} - -// NotifyListRead fetches info for the given notifylist from ns1 -func NotifyListRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - - nl, _, err := client.Notifications.Get(d.Id()) - if err != nil { - return err - } - - return notifyListToResourceData(d, nl) -} - -// NotifyListDelete deletes the given notifylist from ns1 -func NotifyListDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - - _, err := client.Notifications.Delete(d.Id()) - d.SetId("") - - return err -} - -// NotifyListUpdate updates the notifylist with given parameters -func NotifyListUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - - nl := monitor.NewNotifyList(d.Get("name").(string)) - - if err := resourceDataToNotifyList(nl, d); err != nil { - return err - } - - if _, err := client.Notifications.Update(nl); err != nil { - return err - } - - return notifyListToResourceData(d, nl) 
-} diff --git a/builtin/providers/ns1/resource_notifylist_test.go b/builtin/providers/ns1/resource_notifylist_test.go deleted file mode 100644 index bd81a7fe1..000000000 --- a/builtin/providers/ns1/resource_notifylist_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package ns1 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/monitor" -) - -func TestAccNotifyList_basic(t *testing.T) { - var nl monitor.NotifyList - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNotifyListDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNotifyListBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNotifyListExists("ns1_notifylist.test", &nl), - testAccCheckNotifyListName(&nl, "terraform test"), - ), - }, - }, - }) -} - -func TestAccNotifyList_updated(t *testing.T) { - var nl monitor.NotifyList - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNotifyListDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNotifyListBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNotifyListExists("ns1_notifylist.test", &nl), - testAccCheckNotifyListName(&nl, "terraform test"), - ), - }, - resource.TestStep{ - Config: testAccNotifyListUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckNotifyListExists("ns1_notifylist.test", &nl), - testAccCheckNotifyListName(&nl, "terraform test"), - ), - }, - }, - }) -} - -func testAccCheckNotifyListState(key, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources["ns1_notifylist.test"] - if !ok { - return fmt.Errorf("Not found: %s", "ns1_notifylist.test") - } - - if rs.Primary.ID == "" { - return 
fmt.Errorf("No ID is set") - } - - p := rs.Primary - if p.Attributes[key] != value { - return fmt.Errorf( - "%s != %s (actual: %s)", key, value, p.Attributes[key]) - } - - return nil - } -} - -func testAccCheckNotifyListExists(n string, nl *monitor.NotifyList) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Resource not found: %v", n) - } - - id := rs.Primary.ID - if id == "" { - return fmt.Errorf("ID is not set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundNl, _, err := client.Notifications.Get(id) - - if err != nil { - return err - } - - if foundNl.ID != id { - return fmt.Errorf("Notify List not found want: %#v, got %#v", id, foundNl) - } - - *nl = *foundNl - - return nil - } -} - -func testAccCheckNotifyListDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_notifylist" { - continue - } - - nl, _, err := client.Notifications.Get(rs.Primary.Attributes["id"]) - - if err == nil { - return fmt.Errorf("Notify List still exists %#v: %#v", err, nl) - } - } - - return nil -} - -func testAccCheckNotifyListName(nl *monitor.NotifyList, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if nl.Name != expected { - return fmt.Errorf("Name: got: %#v want: %#v", nl.Name, expected) - } - return nil - } -} - -const testAccNotifyListBasic = ` -resource "ns1_notifylist" "test" { - name = "terraform test" - notifications = { - type = "webhook" - config = { - url = "http://localhost:9090" - } - } -} -` - -const testAccNotifyListUpdated = ` -resource "ns1_notifylist" "test" { - name = "terraform test" - notifications = { - type = "webhook" - config = { - url = "http://localhost:9091" - } - } -} -` diff --git a/builtin/providers/ns1/resource_record.go b/builtin/providers/ns1/resource_record.go deleted file mode 100644 index 
add703c3a..000000000 --- a/builtin/providers/ns1/resource_record.go +++ /dev/null @@ -1,368 +0,0 @@ -package ns1 - -import ( - "errors" - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/mitchellh/hashstructure" - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/data" - "gopkg.in/ns1/ns1-go.v2/rest/model/dns" - "gopkg.in/ns1/ns1-go.v2/rest/model/filter" -) - -var recordTypeStringEnum *StringEnum = NewStringEnum([]string{ - "A", - "AAAA", - "ALIAS", - "AFSDB", - "CNAME", - "DNAME", - "HINFO", - "MX", - "NAPTR", - "NS", - "PTR", - "RP", - "SPF", - "SRV", - "TXT", -}) - -func recordResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "domain": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: recordTypeStringEnum.ValidateFunc, - }, - // Optional - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - // "meta": metaSchema, - "link": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "use_client_subnet": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "answers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "answer": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - // "meta": metaSchema, - }, - }, - Set: genericHasher, - }, - "regions": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - 
// "meta": metaSchema, - }, - }, - Set: genericHasher, - }, - "filters": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "disabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - }, - }, - // Computed - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - Create: RecordCreate, - Read: RecordRead, - Update: RecordUpdate, - Delete: RecordDelete, - Importer: &schema.ResourceImporter{State: RecordStateFunc}, - } -} - -func genericHasher(v interface{}) int { - hash, err := hashstructure.Hash(v, nil) - if err != nil { - panic(fmt.Sprintf("error computing hash code for %#v: %s", v, err.Error())) - } - return int(hash) -} - -func recordToResourceData(d *schema.ResourceData, r *dns.Record) error { - d.SetId(r.ID) - d.Set("domain", r.Domain) - d.Set("zone", r.Zone) - d.Set("type", r.Type) - d.Set("ttl", r.TTL) - if r.Link != "" { - d.Set("link", r.Link) - } - // if r.Meta != nil { - // d.State() - // t := metaStructToDynamic(r.Meta) - // d.Set("meta", t) - // } - if r.UseClientSubnet != nil { - d.Set("use_client_subnet", *r.UseClientSubnet) - } - if len(r.Filters) > 0 { - filters := make([]map[string]interface{}, len(r.Filters)) - for i, f := range r.Filters { - m := make(map[string]interface{}) - m["filter"] = f.Type - if f.Disabled { - m["disabled"] = true - } - if f.Config != nil { - m["config"] = f.Config - } - filters[i] = m - } - d.Set("filters", filters) - } - if len(r.Answers) > 0 { - ans := &schema.Set{ - F: genericHasher, - } - log.Printf("Got back from ns1 answers: %+v", r.Answers) - for _, answer := range r.Answers { - ans.Add(answerToMap(*answer)) - } - log.Printf("Setting answers %+v", ans) - err := d.Set("answers", ans) - if err != nil { - return fmt.Errorf("[DEBUG] Error setting 
answers for: %s, error: %#v", r.Domain, err) - } - } - if len(r.Regions) > 0 { - regions := make([]map[string]interface{}, 0, len(r.Regions)) - for regionName, _ := range r.Regions { - newRegion := make(map[string]interface{}) - newRegion["name"] = regionName - // newRegion["meta"] = metaStructToDynamic(®ion.Meta) - regions = append(regions, newRegion) - } - log.Printf("Setting regions %+v", regions) - err := d.Set("regions", regions) - if err != nil { - return fmt.Errorf("[DEBUG] Error setting regions for: %s, error: %#v", r.Domain, err) - } - } - return nil -} - -func answerToMap(a dns.Answer) map[string]interface{} { - m := make(map[string]interface{}) - m["answer"] = strings.Join(a.Rdata, " ") - if a.RegionName != "" { - m["region"] = a.RegionName - } - // if a.Meta != nil { - // m["meta"] = metaStructToDynamic(a.Meta) - // } - return m -} - -func btoi(b bool) int { - if b { - return 1 - } - return 0 -} - -func resourceDataToRecord(r *dns.Record, d *schema.ResourceData) error { - r.ID = d.Id() - if answers := d.Get("answers").(*schema.Set); answers.Len() > 0 { - al := make([]*dns.Answer, answers.Len()) - for i, answerRaw := range answers.List() { - answer := answerRaw.(map[string]interface{}) - var a *dns.Answer - v := answer["answer"].(string) - switch d.Get("type") { - case "TXT", "SPF": - a = dns.NewTXTAnswer(v) - default: - a = dns.NewAnswer(strings.Split(v, " ")) - } - if v, ok := answer["region"]; ok { - a.RegionName = v.(string) - } - - // if v, ok := answer["meta"]; ok { - // metaDynamicToStruct(a.Meta, v) - // } - al[i] = a - } - r.Answers = al - if _, ok := d.GetOk("link"); ok { - return errors.New("Cannot have both link and answers in a record") - } - } - if v, ok := d.GetOk("ttl"); ok { - r.TTL = v.(int) - } - if v, ok := d.GetOk("link"); ok { - r.LinkTo(v.(string)) - } - // if v, ok := d.GetOk("meta"); ok { - // metaDynamicToStruct(r.Meta, v) - // } - useClientSubnet := d.Get("use_client_subnet").(bool) - r.UseClientSubnet = &useClientSubnet - - if 
rawFilters := d.Get("filters").([]interface{}); len(rawFilters) > 0 { - f := make([]*filter.Filter, len(rawFilters)) - for i, filterRaw := range rawFilters { - fi := filterRaw.(map[string]interface{}) - config := make(map[string]interface{}) - filter := filter.Filter{ - Type: fi["filter"].(string), - Config: config, - } - if disabled, ok := fi["disabled"]; ok { - filter.Disabled = disabled.(bool) - } - if rawConfig, ok := fi["config"]; ok { - for k, v := range rawConfig.(map[string]interface{}) { - if i, err := strconv.Atoi(v.(string)); err == nil { - filter.Config[k] = i - } else { - filter.Config[k] = v - } - } - } - f[i] = &filter - } - r.Filters = f - } - if regions := d.Get("regions").(*schema.Set); regions.Len() > 0 { - for _, regionRaw := range regions.List() { - region := regionRaw.(map[string]interface{}) - ns1R := data.Region{ - Meta: data.Meta{}, - } - // if v, ok := region["meta"]; ok { - // metaDynamicToStruct(&ns1R.Meta, v) - // } - - r.Regions[region["name"].(string)] = ns1R - } - } - return nil -} - -// RecordCreate creates DNS record in ns1 -func RecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - r := dns.NewRecord(d.Get("zone").(string), d.Get("domain").(string), d.Get("type").(string)) - if err := resourceDataToRecord(r, d); err != nil { - return err - } - if _, err := client.Records.Create(r); err != nil { - return err - } - return recordToResourceData(d, r) -} - -// RecordRead reads the DNS record from ns1 -func RecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - - r, _, err := client.Records.Get(d.Get("zone").(string), d.Get("domain").(string), d.Get("type").(string)) - if err != nil { - return err - } - - return recordToResourceData(d, r) -} - -// RecordDelete deltes the DNS record from ns1 -func RecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.Records.Delete(d.Get("zone").(string), 
d.Get("domain").(string), d.Get("type").(string)) - d.SetId("") - return err -} - -// RecordUpdate updates the given dns record in ns1 -func RecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - r := dns.NewRecord(d.Get("zone").(string), d.Get("domain").(string), d.Get("type").(string)) - if err := resourceDataToRecord(r, d); err != nil { - return err - } - if _, err := client.Records.Update(r); err != nil { - return err - } - return recordToResourceData(d, r) -} - -func RecordStateFunc(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) != 3 { - return nil, fmt.Errorf("Invalid record specifier. Expecting 2 slashes (\"zone/domain/type\"), got %d.", len(parts)-1) - } - - d.Set("zone", parts[0]) - d.Set("domain", parts[1]) - d.Set("type", parts[2]) - - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/ns1/resource_record_test.go b/builtin/providers/ns1/resource_record_test.go deleted file mode 100644 index ec5075303..000000000 --- a/builtin/providers/ns1/resource_record_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package ns1 - -import ( - "fmt" - "reflect" - "sort" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/dns" -) - -func TestAccRecord_basic(t *testing.T) { - var record dns.Record - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRecordBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckRecordExists("ns1_record.it", &record), - testAccCheckRecordDomain(&record, "test.terraform-record-test.io"), - testAccCheckRecordTTL(&record, 60), - testAccCheckRecordUseClientSubnet(&record, true), - 
testAccCheckRecordRegionName(&record, []string{"cal"}), - // testAccCheckRecordAnswerMetaWeight(&record, 10), - testAccCheckRecordAnswerRdata(&record, 0, "test1.terraform-record-test.io"), - ), - }, - }, - }) -} - -func TestAccRecord_updated(t *testing.T) { - var record dns.Record - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRecordBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckRecordExists("ns1_record.it", &record), - testAccCheckRecordDomain(&record, "test.terraform-record-test.io"), - testAccCheckRecordTTL(&record, 60), - testAccCheckRecordUseClientSubnet(&record, true), - testAccCheckRecordRegionName(&record, []string{"cal"}), - // testAccCheckRecordAnswerMetaWeight(&record, 10), - testAccCheckRecordAnswerRdata(&record, 0, "test1.terraform-record-test.io"), - ), - }, - resource.TestStep{ - Config: testAccRecordUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckRecordExists("ns1_record.it", &record), - testAccCheckRecordDomain(&record, "test.terraform-record-test.io"), - testAccCheckRecordTTL(&record, 120), - testAccCheckRecordUseClientSubnet(&record, false), - testAccCheckRecordRegionName(&record, []string{"ny", "wa"}), - // testAccCheckRecordAnswerMetaWeight(&record, 5), - testAccCheckRecordAnswerRdata(&record, 0, "test2.terraform-record-test.io"), - ), - }, - }, - }) -} - -func TestAccRecord_SPF(t *testing.T) { - var record dns.Record - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRecordSPF, - Check: resource.ComposeTestCheckFunc( - testAccCheckRecordExists("ns1_record.spf", &record), - testAccCheckRecordDomain(&record, "terraform-record-test.io"), - testAccCheckRecordTTL(&record, 
86400), - testAccCheckRecordUseClientSubnet(&record, true), - testAccCheckRecordAnswerRdata(&record, 0, "v=DKIM1; k=rsa; p=XXXXXXXX"), - ), - }, - }, - }) -} - -func TestAccRecord_SRV(t *testing.T) { - var record dns.Record - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRecordDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRecordSRV, - Check: resource.ComposeTestCheckFunc( - testAccCheckRecordExists("ns1_record.srv", &record), - testAccCheckRecordDomain(&record, "_some-server._tcp.terraform-record-test.io"), - testAccCheckRecordTTL(&record, 86400), - testAccCheckRecordUseClientSubnet(&record, true), - testAccCheckRecordAnswerRdata(&record, 0, "10"), - testAccCheckRecordAnswerRdata(&record, 1, "0"), - testAccCheckRecordAnswerRdata(&record, 2, "2380"), - testAccCheckRecordAnswerRdata(&record, 3, "node-1.terraform-record-test.io"), - ), - }, - }, - }) -} - -func testAccCheckRecordExists(n string, record *dns.Record) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %v", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("NoID is set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - p := rs.Primary - - foundRecord, _, err := client.Records.Get(p.Attributes["zone"], p.Attributes["domain"], p.Attributes["type"]) - if err != nil { - return fmt.Errorf("Record not found") - } - - if foundRecord.Domain != p.Attributes["domain"] { - return fmt.Errorf("Record not found") - } - - *record = *foundRecord - - return nil - } -} - -func testAccCheckRecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - var recordDomain string - var recordZone string - var recordType string - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_record" { - continue - } - - if rs.Type == "ns1_record" { - 
recordType = rs.Primary.Attributes["type"] - recordDomain = rs.Primary.Attributes["domain"] - recordZone = rs.Primary.Attributes["zone"] - } - } - - foundRecord, _, err := client.Records.Get(recordZone, recordDomain, recordType) - if err != ns1.ErrRecordMissing { - return fmt.Errorf("Record still exists: %#v %#v", foundRecord, err) - } - - return nil -} - -func testAccCheckRecordDomain(r *dns.Record, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if r.Domain != expected { - return fmt.Errorf("Domain: got: %#v want: %#v", r.Domain, expected) - } - return nil - } -} - -func testAccCheckRecordTTL(r *dns.Record, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if r.TTL != expected { - return fmt.Errorf("TTL: got: %#v want: %#v", r.TTL, expected) - } - return nil - } -} - -func testAccCheckRecordUseClientSubnet(r *dns.Record, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *r.UseClientSubnet != expected { - return fmt.Errorf("UseClientSubnet: got: %#v want: %#v", *r.UseClientSubnet, expected) - } - return nil - } -} - -func testAccCheckRecordRegionName(r *dns.Record, expected []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - regions := make([]string, len(r.Regions)) - - i := 0 - for k := range r.Regions { - regions[i] = k - i++ - } - sort.Strings(regions) - sort.Strings(expected) - if !reflect.DeepEqual(regions, expected) { - return fmt.Errorf("Regions: got: %#v want: %#v", regions, expected) - } - return nil - } -} - -func testAccCheckRecordAnswerMetaWeight(r *dns.Record, expected float64) resource.TestCheckFunc { - return func(s *terraform.State) error { - recordAnswer := r.Answers[0] - recordMetas := recordAnswer.Meta - weight := recordMetas.Weight.(float64) - if weight != expected { - return fmt.Errorf("Answers[0].Meta.Weight: got: %#v want: %#v", weight, expected) - } - return nil - } -} - -func testAccCheckRecordAnswerRdata(r 
*dns.Record, idx int, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - recordAnswer := r.Answers[0] - recordAnswerString := recordAnswer.Rdata[idx] - if recordAnswerString != expected { - return fmt.Errorf("Answers[0].Rdata[%d]: got: %#v want: %#v", idx, recordAnswerString, expected) - } - return nil - } -} - -const testAccRecordBasic = ` -resource "ns1_record" "it" { - zone = "${ns1_zone.test.zone}" - domain = "test.${ns1_zone.test.zone}" - type = "CNAME" - ttl = 60 - - // meta { - // weight = 5 - // connections = 3 - // // up = false // Ignored by d.GetOk("meta.0.up") due to known issue - // } - - answers { - answer = "test1.terraform-record-test.io" - region = "cal" - - // meta { - // weight = 10 - // up = true - // } - } - - regions { - name = "cal" - // meta { - // up = true - // us_state = ["CA"] - // } - } - - filters { - filter = "up" - } - - filters { - filter = "geotarget_country" - } - - filters { - filter = "select_first_n" - config = {N=1} - } -} - -resource "ns1_zone" "test" { - zone = "terraform-record-test.io" -} -` - -const testAccRecordUpdated = ` -resource "ns1_record" "it" { - zone = "${ns1_zone.test.zone}" - domain = "test.${ns1_zone.test.zone}" - type = "CNAME" - ttl = 120 - use_client_subnet = false - - // meta { - // weight = 5 - // connections = 3 - // // up = false // Ignored by d.GetOk("meta.0.up") due to known issue - // } - - answers { - answer = "test2.terraform-record-test.io" - region = "ny" - - // meta { - // weight = 5 - // up = true - // } - } - - regions { - name = "wa" - // meta { - // us_state = ["WA"] - // } - } - - regions { - name = "ny" - // meta { - // us_state = ["NY"] - // } - } - - filters { - filter = "up" - } - - filters { - filter = "geotarget_country" - } -} - -resource "ns1_zone" "test" { - zone = "terraform-record-test.io" -} -` - -const testAccRecordSPF = ` -resource "ns1_record" "spf" { - zone = "${ns1_zone.test.zone}" - domain = "${ns1_zone.test.zone}" - type = "SPF" - ttl = 
86400 - use_client_subnet = "true" - answers = { - answer = "v=DKIM1; k=rsa; p=XXXXXXXX" - } -} - -resource "ns1_zone" "test" { - zone = "terraform-record-test.io" -} -` - -const testAccRecordSRV = ` -resource "ns1_record" "srv" { - zone = "${ns1_zone.test.zone}" - domain = "_some-server._tcp.${ns1_zone.test.zone}" - type = "SRV" - ttl = 86400 - use_client_subnet = "true" - answers { - answer = "10 0 2380 node-1.${ns1_zone.test.zone}" - } -} - -resource "ns1_zone" "test" { - zone = "terraform-record-test.io" -} -` diff --git a/builtin/providers/ns1/resource_team.go b/builtin/providers/ns1/resource_team.go deleted file mode 100644 index 2cc9f1ccf..000000000 --- a/builtin/providers/ns1/resource_team.go +++ /dev/null @@ -1,89 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func teamResource() *schema.Resource { - s := map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - } - s = addPermsSchema(s) - return &schema.Resource{ - Schema: s, - Create: TeamCreate, - Read: TeamRead, - Update: TeamUpdate, - Delete: TeamDelete, - } -} - -func teamToResourceData(d *schema.ResourceData, t *account.Team) error { - d.SetId(t.ID) - d.Set("name", t.Name) - permissionsToResourceData(d, t.Permissions) - return nil -} - -func resourceDataToTeam(t *account.Team, d *schema.ResourceData) error { - t.ID = d.Id() - t.Name = d.Get("name").(string) - t.Permissions = resourceDataToPermissions(d) - return nil -} - -// TeamCreate creates the given team in ns1 -func TeamCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - t := account.Team{} - if err := resourceDataToTeam(&t, d); err != nil { - return err - } - if _, err := client.Teams.Create(&t); err != nil { - return err - } - return teamToResourceData(d, &t) -} - 
-// TeamRead reads the team data from ns1 -func TeamRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - t, _, err := client.Teams.Get(d.Id()) - if err != nil { - return err - } - return teamToResourceData(d, t) -} - -// TeamDelete deletes the given team from ns1 -func TeamDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.Teams.Delete(d.Id()) - d.SetId("") - return err -} - -// TeamUpdate updates the given team in ns1 -func TeamUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - t := account.Team{ - ID: d.Id(), - } - if err := resourceDataToTeam(&t, d); err != nil { - return err - } - if _, err := client.Teams.Update(&t); err != nil { - return err - } - return teamToResourceData(d, &t) -} diff --git a/builtin/providers/ns1/resource_team_test.go b/builtin/providers/ns1/resource_team_test.go deleted file mode 100644 index 9f9b2ac17..000000000 --- a/builtin/providers/ns1/resource_team_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package ns1 - -import ( - "fmt" - "reflect" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func TestAccTeam_basic(t *testing.T) { - var team account.Team - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckTeamDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTeamBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckTeamExists("ns1_team.foobar", &team), - testAccCheckTeamName(&team, "terraform test"), - testAccCheckTeamDNSPermission(&team, "view_zones", true), - testAccCheckTeamDNSPermission(&team, "zones_allow_by_default", true), - testAccCheckTeamDNSPermissionZones(&team, "zones_allow", []string{"mytest.zone"}), - 
testAccCheckTeamDNSPermissionZones(&team, "zones_deny", []string{"myother.zone"}), - testAccCheckTeamDataPermission(&team, "manage_datasources", true), - ), - }, - }, - }) -} - -func TestAccTeam_updated(t *testing.T) { - var team account.Team - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckTeamDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTeamBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckTeamExists("ns1_team.foobar", &team), - testAccCheckTeamName(&team, "terraform test"), - ), - }, - resource.TestStep{ - Config: testAccTeamUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckTeamExists("ns1_team.foobar", &team), - testAccCheckTeamName(&team, "terraform test updated"), - testAccCheckTeamDNSPermission(&team, "view_zones", true), - testAccCheckTeamDNSPermission(&team, "zones_allow_by_default", true), - testAccCheckTeamDNSPermissionZones(&team, "zones_allow", []string{}), - testAccCheckTeamDNSPermissionZones(&team, "zones_deny", []string{}), - testAccCheckTeamDataPermission(&team, "manage_datasources", false), - ), - }, - }, - }) -} - -func testAccCheckTeamExists(n string, team *account.Team) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("NoID is set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundTeam, _, err := client.Teams.Get(rs.Primary.Attributes["id"]) - if err != nil { - return err - } - - if foundTeam.Name != rs.Primary.Attributes["name"] { - return fmt.Errorf("Team not found") - } - - *team = *foundTeam - - return nil - } -} - -func testAccCheckTeamDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_team" { - continue - } - - 
team, _, err := client.Teams.Get(rs.Primary.Attributes["id"]) - if err == nil { - return fmt.Errorf("Team still exists: %#v: %#v", err, team.Name) - } - } - - return nil -} - -func testAccCheckTeamName(team *account.Team, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if team.Name != expected { - return fmt.Errorf("Name: got: %s want: %s", team.Name, expected) - } - return nil - } -} - -func testAccCheckTeamDNSPermission(team *account.Team, perm string, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - dns := team.Permissions.DNS - - switch perm { - case "view_zones": - if dns.ViewZones != expected { - return fmt.Errorf("DNS.ViewZones: got: %t want: %t", dns.ViewZones, expected) - } - case "manage_zones": - if dns.ManageZones != expected { - return fmt.Errorf("DNS.ManageZones: got: %t want: %t", dns.ManageZones, expected) - } - case "zones_allow_by_default": - if dns.ZonesAllowByDefault != expected { - return fmt.Errorf("DNS.ZonesAllowByDefault: got: %t want: %t", dns.ZonesAllowByDefault, expected) - } - } - - return nil - } -} - -func testAccCheckTeamDataPermission(team *account.Team, perm string, expected bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - data := team.Permissions.Data - - switch perm { - case "push_to_datafeeds": - if data.PushToDatafeeds != expected { - return fmt.Errorf("Data.PushToDatafeeds: got: %t want: %t", data.PushToDatafeeds, expected) - } - case "manage_datasources": - if data.ManageDatasources != expected { - return fmt.Errorf("Data.ManageDatasources: got: %t want: %t", data.ManageDatasources, expected) - } - case "manage_datafeeds": - if data.ManageDatafeeds != expected { - return fmt.Errorf("Data.ManageDatafeeds: got: %t want: %t", data.ManageDatafeeds, expected) - } - } - - return nil - } -} - -func testAccCheckTeamDNSPermissionZones(team *account.Team, perm string, expected []string) resource.TestCheckFunc { - return func(s 
*terraform.State) error { - dns := team.Permissions.DNS - - switch perm { - case "zones_allow": - if !reflect.DeepEqual(dns.ZonesAllow, expected) { - return fmt.Errorf("DNS.ZonesAllow: got: %v want: %v", dns.ZonesAllow, expected) - } - case "zones_deny": - if !reflect.DeepEqual(dns.ZonesDeny, expected) { - return fmt.Errorf("DNS.ZonesDeny: got: %v want: %v", dns.ZonesDeny, expected) - } - } - - return nil - } -} - -const testAccTeamBasic = ` -resource "ns1_team" "foobar" { - name = "terraform test" - - dns_view_zones = true - dns_zones_allow_by_default = true - dns_zones_allow = ["mytest.zone"] - dns_zones_deny = ["myother.zone"] - - data_manage_datasources = true -}` - -const testAccTeamUpdated = ` -resource "ns1_team" "foobar" { - name = "terraform test updated" - - dns_view_zones = true - dns_zones_allow_by_default = true - - data_manage_datasources = false -}` diff --git a/builtin/providers/ns1/resource_user.go b/builtin/providers/ns1/resource_user.go deleted file mode 100644 index 012d021f9..000000000 --- a/builtin/providers/ns1/resource_user.go +++ /dev/null @@ -1,126 +0,0 @@ -package ns1 - -import ( - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func userResource() *schema.Resource { - s := map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "notify": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeBool, - }, - "teams": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - } - s = addPermsSchema(s) - return &schema.Resource{ - Schema: s, - Create: UserCreate, - Read: UserRead, - Update: UserUpdate, - 
Delete: UserDelete, - } -} - -func userToResourceData(d *schema.ResourceData, u *account.User) error { - d.SetId(u.Username) - d.Set("name", u.Name) - d.Set("email", u.Email) - d.Set("teams", u.TeamIDs) - notify := make(map[string]bool) - notify["billing"] = u.Notify.Billing - d.Set("notify", notify) - permissionsToResourceData(d, u.Permissions) - return nil -} - -func resourceDataToUser(u *account.User, d *schema.ResourceData) error { - u.Name = d.Get("name").(string) - u.Username = d.Get("username").(string) - u.Email = d.Get("email").(string) - if v, ok := d.GetOk("teams"); ok { - teamsRaw := v.([]interface{}) - u.TeamIDs = make([]string, len(teamsRaw)) - for i, team := range teamsRaw { - u.TeamIDs[i] = team.(string) - } - } else { - u.TeamIDs = make([]string, 0) - } - if v, ok := d.GetOk("notify"); ok { - notifyRaw := v.(map[string]interface{}) - u.Notify.Billing = notifyRaw["billing"].(bool) - } - u.Permissions = resourceDataToPermissions(d) - return nil -} - -// UserCreate creates the given user in ns1 -func UserCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - u := account.User{} - if err := resourceDataToUser(&u, d); err != nil { - return err - } - if _, err := client.Users.Create(&u); err != nil { - return err - } - return userToResourceData(d, &u) -} - -// UserRead reads the given users data from ns1 -func UserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - u, _, err := client.Users.Get(d.Id()) - if err != nil { - return err - } - return userToResourceData(d, u) -} - -// UserDelete deletes the given user from ns1 -func UserDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.Users.Delete(d.Id()) - d.SetId("") - return err -} - -// UserUpdate updates the user with given parameters in ns1 -func UserUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - u := account.User{ - Username: d.Id(), 
- } - if err := resourceDataToUser(&u, d); err != nil { - return err - } - if _, err := client.Users.Update(&u); err != nil { - return err - } - return userToResourceData(d, &u) -} diff --git a/builtin/providers/ns1/resource_user_test.go b/builtin/providers/ns1/resource_user_test.go deleted file mode 100644 index b32d7e453..000000000 --- a/builtin/providers/ns1/resource_user_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package ns1 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/account" -) - -func TestAccUser_basic(t *testing.T) { - var user account.User - rString := acctest.RandStringFromCharSet(15, acctest.CharSetAlphaNum) - name := fmt.Sprintf("terraform acc test user %s", rString) - username := fmt.Sprintf("tf_acc_test_user_%s", rString) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserBasic(rString), - Check: resource.ComposeTestCheckFunc( - testAccCheckUserExists("ns1_user.u", &user), - resource.TestCheckResourceAttr("ns1_user.u", "email", "tf_acc_test_ns1@hashicorp.com"), - resource.TestCheckResourceAttr("ns1_user.u", "name", name), - resource.TestCheckResourceAttr("ns1_user.u", "teams.#", "1"), - resource.TestCheckResourceAttr("ns1_user.u", "notify.%", "1"), - resource.TestCheckResourceAttr("ns1_user.u", "notify.billing", "true"), - resource.TestCheckResourceAttr("ns1_user.u", "username", username), - ), - }, - }, - }) -} - -func testAccCheckUserDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_user" { - continue - } - - user, _, err := 
client.Users.Get(rs.Primary.Attributes["id"]) - if err == nil { - return fmt.Errorf("User still exists: %#v: %#v", err, user.Name) - } - } - - return nil -} - -func testAccCheckUserExists(n string, user *account.User) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*ns1.Client) - - foundUser, _, err := client.Users.Get(rs.Primary.ID) - if err != nil { - return err - } - - if foundUser.Username != rs.Primary.ID { - return fmt.Errorf("User not found (%#v != %s)", foundUser, rs.Primary.ID) - } - - *user = *foundUser - - return nil - } -} - -func testAccUserBasic(rString string) string { - return fmt.Sprintf(`resource "ns1_team" "t" { - name = "terraform acc test team %s" -} - -resource "ns1_user" "u" { - name = "terraform acc test user %s" - username = "tf_acc_test_user_%s" - email = "tf_acc_test_ns1@hashicorp.com" - teams = ["${ns1_team.t.id}"] - notify { - billing = true - } -} -`, rString, rString, rString) -} diff --git a/builtin/providers/ns1/resource_zone.go b/builtin/providers/ns1/resource_zone.go deleted file mode 100644 index c08201511..000000000 --- a/builtin/providers/ns1/resource_zone.go +++ /dev/null @@ -1,174 +0,0 @@ -package ns1 - -import ( - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/dns" -) - -func zoneResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // Optional - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - // SOA attributes per https://tools.ietf.org/html/rfc1035). 
- "refresh": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "retry": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "expiry": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - // SOA MINUMUM overloaded as NX TTL per https://tools.ietf.org/html/rfc2308 - "nx_ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - // TODO: test - "link": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - // TODO: test - "primary": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - // Computed - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "dns_servers": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "hostmaster": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - Create: ZoneCreate, - Read: ZoneRead, - Update: ZoneUpdate, - Delete: ZoneDelete, - Importer: &schema.ResourceImporter{State: ZoneStateFunc}, - } -} - -func zoneToResourceData(d *schema.ResourceData, z *dns.Zone) { - d.SetId(z.ID) - d.Set("hostmaster", z.Hostmaster) - d.Set("ttl", z.TTL) - d.Set("nx_ttl", z.NxTTL) - d.Set("refresh", z.Refresh) - d.Set("retry", z.Retry) - d.Set("expiry", z.Expiry) - d.Set("dns_servers", strings.Join(z.DNSServers[:], ",")) - if z.Secondary != nil && z.Secondary.Enabled { - d.Set("primary", z.Secondary.PrimaryIP) - } - if z.Link != nil && *z.Link != "" { - d.Set("link", *z.Link) - } -} - -func resourceToZoneData(z *dns.Zone, d *schema.ResourceData) { - z.ID = d.Id() - if v, ok := d.GetOk("hostmaster"); ok { - z.Hostmaster = v.(string) - } - if v, ok := d.GetOk("ttl"); ok { - z.TTL = v.(int) - } - if v, ok := d.GetOk("nx_ttl"); ok { - z.NxTTL = v.(int) - } - if v, ok := d.GetOk("refresh"); ok { - z.Refresh = v.(int) - } - if v, ok := d.GetOk("retry"); ok { - z.Retry = v.(int) - } - if v, ok := d.GetOk("expiry"); ok { - 
z.Expiry = v.(int) - } - if v, ok := d.GetOk("primary"); ok { - z.MakeSecondary(v.(string)) - } - if v, ok := d.GetOk("link"); ok { - z.LinkTo(v.(string)) - } -} - -// ZoneCreate creates the given zone in ns1 -func ZoneCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - z := dns.NewZone(d.Get("zone").(string)) - resourceToZoneData(z, d) - if _, err := client.Zones.Create(z); err != nil { - return err - } - zoneToResourceData(d, z) - return nil -} - -// ZoneRead reads the given zone data from ns1 -func ZoneRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - z, _, err := client.Zones.Get(d.Get("zone").(string)) - if err != nil { - return err - } - zoneToResourceData(d, z) - return nil -} - -// ZoneDelete deteles the given zone from ns1 -func ZoneDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - _, err := client.Zones.Delete(d.Get("zone").(string)) - d.SetId("") - return err -} - -// ZoneUpdate updates the zone with given params in ns1 -func ZoneUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ns1.Client) - z := dns.NewZone(d.Get("zone").(string)) - resourceToZoneData(z, d) - if _, err := client.Zones.Update(z); err != nil { - return err - } - zoneToResourceData(d, z) - return nil -} - -func ZoneStateFunc(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("zone", d.Id()) - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/ns1/resource_zone_test.go b/builtin/providers/ns1/resource_zone_test.go deleted file mode 100644 index 7fa504468..000000000 --- a/builtin/providers/ns1/resource_zone_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package ns1 - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - ns1 "gopkg.in/ns1/ns1-go.v2/rest" - "gopkg.in/ns1/ns1-go.v2/rest/model/dns" -) - -func TestAccZone_basic(t 
*testing.T) { - var zone dns.Zone - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccZoneBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckZoneExists("ns1_zone.it", &zone), - testAccCheckZoneName(&zone, "terraform-test-zone.io"), - testAccCheckZoneTTL(&zone, 3600), - testAccCheckZoneRefresh(&zone, 43200), - testAccCheckZoneRetry(&zone, 7200), - testAccCheckZoneExpiry(&zone, 1209600), - testAccCheckZoneNxTTL(&zone, 3600), - ), - }, - }, - }) -} - -func TestAccZone_updated(t *testing.T) { - var zone dns.Zone - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccZoneBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckZoneExists("ns1_zone.it", &zone), - testAccCheckZoneName(&zone, "terraform-test-zone.io"), - testAccCheckZoneTTL(&zone, 3600), - testAccCheckZoneRefresh(&zone, 43200), - testAccCheckZoneRetry(&zone, 7200), - testAccCheckZoneExpiry(&zone, 1209600), - testAccCheckZoneNxTTL(&zone, 3600), - ), - }, - resource.TestStep{ - Config: testAccZoneUpdated, - Check: resource.ComposeTestCheckFunc( - testAccCheckZoneExists("ns1_zone.it", &zone), - testAccCheckZoneName(&zone, "terraform-test-zone.io"), - testAccCheckZoneTTL(&zone, 10800), - testAccCheckZoneRefresh(&zone, 3600), - testAccCheckZoneRetry(&zone, 300), - testAccCheckZoneExpiry(&zone, 2592000), - testAccCheckZoneNxTTL(&zone, 3601), - ), - }, - }, - }) -} - -func testAccCheckZoneExists(n string, zone *dns.Zone) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("NoID is set") - } - - client := 
testAccProvider.Meta().(*ns1.Client) - - foundZone, _, err := client.Zones.Get(rs.Primary.Attributes["zone"]) - - p := rs.Primary - - if err != nil { - return err - } - - if foundZone.ID != p.Attributes["id"] { - return fmt.Errorf("Zone not found") - } - - *zone = *foundZone - - return nil - } -} - -func testAccCheckZoneDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*ns1.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ns1_zone" { - continue - } - - zone, _, err := client.Zones.Get(rs.Primary.Attributes["zone"]) - - if err == nil { - return fmt.Errorf("Zone still exists: %#v: %#v", err, zone) - } - } - - return nil -} - -func testAccCheckZoneName(zone *dns.Zone, expected string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.Zone != expected { - return fmt.Errorf("Zone: got: %s want: %s", zone.Zone, expected) - } - return nil - } -} - -func testAccCheckZoneTTL(zone *dns.Zone, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.TTL != expected { - return fmt.Errorf("TTL: got: %d want: %d", zone.TTL, expected) - } - return nil - } -} -func testAccCheckZoneRefresh(zone *dns.Zone, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.Refresh != expected { - return fmt.Errorf("Refresh: got: %d want: %d", zone.Refresh, expected) - } - return nil - } -} -func testAccCheckZoneRetry(zone *dns.Zone, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.Retry != expected { - return fmt.Errorf("Retry: got: %d want: %d", zone.Retry, expected) - } - return nil - } -} -func testAccCheckZoneExpiry(zone *dns.Zone, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.Expiry != expected { - return fmt.Errorf("Expiry: got: %d want: %d", zone.Expiry, expected) - } - return nil - } -} -func testAccCheckZoneNxTTL(zone *dns.Zone, expected int) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - if zone.NxTTL != expected { - return fmt.Errorf("NxTTL: got: %d want: %d", zone.NxTTL, expected) - } - return nil - } -} - -const testAccZoneBasic = ` -resource "ns1_zone" "it" { - zone = "terraform-test-zone.io" -} -` - -const testAccZoneUpdated = ` -resource "ns1_zone" "it" { - zone = "terraform-test-zone.io" - ttl = 10800 - refresh = 3600 - retry = 300 - expiry = 2592000 - nx_ttl = 3601 - # link = "1.2.3.4.in-addr.arpa" # TODO - # primary = "1.2.3.4.in-addr.arpa" # TODO -} -` diff --git a/builtin/providers/ns1/string_enum.go b/builtin/providers/ns1/string_enum.go deleted file mode 100644 index b7d5337aa..000000000 --- a/builtin/providers/ns1/string_enum.go +++ /dev/null @@ -1,47 +0,0 @@ -package ns1 - -import ( - "fmt" - "strings" -) - -type StringEnum struct { - ValueMap map[string]int - Expecting string -} - -func NewStringEnum(values []string) *StringEnum { - valueMap := make(map[string]int) - quoted := make([]string, len(values), len(values)) - for i, value := range values { - _, present := valueMap[value] - if present { - panic(fmt.Sprintf("duplicate value %q", value)) - } - valueMap[value] = i - - quoted[i] = fmt.Sprintf("%q", value) - } - - return &StringEnum{ - ValueMap: valueMap, - Expecting: strings.Join(quoted, ", "), - } -} - -func (se *StringEnum) Check(v string) (int, error) { - i, present := se.ValueMap[v] - if present { - return i, nil - } else { - return -1, fmt.Errorf("expecting one of %s; got %q", se.Expecting, v) - } -} - -func (se *StringEnum) ValidateFunc(v interface{}, k string) (ws []string, es []error) { - _, err := se.Check(v.(string)) - if err != nil { - return nil, []error{err} - } - return nil, nil -} diff --git a/builtin/providers/null/data_source.go b/builtin/providers/null/data_source.go deleted file mode 100644 index 065029e7e..000000000 --- a/builtin/providers/null/data_source.go +++ /dev/null @@ -1,54 +0,0 @@ -package null - -import ( - "fmt" - "math/rand" - 
"time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func init() { - rand.Seed(time.Now().Unix()) -} - -func dataSource() *schema.Resource { - return &schema.Resource{ - Read: dataSourceRead, - - Schema: map[string]*schema.Schema{ - "inputs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "outputs": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - "random": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "has_computed_default": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceRead(d *schema.ResourceData, meta interface{}) error { - - inputs := d.Get("inputs") - d.Set("outputs", inputs) - - d.Set("random", fmt.Sprintf("%d", rand.Int())) - if d.Get("has_computed_default") == "" { - d.Set("has_computed_default", "default") - } - - d.SetId("static") - - return nil -} diff --git a/builtin/providers/null/provider.go b/builtin/providers/null/provider.go deleted file mode 100644 index 7f67877fd..000000000 --- a/builtin/providers/null/provider.go +++ /dev/null @@ -1,21 +0,0 @@ -package null - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{}, - - ResourcesMap: map[string]*schema.Resource{ - "null_resource": resource(), - }, - - DataSourcesMap: map[string]*schema.Resource{ - "null_data_source": dataSource(), - }, - } -} diff --git a/builtin/providers/null/provider_test.go b/builtin/providers/null/provider_test.go deleted file mode 100644 index 2cb1bb6ca..000000000 --- a/builtin/providers/null/provider_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package null - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} diff --git a/builtin/providers/null/resource.go b/builtin/providers/null/resource.go deleted file mode 100644 index a8467ad3d..000000000 --- a/builtin/providers/null/resource.go +++ /dev/null @@ -1,43 +0,0 @@ -package null - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func init() { - rand.Seed(time.Now().Unix()) -} - -func resource() *schema.Resource { - return &schema.Resource{ - Create: resourceCreate, - Read: resourceRead, - Delete: resourceDelete, - - Schema: map[string]*schema.Schema{ - "triggers": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%d", rand.Int())) - return nil -} - -func resourceRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func resourceDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/oneandone/config.go b/builtin/providers/oneandone/config.go deleted file mode 100644 index 
1192c84e7..000000000 --- a/builtin/providers/oneandone/config.go +++ /dev/null @@ -1,24 +0,0 @@ -package oneandone - -import ( - "github.com/1and1/oneandone-cloudserver-sdk-go" -) - -type Config struct { - Token string - Retries int - Endpoint string - API *oneandone.API -} - -func (c *Config) Client() (*Config, error) { - token := oneandone.SetToken(c.Token) - - if len(c.Endpoint) > 0 { - c.API = oneandone.New(token, c.Endpoint) - } else { - c.API = oneandone.New(token, oneandone.BaseUrl) - } - - return c, nil -} diff --git a/builtin/providers/oneandone/provider.go b/builtin/providers/oneandone/provider.go deleted file mode 100644 index 8cc65f19b..000000000 --- a/builtin/providers/oneandone/provider.go +++ /dev/null @@ -1,56 +0,0 @@ -package oneandone - -import ( - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ONEANDONE_TOKEN", nil), - Description: "1&1 token for API operations.", - }, - "retries": { - Type: schema.TypeInt, - Optional: true, - Default: 50, - DefaultFunc: schema.EnvDefaultFunc("ONEANDONE_RETRIES", nil), - }, - "endpoint": { - Type: schema.TypeString, - Optional: true, - Default: oneandone.BaseUrl, - DefaultFunc: schema.EnvDefaultFunc("ONEANDONE_ENDPOINT", nil), - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "oneandone_server": resourceOneandOneServer(), - "oneandone_firewall_policy": resourceOneandOneFirewallPolicy(), - "oneandone_private_network": resourceOneandOnePrivateNetwork(), - "oneandone_public_ip": resourceOneandOnePublicIp(), - "oneandone_shared_storage": resourceOneandOneSharedStorage(), - "oneandone_monitoring_policy": resourceOneandOneMonitoringPolicy(), - "oneandone_loadbalancer": resourceOneandOneLoadbalancer(), - 
"oneandone_vpn": resourceOneandOneVPN(), - }, - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var endpoint string - if d.Get("endpoint").(string) != oneandone.BaseUrl { - endpoint = d.Get("endpoint").(string) - } - config := Config{ - Token: d.Get("token").(string), - Retries: d.Get("retries").(int), - Endpoint: endpoint, - } - return config.Client() -} diff --git a/builtin/providers/oneandone/provider_test.go b/builtin/providers/oneandone/provider_test.go deleted file mode 100644 index 2057aac6d..000000000 --- a/builtin/providers/oneandone/provider_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package oneandone - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "oneandone": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("ONEANDONE_TOKEN"); v == "" { - t.Fatal("ONEANDONE_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/oneandone/resource_oneandone_firewall_policy.go b/builtin/providers/oneandone/resource_oneandone_firewall_policy.go deleted file mode 100644 index c62b63b5c..000000000 --- a/builtin/providers/oneandone/resource_oneandone_firewall_policy.go +++ /dev/null @@ -1,274 +0,0 @@ -package oneandone - -import ( - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "strings" -) - -func 
resourceOneandOneFirewallPolicy() *schema.Resource { - return &schema.Resource{ - - Create: resourceOneandOneFirewallCreate, - Read: resourceOneandOneFirewallRead, - Update: resourceOneandOneFirewallUpdate, - Delete: resourceOneandOneFirewallDelete, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "rules": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "port_from": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 65535), - }, - "port_to": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 65535), - }, - "source_ip": { - Type: schema.TypeString, - Optional: true, - }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Required: true, - }, - }, - } -} - -func resourceOneandOneFirewallCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - req := oneandone.FirewallPolicyRequest{ - Name: d.Get("name").(string), - } - - if desc, ok := d.GetOk("description"); ok { - req.Description = desc.(string) - } - - req.Rules = getRules(d) - - fw_id, fw, err := config.API.CreateFirewallPolicy(&req) - if err != nil { - return err - } - - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - - d.SetId(fw_id) - - if err != nil { - return err - } - - return resourceOneandOneFirewallRead(d, meta) -} - -func resourceOneandOneFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if d.HasChange("name") || d.HasChange("description") { - fw, err := config.API.UpdateFirewallPolicy(d.Id(), d.Get("name").(string), d.Get("description").(string)) - if err != nil { - return err - } - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - if err != nil { 
- return err - } - } - - if d.HasChange("rules") { - oldR, newR := d.GetChange("rules") - oldValues := oldR.([]interface{}) - newValues := newR.([]interface{}) - if len(oldValues) > len(newValues) { - diff := difference(oldValues, newValues) - for _, old := range diff { - o := old.(map[string]interface{}) - if o["id"] != nil { - old_id := o["id"].(string) - fw, err := config.API.DeleteFirewallPolicyRule(d.Id(), old_id) - if err != nil { - return err - } - - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - } - } - } else { - var rules []oneandone.FirewallPolicyRule - - for _, raw := range newValues { - rl := raw.(map[string]interface{}) - - if rl["id"].(string) == "" { - rule := oneandone.FirewallPolicyRule{ - Protocol: rl["protocol"].(string), - } - - if rl["port_from"] != nil { - rule.PortFrom = oneandone.Int2Pointer(rl["port_from"].(int)) - } - if rl["port_to"] != nil { - rule.PortTo = oneandone.Int2Pointer(rl["port_to"].(int)) - } - - if rl["source_ip"] != nil { - rule.SourceIp = rl["source_ip"].(string) - } - - rules = append(rules, rule) - } - } - - if len(rules) > 0 { - fw, err := config.API.AddFirewallPolicyRules(d.Id(), rules) - if err != nil { - return err - } - - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - } - } - } - - return resourceOneandOneFirewallRead(d, meta) -} - -func resourceOneandOneFirewallRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - fw, err := config.API.GetFirewallPolicy(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("rules", readRules(d, fw.Rules)) - d.Set("description", fw.Description) - - return nil -} - -func resourceOneandOneFirewallDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - fp, err := config.API.DeleteFirewallPolicy(d.Id()) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(fp) 
- if err != nil { - return err - } - - return nil -} - -func readRules(d *schema.ResourceData, rules []oneandone.FirewallPolicyRule) interface{} { - rawRules := d.Get("rules").([]interface{}) - counter := 0 - for _, rR := range rawRules { - if len(rules) > counter { - rawMap := rR.(map[string]interface{}) - rawMap["id"] = rules[counter].Id - if rules[counter].SourceIp != "0.0.0.0" { - rawMap["source_ip"] = rules[counter].SourceIp - } - } - counter++ - } - - return rawRules -} - -func getRules(d *schema.ResourceData) []oneandone.FirewallPolicyRule { - var rules []oneandone.FirewallPolicyRule - - if raw, ok := d.GetOk("rules"); ok { - rawRules := raw.([]interface{}) - - for _, raw := range rawRules { - rl := raw.(map[string]interface{}) - rule := oneandone.FirewallPolicyRule{ - Protocol: rl["protocol"].(string), - } - - if rl["port_from"] != nil { - rule.PortFrom = oneandone.Int2Pointer(rl["port_from"].(int)) - } - if rl["port_to"] != nil { - rule.PortTo = oneandone.Int2Pointer(rl["port_to"].(int)) - } - - if rl["source_ip"] != nil { - rule.SourceIp = rl["source_ip"].(string) - } - - rules = append(rules, rule) - } - } - return rules -} - -func difference(oldV, newV []interface{}) (toreturn []interface{}) { - var ( - lenMin int - longest []interface{} - ) - // Determine the shortest length and the longest slice - if len(oldV) < len(newV) { - lenMin = len(oldV) - longest = newV - } else { - lenMin = len(newV) - longest = oldV - } - // compare common indeces - for i := 0; i < lenMin; i++ { - if oldV[i] == nil || newV[i] == nil { - continue - } - if oldV[i].(map[string]interface{})["id"] != newV[i].(map[string]interface{})["id"] { - toreturn = append(toreturn, newV) //out += fmt.Sprintf("=>\t%s\t%s\n", oldV[i], newV[i]) - } - } - // add indeces not in common - for _, v := range longest[lenMin:] { - //out += fmt.Sprintf("=>\t%s\n", v) - toreturn = append(toreturn, v) - } - return toreturn -} diff --git 
a/builtin/providers/oneandone/resource_oneandone_firewall_policy_test.go b/builtin/providers/oneandone/resource_oneandone_firewall_policy_test.go deleted file mode 100644 index 146d63c3c..000000000 --- a/builtin/providers/oneandone/resource_oneandone_firewall_policy_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandoneFirewall_Basic(t *testing.T) { - var firewall oneandone.FirewallPolicy - - name := "test" - name_updated := "test1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneFirewallDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneFirewall_basic, name), - - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneFirewallExists("oneandone_firewall_policy.fw", &firewall), - testAccCheckOneandoneFirewallAttributes("oneandone_firewall_policy.fw", name), - resource.TestCheckResourceAttr("oneandone_firewall_policy.fw", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneFirewall_update, name_updated), - - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneFirewallExists("oneandone_firewall_policy.fw", &firewall), - testAccCheckOneandoneFirewallAttributes("oneandone_firewall_policy.fw", name_updated), - resource.TestCheckResourceAttr("oneandone_firewall_policy.fw", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneFirewallDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != 
"oneandone_firewall_policy.fw" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetFirewallPolicy(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Firewall Policy still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneFirewallAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneFirewallExists(n string, fw_p *oneandone.FirewallPolicy) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_fw, err := api.GetFirewallPolicy(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching Firewall Policy: %s", rs.Primary.ID) - } - if found_fw.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - fw_p = found_fw - - return nil - } -} - -const testAccCheckOneandoneFirewall_basic = ` -resource "oneandone_firewall_policy" "fw" { - name = "%s" - rules = [ - { - "protocol" = "TCP" - "port_from" = 80 - "port_to" = 80 - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "ICMP" - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "TCP" - "port_from" = 43 - "port_to" = 43 - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "TCP" - "port_from" = 22 - "port_to" = 22 - "source_ip" = "0.0.0.0" - } - ] -}` - -const testAccCheckOneandoneFirewall_update = ` -resource "oneandone_firewall_policy" "fw" { - name = "%s" - rules = [ - { - 
"protocol" = "TCP" - "port_from" = 80 - "port_to" = 80 - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "ICMP" - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "TCP" - "port_from" = 43 - "port_to" = 43 - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "TCP" - "port_from" = 22 - "port_to" = 22 - "source_ip" = "0.0.0.0" - }, - { - "protocol" = "TCP" - "port_from" = 88 - "port_to" = 88 - "source_ip" = "0.0.0.0" - }, - ] -}` diff --git a/builtin/providers/oneandone/resource_oneandone_loadbalancer.go b/builtin/providers/oneandone/resource_oneandone_loadbalancer.go deleted file mode 100644 index 627ec51df..000000000 --- a/builtin/providers/oneandone/resource_oneandone_loadbalancer.go +++ /dev/null @@ -1,370 +0,0 @@ -package oneandone - -import ( - "fmt" - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "log" - "strings" -) - -func resourceOneandOneLoadbalancer() *schema.Resource { - return &schema.Resource{ - Create: resourceOneandOneLoadbalancerCreate, - Read: resourceOneandOneLoadbalancerRead, - Update: resourceOneandOneLoadbalancerUpdate, - Delete: resourceOneandOneLoadbalancerDelete, - Schema: map[string]*schema.Schema{ - - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "method": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateMethod, - }, - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - "persistence": { - Type: schema.TypeBool, - Optional: true, - }, - "persistence_time": { - Type: schema.TypeInt, - Optional: true, - }, - "health_check_test": { - Type: schema.TypeString, - Optional: true, - }, - "health_check_interval": { - Type: schema.TypeInt, - Optional: true, - }, - "health_check_path": { - Type: schema.TypeString, - Optional: true, - }, - "health_check_path_parser": { - Type: schema.TypeString, - Optional: true, - }, - "rules": { - 
Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "port_balancer": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 65535), - }, - "port_server": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 65535), - }, - "source_ip": { - Type: schema.TypeString, - Required: true, - }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Required: true, - }, - }, - } -} - -func resourceOneandOneLoadbalancerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - req := oneandone.LoadBalancerRequest{ - Name: d.Get("name").(string), - Rules: getLBRules(d), - } - - if raw, ok := d.GetOk("description"); ok { - req.Description = raw.(string) - } - - if raw, ok := d.GetOk("datacenter"); ok { - dcs, err := config.API.ListDatacenters() - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - req.DatacenterId = dc.Id - break - } - } - } - - if raw, ok := d.GetOk("method"); ok { - req.Method = raw.(string) - } - - if raw, ok := d.GetOk("persistence"); ok { - req.Persistence = oneandone.Bool2Pointer(raw.(bool)) - } - if raw, ok := d.GetOk("persistence_time"); ok { - req.PersistenceTime = oneandone.Int2Pointer(raw.(int)) - } - - if raw, ok := d.GetOk("health_check_test"); ok { - req.HealthCheckTest = raw.(string) - } - if raw, ok := d.GetOk("health_check_interval"); ok { - req.HealthCheckInterval = oneandone.Int2Pointer(raw.(int)) - } - if raw, ok := d.GetOk("health_check_path"); ok { - req.HealthCheckPath = raw.(string) - } - if raw, ok := d.GetOk("health_check_path_parser"); ok { - req.HealthCheckPathParser = raw.(string) - } - - lb_id, lb, err := config.API.CreateLoadBalancer(&req) - if err != 
nil { - return err - } - - err = config.API.WaitForState(lb, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - - d.SetId(lb_id) - - return resourceOneandOneLoadbalancerRead(d, meta) -} - -func getLBRules(d *schema.ResourceData) []oneandone.LoadBalancerRule { - var rules []oneandone.LoadBalancerRule - - if raw, ok := d.GetOk("rules"); ok { - rawRules := raw.([]interface{}) - log.Println("[DEBUG] raw rules:", raw) - for _, raw := range rawRules { - rl := raw.(map[string]interface{}) - rule := oneandone.LoadBalancerRule{ - Protocol: rl["protocol"].(string), - } - - if rl["port_balancer"] != nil { - rule.PortBalancer = uint16(rl["port_balancer"].(int)) - } - if rl["port_server"] != nil { - rule.PortServer = uint16(rl["port_server"].(int)) - } - - if rl["source_ip"] != nil { - rule.Source = rl["source_ip"].(string) - } - - rules = append(rules, rule) - } - } - return rules -} - -func resourceOneandOneLoadbalancerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("name") || d.HasChange("description") || d.HasChange("method") || d.HasChange("persistence") || d.HasChange("persistence_time") || d.HasChange("health_check_test") || d.HasChange("health_check_interval") { - lb := oneandone.LoadBalancerRequest{} - if d.HasChange("name") { - _, n := d.GetChange("name") - lb.Name = n.(string) - } - if d.HasChange("description") { - _, n := d.GetChange("description") - lb.Description = n.(string) - } - if d.HasChange("method") { - _, n := d.GetChange("method") - lb.Method = (n.(string)) - } - if d.HasChange("persistence") { - _, n := d.GetChange("persistence") - lb.Persistence = oneandone.Bool2Pointer(n.(bool)) - } - if d.HasChange("persistence_time") { - _, n := d.GetChange("persistence_time") - lb.PersistenceTime = oneandone.Int2Pointer(n.(int)) - } - if d.HasChange("health_check_test") { - _, n := d.GetChange("health_check_test") - lb.HealthCheckTest = n.(string) - } - if d.HasChange("health_check_path") { - _, 
n := d.GetChange("health_check_path") - lb.HealthCheckPath = n.(string) - } - if d.HasChange("health_check_path_parser") { - _, n := d.GetChange("health_check_path_parser") - lb.HealthCheckPathParser = n.(string) - } - - ss, err := config.API.UpdateLoadBalancer(d.Id(), &lb) - - if err != nil { - return err - } - err = config.API.WaitForState(ss, "ACTIVE", 10, 30) - if err != nil { - return err - } - - } - - if d.HasChange("rules") { - oldR, newR := d.GetChange("rules") - oldValues := oldR.([]interface{}) - newValues := newR.([]interface{}) - if len(oldValues) > len(newValues) { - diff := difference(oldValues, newValues) - for _, old := range diff { - o := old.(map[string]interface{}) - if o["id"] != nil { - old_id := o["id"].(string) - fw, err := config.API.DeleteLoadBalancerRule(d.Id(), old_id) - if err != nil { - return err - } - - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - } - } - } else { - var rules []oneandone.LoadBalancerRule - log.Println("[DEBUG] new values:", newValues) - - for _, raw := range newValues { - rl := raw.(map[string]interface{}) - log.Println("[DEBUG] rl:", rl) - - if rl["id"].(string) == "" { - rule := oneandone.LoadBalancerRule{ - Protocol: rl["protocol"].(string), - } - - rule.PortServer = uint16(rl["port_server"].(int)) - rule.PortBalancer = uint16(rl["port_balancer"].(int)) - - rule.Source = rl["source_ip"].(string) - - log.Println("[DEBUG] adding to list", rl["protocol"], rl["source_ip"], rl["port_balancer"], rl["port_server"]) - log.Println("[DEBUG] adding to list", rule) - - rules = append(rules, rule) - } - } - - log.Println("[DEBUG] new rules:", rules) - - if len(rules) > 0 { - fw, err := config.API.AddLoadBalancerRules(d.Id(), rules) - if err != nil { - return err - } - - err = config.API.WaitForState(fw, "ACTIVE", 10, config.Retries) - } - } - } - - return resourceOneandOneLoadbalancerRead(d, meta) -} - -func resourceOneandOneLoadbalancerRead(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - - ss, err := config.API.GetLoadBalancer(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("name", ss.Name) - d.Set("description", ss.Description) - d.Set("datacenter", ss.Datacenter.CountryCode) - d.Set("method", ss.Method) - d.Set("persistence", ss.Persistence) - d.Set("persistence_time", ss.PersistenceTime) - d.Set("health_check_test", ss.HealthCheckTest) - d.Set("health_check_interval", ss.HealthCheckInterval) - d.Set("rules", getLoadbalancerRules(ss.Rules)) - - return nil -} - -func getLoadbalancerRules(rules []oneandone.LoadBalancerRule) []map[string]interface{} { - raw := make([]map[string]interface{}, 0, len(rules)) - - for _, rule := range rules { - - toadd := map[string]interface{}{ - "id": rule.Id, - "port_balancer": rule.PortBalancer, - "port_server": rule.PortServer, - "protocol": rule.Protocol, - "source_ip": rule.Source, - } - - raw = append(raw, toadd) - } - - return raw - -} - -func resourceOneandOneLoadbalancerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - lb, err := config.API.DeleteLoadBalancer(d.Id()) - if err != nil { - return err - } - err = config.API.WaitUntilDeleted(lb) - if err != nil { - return err - } - - return nil -} - -func validateMethod(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if value != "ROUND_ROBIN" && value != "LEAST_CONNECTIONS" { - errors = append(errors, fmt.Errorf("%q value sholud be either 'ROUND_ROBIN' or 'LEAST_CONNECTIONS' not %q", k, value)) - } - - return -} diff --git a/builtin/providers/oneandone/resource_oneandone_loadbalancer_test.go b/builtin/providers/oneandone/resource_oneandone_loadbalancer_test.go deleted file mode 100644 index ecd0f9443..000000000 --- a/builtin/providers/oneandone/resource_oneandone_loadbalancer_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" 
- - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandoneLoadbalancer_Basic(t *testing.T) { - var lb oneandone.LoadBalancer - - name := "test_loadbalancer" - name_updated := "test_loadbalancer_renamed" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneLoadbalancerDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneLoadbalancer_basic, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneLoadbalancerExists("oneandone_loadbalancer.lb", &lb), - testAccCheckOneandoneLoadbalancerAttributes("oneandone_loadbalancer.lb", name), - resource.TestCheckResourceAttr("oneandone_loadbalancer.lb", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneLoadbalancer_update, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneLoadbalancerExists("oneandone_loadbalancer.lb", &lb), - testAccCheckOneandoneLoadbalancerAttributes("oneandone_loadbalancer.lb", name_updated), - resource.TestCheckResourceAttr("oneandone_loadbalancer.lb", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneLoadbalancerDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_loadbalancer.lb" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetLoadBalancer(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Loadbalancer still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneLoadbalancerAttributes(n 
string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneLoadbalancerExists(n string, fw_p *oneandone.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_fw, err := api.GetLoadBalancer(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching Loadbalancer: %s", rs.Primary.ID) - } - if found_fw.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - fw_p = found_fw - - return nil - } -} - -const testAccCheckOneandoneLoadbalancer_basic = ` -resource "oneandone_loadbalancer" "lb" { - name = "%s" - method = "ROUND_ROBIN" - persistence = true - persistence_time = 60 - health_check_test = "TCP" - health_check_interval = 300 - datacenter = "US" - rules = [ - { - protocol = "TCP" - port_balancer = 8080 - port_server = 8089 - source_ip = "0.0.0.0" - }, - { - protocol = "TCP" - port_balancer = 9090 - port_server = 9099 - source_ip = "0.0.0.0" - } - ] -}` - -const testAccCheckOneandoneLoadbalancer_update = ` -resource "oneandone_loadbalancer" "lb" { - name = "%s" - method = "ROUND_ROBIN" - persistence = true - persistence_time = 60 - health_check_test = "TCP" - health_check_interval = 300 - datacenter = "US" - rules = [ - { - protocol = "TCP" - port_balancer = 8080 - port_server = 8089 - source_ip = "0.0.0.0" - } - ] -}` diff --git a/builtin/providers/oneandone/resource_oneandone_monitoring_policy.go 
b/builtin/providers/oneandone/resource_oneandone_monitoring_policy.go deleted file mode 100644 index a6af20dfc..000000000 --- a/builtin/providers/oneandone/resource_oneandone_monitoring_policy.go +++ /dev/null @@ -1,706 +0,0 @@ -package oneandone - -import ( - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceOneandOneMonitoringPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceOneandOneMonitoringPolicyCreate, - Read: resourceOneandOneMonitoringPolicyRead, - Update: resourceOneandOneMonitoringPolicyUpdate, - Delete: resourceOneandOneMonitoringPolicyDelete, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "email": { - Type: schema.TypeString, - Optional: true, - }, - "agent": { - Type: schema.TypeBool, - Required: true, - }, - "thresholds": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - "critical": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - "ram": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: 
schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - "critical": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - "disk": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - "critical": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - "transfer": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - "critical": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - "internal_ping": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - 
Required: true, - }, - "critical": { - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeInt, - Required: true, - }, - "alert": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - }, - }, - Required: true, - }, - "ports": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "email_notification": { - Type: schema.TypeBool, - Required: true, - }, - "port": { - Type: schema.TypeInt, - Required: true, - }, - "protocol": { - Type: schema.TypeString, - Optional: true, - }, - "alert_if": { - Type: schema.TypeString, - Optional: true, - }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Optional: true, - }, - "processes": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - - "email_notification": { - Type: schema.TypeBool, - Required: true, - }, - "process": { - Type: schema.TypeString, - Required: true, - }, - "alert_if": { - Type: schema.TypeString, - Optional: true, - }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Optional: true, - }, - }, - } -} - -func resourceOneandOneMonitoringPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - mp_request := oneandone.MonitoringPolicy{ - Name: d.Get("name").(string), - Agent: d.Get("agent").(bool), - Thresholds: getThresholds(d.Get("thresholds")), - } - - if raw, ok := d.GetOk("ports"); ok { - mp_request.Ports = getPorts(raw) - } - - if raw, ok := d.GetOk("processes"); ok { - mp_request.Processes = getProcesses(raw) - } - - mp_id, mp, err := config.API.CreateMonitoringPolicy(&mp_request) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - - d.SetId(mp_id) - - return resourceOneandOneMonitoringPolicyRead(d, meta) -} - -func 
resourceOneandOneMonitoringPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - req := oneandone.MonitoringPolicy{} - if d.HasChange("name") { - _, n := d.GetChange("name") - req.Name = n.(string) - } - - if d.HasChange("description") { - _, n := d.GetChange("description") - req.Description = n.(string) - } - - if d.HasChange("email") { - _, n := d.GetChange("email") - req.Email = n.(string) - } - - if d.HasChange("agent") { - _, n := d.GetChange("agent") - req.Agent = n.(bool) - } - - if d.HasChange("thresholds") { - _, n := d.GetChange("thresholds") - req.Thresholds = getThresholds(n) - } - - mp, err := config.API.UpdateMonitoringPolicy(d.Id(), &req) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - - if d.HasChange("ports") { - o, n := d.GetChange("ports") - oldValues := o.([]interface{}) - newValues := n.([]interface{}) - - if len(newValues) > len(oldValues) { - ports := getPorts(newValues) - - newports := []oneandone.MonitoringPort{} - - for _, p := range ports { - if p.Id == "" { - newports = append(newports, p) - } - } - - mp, err := config.API.AddMonitoringPolicyPorts(d.Id(), newports) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } else if len(oldValues) > len(newValues) { - diff := difference(oldValues, newValues) - ports := getPorts(diff) - - for _, port := range ports { - if port.Id == "" { - continue - } - - mp, err := config.API.DeleteMonitoringPolicyPort(d.Id(), port.Id) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } else if len(oldValues) == len(newValues) { - ports := getPorts(newValues) - - for _, port := range ports { - mp, err := config.API.ModifyMonitoringPolicyPort(d.Id(), port.Id, &port) - if err != nil { - return err - } - 
- err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } - } - - if d.HasChange("processes") { - o, n := d.GetChange("processes") - oldValues := o.([]interface{}) - newValues := n.([]interface{}) - if len(newValues) > len(oldValues) { - processes := getProcesses(newValues) - - newprocesses := []oneandone.MonitoringProcess{} - - for _, p := range processes { - if p.Id == "" { - newprocesses = append(newprocesses, p) - } - } - - mp, err := config.API.AddMonitoringPolicyProcesses(d.Id(), newprocesses) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } else if len(oldValues) > len(newValues) { - diff := difference(oldValues, newValues) - processes := getProcesses(diff) - for _, process := range processes { - if process.Id == "" { - continue - } - - mp, err := config.API.DeleteMonitoringPolicyProcess(d.Id(), process.Id) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } else if len(oldValues) == len(newValues) { - processes := getProcesses(newValues) - - for _, process := range processes { - mp, err := config.API.ModifyMonitoringPolicyProcess(d.Id(), process.Id, &process) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } - } - - return resourceOneandOneMonitoringPolicyRead(d, meta) -} - -func resourceOneandOneMonitoringPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - mp, err := config.API.GetMonitoringPolicy(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - if len(mp.Servers) > 0 { - } - - if len(mp.Ports) > 0 { - pports := d.Get("ports").([]interface{}) - for i, raw_ports := range pports { - port := 
raw_ports.(map[string]interface{}) - port["id"] = mp.Ports[i].Id - } - d.Set("ports", pports) - } - - if len(mp.Processes) > 0 { - pprocesses := d.Get("processes").([]interface{}) - for i, raw_processes := range pprocesses { - process := raw_processes.(map[string]interface{}) - process["id"] = mp.Processes[i].Id - } - d.Set("processes", pprocesses) - } - - return nil -} - -func resourceOneandOneMonitoringPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - mp, err := config.API.DeleteMonitoringPolicy(d.Id()) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(mp) - if err != nil { - return err - } - - return nil -} - -func getThresholds(d interface{}) *oneandone.MonitoringThreshold { - raw_thresholds := d.(*schema.Set).List() - - toReturn := &oneandone.MonitoringThreshold{} - - for _, thresholds := range raw_thresholds { - th_set := thresholds.(map[string]interface{}) - - //CPU - cpu_raw := th_set["cpu"].(*schema.Set) - toReturn.Cpu = &oneandone.MonitoringLevel{} - for _, c := range cpu_raw.List() { - int_k := c.(map[string]interface{}) - for _, w := range int_k["warning"].(*schema.Set).List() { - toReturn.Cpu.Warning = &oneandone.MonitoringValue{ - Value: w.(map[string]interface{})["value"].(int), - Alert: w.(map[string]interface{})["alert"].(bool), - } - } - - for _, c := range int_k["critical"].(*schema.Set).List() { - toReturn.Cpu.Critical = &oneandone.MonitoringValue{ - Value: c.(map[string]interface{})["value"].(int), - Alert: c.(map[string]interface{})["alert"].(bool), - } - } - } - //RAM - ram_raw := th_set["ram"].(*schema.Set) - toReturn.Ram = &oneandone.MonitoringLevel{} - for _, c := range ram_raw.List() { - int_k := c.(map[string]interface{}) - for _, w := range int_k["warning"].(*schema.Set).List() { - toReturn.Ram.Warning = &oneandone.MonitoringValue{ - Value: w.(map[string]interface{})["value"].(int), - Alert: w.(map[string]interface{})["alert"].(bool), - } - } - - for _, c := range 
int_k["critical"].(*schema.Set).List() { - toReturn.Ram.Critical = &oneandone.MonitoringValue{ - Value: c.(map[string]interface{})["value"].(int), - Alert: c.(map[string]interface{})["alert"].(bool), - } - } - } - - //DISK - disk_raw := th_set["disk"].(*schema.Set) - toReturn.Disk = &oneandone.MonitoringLevel{} - for _, c := range disk_raw.List() { - int_k := c.(map[string]interface{}) - for _, w := range int_k["warning"].(*schema.Set).List() { - toReturn.Disk.Warning = &oneandone.MonitoringValue{ - Value: w.(map[string]interface{})["value"].(int), - Alert: w.(map[string]interface{})["alert"].(bool), - } - } - - for _, c := range int_k["critical"].(*schema.Set).List() { - toReturn.Disk.Critical = &oneandone.MonitoringValue{ - Value: c.(map[string]interface{})["value"].(int), - Alert: c.(map[string]interface{})["alert"].(bool), - } - } - } - - //TRANSFER - transfer_raw := th_set["transfer"].(*schema.Set) - toReturn.Transfer = &oneandone.MonitoringLevel{} - for _, c := range transfer_raw.List() { - int_k := c.(map[string]interface{}) - for _, w := range int_k["warning"].(*schema.Set).List() { - toReturn.Transfer.Warning = &oneandone.MonitoringValue{ - Value: w.(map[string]interface{})["value"].(int), - Alert: w.(map[string]interface{})["alert"].(bool), - } - } - - for _, c := range int_k["critical"].(*schema.Set).List() { - toReturn.Transfer.Critical = &oneandone.MonitoringValue{ - Value: c.(map[string]interface{})["value"].(int), - Alert: c.(map[string]interface{})["alert"].(bool), - } - } - } - //internal ping - ping_raw := th_set["internal_ping"].(*schema.Set) - toReturn.InternalPing = &oneandone.MonitoringLevel{} - for _, c := range ping_raw.List() { - int_k := c.(map[string]interface{}) - for _, w := range int_k["warning"].(*schema.Set).List() { - toReturn.InternalPing.Warning = &oneandone.MonitoringValue{ - Value: w.(map[string]interface{})["value"].(int), - Alert: w.(map[string]interface{})["alert"].(bool), - } - } - - for _, c := range 
int_k["critical"].(*schema.Set).List() { - toReturn.InternalPing.Critical = &oneandone.MonitoringValue{ - Value: c.(map[string]interface{})["value"].(int), - Alert: c.(map[string]interface{})["alert"].(bool), - } - } - } - } - - return toReturn -} - -func getProcesses(d interface{}) []oneandone.MonitoringProcess { - toReturn := []oneandone.MonitoringProcess{} - - for _, raw := range d.([]interface{}) { - port := raw.(map[string]interface{}) - m_port := oneandone.MonitoringProcess{ - EmailNotification: port["email_notification"].(bool), - } - - if port["id"] != nil { - m_port.Id = port["id"].(string) - } - - if port["process"] != nil { - m_port.Process = port["process"].(string) - } - - if port["alert_if"] != nil { - m_port.AlertIf = port["alert_if"].(string) - } - - toReturn = append(toReturn, m_port) - } - - return toReturn -} - -func getPorts(d interface{}) []oneandone.MonitoringPort { - toReturn := []oneandone.MonitoringPort{} - - for _, raw := range d.([]interface{}) { - port := raw.(map[string]interface{}) - m_port := oneandone.MonitoringPort{ - EmailNotification: port["email_notification"].(bool), - Port: port["port"].(int), - } - - if port["id"] != nil { - m_port.Id = port["id"].(string) - } - - if port["protocol"] != nil { - m_port.Protocol = port["protocol"].(string) - } - - if port["alert_if"] != nil { - m_port.AlertIf = port["alert_if"].(string) - } - - toReturn = append(toReturn, m_port) - } - - return toReturn -} diff --git a/builtin/providers/oneandone/resource_oneandone_monitoring_policy_test.go b/builtin/providers/oneandone/resource_oneandone_monitoring_policy_test.go deleted file mode 100644 index c6727ee21..000000000 --- a/builtin/providers/oneandone/resource_oneandone_monitoring_policy_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func 
TestAccOneandoneMonitoringPolicy_Basic(t *testing.T) { - var mp oneandone.MonitoringPolicy - - name := "test" - name_updated := "test1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneMonitoringPolicyDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneMonitoringPolicy_basic, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneMonitoringPolicyExists("oneandone_monitoring_policy.mp", &mp), - testAccCheckOneandoneMonitoringPolicyAttributes("oneandone_monitoring_policy.mp", name), - resource.TestCheckResourceAttr("oneandone_monitoring_policy.mp", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneMonitoringPolicy_basic, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneMonitoringPolicyExists("oneandone_monitoring_policy.mp", &mp), - testAccCheckOneandoneMonitoringPolicyAttributes("oneandone_monitoring_policy.mp", name_updated), - resource.TestCheckResourceAttr("oneandone_monitoring_policy.mp", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneMonitoringPolicyDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_monitoring_policy.mp" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetMonitoringPolicy(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("MonitoringPolicy still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneMonitoringPolicyAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneMonitoringPolicyExists(n string, fw_p *oneandone.MonitoringPolicy) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_fw, err := api.GetMonitoringPolicy(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching MonitoringPolicy: %s", rs.Primary.ID) - } - if found_fw.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - fw_p = found_fw - - return nil - } -} - -const testAccCheckOneandoneMonitoringPolicy_basic = ` -resource "oneandone_monitoring_policy" "mp" { - name = "%s" - agent = true - email = "email@address.com" - thresholds = { - cpu = { - warning = { - value = 50, - alert = false - } - critical = { - value = 66, - alert = false - } - } - ram = { - warning = { - value = 70, - alert = true - } - critical = { - value = 80, - alert = true - } - }, - ram = { - warning = { - value = 85, - alert = true - } - critical = { - value = 95, - alert = true - } - }, - disk = { - warning = { - value = 84, - alert = true - } - critical = { - value = 94, - alert = true - } - }, - transfer = { - warning = { - value = 1000, - alert = true - } - critical = { - value = 2000, - alert = true - } - }, - internal_ping = { - warning = { - value = 3000, - alert = true - } - critical = { - value = 4000, - alert = true - } - } - } - ports = [ - { - email_notification = true - port = 443 - protocol = "TCP" - alert_if = "NOT_RESPONDING" - }, - { - email_notification = false - port = 80 - protocol 
= "TCP" - alert_if = "NOT_RESPONDING" - }, - { - email_notification = true - port = 21 - protocol = "TCP" - alert_if = "NOT_RESPONDING" - } - ] - processes = [ - { - email_notification = false - process = "httpdeamon" - alert_if = "RUNNING" - }, - { - process = "iexplorer", - alert_if = "NOT_RUNNING" - email_notification = true - }] -}` diff --git a/builtin/providers/oneandone/resource_oneandone_private_network.go b/builtin/providers/oneandone/resource_oneandone_private_network.go deleted file mode 100644 index f9a4fc9e3..000000000 --- a/builtin/providers/oneandone/resource_oneandone_private_network.go +++ /dev/null @@ -1,291 +0,0 @@ -package oneandone - -import ( - "fmt" - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceOneandOnePrivateNetwork() *schema.Resource { - return &schema.Resource{ - - Create: resourceOneandOnePrivateNetworkCreate, - Read: resourceOneandOnePrivateNetworkRead, - Update: resourceOneandOnePrivateNetworkUpdate, - Delete: resourceOneandOnePrivateNetworkDelete, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - "network_address": { - Type: schema.TypeString, - Optional: true, - }, - "subnet_mask": { - Type: schema.TypeString, - Optional: true, - }, - "server_ids": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - }, - } -} - -func resourceOneandOnePrivateNetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - req := oneandone.PrivateNetworkRequest{ - Name: d.Get("name").(string), - } - - if raw, ok := d.GetOk("description"); ok { - req.Description = raw.(string) - } - - if raw, ok := d.GetOk("network_address"); ok { - req.NetworkAddress = raw.(string) - } - - if raw, ok := 
d.GetOk("subnet_mask"); ok { - req.SubnetMask = raw.(string) - } - - if raw, ok := d.GetOk("datacenter"); ok { - dcs, err := config.API.ListDatacenters() - - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - req.DatacenterId = dc.Id - break - } - } - } - - prn_id, prn, err := config.API.CreatePrivateNetwork(&req) - if err != nil { - return err - } - err = config.API.WaitForState(prn, "ACTIVE", 30, config.Retries) - - if err != nil { - return err - } - - d.SetId(prn_id) - - var ids []string - if raw, ok := d.GetOk("server_ids"); ok { - - rawIps := raw.(*schema.Set).List() - - for _, raw := range rawIps { - ids = append(ids, raw.(string)) - server, err := config.API.ShutdownServer(raw.(string), false) - if err != nil { - return err - } - err = config.API.WaitForState(server, "POWERED_OFF", 10, config.Retries) - if err != nil { - return err - } - - } - } - - prn, err = config.API.AttachPrivateNetworkServers(d.Id(), ids) - if err != nil { - return err - } - - err = config.API.WaitForState(prn, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - - for _, id := range ids { - server, err := config.API.StartServer(id) - if err != nil { - return err - } - - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - if err != nil { - return err - } - } - - return resourceOneandOnePrivateNetworkRead(d, meta) -} - -func resourceOneandOnePrivateNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if d.HasChange("name") || d.HasChange("description") || d.HasChange("network_address") || d.HasChange("subnet_mask") { - pnset := oneandone.PrivateNetworkRequest{} - - pnset.Name = d.Get("name").(string) - - pnset.Description = d.Get("description").(string) - pnset.NetworkAddress = d.Get("network_address").(string) - pnset.SubnetMask = 
d.Get("subnet_mask").(string) - - prn, err := config.API.UpdatePrivateNetwork(d.Id(), &pnset) - - if err != nil { - return err - } - - err = config.API.WaitForState(prn, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - - if d.HasChange("server_ids") { - o, n := d.GetChange("server_ids") - - newValues := n.(*schema.Set).List() - oldValues := o.(*schema.Set).List() - - var ids []string - for _, newV := range oldValues { - ids = append(ids, newV.(string)) - } - for _, id := range ids { - server, err := config.API.ShutdownServer(id, false) - if err != nil { - return err - } - err = config.API.WaitForState(server, "POWERED_OFF", 10, config.Retries) - if err != nil { - return err - } - - _, err = config.API.RemoveServerPrivateNetwork(id, d.Id()) - if err != nil { - return err - } - - prn, _ := config.API.GetPrivateNetwork(d.Id()) - - err = config.API.WaitForState(prn, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - - } - - var newids []string - - for _, newV := range newValues { - newids = append(newids, newV.(string)) - } - pn, err := config.API.AttachPrivateNetworkServers(d.Id(), newids) - - if err != nil { - return err - } - err = config.API.WaitForState(pn, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - - for _, id := range newids { - server, err := config.API.StartServer(id) - if err != nil { - return err - } - - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - if err != nil { - return err - } - } - } - - return resourceOneandOnePrivateNetworkRead(d, meta) -} - -func resourceOneandOnePrivateNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - pn, err := config.API.GetPrivateNetwork(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("name", pn.Name) - d.Set("description", pn.Description) - d.Set("network_address", pn.NetworkAddress) - d.Set("subnet_mask", 
pn.SubnetMask) - d.Set("datacenter", pn.Datacenter.CountryCode) - - var toAdd []string - for _, s := range pn.Servers { - toAdd = append(toAdd, s.Id) - } - d.Set("server_ids", toAdd) - return nil -} - -func resourceOneandOnePrivateNetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - pn, err := config.API.GetPrivateNetwork(d.Id()) - - for _, server := range pn.Servers { - srv, err := config.API.ShutdownServer(server.Id, false) - if err != nil { - return err - } - err = config.API.WaitForState(srv, "POWERED_OFF", 10, config.Retries) - if err != nil { - return err - } - } - - pn, err = config.API.DeletePrivateNetwork(d.Id()) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(pn) - if err != nil { - return err - } - - for _, server := range pn.Servers { - srv, err := config.API.StartServer(server.Id) - if err != nil { - return err - } - err = config.API.WaitForState(srv, "POWERED_ON", 10, config.Retries) - if err != nil { - return err - } - } - - return nil -} diff --git a/builtin/providers/oneandone/resource_oneandone_private_network_test.go b/builtin/providers/oneandone/resource_oneandone_private_network_test.go deleted file mode 100644 index e91da76f2..000000000 --- a/builtin/providers/oneandone/resource_oneandone_private_network_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandonePrivateNetwork_Basic(t *testing.T) { - var net oneandone.PrivateNetwork - - name := "test" - name_updated := "test1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckOneandonePrivateNetworkDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
fmt.Sprintf(testAccCheckOneandonePrivateNetwork_basic, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandonePrivateNetworkExists("oneandone_private_network.pn", &net), - testAccCheckOneandonePrivateNetworkAttributes("oneandone_private_network.pn", name), - resource.TestCheckResourceAttr("oneandone_private_network.pn", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandonePrivateNetwork_basic, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandonePrivateNetworkExists("oneandone_private_network.pn", &net), - testAccCheckOneandonePrivateNetworkAttributes("oneandone_private_network.pn", name_updated), - resource.TestCheckResourceAttr("oneandone_private_network.pn", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckOneandonePrivateNetworkDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_private_network" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetPrivateNetwork(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("PrivateNetwork still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandonePrivateNetworkAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandonePrivateNetworkExists(n string, server *oneandone.PrivateNetwork) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok 
:= s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_server, err := api.GetPrivateNetwork(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching PrivateNetwork: %s", rs.Primary.ID) - } - if found_server.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - server = found_server - - return nil - } -} - -const testAccCheckOneandonePrivateNetwork_basic = ` -resource "oneandone_server" "server1" { - name = "server_private_net_01" - description = "ttt" - image = "CoreOS_Stable_64std" - datacenter = "US" - vcores = 1 - cores_per_processor = 1 - ram = 2 - password = "Kv40kd8PQb" - hdds = [ - { - disk_size = 60 - is_main = true - } - ] -} - -resource "oneandone_server" "server2" { - name = "server_private_net_02" - description = "ttt" - image = "CoreOS_Stable_64std" - datacenter = "US" - vcores = 1 - cores_per_processor = 1 - ram = 2 - password = "${oneandone_server.server1.password}" - hdds = [ - { - disk_size = 60 - is_main = true - } - ] -} - -resource "oneandone_private_network" "pn" { - name = "%s", - description = "new private net" - datacenter = "US" - network_address = "192.168.7.0" - subnet_mask = "255.255.255.0" - server_ids = [ - "${oneandone_server.server1.id}", - "${oneandone_server.server2.id}" - ] -} -` diff --git a/builtin/providers/oneandone/resource_oneandone_public_ip.go b/builtin/providers/oneandone/resource_oneandone_public_ip.go deleted file mode 100644 index 2c1bec240..000000000 --- a/builtin/providers/oneandone/resource_oneandone_public_ip.go +++ /dev/null @@ -1,133 +0,0 @@ -package oneandone - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceOneandOnePublicIp() *schema.Resource { - return &schema.Resource{ - - Create: resourceOneandOnePublicIpCreate, - Read: 
resourceOneandOnePublicIpRead, - Update: resourceOneandOnePublicIpUpdate, - Delete: resourceOneandOnePublicIpDelete, - Schema: map[string]*schema.Schema{ - "ip_type": { //IPV4 or IPV6 - Type: schema.TypeString, - Required: true, - }, - "reverse_dns": { - Type: schema.TypeString, - Optional: true, - }, - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceOneandOnePublicIpCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - var reverse_dns string - var datacenter_id string - - if raw, ok := d.GetOk("reverse_dns"); ok { - reverse_dns = raw.(string) - } - - if raw, ok := d.GetOk("datacenter"); ok { - dcs, err := config.API.ListDatacenters() - - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - datacenter_id = dc.Id - break - } - } - - } - - ip_id, ip, err := config.API.CreatePublicIp(d.Get("ip_type").(string), reverse_dns, datacenter_id) - if err != nil { - return err - } - - err = config.API.WaitForState(ip, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - d.SetId(ip_id) - - return resourceOneandOnePublicIpRead(d, meta) -} - -func resourceOneandOnePublicIpRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - ip, err := config.API.GetPublicIp(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("ip_address", ip.IpAddress) - d.Set("revers_dns", ip.ReverseDns) - d.Set("datacenter", ip.Datacenter.CountryCode) - d.Set("ip_type", ip.Type) - - return nil -} - -func resourceOneandOnePublicIpUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("reverse_dns") { - _, n := 
d.GetChange("reverse_dns") - ip, err := config.API.UpdatePublicIp(d.Id(), n.(string)) - if err != nil { - return err - } - - err = config.API.WaitForState(ip, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - } - - return resourceOneandOnePublicIpRead(d, meta) -} - -func resourceOneandOnePublicIpDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - ip, err := config.API.DeletePublicIp(d.Id()) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(ip) - if err != nil { - - return err - } - - return nil -} diff --git a/builtin/providers/oneandone/resource_oneandone_public_ip_test.go b/builtin/providers/oneandone/resource_oneandone_public_ip_test.go deleted file mode 100644 index c797dc666..000000000 --- a/builtin/providers/oneandone/resource_oneandone_public_ip_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandonePublicIp_Basic(t *testing.T) { - var public_ip oneandone.PublicIp - - reverse_dns := "example.de" - reverse_dns_updated := "example.ba" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandonePublicIpDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandonePublicIp_basic, reverse_dns), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandonePublicIpExists("oneandone_public_ip.ip", &public_ip), - testAccCheckOneandonePublicIpAttributes("oneandone_public_ip.ip", reverse_dns), - resource.TestCheckResourceAttr("oneandone_public_ip.ip", "reverse_dns", reverse_dns), - ), - }, - resource.TestStep{ - Config: 
fmt.Sprintf(testAccCheckOneandonePublicIp_basic, reverse_dns_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandonePublicIpExists("oneandone_public_ip.ip", &public_ip), - testAccCheckOneandonePublicIpAttributes("oneandone_public_ip.ip", reverse_dns_updated), - resource.TestCheckResourceAttr("oneandone_public_ip.ip", "reverse_dns", reverse_dns_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandonePublicIpDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_public_ip" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetPublicIp(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Public IP still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandonePublicIpAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["reverse_dns"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandonePublicIpExists(n string, public_ip *oneandone.PublicIp) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_public_ip, err := api.GetPublicIp(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching public IP: %s", rs.Primary.ID) - } - if found_public_ip.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - public_ip = 
found_public_ip - - return nil - } -} - -const testAccCheckOneandonePublicIp_basic = ` -resource "oneandone_public_ip" "ip" { - "ip_type" = "IPV4" - "reverse_dns" = "%s" - "datacenter" = "GB" -}` diff --git a/builtin/providers/oneandone/resource_oneandone_server.go b/builtin/providers/oneandone/resource_oneandone_server.go deleted file mode 100644 index 930aba41a..000000000 --- a/builtin/providers/oneandone/resource_oneandone_server.go +++ /dev/null @@ -1,562 +0,0 @@ -package oneandone - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "golang.org/x/crypto/ssh" - "io/ioutil" - "log" - "strings" - - "errors" -) - -func resourceOneandOneServer() *schema.Resource { - return &schema.Resource{ - Create: resourceOneandOneServerCreate, - Read: resourceOneandOneServerRead, - Update: resourceOneandOneServerUpdate, - Delete: resourceOneandOneServerDelete, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "image": { - Type: schema.TypeString, - Required: true, - }, - "vcores": { - Type: schema.TypeInt, - Required: true, - }, - "cores_per_processor": { - Type: schema.TypeInt, - Required: true, - }, - "ram": { - Type: schema.TypeFloat, - Required: true, - }, - "ssh_key_path": { - Type: schema.TypeString, - Optional: true, - }, - "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - "ip": { - Type: schema.TypeString, - Optional: true, - }, - "ips": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "firewall_policy_id": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Computed: true, - }, - 
"hdds": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeInt, - Required: true, - }, - "is_main": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - Required: true, - }, - "firewall_policy_id": { - Type: schema.TypeString, - Optional: true, - }, - "monitoring_policy_id": { - Type: schema.TypeString, - Optional: true, - }, - "loadbalancer_id": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func resourceOneandOneServerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - saps, _ := config.API.ListServerAppliances() - - var sa oneandone.ServerAppliance - for _, a := range saps { - - if a.Type == "IMAGE" && strings.Contains(strings.ToLower(a.Name), strings.ToLower(d.Get("image").(string))) { - sa = a - break - } - } - - var hdds []oneandone.Hdd - if raw, ok := d.GetOk("hdds"); ok { - rawhdds := raw.([]interface{}) - - var istheremain bool - for _, raw := range rawhdds { - hd := raw.(map[string]interface{}) - hdd := oneandone.Hdd{ - Size: hd["disk_size"].(int), - IsMain: hd["is_main"].(bool), - } - - if hdd.IsMain { - if hdd.Size < sa.MinHddSize { - return fmt.Errorf(fmt.Sprintf("Minimum required disk size %d", sa.MinHddSize)) - } - istheremain = true - } - - hdds = append(hdds, hdd) - } - - if !istheremain { - return fmt.Errorf("At least one HDD has to be %s", "`is_main`") - } - } - - req := oneandone.ServerRequest{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - ApplianceId: sa.Id, - PowerOn: true, - Hardware: oneandone.Hardware{ - Vcores: d.Get("vcores").(int), - CoresPerProcessor: d.Get("cores_per_processor").(int), - Ram: float32(d.Get("ram").(float64)), - Hdds: hdds, - }, - } - - if raw, ok := d.GetOk("ip"); ok { - - new_ip := raw.(string) - - ips, err := config.API.ListPublicIps() - if err != nil { - return err - } - - for _, ip := 
range ips { - if ip.IpAddress == new_ip { - req.IpId = ip.Id - break - } - } - - log.Println("[DEBUG] req.IP", req.IpId) - } - - if raw, ok := d.GetOk("datacenter"); ok { - - dcs, err := config.API.ListDatacenters() - - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - req.DatacenterId = dc.Id - break - } - } - } - - if fwp_id, ok := d.GetOk("firewall_policy_id"); ok { - req.FirewallPolicyId = fwp_id.(string) - } - - if mp_id, ok := d.GetOk("monitoring_policy_id"); ok { - req.MonitoringPolicyId = mp_id.(string) - } - - if mp_id, ok := d.GetOk("loadbalancer_id"); ok { - req.LoadBalancerId = mp_id.(string) - } - - var privateKey string - if raw, ok := d.GetOk("ssh_key_path"); ok { - rawpath := raw.(string) - - priv, publicKey, err := getSshKey(rawpath) - privateKey = priv - if err != nil { - return err - } - - req.SSHKey = publicKey - } - - var password string - if raw, ok := d.GetOk("password"); ok { - req.Password = raw.(string) - password = req.Password - } - - server_id, server, err := config.API.CreateServer(&req) - if err != nil { - return err - } - - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - - d.SetId(server_id) - server, err = config.API.GetServer(d.Id()) - if err != nil { - return err - } - - if password == "" { - password = server.FirstPassword - } - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": server.Ips[0].Ip, - "password": password, - "private_key": privateKey, - }) - - return resourceOneandOneServerRead(d, meta) -} - -func resourceOneandOneServerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - server, err := config.API.GetServer(d.Id()) - - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("name", server.Name) - 
d.Set("datacenter", server.Datacenter.CountryCode) - - d.Set("hdds", readHdds(server.Hardware)) - - d.Set("ips", readIps(server.Ips)) - - if len(server.FirstPassword) > 0 { - d.Set("password", server.FirstPassword) - } - - return nil -} - -func resourceOneandOneServerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("name") || d.HasChange("description") { - _, name := d.GetChange("name") - _, description := d.GetChange("description") - server, err := config.API.RenameServer(d.Id(), name.(string), description.(string)) - if err != nil { - return err - } - - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - - } - - if d.HasChange("hdds") { - oldV, newV := d.GetChange("hdds") - newValues := newV.([]interface{}) - oldValues := oldV.([]interface{}) - - if len(oldValues) > len(newValues) { - diff := difference(oldValues, newValues) - for _, old := range diff { - o := old.(map[string]interface{}) - old_id := o["id"].(string) - server, err := config.API.DeleteServerHdd(d.Id(), old_id) - if err != nil { - return err - } - - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - if err != nil { - return err - } - } - } else { - for _, newHdd := range newValues { - n := newHdd.(map[string]interface{}) - //old := oldHdd.(map[string]interface{}) - - if n["id"].(string) == "" { - hdds := oneandone.ServerHdds{ - Hdds: []oneandone.Hdd{ - { - Size: n["disk_size"].(int), - IsMain: n["is_main"].(bool), - }, - }, - } - - server, err := config.API.AddServerHdds(d.Id(), &hdds) - - if err != nil { - return err - } - err = config.API.WaitForState(server, "POWERED_ON", 10, config.Retries) - if err != nil { - return err - } - } else { - id := n["id"].(string) - isMain := n["is_main"].(bool) - - if id != "" && !isMain { - log.Println("[DEBUG] Resizing existing HDD") - config.API.ResizeServerHdd(d.Id(), id, n["disk_size"].(int)) - } - } - - } - } - } - - if d.HasChange("monitoring_policy_id") { - 
o, n := d.GetChange("monitoring_policy_id") - - if n == nil { - mp, err := config.API.RemoveMonitoringPolicyServer(o.(string), d.Id()) - - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } else { - mp, err := config.API.AttachMonitoringPolicyServers(n.(string), []string{d.Id()}) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } - - if d.HasChange("loadbalancer_id") { - o, n := d.GetChange("loadbalancer_id") - server, err := config.API.GetServer(d.Id()) - if err != nil { - return err - } - - if n == nil || n.(string) == "" { - log.Println("[DEBUG] Removing") - log.Println("[DEBUG] IPS:", server.Ips) - - for _, ip := range server.Ips { - mp, err := config.API.DeleteLoadBalancerServerIp(o.(string), ip.Id) - - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } else { - log.Println("[DEBUG] Adding") - ip_ids := []string{} - for _, ip := range server.Ips { - ip_ids = append(ip_ids, ip.Id) - } - mp, err := config.API.AddLoadBalancerServerIps(n.(string), ip_ids) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - - } - } - - if d.HasChange("firewall_policy_id") { - server, err := config.API.GetServer(d.Id()) - if err != nil { - return err - } - - o, n := d.GetChange("firewall_policy_id") - if n == nil { - for _, ip := range server.Ips { - mp, err := config.API.DeleteFirewallPolicyServerIp(o.(string), ip.Id) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } else { - ip_ids := []string{} - for _, ip := range server.Ips { - ip_ids = append(ip_ids, ip.Id) - } - - mp, err := config.API.AddFirewallPolicyServerIps(n.(string), 
ip_ids) - if err != nil { - return err - } - - err = config.API.WaitForState(mp, "ACTIVE", 30, config.Retries) - if err != nil { - return err - } - } - } - - return resourceOneandOneServerRead(d, meta) -} - -func resourceOneandOneServerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - _, ok := d.GetOk("ip") - - server, err := config.API.DeleteServer(d.Id(), ok) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(server) - - if err != nil { - log.Println("[DEBUG] ************ ERROR While waiting ************") - return err - } - return nil -} - -func readHdds(hardware *oneandone.Hardware) []map[string]interface{} { - hdds := make([]map[string]interface{}, 0, len(hardware.Hdds)) - - for _, hd := range hardware.Hdds { - hdds = append(hdds, map[string]interface{}{ - "id": hd.Id, - "disk_size": hd.Size, - "is_main": hd.IsMain, - }) - } - - return hdds -} - -func readIps(ips []oneandone.ServerIp) []map[string]interface{} { - raw := make([]map[string]interface{}, 0, len(ips)) - for _, ip := range ips { - - toadd := map[string]interface{}{ - "ip": ip.Ip, - "id": ip.Id, - } - - if ip.Firewall != nil { - toadd["firewall_policy_id"] = ip.Firewall.Id - } - raw = append(raw, toadd) - } - - return raw -} - -func getSshKey(path string) (privatekey string, publickey string, err error) { - pemBytes, err := ioutil.ReadFile(path) - - if err != nil { - return "", "", err - } - - block, _ := pem.Decode(pemBytes) - - if block == nil { - return "", "", errors.New("File " + path + " contains nothing") - } - - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - - if err != nil { - return "", "", err - } - - priv_blk := pem.Block{ - Type: "RSA PRIVATE KEY", - Headers: nil, - Bytes: x509.MarshalPKCS1PrivateKey(priv), - } - - pub, err := ssh.NewPublicKey(&priv.PublicKey) - if err != nil { - return "", "", err - } - publickey = string(ssh.MarshalAuthorizedKey(pub)) - privatekey = string(pem.EncodeToMemory(&priv_blk)) - - return 
privatekey, publickey, nil -} diff --git a/builtin/providers/oneandone/resource_oneandone_server_test.go b/builtin/providers/oneandone/resource_oneandone_server_test.go deleted file mode 100644 index ed643abfa..000000000 --- a/builtin/providers/oneandone/resource_oneandone_server_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandoneServer_Basic(t *testing.T) { - var server oneandone.Server - - name := "test_server" - name_updated := "test_server_renamed" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneServerDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneServer_basic, name, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneServerExists("oneandone_server.server", &server), - testAccCheckOneandoneServerAttributes("oneandone_server.server", name), - resource.TestCheckResourceAttr("oneandone_server.server", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneServer_basic, name_updated, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneServerExists("oneandone_server.server", &server), - testAccCheckOneandoneServerAttributes("oneandone_server.server", name_updated), - resource.TestCheckResourceAttr("oneandone_server.server", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneServerDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_server" { - 
continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetServer(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Server still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneServerAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneServerExists(n string, server *oneandone.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_server, err := api.GetServer(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching Server: %s", rs.Primary.ID) - } - if found_server.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - server = found_server - - return nil - } -} - -const testAccCheckOneandoneServer_basic = ` -resource "oneandone_server" "server" { - name = "%s" - description = "%s" - image = "ubuntu" - datacenter = "GB" - vcores = 1 - cores_per_processor = 1 - ram = 2 - password = "Kv40kd8PQb" - hdds = [ - { - disk_size = 20 - is_main = true - } - ] -}` diff --git a/builtin/providers/oneandone/resource_oneandone_vpn.go b/builtin/providers/oneandone/resource_oneandone_vpn.go deleted file mode 100644 index 865c3361a..000000000 --- a/builtin/providers/oneandone/resource_oneandone_vpn.go +++ /dev/null @@ -1,217 +0,0 @@ -package oneandone - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - 
"github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "io" - "os" - fp "path/filepath" - "strings" -) - -func resourceOneandOneVPN() *schema.Resource { - return &schema.Resource{ - Create: resourceOneandOneVPNCreate, - Read: resourceOneandOneVPNRead, - Update: resourceOneandOneVPNUpdate, - Delete: resourceOneandOneVPNDelete, - Schema: map[string]*schema.Schema{ - - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "download_path": { - Type: schema.TypeString, - Computed: true, - }, - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - "file_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceOneandOneVPNCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - var datacenter string - - if raw, ok := d.GetOk("datacenter"); ok { - dcs, err := config.API.ListDatacenters() - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - datacenter = dc.Id - break - } - } - } - - var description string - if raw, ok := d.GetOk("description"); ok { - description = raw.(string) - } - - vpn_id, vpn, err := config.API.CreateVPN(d.Get("name").(string), description, datacenter) - if err != nil { - return err - } - - err = config.API.WaitForState(vpn, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - - d.SetId(vpn_id) - - return resourceOneandOneVPNRead(d, meta) -} - -func resourceOneandOneVPNUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("name") || d.HasChange("description") { - - vpn, err := config.API.ModifyVPN(d.Id(), d.Get("name").(string), d.Get("description").(string)) - if err != nil { - return err - } - - err = 
config.API.WaitForState(vpn, "ACTIVE", 10, config.Retries) - if err != nil { - return err - } - } - - return resourceOneandOneVPNRead(d, meta) -} - -func resourceOneandOneVPNRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - vpn, err := config.API.GetVPN(d.Id()) - - base64_str, err := config.API.GetVPNConfigFile(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - var download_path string - if raw, ok := d.GetOk("download_path"); ok { - download_path = raw.(string) - } - - path, fileName, err := writeCofnig(vpn, download_path, base64_str) - if err != nil { - return err - } - - d.Set("name", vpn.Name) - d.Set("description", vpn.Description) - d.Set("download_path", path) - d.Set("file_name", fileName) - d.Set("datacenter", vpn.Datacenter.CountryCode) - - return nil -} - -func writeCofnig(vpn *oneandone.VPN, path, base64config string) (string, string, error) { - data, err := base64.StdEncoding.DecodeString(base64config) - if err != nil { - return "", "", err - } - - var fileName string - if vpn.CloudPanelId != "" { - fileName = vpn.CloudPanelId + ".zip" - } else { - fileName = "vpn_" + fmt.Sprintf("%x", md5.Sum(data)) + ".zip" - } - - if path == "" { - path, err = os.Getwd() - if err != nil { - return "", "", err - } - } - - if !fp.IsAbs(path) { - path, err = fp.Abs(path) - if err != nil { - return "", "", err - } - } - - _, err = os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - // make all dirs - os.MkdirAll(path, 0666) - } else { - return "", "", err - } - } - - fpath := fp.Join(path, fileName) - - f, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0666) - defer f.Close() - - if err != nil { - return "", "", err - } - - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - - if err != nil { - return "", "", err - } - - return path, fileName, nil - -} - -func resourceOneandOneVPNDelete(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - - vpn, err := config.API.DeleteVPN(d.Id()) - if err != nil { - return err - } - - err = config.API.WaitUntilDeleted(vpn) - if err != nil { - return err - } - - fullPath := fp.Join(d.Get("download_path").(string), d.Get("file_name").(string)) - if _, err := os.Stat(fullPath); !os.IsNotExist(err) { - os.Remove(fullPath) - } - - return nil -} diff --git a/builtin/providers/oneandone/resource_oneandone_vpn_test.go b/builtin/providers/oneandone/resource_oneandone_vpn_test.go deleted file mode 100644 index 94e84bb61..000000000 --- a/builtin/providers/oneandone/resource_oneandone_vpn_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandoneVpn_Basic(t *testing.T) { - var server oneandone.VPN - - name := "test" - name_updated := "test1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneVPNDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneVPN_basic, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneVPNExists("oneandone_vpn.vpn", &server), - testAccCheckOneandoneVPNAttributes("oneandone_vpn.vpn", name), - resource.TestCheckResourceAttr("oneandone_vpn.vpn", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneVPN_basic, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneVPNExists("oneandone_vpn.vpn", &server), - testAccCheckOneandoneVPNAttributes("oneandone_vpn.vpn", name_updated), - 
resource.TestCheckResourceAttr("oneandone_vpn.vpn", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneVPNDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_server" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetVPN(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("VPN still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneVPNAttributes(n string, reverse_dns string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneVPNExists(n string, server *oneandone.VPN) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_server, err := api.GetVPN(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching VPN: %s", rs.Primary.ID) - } - if found_server.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - server = found_server - - return nil - } -} - -const testAccCheckOneandoneVPN_basic = ` -resource "oneandone_vpn" "vpn" { - datacenter = "GB" - name = "%s" - description = "ttest descr" -}` diff --git a/builtin/providers/oneandone/resources_oneandone_shared_storage.go b/builtin/providers/oneandone/resources_oneandone_shared_storage.go deleted file mode 100644 index f690e0cf6..000000000 --- 
a/builtin/providers/oneandone/resources_oneandone_shared_storage.go +++ /dev/null @@ -1,256 +0,0 @@ -package oneandone - -import ( - "fmt" - "github.com/1and1/oneandone-cloudserver-sdk-go" - "github.com/hashicorp/terraform/helper/schema" - "strings" -) - -func resourceOneandOneSharedStorage() *schema.Resource { - return &schema.Resource{ - Create: resourceOneandOneSharedStorageCreate, - Read: resourceOneandOneSharedStorageRead, - Update: resourceOneandOneSharedStorageUpdate, - Delete: resourceOneandOneSharedStorageDelete, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "size": { - Type: schema.TypeInt, - Required: true, - }, - "datacenter": { - Type: schema.TypeString, - Required: true, - }, - "storage_servers": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "rights": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - Optional: true, - }, - }, - } -} - -func resourceOneandOneSharedStorageCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - req := oneandone.SharedStorageRequest{ - Name: d.Get("name").(string), - Size: oneandone.Int2Pointer(d.Get("size").(int)), - } - - if raw, ok := d.GetOk("description"); ok { - req.Description = raw.(string) - - } - - if raw, ok := d.GetOk("datacenter"); ok { - dcs, err := config.API.ListDatacenters() - - if err != nil { - return fmt.Errorf("An error occured while fetching list of datacenters %s", err) - - } - - decenter := raw.(string) - for _, dc := range dcs { - if strings.ToLower(dc.CountryCode) == strings.ToLower(decenter) { - req.DatacenterId = dc.Id - break - } - } - } - - ss_id, ss, err := config.API.CreateSharedStorage(&req) - if err != nil { - return err - } - - err = config.API.WaitForState(ss, "ACTIVE", 10, config.Retries) - if err != nil { - return 
err - } - d.SetId(ss_id) - - if raw, ok := d.GetOk("storage_servers"); ok { - - storage_servers := []oneandone.SharedStorageServer{} - - rawRights := raw.([]interface{}) - for _, raws_ss := range rawRights { - ss := raws_ss.(map[string]interface{}) - storage_server := oneandone.SharedStorageServer{ - Id: ss["id"].(string), - Rights: ss["rights"].(string), - } - storage_servers = append(storage_servers, storage_server) - } - - ss, err := config.API.AddSharedStorageServers(ss_id, storage_servers) - - if err != nil { - return err - } - - err = config.API.WaitForState(ss, "ACTIVE", 10, 30) - if err != nil { - return err - } - } - - return resourceOneandOneSharedStorageRead(d, meta) -} - -func resourceOneandOneSharedStorageUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.HasChange("name") || d.HasChange("description") || d.HasChange("size") { - ssu := oneandone.SharedStorageRequest{} - if d.HasChange("name") { - _, n := d.GetChange("name") - ssu.Name = n.(string) - } - if d.HasChange("description") { - _, n := d.GetChange("description") - ssu.Description = n.(string) - } - if d.HasChange("size") { - _, n := d.GetChange("size") - ssu.Size = oneandone.Int2Pointer(n.(int)) - } - - ss, err := config.API.UpdateSharedStorage(d.Id(), &ssu) - - if err != nil { - return err - } - err = config.API.WaitForState(ss, "ACTIVE", 10, 30) - if err != nil { - return err - } - - } - - if d.HasChange("storage_servers") { - - o, n := d.GetChange("storage_servers") - - oldV := o.([]interface{}) - - for _, old := range oldV { - ol := old.(map[string]interface{}) - - ss, err := config.API.DeleteSharedStorageServer(d.Id(), ol["id"].(string)) - if err != nil { - return err - } - - err = config.API.WaitForState(ss, "ACTIVE", 10, config.Retries) - - if err != nil { - return err - } - - } - - newV := n.([]interface{}) - - ids := []oneandone.SharedStorageServer{} - for _, newValue := range newV { - nn := newValue.(map[string]interface{}) - ids = 
append(ids, oneandone.SharedStorageServer{ - Id: nn["id"].(string), - Rights: nn["rights"].(string), - }) - } - - if len(ids) > 0 { - ss, err := config.API.AddSharedStorageServers(d.Id(), ids) - if err != nil { - return err - } - - err = config.API.WaitForState(ss, "ACTIVE", 10, config.Retries) - - if err != nil { - return err - } - } - - //DeleteSharedStorageServer - - } - - return resourceOneandOneSharedStorageRead(d, meta) -} - -func resourceOneandOneSharedStorageRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - ss, err := config.API.GetSharedStorage(d.Id()) - if err != nil { - if strings.Contains(err.Error(), "404") { - d.SetId("") - return nil - } - return err - } - - d.Set("name", ss.Name) - d.Set("description", ss.Description) - d.Set("size", ss.Size) - d.Set("datacenter", ss.Datacenter.CountryCode) - d.Set("storage_servers", getStorageServers(ss.Servers)) - - return nil -} - -func getStorageServers(servers []oneandone.SharedStorageServer) []map[string]interface{} { - raw := make([]map[string]interface{}, 0, len(servers)) - - for _, server := range servers { - - toadd := map[string]interface{}{ - "id": server.Id, - "rights": server.Rights, - } - - raw = append(raw, toadd) - } - - return raw - -} -func resourceOneandOneSharedStorageDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - ss, err := config.API.DeleteSharedStorage(d.Id()) - if err != nil { - return err - } - err = config.API.WaitUntilDeleted(ss) - if err != nil { - return err - } - - return nil -} diff --git a/builtin/providers/oneandone/resources_oneandone_shared_storage_test.go b/builtin/providers/oneandone/resources_oneandone_shared_storage_test.go deleted file mode 100644 index dcc07302a..000000000 --- a/builtin/providers/oneandone/resources_oneandone_shared_storage_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package oneandone - -import ( - "fmt" - "testing" - - "github.com/1and1/oneandone-cloudserver-sdk-go" - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "os" - "time" -) - -func TestAccOneandoneSharedStorage_Basic(t *testing.T) { - var storage oneandone.SharedStorage - - name := "test_storage" - name_updated := "test1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDOneandoneSharedStorageDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneSharedStorage_basic, name), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneSharedStorageExists("oneandone_shared_storage.storage", &storage), - testAccCheckOneandoneSharedStorageAttributes("oneandone_shared_storage.storage", name), - resource.TestCheckResourceAttr("oneandone_shared_storage.storage", "name", name), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckOneandoneSharedStorage_basic, name_updated), - Check: resource.ComposeTestCheckFunc( - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testAccCheckOneandoneSharedStorageExists("oneandone_shared_storage.storage", &storage), - testAccCheckOneandoneSharedStorageAttributes("oneandone_shared_storage.storage", name_updated), - resource.TestCheckResourceAttr("oneandone_shared_storage.storage", "name", name_updated), - ), - }, - }, - }) -} - -func testAccCheckDOneandoneSharedStorageDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "oneandone_shared_storage" { - continue - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - _, err := api.GetVPN(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("VPN still exists %s %s", rs.Primary.ID, err.Error()) - } - } - - return nil -} -func testAccCheckOneandoneSharedStorageAttributes(n string, reverse_dns string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != reverse_dns { - return fmt.Errorf("Bad name: expected %s : found %s ", reverse_dns, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckOneandoneSharedStorageExists(n string, storage *oneandone.SharedStorage) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - api := oneandone.New(os.Getenv("ONEANDONE_TOKEN"), oneandone.BaseUrl) - - found_storage, err := api.GetSharedStorage(rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error occured while fetching SharedStorage: %s", rs.Primary.ID) - } - if found_storage.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - storage = found_storage - - return nil - } -} - -const testAccCheckOneandoneSharedStorage_basic = ` -resource "oneandone_shared_storage" "storage" { - name = "%s" - description = "ttt" - size = 50 - datacenter = "GB" -}` diff --git a/builtin/providers/openstack/config.go b/builtin/providers/openstack/config.go deleted file mode 100644 index 8a2ced2ec..000000000 --- a/builtin/providers/openstack/config.go +++ /dev/null @@ -1,203 +0,0 @@ -package openstack - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "net/http" - "os" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/terraform" -) - -type Config struct { - CACertFile string - ClientCertFile string - ClientKeyFile string - DomainID string - DomainName string - EndpointType string - IdentityEndpoint string - Insecure bool - 
Password string - Swauth bool - TenantID string - TenantName string - Token string - Username string - UserID string - - osClient *gophercloud.ProviderClient -} - -func (c *Config) loadAndValidate() error { - validEndpoint := false - validEndpoints := []string{ - "internal", "internalURL", - "admin", "adminURL", - "public", "publicURL", - "", - } - - for _, endpoint := range validEndpoints { - if c.EndpointType == endpoint { - validEndpoint = true - } - } - - if !validEndpoint { - return fmt.Errorf("Invalid endpoint type provided") - } - - ao := gophercloud.AuthOptions{ - DomainID: c.DomainID, - DomainName: c.DomainName, - IdentityEndpoint: c.IdentityEndpoint, - Password: c.Password, - TenantID: c.TenantID, - TenantName: c.TenantName, - TokenID: c.Token, - Username: c.Username, - UserID: c.UserID, - } - - client, err := openstack.NewClient(ao.IdentityEndpoint) - if err != nil { - return err - } - - // Set UserAgent - client.UserAgent.Prepend(terraform.UserAgentString()) - - config := &tls.Config{} - if c.CACertFile != "" { - caCert, _, err := pathorcontents.Read(c.CACertFile) - if err != nil { - return fmt.Errorf("Error reading CA Cert: %s", err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM([]byte(caCert)) - config.RootCAs = caCertPool - } - - if c.Insecure { - config.InsecureSkipVerify = true - } - - if c.ClientCertFile != "" && c.ClientKeyFile != "" { - clientCert, _, err := pathorcontents.Read(c.ClientCertFile) - if err != nil { - return fmt.Errorf("Error reading Client Cert: %s", err) - } - clientKey, _, err := pathorcontents.Read(c.ClientKeyFile) - if err != nil { - return fmt.Errorf("Error reading Client Key: %s", err) - } - - cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) - if err != nil { - return err - } - - config.Certificates = []tls.Certificate{cert} - config.BuildNameToCertificate() - } - - // if OS_DEBUG is set, log the requests and responses - var osDebug bool - if os.Getenv("OS_DEBUG") != "" { - 
osDebug = true - } - - transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config} - client.HTTPClient = http.Client{ - Transport: &LogRoundTripper{ - Rt: transport, - OsDebug: osDebug, - }, - } - - // If using Swift Authentication, there's no need to validate authentication normally. - if !c.Swauth { - err = openstack.Authenticate(client, ao) - if err != nil { - return err - } - } - - c.osClient = client - - return nil -} - -func (c *Config) blockStorageV1Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewBlockStorageV1(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) blockStorageV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewBlockStorageV2(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) computeV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) dnsV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewDNSV2(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) imageV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewImageServiceV2(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) networkingV2Client(region string) (*gophercloud.ServiceClient, error) { - return openstack.NewNetworkV2(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) objectStorageV1Client(region string) (*gophercloud.ServiceClient, error) { - // If Swift Authentication is being used, return a swauth client. 
- if c.Swauth { - return swauth.NewObjectStorageV1(c.osClient, swauth.AuthOpts{ - User: c.Username, - Key: c.Password, - }) - } - - return openstack.NewObjectStorageV1(c.osClient, gophercloud.EndpointOpts{ - Region: region, - Availability: c.getEndpointType(), - }) -} - -func (c *Config) getEndpointType() gophercloud.Availability { - if c.EndpointType == "internal" || c.EndpointType == "internalURL" { - return gophercloud.AvailabilityInternal - } - if c.EndpointType == "admin" || c.EndpointType == "adminURL" { - return gophercloud.AvailabilityAdmin - } - return gophercloud.AvailabilityPublic -} diff --git a/builtin/providers/openstack/data_source_openstack_images_image_v2.go b/builtin/providers/openstack/data_source_openstack_images_image_v2.go deleted file mode 100644 index da03b2be7..000000000 --- a/builtin/providers/openstack/data_source_openstack_images_image_v2.go +++ /dev/null @@ -1,255 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "sort" - - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - "github.com/gophercloud/gophercloud/pagination" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceImagesImageV2() *schema.Resource { - return &schema.Resource{ - Read: dataSourceImagesImageV2Read, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "visibility": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "owner": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "size_min": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "size_max": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "sort_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "name", - }, - - 
"sort_direction": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "asc", - ValidateFunc: dataSourceImagesImageV2SortDirection, - }, - - "tag": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "most_recent": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - // Computed values - "container_format": { - Type: schema.TypeString, - Computed: true, - }, - - "disk_format": { - Type: schema.TypeString, - Computed: true, - }, - - "min_disk_gb": { - Type: schema.TypeInt, - Computed: true, - }, - - "min_ram_mb": { - Type: schema.TypeInt, - Computed: true, - }, - - "protected": { - Type: schema.TypeBool, - Computed: true, - }, - - "checksum": { - Type: schema.TypeString, - Computed: true, - }, - - "size_bytes": { - Type: schema.TypeInt, - Computed: true, - }, - - "metadata": { - Type: schema.TypeMap, - Computed: true, - }, - - "updated_at": { - Type: schema.TypeString, - Computed: true, - }, - - "file": { - Type: schema.TypeString, - Computed: true, - }, - - "schema": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// dataSourceImagesImageV2Read performs the image lookup. 
-func dataSourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - - listOpts := images.ListOpts{ - Name: d.Get("name").(string), - Visibility: visibility, - Owner: d.Get("owner").(string), - Status: images.ImageStatusActive, - SizeMin: int64(d.Get("size_min").(int)), - SizeMax: int64(d.Get("size_max").(int)), - SortKey: d.Get("sort_key").(string), - SortDir: d.Get("sort_direction").(string), - Tag: d.Get("tag").(string), - } - - var allImages []images.Image - pager := images.List(imageClient, listOpts) - err = pager.EachPage(func(page pagination.Page) (bool, error) { - images, err := images.ExtractImages(page) - if err != nil { - return false, err - } - - for _, i := range images { - allImages = append(allImages, i) - } - - return true, nil - }) - - if err != nil { - return fmt.Errorf("Unable to retrieve images: %s", err) - } - - var image images.Image - if len(allImages) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - - if len(allImages) > 1 { - recent := d.Get("most_recent").(bool) - log.Printf("[DEBUG] openstack_images_image: multiple results found and `most_recent` is set to: %t", recent) - if recent { - image = mostRecentImage(allImages) - } else { - return fmt.Errorf("Your query returned more than one result. Please try a more " + - "specific search criteria, or set `most_recent` attribute to true.") - } - } else { - image = allImages[0] - } - - log.Printf("[DEBUG] openstack_images_image: Single Image found: %s", image.ID) - return dataSourceImagesImageV2Attributes(d, &image) -} - -// dataSourceImagesImageV2Attributes populates the fields of an Image resource. 
-func dataSourceImagesImageV2Attributes(d *schema.ResourceData, image *images.Image) error { - log.Printf("[DEBUG] openstack_images_image details: %#v", image) - - d.SetId(image.ID) - d.Set("name", image.Name) - d.Set("tags", image.Tags) - d.Set("container_format", image.ContainerFormat) - d.Set("disk_format", image.DiskFormat) - d.Set("min_disk_gb", image.MinDiskGigabytes) - d.Set("min_ram_mb", image.MinRAMMegabytes) - d.Set("owner", image.Owner) - d.Set("protected", image.Protected) - d.Set("visibility", image.Visibility) - d.Set("checksum", image.Checksum) - d.Set("size_bytes", image.SizeBytes) - d.Set("metadata", image.Metadata) - d.Set("created_at", image.CreatedAt) - d.Set("updated_at", image.UpdatedAt) - d.Set("file", image.File) - d.Set("schema", image.Schema) - - return nil -} - -type imageSort []images.Image - -func (a imageSort) Len() int { return len(a) } -func (a imageSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a imageSort) Less(i, j int) bool { - itime := a[i].UpdatedAt - jtime := a[j].UpdatedAt - return itime.Unix() < jtime.Unix() -} - -// Returns the most recent Image out of a slice of images. 
-func mostRecentImage(images []images.Image) images.Image { - sortedImages := images - sort.Sort(imageSort(sortedImages)) - return sortedImages[len(sortedImages)-1] -} - -func dataSourceImagesImageV2SortDirection(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "asc" && value != "desc" { - err := fmt.Errorf("%s must be either asc or desc", k) - errors = append(errors, err) - } - return -} diff --git a/builtin/providers/openstack/data_source_openstack_images_image_v2_test.go b/builtin/providers/openstack/data_source_openstack_images_image_v2_test.go deleted file mode 100644 index 2d134fa66..000000000 --- a/builtin/providers/openstack/data_source_openstack_images_image_v2_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccOpenStackImagesV2ImageDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_cirros, - }, - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesV2DataSourceID("data.openstack_images_image_v2.image_1"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "name", "CirrOS-tf"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "container_format", "bare"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "disk_format", "qcow2"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "min_disk_gb", "0"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "min_ram_mb", "0"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", 
"protected", "false"), - resource.TestCheckResourceAttr( - "data.openstack_images_image_v2.image_1", "visibility", "private"), - ), - }, - }, - }) -} - -func TestAccOpenStackImagesV2ImageDataSource_testQueries(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_cirros, - }, - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_queryTag, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesV2DataSourceID("data.openstack_images_image_v2.image_1"), - ), - }, - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_querySizeMin, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesV2DataSourceID("data.openstack_images_image_v2.image_1"), - ), - }, - resource.TestStep{ - Config: testAccOpenStackImagesV2ImageDataSource_querySizeMax, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesV2DataSourceID("data.openstack_images_image_v2.image_1"), - ), - }, - }, - }) -} - -func testAccCheckImagesV2DataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find image data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Image data source ID not set") - } - - return nil - } -} - -// Standard CirrOS image -const testAccOpenStackImagesV2ImageDataSource_cirros = ` -resource "openstack_images_image_v2" "image_1" { - name = "CirrOS-tf" - container_format = "bare" - disk_format = "qcow2" - image_source_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - tags = ["cirros-tf"] -} -` - -var testAccOpenStackImagesV2ImageDataSource_basic = fmt.Sprintf(` -%s - -data "openstack_images_image_v2" "image_1" { - most_recent = true - name = "${openstack_images_image_v2.image_1.name}" -} -`, 
testAccOpenStackImagesV2ImageDataSource_cirros) - -var testAccOpenStackImagesV2ImageDataSource_queryTag = fmt.Sprintf(` -%s - -data "openstack_images_image_v2" "image_1" { - most_recent = true - visibility = "private" - tag = "cirros-tf" -} -`, testAccOpenStackImagesV2ImageDataSource_cirros) - -var testAccOpenStackImagesV2ImageDataSource_querySizeMin = fmt.Sprintf(` -%s - -data "openstack_images_image_v2" "image_1" { - most_recent = true - visibility = "private" - size_min = "13000000" -} -`, testAccOpenStackImagesV2ImageDataSource_cirros) - -var testAccOpenStackImagesV2ImageDataSource_querySizeMax = fmt.Sprintf(` -%s - -data "openstack_images_image_v2" "image_1" { - most_recent = true - visibility = "private" - size_max = "23000000" -} -`, testAccOpenStackImagesV2ImageDataSource_cirros) diff --git a/builtin/providers/openstack/data_source_openstack_networking_network_v2.go b/builtin/providers/openstack/data_source_openstack_networking_network_v2.go deleted file mode 100644 index f7615c41a..000000000 --- a/builtin/providers/openstack/data_source_openstack_networking_network_v2.go +++ /dev/null @@ -1,117 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func dataSourceNetworkingNetworkV2() *schema.Resource { - return &schema.Resource{ - Read: dataSourceNetworkingNetworkV2Read, - - Schema: map[string]*schema.Schema{ - "network_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "matching_subnet_cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "tenant_id": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_ID", - "OS_PROJECT_ID", - }, ""), - Description: descriptions["tenant_id"], - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "shared": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - - listOpts := networks.ListOpts{ - ID: d.Get("network_id").(string), - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - Status: "ACTIVE", - } - - pages, err := networks.List(networkingClient, listOpts).AllPages() - allNetworks, err := networks.ExtractNetworks(pages) - if err != nil { - return fmt.Errorf("Unable to retrieve networks: %s", err) - } - - var refinedNetworks []networks.Network - if cidr := d.Get("matching_subnet_cidr").(string); cidr != "" { - for _, n := range allNetworks { - for _, s := range n.Subnets { - subnet, err := subnets.Get(networkingClient, s).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - return fmt.Errorf("Unable to retrieve network subnet: %s", err) - } - if cidr == subnet.CIDR { - refinedNetworks = append(refinedNetworks, n) - } - } - } - } else { - refinedNetworks = allNetworks - } - - if len(refinedNetworks) < 1 { - return fmt.Errorf("Your query returned no results. " + - "Please change your search criteria and try again.") - } - - if len(refinedNetworks) > 1 { - return fmt.Errorf("Your query returned more than one result." 
+ - " Please try a more specific search criteria") - } - - network := refinedNetworks[0] - - log.Printf("[DEBUG] Retrieved Network %s: %+v", network.ID, network) - d.SetId(network.ID) - - d.Set("name", network.Name) - d.Set("admin_state_up", strconv.FormatBool(network.AdminStateUp)) - d.Set("shared", strconv.FormatBool(network.Shared)) - d.Set("tenant_id", network.TenantID) - d.Set("region", GetRegion(d)) - - return nil -} diff --git a/builtin/providers/openstack/data_source_openstack_networking_network_v2_test.go b/builtin/providers/openstack/data_source_openstack_networking_network_v2_test.go deleted file mode 100644 index db721d15a..000000000 --- a/builtin/providers/openstack/data_source_openstack_networking_network_v2_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccOpenStackNetworkingNetworkV2DataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_network, - }, - resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingNetworkV2DataSourceID("data.openstack_networking_network_v2.net"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "name", "tf_test_network"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "admin_state_up", "true"), - ), - }, - }, - }) -} - -func TestAccOpenStackNetworkingNetworkV2DataSource_subnet(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_network, - }, - 
resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_subnet, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingNetworkV2DataSourceID("data.openstack_networking_network_v2.net"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "name", "tf_test_network"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "admin_state_up", "true"), - ), - }, - }, - }) -} - -func TestAccOpenStackNetworkingNetworkV2DataSource_networkID(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_network, - }, - resource.TestStep{ - Config: testAccOpenStackNetworkingNetworkV2DataSource_networkID, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingNetworkV2DataSourceID("data.openstack_networking_network_v2.net"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "name", "tf_test_network"), - resource.TestCheckResourceAttr( - "data.openstack_networking_network_v2.net", "admin_state_up", "true"), - ), - }, - }, - }) -} - -func testAccCheckNetworkingNetworkV2DataSourceID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find network data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Network data source ID not set") - } - - return nil - } -} - -const testAccOpenStackNetworkingNetworkV2DataSource_network = ` -resource "openstack_networking_network_v2" "net" { - name = "tf_test_network" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet" { - name = "tf_test_subnet" - cidr = "192.168.199.0/24" - no_gateway = true - network_id = "${openstack_networking_network_v2.net.id}" -} -` - -var 
testAccOpenStackNetworkingNetworkV2DataSource_basic = fmt.Sprintf(` -%s - -data "openstack_networking_network_v2" "net" { - name = "${openstack_networking_network_v2.net.name}" -} -`, testAccOpenStackNetworkingNetworkV2DataSource_network) - -var testAccOpenStackNetworkingNetworkV2DataSource_subnet = fmt.Sprintf(` -%s - -data "openstack_networking_network_v2" "net" { - matching_subnet_cidr = "${openstack_networking_subnet_v2.subnet.cidr}" -} -`, testAccOpenStackNetworkingNetworkV2DataSource_network) - -var testAccOpenStackNetworkingNetworkV2DataSource_networkID = fmt.Sprintf(` -%s - -data "openstack_networking_network_v2" "net" { - network_id = "${openstack_networking_network_v2.net.id}" -} -`, testAccOpenStackNetworkingNetworkV2DataSource_network) diff --git a/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go b/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go deleted file mode 100644 index 4a7e08401..000000000 --- a/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccBlockStorageV1Volume_importBasic(t *testing.T) { - resourceName := "openstack_blockstorage_volume_v1.volume_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV1Volume_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go b/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go deleted file mode 100644 index 1f5365274..000000000 --- 
a/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccBlockStorageV2Volume_importBasic(t *testing.T) { - resourceName := "openstack_blockstorage_volume_v2.volume_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV2VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV2Volume_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_floatingip_associate_v2_test.go b/builtin/providers/openstack/import_openstack_compute_floatingip_associate_v2_test.go deleted file mode 100644 index 8bada4947..000000000 --- a/builtin/providers/openstack/import_openstack_compute_floatingip_associate_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2FloatingIPAssociate_importBasic(t *testing.T) { - resourceName := "openstack_compute_floatingip_associate_v2.fip_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go b/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go deleted file mode 100644 index afae2a3f9..000000000 --- 
a/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2FloatingIP_importBasic(t *testing.T) { - resourceName := "openstack_compute_floatingip_v2.fip_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIP_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go b/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go deleted file mode 100644 index 89e094463..000000000 --- a/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2Keypair_importBasic(t *testing.T) { - resourceName := "openstack_compute_keypair_v2.kp_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2KeypairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Keypair_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go b/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go deleted file mode 100644 index 7705e2ec9..000000000 --- a/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" 
- - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2SecGroup_importBasic(t *testing.T) { - resourceName := "openstack_compute_secgroup_v2.sg_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_basic_orig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go b/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go deleted file mode 100644 index 4b1f19304..000000000 --- a/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2ServerGroup_importBasic(t *testing.T) { - resourceName := "openstack_compute_servergroup_v2.sg_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2ServerGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2ServerGroup_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_compute_volume_attach_v2_test.go b/builtin/providers/openstack/import_openstack_compute_volume_attach_v2_test.go deleted file mode 100644 index ee00882ff..000000000 --- a/builtin/providers/openstack/import_openstack_compute_volume_attach_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccComputeV2VolumeAttach_importBasic(t 
*testing.T) { - resourceName := "openstack_compute_volume_attach_v2.va_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2VolumeAttachDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2VolumeAttach_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_dns_recordset_v2_test.go b/builtin/providers/openstack/import_openstack_dns_recordset_v2_test.go deleted file mode 100644 index dc07afaf0..000000000 --- a/builtin/providers/openstack/import_openstack_dns_recordset_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDNSV2RecordSet_importBasic(t *testing.T) { - zoneName := randomZoneName() - resourceName := "openstack_dns_recordset_v2.recordset_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSRecordSetV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2RecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2RecordSet_basic(zoneName), - }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_dns_zone_v2_test.go b/builtin/providers/openstack/import_openstack_dns_zone_v2_test.go deleted file mode 100644 index 83b7f5509..000000000 --- a/builtin/providers/openstack/import_openstack_dns_zone_v2_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccDNSV2Zone_importBasic(t *testing.T) { - var zoneName = fmt.Sprintf("ACPTTEST%s.com.", 
acctest.RandString(5)) - resourceName := "openstack_dns_zone_v2.zone_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSZoneV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2Zone_basic(zoneName), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go b/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go deleted file mode 100644 index c7ea21930..000000000 --- a/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccFWFirewallV1_importBasic(t *testing.T) { - resourceName := "openstack_fw_firewall_v1.fw_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_basic_1, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go b/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go deleted file mode 100644 index 6b9d9f25c..000000000 --- a/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccFWPolicyV1_importBasic(t *testing.T) { - resourceName := "openstack_fw_policy_v1.policy_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckFWPolicyV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWPolicyV1_addRules, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go b/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go deleted file mode 100644 index c586d1085..000000000 --- a/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccFWRuleV1_importBasic(t *testing.T) { - resourceName := "openstack_fw_rule_v1.rule_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWRuleV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWRuleV1_basic_2, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_images_image_v2_test.go b/builtin/providers/openstack/import_openstack_images_image_v2_test.go deleted file mode 100644 index 99bb87b5a..000000000 --- a/builtin/providers/openstack/import_openstack_images_image_v2_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccImagesImageV2_importBasic(t *testing.T) { - resourceName := "openstack_images_image_v2.image_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{ - "region", - "local_file_path", - "image_cache_path", - "image_source_url", - }, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_lb_member_v1_test.go b/builtin/providers/openstack/import_openstack_lb_member_v1_test.go deleted file mode 100644 index 514244048..000000000 --- a/builtin/providers/openstack/import_openstack_lb_member_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccLBV1Member_importBasic(t *testing.T) { - resourceName := "openstack_lb_member_v1.member_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MemberDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Member_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go b/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go deleted file mode 100644 index a27295a77..000000000 --- a/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccLBV1Monitor_importBasic(t *testing.T) { - resourceName := "openstack_lb_monitor_v1.monitor_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Monitor_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go 
b/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go deleted file mode 100644 index 14369f636..000000000 --- a/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccLBV1Pool_importBasic(t *testing.T) { - resourceName := "openstack_lb_pool_v1.pool_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Pool_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go b/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go deleted file mode 100644 index f5e5fa075..000000000 --- a/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccLBV1VIP_importBasic(t *testing.T) { - resourceName := "openstack_lb_vip_v1.vip_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1VIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1VIP_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go b/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go deleted file mode 100644 index 9b792c661..000000000 --- a/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - 
-import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNetworkingV2FloatingIP_importBasic(t *testing.T) { - resourceName := "openstack_networking_floatingip_v2.fip_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2FloatingIP_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_network_v2_test.go b/builtin/providers/openstack/import_openstack_networking_network_v2_test.go deleted file mode 100644 index 638c49295..000000000 --- a/builtin/providers/openstack/import_openstack_networking_network_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNetworkingV2Network_importBasic(t *testing.T) { - resourceName := "openstack_networking_network_v2.network_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_port_v2_test.go b/builtin/providers/openstack/import_openstack_networking_port_v2_test.go deleted file mode 100644 index 562e3e0cd..000000000 --- a/builtin/providers/openstack/import_openstack_networking_port_v2_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccNetworkingV2Port_importBasic(t *testing.T) { - resourceName := "openstack_networking_port_v2.port_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "fixed_ip", - }, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go b/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go deleted file mode 100644 index c48ef687a..000000000 --- a/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNetworkingV2SecGroupRule_importBasic(t *testing.T) { - resourceName := "openstack_networking_secgroup_rule_v2.secgroup_rule_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go b/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go deleted file mode 100644 index 3913b174f..000000000 --- a/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccNetworkingV2SecGroup_importBasic(t *testing.T) { - resourceName := "openstack_networking_secgroup_v2.secgroup_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroup_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go b/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go deleted file mode 100644 index fa2d5eea9..000000000 --- a/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package openstack - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccNetworkingV2Subnet_importBasic(t *testing.T) { - resourceName := "openstack_networking_subnet_v2.subnet_1" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_basic, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/openstack/provider.go b/builtin/providers/openstack/provider.go deleted file mode 100644 index 49a2d45ec..000000000 --- a/builtin/providers/openstack/provider.go +++ /dev/null @@ -1,247 +0,0 @@ -package openstack - -import ( - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// This is a global MutexKV for use within this plugin. 
-var osMutexKV = mutexkv.NewMutexKV() - -// Provider returns a schema.Provider for OpenStack. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "auth_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil), - Description: descriptions["auth_url"], - }, - - "user_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""), - Description: descriptions["user_name"], - }, - - "user_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""), - Description: descriptions["user_name"], - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_ID", - "OS_PROJECT_ID", - }, ""), - Description: descriptions["tenant_id"], - }, - - "tenant_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_TENANT_NAME", - "OS_PROJECT_NAME", - }, ""), - Description: descriptions["tenant_name"], - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""), - Description: descriptions["password"], - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""), - Description: descriptions["token"], - }, - - "domain_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_ID", - "OS_PROJECT_DOMAIN_ID", - "OS_DOMAIN_ID", - }, ""), - Description: descriptions["domain_id"], - }, - - "domain_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "OS_USER_DOMAIN_NAME", - 
"OS_PROJECT_DOMAIN_NAME", - "OS_DOMAIN_NAME", - "OS_DEFAULT_DOMAIN", - }, ""), - Description: descriptions["domain_name"], - }, - - "insecure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""), - Description: descriptions["insecure"], - }, - - "endpoint_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""), - }, - - "cacert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""), - Description: descriptions["cacert_file"], - }, - - "cert": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""), - Description: descriptions["cert"], - }, - - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""), - Description: descriptions["key"], - }, - - "swauth": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("OS_SWAUTH", ""), - Description: descriptions["swauth"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "openstack_images_image_v2": dataSourceImagesImageV2(), - "openstack_networking_network_v2": dataSourceNetworkingNetworkV2(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "openstack_blockstorage_volume_v1": resourceBlockStorageVolumeV1(), - "openstack_blockstorage_volume_v2": resourceBlockStorageVolumeV2(), - "openstack_blockstorage_volume_attach_v2": resourceBlockStorageVolumeAttachV2(), - "openstack_compute_instance_v2": resourceComputeInstanceV2(), - "openstack_compute_keypair_v2": resourceComputeKeypairV2(), - "openstack_compute_secgroup_v2": resourceComputeSecGroupV2(), - "openstack_compute_servergroup_v2": resourceComputeServerGroupV2(), - "openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(), - "openstack_compute_floatingip_associate_v2": 
resourceComputeFloatingIPAssociateV2(), - "openstack_compute_volume_attach_v2": resourceComputeVolumeAttachV2(), - "openstack_dns_recordset_v2": resourceDNSRecordSetV2(), - "openstack_dns_zone_v2": resourceDNSZoneV2(), - "openstack_fw_firewall_v1": resourceFWFirewallV1(), - "openstack_fw_policy_v1": resourceFWPolicyV1(), - "openstack_fw_rule_v1": resourceFWRuleV1(), - "openstack_images_image_v2": resourceImagesImageV2(), - "openstack_lb_member_v1": resourceLBMemberV1(), - "openstack_lb_monitor_v1": resourceLBMonitorV1(), - "openstack_lb_pool_v1": resourceLBPoolV1(), - "openstack_lb_vip_v1": resourceLBVipV1(), - "openstack_lb_loadbalancer_v2": resourceLoadBalancerV2(), - "openstack_lb_listener_v2": resourceListenerV2(), - "openstack_lb_pool_v2": resourcePoolV2(), - "openstack_lb_member_v2": resourceMemberV2(), - "openstack_lb_monitor_v2": resourceMonitorV2(), - "openstack_networking_network_v2": resourceNetworkingNetworkV2(), - "openstack_networking_subnet_v2": resourceNetworkingSubnetV2(), - "openstack_networking_floatingip_v2": resourceNetworkingFloatingIPV2(), - "openstack_networking_port_v2": resourceNetworkingPortV2(), - "openstack_networking_router_v2": resourceNetworkingRouterV2(), - "openstack_networking_router_interface_v2": resourceNetworkingRouterInterfaceV2(), - "openstack_networking_router_route_v2": resourceNetworkingRouterRouteV2(), - "openstack_networking_secgroup_v2": resourceNetworkingSecGroupV2(), - "openstack_networking_secgroup_rule_v2": resourceNetworkingSecGroupRuleV2(), - "openstack_objectstorage_container_v1": resourceObjectStorageContainerV1(), - }, - - ConfigureFunc: configureProvider, - } -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "auth_url": "The Identity authentication URL.", - - "user_name": "Username to login with.", - - "user_id": "User ID to login with.", - - "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "tenant_name": "The 
name of the Tenant (Identity v2) or Project (Identity v3)\n" + - "to login with.", - - "password": "Password to login with.", - - "token": "Authentication token to use as an alternative to username/password.", - - "domain_id": "The ID of the Domain to scope to (Identity v3).", - - "domain_name": "The name of the Domain to scope to (Identity v3).", - - "insecure": "Trust self-signed certificates.", - - "cacert_file": "A Custom CA certificate.", - - "endpoint_type": "The catalog endpoint type to use.", - - "cert": "A client certificate to authenticate with.", - - "key": "A client private key to authenticate with.", - - "swauth": "Use Swift's authentication system instead of Keystone. Only used for\n" + - "interaction with Swift.", - } -} - -func configureProvider(d *schema.ResourceData) (interface{}, error) { - config := Config{ - CACertFile: d.Get("cacert_file").(string), - ClientCertFile: d.Get("cert").(string), - ClientKeyFile: d.Get("key").(string), - DomainID: d.Get("domain_id").(string), - DomainName: d.Get("domain_name").(string), - EndpointType: d.Get("endpoint_type").(string), - IdentityEndpoint: d.Get("auth_url").(string), - Insecure: d.Get("insecure").(bool), - Password: d.Get("password").(string), - Swauth: d.Get("swauth").(bool), - Token: d.Get("token").(string), - TenantID: d.Get("tenant_id").(string), - TenantName: d.Get("tenant_name").(string), - Username: d.Get("user_name").(string), - UserID: d.Get("user_id").(string), - } - - if err := config.loadAndValidate(); err != nil { - return nil, err - } - - return &config, nil -} diff --git a/builtin/providers/openstack/provider_test.go b/builtin/providers/openstack/provider_test.go deleted file mode 100644 index 767dcc8d9..000000000 --- a/builtin/providers/openstack/provider_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package openstack - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/pathorcontents" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var ( - OS_EXTGW_ID = os.Getenv("OS_EXTGW_ID") - OS_FLAVOR_ID = os.Getenv("OS_FLAVOR_ID") - OS_FLAVOR_NAME = os.Getenv("OS_FLAVOR_NAME") - OS_IMAGE_ID = os.Getenv("OS_IMAGE_ID") - OS_IMAGE_NAME = os.Getenv("OS_IMAGE_NAME") - OS_NETWORK_ID = os.Getenv("OS_NETWORK_ID") - OS_POOL_NAME = os.Getenv("OS_POOL_NAME") - OS_REGION_NAME = os.Getenv("OS_REGION_NAME") -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "openstack": testAccProvider, - } -} - -func testAccPreCheck(t *testing.T) { - v := os.Getenv("OS_AUTH_URL") - if v == "" { - t.Fatal("OS_AUTH_URL must be set for acceptance tests") - } - - if OS_IMAGE_ID == "" && OS_IMAGE_NAME == "" { - t.Fatal("OS_IMAGE_ID or OS_IMAGE_NAME must be set for acceptance tests") - } - - if OS_POOL_NAME == "" { - t.Fatal("OS_POOL_NAME must be set for acceptance tests") - } - - if OS_FLAVOR_ID == "" && OS_FLAVOR_NAME == "" { - t.Fatal("OS_FLAVOR_ID or OS_FLAVOR_NAME must be set for acceptance tests") - } - - if OS_NETWORK_ID == "" { - t.Fatal("OS_NETWORK_ID must be set for acceptance tests") - } - - if OS_EXTGW_ID == "" { - t.Fatal("OS_EXTGW_ID must be set for acceptance tests") - } -} - -func testAccPreCheckAdminOnly(t *testing.T) { - v := os.Getenv("OS_USERNAME") - if v != "admin" { - t.Skip("Skipping test because it requires the admin user") - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -// Steps for configuring OpenStack with SSL validation are here: -// https://github.com/hashicorp/terraform/pull/6279#issuecomment-219020144 -func TestAccProvider_caCertFile(t 
*testing.T) { - if os.Getenv("TF_ACC") == "" || os.Getenv("OS_SSL_TESTS") == "" { - t.Skip("TF_ACC or OS_SSL_TESTS not set, skipping OpenStack SSL test.") - } - if os.Getenv("OS_CACERT") == "" { - t.Skip("OS_CACERT is not set; skipping OpenStack CA test.") - } - - p := Provider() - - caFile, err := envVarFile("OS_CACERT") - if err != nil { - t.Fatal(err) - } - defer os.Remove(caFile) - - raw := map[string]interface{}{ - "cacert_file": caFile, - } - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = p.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("Unexpected err when specifying OpenStack CA by file: %s", err) - } -} - -func TestAccProvider_caCertString(t *testing.T) { - if os.Getenv("TF_ACC") == "" || os.Getenv("OS_SSL_TESTS") == "" { - t.Skip("TF_ACC or OS_SSL_TESTS not set, skipping OpenStack SSL test.") - } - if os.Getenv("OS_CACERT") == "" { - t.Skip("OS_CACERT is not set; skipping OpenStack CA test.") - } - - p := Provider() - - caContents, err := envVarContents("OS_CACERT") - if err != nil { - t.Fatal(err) - } - raw := map[string]interface{}{ - "cacert_file": caContents, - } - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = p.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("Unexpected err when specifying OpenStack CA by string: %s", err) - } -} - -func TestAccProvider_clientCertFile(t *testing.T) { - if os.Getenv("TF_ACC") == "" || os.Getenv("OS_SSL_TESTS") == "" { - t.Skip("TF_ACC or OS_SSL_TESTS not set, skipping OpenStack SSL test.") - } - if os.Getenv("OS_CERT") == "" || os.Getenv("OS_KEY") == "" { - t.Skip("OS_CERT or OS_KEY is not set; skipping OpenStack client SSL auth test.") - } - - p := Provider() - - certFile, err := envVarFile("OS_CERT") - if err != nil { - t.Fatal(err) - } - defer os.Remove(certFile) - keyFile, err := envVarFile("OS_KEY") - if err != nil { - t.Fatal(err) - } - 
defer os.Remove(keyFile) - - raw := map[string]interface{}{ - "cert": certFile, - "key": keyFile, - } - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = p.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("Unexpected err when specifying OpenStack Client keypair by file: %s", err) - } -} - -func TestAccProvider_clientCertString(t *testing.T) { - if os.Getenv("TF_ACC") == "" || os.Getenv("OS_SSL_TESTS") == "" { - t.Skip("TF_ACC or OS_SSL_TESTS not set, skipping OpenStack SSL test.") - } - if os.Getenv("OS_CERT") == "" || os.Getenv("OS_KEY") == "" { - t.Skip("OS_CERT or OS_KEY is not set; skipping OpenStack client SSL auth test.") - } - - p := Provider() - - certContents, err := envVarContents("OS_CERT") - if err != nil { - t.Fatal(err) - } - keyContents, err := envVarContents("OS_KEY") - if err != nil { - t.Fatal(err) - } - - raw := map[string]interface{}{ - "cert": certContents, - "key": keyContents, - } - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = p.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("Unexpected err when specifying OpenStack Client keypair by contents: %s", err) - } -} - -func envVarContents(varName string) (string, error) { - contents, _, err := pathorcontents.Read(os.Getenv(varName)) - if err != nil { - return "", fmt.Errorf("Error reading %s: %s", varName, err) - } - return contents, nil -} - -func envVarFile(varName string) (string, error) { - contents, err := envVarContents(varName) - if err != nil { - return "", err - } - - tmpFile, err := ioutil.TempFile("", varName) - if err != nil { - return "", fmt.Errorf("Error creating temp file: %s", err) - } - if _, err := tmpFile.Write([]byte(contents)); err != nil { - _ = os.Remove(tmpFile.Name()) - return "", fmt.Errorf("Error writing temp file: %s", err) - } - if err := tmpFile.Close(); err != nil { - _ = os.Remove(tmpFile.Name()) - 
return "", fmt.Errorf("Error closing temp file: %s", err) - } - return tmpFile.Name(), nil -} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2.go deleted file mode 100644 index 4dd28e7bc..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2.go +++ /dev/null @@ -1,414 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeAttachV2() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeAttachV2Create, - Read: resourceBlockStorageVolumeAttachV2Read, - Delete: resourceBlockStorageVolumeAttachV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "instance_id is no longer used in this resource", - }, - - "host_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "device": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "attach_mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) 
{ - value := v.(string) - if value != "ro" && value != "rw" { - errors = append(errors, fmt.Errorf( - "Only 'ro' and 'rw' are supported values for 'attach_mode'")) - } - return - }, - }, - - "initiator": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "multipath": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "os_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "platform": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "wwpn": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "wwnn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // Volume attachment information - "data": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - Sensitive: true, - }, - - "driver_volume_type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "mount_point_base": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceBlockStorageVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - // initialize the connection - volumeId := d.Get("volume_id").(string) - connOpts := &volumeactions.InitializeConnectionOpts{} - if v, ok := d.GetOk("host_name"); ok { - connOpts.Host = v.(string) - } - - if v, ok := d.GetOk("multipath"); ok { - multipath := v.(bool) - connOpts.Multipath = &multipath - } - - if v, ok := d.GetOk("ip_address"); ok { - connOpts.IP = v.(string) - } - - if v, ok := d.GetOk("initiator"); ok { - connOpts.Initiator = v.(string) - 
} - - if v, ok := d.GetOk("os_type"); ok { - connOpts.OSType = v.(string) - } - - if v, ok := d.GetOk("platform"); ok { - connOpts.Platform = v.(string) - } - - if v, ok := d.GetOk("wwnns"); ok { - connOpts.Wwnns = v.(string) - } - - if v, ok := d.GetOk("wwpns"); ok { - var wwpns []string - for _, i := range v.([]string) { - wwpns = append(wwpns, i) - } - - connOpts.Wwpns = wwpns - } - - connInfo, err := volumeactions.InitializeConnection(client, volumeId, connOpts).Extract() - if err != nil { - return fmt.Errorf("Unable to create connection: %s", err) - } - - // Only uncomment this when debugging since connInfo contains sensitive information. - // log.Printf("[DEBUG] Volume Connection for %s: %#v", volumeId, connInfo) - - // Because this information is only returned upon creation, - // it must be set in Create. - if v, ok := connInfo["data"]; ok { - data := make(map[string]string) - for key, value := range v.(map[string]interface{}) { - if v, ok := value.(string); ok { - data[key] = v - } - } - - d.Set("data", data) - } - - if v, ok := connInfo["driver_volume_type"]; ok { - d.Set("driver_volume_type", v) - } - - if v, ok := connInfo["mount_point_base"]; ok { - d.Set("mount_point_base", v) - } - - // Once the connection has been made, tell Cinder to mark the volume as attached. - attachMode, err := blockStorageVolumeAttachV2AttachMode(d.Get("attach_mode").(string)) - if err != nil { - return nil - } - - attachOpts := &volumeactions.AttachOpts{ - HostName: d.Get("host_name").(string), - MountPoint: d.Get("device").(string), - Mode: attachMode, - } - - log.Printf("[DEBUG] Attachment Options: %#v", attachOpts) - - if err := volumeactions.Attach(client, volumeId, attachOpts).ExtractErr(); err != nil { - return err - } - - // Wait for the volume to become available. 
- log.Printf("[DEBUG] Waiting for volume (%s) to become available", volumeId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"available", "attaching"}, - Target: []string{"in-use"}, - Refresh: VolumeV2StateRefreshFunc(client, volumeId), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for volume (%s) to become ready: %s", volumeId, err) - } - - // Once the volume has been marked as attached, - // retrieve a fresh copy of it with all information now available. - volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - return err - } - - // Search for the attachmentId - var attachmentId string - hostName := d.Get("host_name").(string) - for _, attachment := range volume.Attachments { - if hostName != "" && hostName == attachment.HostName { - attachmentId = attachment.AttachmentID - } - } - - if attachmentId == "" { - return fmt.Errorf("Unable to determine attachment ID.") - } - - // The ID must be a combination of the volume and attachment ID - // since a volume ID is required to retrieve an attachment ID. 
- id := fmt.Sprintf("%s/%s", volumeId, attachmentId) - d.SetId(id) - - return resourceBlockStorageVolumeAttachV2Read(d, meta) -} - -func resourceBlockStorageVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) - if err != nil { - return err - } - - volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Retrieved volume %s: %#v", d.Id(), volume) - - var attachment volumes.Attachment - for _, v := range volume.Attachments { - if attachmentId == v.AttachmentID { - attachment = v - } - } - - log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) - - return nil -} - -func resourceBlockStorageVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - client, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) - - // Terminate the connection - termOpts := &volumeactions.TerminateConnectionOpts{} - if v, ok := d.GetOk("host_name"); ok { - termOpts.Host = v.(string) - } - - if v, ok := d.GetOk("multipath"); ok { - multipath := v.(bool) - termOpts.Multipath = &multipath - } - - if v, ok := d.GetOk("ip_address"); ok { - termOpts.IP = v.(string) - } - - if v, ok := d.GetOk("initiator"); ok { - termOpts.Initiator = v.(string) - } - - if v, ok := d.GetOk("os_type"); ok { - termOpts.OSType = v.(string) - } - - if v, ok := d.GetOk("platform"); ok { - termOpts.Platform = v.(string) - } - - if v, ok := d.GetOk("wwnns"); ok { - termOpts.Wwnns = v.(string) - } - - if v, ok := d.GetOk("wwpns"); ok { - var wwpns []string - for _, i 
:= range v.([]string) { - wwpns = append(wwpns, i) - } - - termOpts.Wwpns = wwpns - } - - err = volumeactions.TerminateConnection(client, volumeId, termOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error terminating volume connection %s: %s", volumeId, err) - } - - // Detach the volume - detachOpts := volumeactions.DetachOpts{ - AttachmentID: attachmentId, - } - - log.Printf("[DEBUG] Detachment Options: %#v", detachOpts) - - if err := volumeactions.Detach(client, volumeId, detachOpts).ExtractErr(); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(client, volumeId), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for volume (%s) to become available: %s", volumeId, err) - } - - return nil -} - -func blockStorageVolumeAttachV2AttachMode(v string) (volumeactions.AttachMode, error) { - var attachMode volumeactions.AttachMode - var attachError error - switch v { - case "": - attachMode = "" - case "ro": - attachMode = volumeactions.ReadOnly - case "rw": - attachMode = volumeactions.ReadWrite - default: - attachError = fmt.Errorf("Invalid attach_mode specified") - } - - return attachMode, attachError -} - -func blockStorageVolumeAttachV2ParseId(id string) (string, string, error) { - parts := strings.Split(id, "/") - if len(parts) < 2 { - return "", "", fmt.Errorf("Unable to determine attachment ID") - } - - return parts[0], parts[1], nil -} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2_test.go deleted file mode 100644 index d6b54c447..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_attach_v2_test.go +++ 
/dev/null @@ -1,166 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" -) - -func TestAccBlockStorageVolumeAttachV2_basic(t *testing.T) { - var va volumes.Attachment - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageVolumeAttachV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageVolumeAttachV2_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageVolumeAttachV2Exists("openstack_blockstorage_volume_attach_v2.va_1", &va), - ), - }, - }, - }) -} - -func TestAccBlockStorageVolumeAttachV2_timeout(t *testing.T) { - var va volumes.Attachment - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageVolumeAttachV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageVolumeAttachV2_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageVolumeAttachV2Exists("openstack_blockstorage_volume_attach_v2.va_1", &va), - ), - }, - }, - }) -} - -func testAccCheckBlockStorageVolumeAttachV2Destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - client, err := config.blockStorageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_blockstorage_volume_attach_v2" { - continue - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(rs.Primary.ID) - if err != nil { - return err - } - - volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - if _, ok := 
err.(gophercloud.ErrDefault404); ok { - return nil - } - return err - } - - for _, v := range volume.Attachments { - if attachmentId == v.AttachmentID { - return fmt.Errorf("Volume attachment still exists") - } - } - } - - return nil -} - -func testAccCheckBlockStorageVolumeAttachV2Exists(n string, va *volumes.Attachment) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - client, err := config.blockStorageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(rs.Primary.ID) - if err != nil { - return err - } - - volume, err := volumes.Get(client, volumeId).Extract() - if err != nil { - return err - } - - var found bool - for _, v := range volume.Attachments { - if attachmentId == v.AttachmentID { - found = true - *va = v - } - } - - if !found { - return fmt.Errorf("Volume Attachment not found") - } - - return nil - } -} - -const testAccBlockStorageVolumeAttachV2_basic = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_blockstorage_volume_attach_v2" "va_1" { - volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}" - device = "auto" - - host_name = "devstack" - ip_address = "192.168.255.10" - initiator = "iqn.1993-08.org.debian:01:e9861fb1859" - os_type = "linux2" - platform = "x86_64" -} -` - -const testAccBlockStorageVolumeAttachV2_timeout = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_blockstorage_volume_attach_v2" "va_1" { - volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}" - device = "auto" - - host_name = "devstack" - ip_address = 
"192.168.255.10" - initiator = "iqn.1993-08.org.debian:01:e9861fb1859" - os_type = "linux2" - platform = "x86_64" - - timeouts { - create = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go deleted file mode 100644 index 8c84a08e8..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go +++ /dev/null @@ -1,339 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeV1() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeV1Create, - Read: resourceBlockStorageVolumeV1Read, - Update: resourceBlockStorageVolumeV1Update, - Delete: resourceBlockStorageVolumeV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, 
- "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - Computed: true, - }, - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_vol_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "attachment": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceVolumeAttachmentHash, - }, - }, - } -} - -func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - createOpts := &volumes.CreateOpts{ - Description: d.Get("description").(string), - AvailabilityZone: d.Get("availability_zone").(string), - Name: d.Get("name").(string), - Size: d.Get("size").(int), - SnapshotID: d.Get("snapshot_id").(string), - SourceVolID: d.Get("source_vol_id").(string), - ImageID: d.Get("image_id").(string), - VolumeType: d.Get("volume_type").(string), - Metadata: resourceContainerMetadataV2(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - v, err := volumes.Create(blockStorageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack volume: %s", err) - } - log.Printf("[INFO] Volume ID: %s", v.ID) - - // Wait for the volume to become available. 
- log.Printf( - "[DEBUG] Waiting for volume (%s) to become available", - v.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"downloading", "creating"}, - Target: []string{"available"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become ready: %s", - v.ID, err) - } - - // Store the ID now - d.SetId(v.ID) - - return resourceBlockStorageVolumeV1Read(d, meta) -} - -func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) - - d.Set("size", v.Size) - d.Set("description", v.Description) - d.Set("availability_zone", v.AvailabilityZone) - d.Set("name", v.Name) - d.Set("snapshot_id", v.SnapshotID) - d.Set("source_vol_id", v.SourceVolID) - d.Set("volume_type", v.VolumeType) - d.Set("metadata", v.Metadata) - d.Set("region", GetRegion(d)) - - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment["id"] - attachments[i]["instance_id"] = attachment["server_id"] - attachments[i]["device"] = attachment["device"] - log.Printf("[DEBUG] attachment: %v", attachment) - } - d.Set("attachment", attachments) - - return nil -} - -func resourceBlockStorageVolumeV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := 
config.blockStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - updateOpts := volumes.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceVolumeMetadataV1(d) - } - - _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack volume: %s", err) - } - - return resourceBlockStorageVolumeV1Read(d, meta) -} - -func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - // make sure this volume is detached from all instances before deleting - if len(v.Attachments) > 0 { - log.Printf("[DEBUG] detaching volumes") - if computeClient, err := config.computeV2Client(GetRegion(d)); err != nil { - return err - } else { - for _, volumeAttachment := range v.Attachments { - log.Printf("[DEBUG] Attachment: %v", volumeAttachment) - if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { - return err - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become available: %s", - d.Id(), err) - } - } - } - - // It's possible that this 
volume was used as a boot device and is currently - // in a "deleting" state from when the instance was terminated. - // If this is true, just move on. It'll eventually delete. - if v.Status != "deleting" { - if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { - return CheckDeleted(d, err, "volume") - } - } - - // Wait for the volume to delete before moving on. - log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting", "downloading", "available"}, - Target: []string{"deleted"}, - Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -func resourceVolumeMetadataV1(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// VolumeV1StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack volume. -func VolumeV1StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := volumes.Get(client, volumeID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return v, "deleted", nil - } - return nil, "", err - } - - if v.Status == "error" { - return v, v.Status, fmt.Errorf("There was an error creating the volume. 
" + - "Please check with your cloud admin or check the Block Storage " + - "API logs to see why this error occurred.") - } - - return v, v.Status, nil - } -} - -func resourceVolumeAttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["instance_id"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) - } - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go deleted file mode 100644 index 7dd16169e..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" -) - -func TestAccBlockStorageV1Volume_basic(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV1Volume_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.volume_1", &volume), - testAccCheckBlockStorageV1VolumeMetadata(&volume, "foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_blockstorage_volume_v1.volume_1", "name", "volume_1"), - ), - }, - resource.TestStep{ - Config: testAccBlockStorageV1Volume_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.volume_1", &volume), - testAccCheckBlockStorageV1VolumeMetadata(&volume, "foo", "bar"), - resource.TestCheckResourceAttr( - 
"openstack_blockstorage_volume_v1.volume_1", "name", "volume_1-updated"), - ), - }, - }, - }) -} - -func TestAccBlockStorageV1Volume_image(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV1Volume_image, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.volume_1", &volume), - resource.TestCheckResourceAttr( - "openstack_blockstorage_volume_v1.volume_1", "name", "volume_1"), - ), - }, - }, - }) -} - -func TestAccBlockStorageV1Volume_timeout(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV1Volume_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.volume_1", &volume), - ), - }, - }, - }) -} - -func testAccCheckBlockStorageV1VolumeDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_blockstorage_volume_v1" { - continue - } - - _, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Volume still exists") - } - } - - return nil -} - -func testAccCheckBlockStorageV1VolumeExists(n string, volume *volumes.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - 
return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - found, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Volume not found") - } - - *volume = *found - - return nil - } -} - -func testAccCheckBlockStorageV1VolumeDoesNotExist(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - _, err = volumes.Get(blockStorageClient, volume.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - - return err - } - - return fmt.Errorf("Volume still exists") - } -} - -func testAccCheckBlockStorageV1VolumeMetadata( - volume *volumes.Volume, k string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if volume.Metadata == nil { - return fmt.Errorf("No metadata") - } - - for key, value := range volume.Metadata { - if k != key { - continue - } - - if v == value { - return nil - } - - return fmt.Errorf("Bad value for %s: %s", k, value) - } - - return fmt.Errorf("Metadata not found: %s", k) - } -} - -const testAccBlockStorageV1Volume_basic = ` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - description = "first test volume" - availability_zone = "nova" - metadata { - foo = "bar" - } - size = 1 -} -` - -const testAccBlockStorageV1Volume_update = ` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = 
"volume_1-updated" - description = "first test volume" - metadata { - foo = "bar" - } - size = 1 -} -` - -var testAccBlockStorageV1Volume_image = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - size = 5 - image_id = "%s" -} -`, OS_IMAGE_ID) - -const testAccBlockStorageV1Volume_timeout = ` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - description = "first test volume" - size = 1 - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go deleted file mode 100644 index 5944cac04..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go +++ /dev/null @@ -1,350 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBlockStorageVolumeV2() *schema.Resource { - return &schema.Resource{ - Create: resourceBlockStorageVolumeV2Create, - Read: resourceBlockStorageVolumeV2Read, - Update: resourceBlockStorageVolumeV2Update, - Delete: resourceBlockStorageVolumeV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "size": &schema.Schema{ - Type: schema.TypeInt, 
- Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - Computed: true, - }, - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_vol_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "consistency_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "source_replica": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "attachment": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceVolumeV2AttachmentHash, - }, - }, - } -} - -func resourceBlockStorageVolumeV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - createOpts := &volumes.CreateOpts{ - AvailabilityZone: d.Get("availability_zone").(string), - ConsistencyGroupID: d.Get("consistency_group_id").(string), 
- Description: d.Get("description").(string), - ImageID: d.Get("image_id").(string), - Metadata: resourceContainerMetadataV2(d), - Name: d.Get("name").(string), - Size: d.Get("size").(int), - SnapshotID: d.Get("snapshot_id").(string), - SourceReplica: d.Get("source_replica").(string), - SourceVolID: d.Get("source_vol_id").(string), - VolumeType: d.Get("volume_type").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - v, err := volumes.Create(blockStorageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack volume: %s", err) - } - log.Printf("[INFO] Volume ID: %s", v.ID) - - // Wait for the volume to become available. - log.Printf( - "[DEBUG] Waiting for volume (%s) to become available", - v.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"downloading", "creating"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, v.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become ready: %s", - v.ID, err) - } - - // Store the ID now - d.SetId(v.ID) - - return resourceBlockStorageVolumeV2Read(d, meta) -} - -func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) - - d.Set("size", v.Size) - d.Set("description", v.Description) - d.Set("availability_zone", v.AvailabilityZone) - d.Set("name", v.Name) - d.Set("snapshot_id", v.SnapshotID) - d.Set("source_vol_id", v.SourceVolID) - 
d.Set("volume_type", v.VolumeType) - d.Set("metadata", v.Metadata) - d.Set("region", GetRegion(d)) - - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment.ID - attachments[i]["instance_id"] = attachment.ServerID - attachments[i]["device"] = attachment.Device - log.Printf("[DEBUG] attachment: %v", attachment) - } - d.Set("attachment", attachments) - - return nil -} - -func resourceBlockStorageVolumeV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - updateOpts := volumes.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceVolumeMetadataV2(d) - } - - _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack volume: %s", err) - } - - return resourceBlockStorageVolumeV2Read(d, meta) -} - -func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - v, err := volumes.Get(blockStorageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "volume") - } - - // make sure this volume is detached from all instances before deleting - if len(v.Attachments) > 0 { - log.Printf("[DEBUG] detaching volumes") - if computeClient, err := config.computeV2Client(GetRegion(d)); err != nil { - return err - } else { - for _, volumeAttachment := range v.Attachments { - log.Printf("[DEBUG] Attachment: %v", 
volumeAttachment) - if err := volumeattach.Delete(computeClient, volumeAttachment.ServerID, volumeAttachment.ID).ExtractErr(); err != nil { - return err - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"in-use", "attaching", "detaching"}, - Target: []string{"available"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to become available: %s", - d.Id(), err) - } - } - } - - // It's possible that this volume was used as a boot device and is currently - // in a "deleting" state from when the instance was terminated. - // If this is true, just move on. It'll eventually delete. - if v.Status != "deleting" { - if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { - return CheckDeleted(d, err, "volume") - } - } - - // Wait for the volume to delete before moving on. - log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting", "downloading", "available"}, - Target: []string{"deleted"}, - Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for volume (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -func resourceVolumeMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack volume. 
-func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - v, err := volumes.Get(client, volumeID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return v, "deleted", nil - } - return nil, "", err - } - - if v.Status == "error" { - return v, v.Status, fmt.Errorf("There was an error creating the volume. " + - "Please check with your cloud admin or check the Block Storage " + - "API logs to see why this error occurred.") - } - - return v, v.Status, nil - } -} - -func resourceVolumeV2AttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["instance_id"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) - } - return hashcode.String(buf.String()) -} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go deleted file mode 100644 index a9991a71e..000000000 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" -) - -func TestAccBlockStorageV2Volume_basic(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV2VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV2Volume_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV2VolumeExists("openstack_blockstorage_volume_v2.volume_1", &volume), - 
testAccCheckBlockStorageV2VolumeMetadata(&volume, "foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_blockstorage_volume_v2.volume_1", "name", "volume_1"), - ), - }, - resource.TestStep{ - Config: testAccBlockStorageV2Volume_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV2VolumeExists("openstack_blockstorage_volume_v2.volume_1", &volume), - testAccCheckBlockStorageV2VolumeMetadata(&volume, "foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_blockstorage_volume_v2.volume_1", "name", "volume_1-updated"), - ), - }, - }, - }) -} - -func TestAccBlockStorageV2Volume_image(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV2VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV2Volume_image, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV2VolumeExists("openstack_blockstorage_volume_v2.volume_1", &volume), - resource.TestCheckResourceAttr( - "openstack_blockstorage_volume_v2.volume_1", "name", "volume_1"), - ), - }, - }, - }) -} - -func TestAccBlockStorageV2Volume_timeout(t *testing.T) { - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckBlockStorageV2VolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBlockStorageV2Volume_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV2VolumeExists("openstack_blockstorage_volume_v2.volume_1", &volume), - ), - }, - }, - }) -} - -func testAccCheckBlockStorageV2VolumeDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) 
- } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_blockstorage_volume_v2" { - continue - } - - _, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Volume still exists") - } - } - - return nil -} - -func testAccCheckBlockStorageV2VolumeExists(n string, volume *volumes.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - found, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Volume not found") - } - - *volume = *found - - return nil - } -} - -func testAccCheckBlockStorageV2VolumeDoesNotExist(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } - - _, err = volumes.Get(blockStorageClient, volume.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - return err - } - - return fmt.Errorf("Volume still exists") - } -} - -func testAccCheckBlockStorageV2VolumeMetadata( - volume *volumes.Volume, k string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if volume.Metadata == nil { - return fmt.Errorf("No metadata") - } - - for key, value := range volume.Metadata { - if k != key { - continue - } - - if v == value { - return nil - } - 
- return fmt.Errorf("Bad value for %s: %s", k, value) - } - - return fmt.Errorf("Metadata not found: %s", k) - } -} - -const testAccBlockStorageV2Volume_basic = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - description = "first test volume" - metadata { - foo = "bar" - } - size = 1 -} -` - -const testAccBlockStorageV2Volume_update = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1-updated" - description = "first test volume" - metadata { - foo = "bar" - } - size = 1 -} -` - -var testAccBlockStorageV2Volume_image = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 5 - image_id = "%s" -} -`, OS_IMAGE_ID) - -const testAccBlockStorageV2Volume_timeout = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - description = "first test volume" - size = 1 - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2.go deleted file mode 100644 index 963e191d6..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2.go +++ /dev/null @@ -1,234 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - nfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeFloatingIPAssociateV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFloatingIPAssociateV2Create, - Read: resourceComputeFloatingIPAssociateV2Read, - Delete: resourceComputeFloatingIPAssociateV2Delete, - Importer: 
&schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeFloatingIPAssociateV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - floatingIP := d.Get("floating_ip").(string) - fixedIP := d.Get("fixed_ip").(string) - instanceId := d.Get("instance_id").(string) - - associateOpts := floatingips.AssociateOpts{ - FloatingIP: floatingIP, - FixedIP: fixedIP, - } - log.Printf("[DEBUG] Associate Options: %#v", associateOpts) - - err = floatingips.AssociateInstance(computeClient, instanceId, associateOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error associating Floating IP: %s", err) - } - - // There's an API call to get this information, but it has been - // deprecated. The Neutron API could be used, but I'm trying not - // to mix service APIs. Therefore, a faux ID will be used. - id := fmt.Sprintf("%s/%s/%s", floatingIP, instanceId, fixedIP) - d.SetId(id) - - // This API call is synchronous, so Create won't return until the IP - // is attached. No need to wait for a state. 
- - return resourceComputeFloatingIPAssociateV2Read(d, meta) -} - -func resourceComputeFloatingIPAssociateV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - // Obtain relevant info from parsing the ID - floatingIP, instanceId, fixedIP, err := parseComputeFloatingIPAssociateId(d.Id()) - if err != nil { - return err - } - - // Now check and see whether the floating IP still exists. - // First try to do this by querying the Network API. - networkEnabled := true - networkClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - networkEnabled = false - } - - var exists bool - if networkEnabled { - log.Printf("[DEBUG] Checking for Floating IP existence via Network API") - exists, err = resourceComputeFloatingIPAssociateV2NetworkExists(networkClient, floatingIP) - } else { - log.Printf("[DEBUG] Checking for Floating IP existence via Compute API") - exists, err = resourceComputeFloatingIPAssociateV2ComputeExists(computeClient, floatingIP) - } - - if err != nil { - return err - } - - if !exists { - d.SetId("") - } - - // Next, see if the instance still exists - instance, err := servers.Get(computeClient, instanceId).Extract() - if err != nil { - if CheckDeleted(d, err, "instance") == nil { - return nil - } - } - - // Finally, check and see if the floating ip is still associated with the instance. 
- var associated bool - for _, networkAddresses := range instance.Addresses { - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" && address["addr"] == floatingIP { - associated = true - } - } - } - - if !associated { - d.SetId("") - } - - // Set the attributes pulled from the composed resource ID - d.Set("floating_ip", floatingIP) - d.Set("instance_id", instanceId) - d.Set("fixed_ip", fixedIP) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeFloatingIPAssociateV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - floatingIP := d.Get("floating_ip").(string) - instanceId := d.Get("instance_id").(string) - - disassociateOpts := floatingips.DisassociateOpts{ - FloatingIP: floatingIP, - } - log.Printf("[DEBUG] Disssociate Options: %#v", disassociateOpts) - - err = floatingips.DisassociateInstance(computeClient, instanceId, disassociateOpts).ExtractErr() - if err != nil { - return CheckDeleted(d, err, "floating ip association") - } - - return nil -} - -func parseComputeFloatingIPAssociateId(id string) (string, string, string, error) { - idParts := strings.Split(id, "/") - if len(idParts) < 3 { - return "", "", "", fmt.Errorf("Unable to determine floating ip association ID") - } - - floatingIP := idParts[0] - instanceId := idParts[1] - fixedIP := idParts[2] - - return floatingIP, instanceId, fixedIP, nil -} - -func resourceComputeFloatingIPAssociateV2NetworkExists(networkClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { - listOpts := nfloatingips.ListOpts{ - FloatingIP: floatingIP, - } - allPages, err := nfloatingips.List(networkClient, listOpts).AllPages() - if err != nil { - return false, err - } - - allFips, err := 
nfloatingips.ExtractFloatingIPs(allPages) - if err != nil { - return false, err - } - - if len(allFips) > 1 { - return false, fmt.Errorf("There was a problem retrieving the floating IP") - } - - if len(allFips) == 0 { - return false, nil - } - - return true, nil -} - -func resourceComputeFloatingIPAssociateV2ComputeExists(computeClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { - // If the Network API isn't available, fall back to the deprecated Compute API. - allPages, err := floatingips.List(computeClient).AllPages() - if err != nil { - return false, err - } - - allFips, err := floatingips.ExtractFloatingIPs(allPages) - if err != nil { - return false, err - } - - for _, f := range allFips { - if f.IP == floatingIP { - return true, nil - } - } - - return false, nil -} diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2_test.go deleted file mode 100644 index a9e7b3d4c..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_floatingip_associate_v2_test.go +++ /dev/null @@ -1,324 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccComputeV2FloatingIPAssociate_basic(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_basic, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip, &instance, 1), - ), - }, - }, - }) -} - -func TestAccComputeV2FloatingIPAssociate_fixedIP(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_fixedIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip, &instance, 1), - ), - }, - }, - }) -} - -func TestAccComputeV2FloatingIPAssociate_attachToFirstNetwork(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_attachToFirstNetwork, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip, &instance, 1), - ), - }, - }, - }) -} - -func TestAccComputeV2FloatingIPAssociate_attachToSecondNetwork(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_attachToSecondNetwork, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip, &instance, 2), - ), - }, - }, - }) -} - -func TestAccComputeV2FloatingIPAssociate_attachNew(t *testing.T) { - var instance servers.Server - var fip_1 floatingips.FloatingIP - var fip_2 floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPAssociateDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_attachNew_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip_1), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_2", &fip_2), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip_1, &instance, 1), - ), - }, - resource.TestStep{ - Config: testAccComputeV2FloatingIPAssociate_attachNew_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip_1), - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_2", &fip_2), - testAccCheckComputeV2FloatingIPAssociateAssociated(&fip_2, &instance, 1), - ), - }, - }, - }) -} - -func testAccCheckComputeV2FloatingIPAssociateDestroy(s 
*terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_floatingip_associate_v2" { - continue - } - - floatingIP, instanceId, _, err := parseComputeFloatingIPAssociateId(rs.Primary.ID) - if err != nil { - return err - } - - instance, err := servers.Get(computeClient, instanceId).Extract() - if err != nil { - // If the error is a 404, then the instance does not exist, - // and therefore the floating IP cannot be associated to it. - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - return err - } - - // But if the instance still exists, then walk through its known addresses - // and see if there's a floating IP. - for _, networkAddresses := range instance.Addresses { - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" { - return fmt.Errorf("Floating IP %s is still attached to instance %s", floatingIP, instanceId) - } - } - } - } - - return nil -} - -func testAccCheckComputeV2FloatingIPAssociateAssociated( - fip *floatingips.FloatingIP, instance *servers.Server, n int) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - - newInstance, err := servers.Get(computeClient, instance.ID).Extract() - if err != nil { - return err - } - - // Walk through the instance's addresses and find the match - i := 0 - for _, networkAddresses := range newInstance.Addresses { - i += 1 - if i != n { - continue - } - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" && address["addr"] == 
fip.FloatingIP { - return nil - } - } - } - return fmt.Errorf("Floating IP %s was not attached to instance %s", fip.FloatingIP, instance.ID) - } -} - -const testAccComputeV2FloatingIPAssociate_basic = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" -} -` - -const testAccComputeV2FloatingIPAssociate_fixedIP = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - fixed_ip = "${openstack_compute_instance_v2.instance_1.access_ip_v4}" -} -` - -var testAccComputeV2FloatingIPAssociate_attachToFirstNetwork = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - network { - uuid = "%s" - } -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - fixed_ip = "${openstack_compute_instance_v2.instance_1.network.0.fixed_ip_v4}" -} -`, OS_NETWORK_ID) - -var testAccComputeV2FloatingIPAssociate_attachToSecondNetwork = fmt.Sprintf(` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - 
cidr = "192.168.1.0/24" - ip_version = 4 - enable_dhcp = true - no_gateway = true -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - } - - network { - uuid = "%s" - } -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - fixed_ip = "${openstack_compute_instance_v2.instance_1.network.1.fixed_ip_v4}" -} -`, OS_NETWORK_ID) - -const testAccComputeV2FloatingIPAssociate_attachNew_1 = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_networking_floatingip_v2" "fip_2" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" -} -` - -const testAccComputeV2FloatingIPAssociate_attachNew_2 = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_networking_floatingip_v2" "fip_2" { -} - -resource "openstack_compute_floatingip_associate_v2" "fip_1" { - floating_ip = "${openstack_networking_floatingip_v2.fip_2.address}" - instance_id = "${openstack_compute_instance_v2.instance_1.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go deleted file mode 100644 index 96e723d5c..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go +++ /dev/null @@ 
-1,111 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeFloatingIPV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFloatingIPV2Create, - Read: resourceComputeFloatingIPV2Read, - Update: nil, - Delete: resourceComputeFloatingIPV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "pool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := &floatingips.CreateOpts{ - Pool: d.Get("pool").(string), - } - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newFip, err := floatingips.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating Floating IP: %s", err) - } - - d.SetId(newFip.ID) - - return resourceComputeFloatingIPV2Read(d, meta) -} - -func resourceComputeFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating 
OpenStack compute client: %s", err) - } - - fip, err := floatingips.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "floating ip") - } - - log.Printf("[DEBUG] Retrieved Floating IP %s: %+v", d.Id(), fip) - - d.Set("pool", fip.Pool) - d.Set("instance_id", fip.InstanceID) - d.Set("address", fip.IP) - d.Set("fixed_ip", fip.FixedIP) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - log.Printf("[DEBUG] Deleting Floating IP %s", d.Id()) - if err := floatingips.Delete(computeClient, d.Id()).ExtractErr(); err != nil { - return fmt.Errorf("Error deleting Floating IP: %s", err) - } - - return nil -} diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go deleted file mode 100644 index a42cb6fd5..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" -) - -func TestAccComputeV2FloatingIP_basic(t *testing.T) { - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIP_basic, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_1", &fip), - ), - }, - }, - }) -} - -func TestAccComputeV2FloatingIP_attach(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2FloatingIP_attach, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - }, - }) -} - -func testAccCheckComputeV2FloatingIPDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_floatingip_v2" { - continue - } - - _, err := floatingips.Get(computeClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("FloatingIP still exists") - } - } - - return nil -} - -func testAccCheckComputeV2FloatingIPExists(n string, kp *floatingips.FloatingIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - found, err := floatingips.Get(computeClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if 
found.ID != rs.Primary.ID { - return fmt.Errorf("FloatingIP not found") - } - - *kp = *found - - return nil - } -} - -const testAccComputeV2FloatingIP_basic = ` -resource "openstack_compute_floatingip_v2" "fip_1" { -} -` - -var testAccComputeV2FloatingIP_attach = fmt.Sprintf(` -resource "openstack_compute_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - floating_ip = "${openstack_compute_floatingip_v2.fip_1.address}" - - network { - uuid = "%s" - } -} -`, OS_NETWORK_ID) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go deleted file mode 100644 index 4484e7c89..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ /dev/null @@ -1,1626 +0,0 @@ -package openstack - -import ( - "bytes" - "crypto/sha1" - "encoding/hex" - "fmt" - "log" - "os" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" - "github.com/gophercloud/gophercloud/openstack/compute/v2/images" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - 
"github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeInstanceV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceV2Create, - Read: resourceComputeInstanceV2Read, - Update: resourceComputeInstanceV2Update, - Delete: resourceComputeInstanceV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "image_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "flavor_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_ID", nil), - }, - "flavor_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_NAME", nil), - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Deprecated: "Use the openstack_compute_floatingip_associate_v2 resource instead", - }, - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - // just stash the hash for state & diff comparisons - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, 
- }, - "security_groups": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "availability_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "network": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uuid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "port": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip_v4": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip_v6": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "Use the openstack_compute_floatingip_associate_v2 resource instead", - }, - "mac": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "access_network": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - "config_drive": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "admin_pass": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "access_ip_v4": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: false, - }, - "access_ip_v6": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: false, - }, - 
"key_pair": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "block_device": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "uuid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "volume_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "destination_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "boot_index": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - "delete_on_termination": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "guest_format": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "volume": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Deprecated: "Use block_device or openstack_compute_volume_attach_v2 instead", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "device": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: resourceComputeVolumeAttachmentHash, - }, - "scheduler_hints": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "different_host": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "same_host": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - 
Elem: &schema.Schema{Type: schema.TypeString}, - }, - "query": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "target_cell": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "build_near_host_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - Set: resourceComputeSchedulerHintsHash, - }, - "personality": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "file": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "content": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: resourceComputeInstancePersonalityHash, - }, - "stop_before_destroy": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "force_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "all_metadata": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - }, - } -} - -func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - var createOpts servers.CreateOptsBuilder - - // Determines the Image ID using the following rules: - // If a bootable block_device was specified, ignore the image altogether. - // If an image_id was specified, use it. - // If an image_name was specified, look up the image ID, report if error. 
- imageId, err := getImageIDFromConfig(computeClient, d) - if err != nil { - return err - } - - flavorId, err := getFlavorID(computeClient, d) - if err != nil { - return err - } - - // determine if block_device configuration is correct - // this includes valid combinations and required attributes - if err := checkBlockDeviceConfig(d); err != nil { - return err - } - - // check if floating IP configuration is correct - if err := checkInstanceFloatingIPs(d); err != nil { - return err - } - - // Build a list of networks with the information given upon creation. - // Error out if an invalid network configuration was used. - networkDetails, err := getInstanceNetworks(computeClient, d) - if err != nil { - return err - } - - networks := make([]servers.Network, len(networkDetails)) - for i, net := range networkDetails { - networks[i] = servers.Network{ - UUID: net["uuid"].(string), - Port: net["port"].(string), - FixedIP: net["fixed_ip_v4"].(string), - } - } - - configDrive := d.Get("config_drive").(bool) - - createOpts = &servers.CreateOpts{ - Name: d.Get("name").(string), - ImageRef: imageId, - FlavorRef: flavorId, - SecurityGroups: resourceInstanceSecGroupsV2(d), - AvailabilityZone: d.Get("availability_zone").(string), - Networks: networks, - Metadata: resourceInstanceMetadataV2(d), - ConfigDrive: &configDrive, - AdminPass: d.Get("admin_pass").(string), - UserData: []byte(d.Get("user_data").(string)), - Personality: resourceInstancePersonalityV2(d), - } - - if keyName, ok := d.Get("key_pair").(string); ok && keyName != "" { - createOpts = &keypairs.CreateOptsExt{ - CreateOptsBuilder: createOpts, - KeyName: keyName, - } - } - - if vL, ok := d.GetOk("block_device"); ok { - blockDevices, err := resourceInstanceBlockDevicesV2(d, vL.([]interface{})) - if err != nil { - return err - } - - createOpts = &bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: createOpts, - BlockDevice: blockDevices, - } - } - - schedulerHintsRaw := d.Get("scheduler_hints").(*schema.Set).List() - if 
len(schedulerHintsRaw) > 0 { - log.Printf("[DEBUG] schedulerhints: %+v", schedulerHintsRaw) - schedulerHints := resourceInstanceSchedulerHintsV2(d, schedulerHintsRaw[0].(map[string]interface{})) - createOpts = &schedulerhints.CreateOptsExt{ - CreateOptsBuilder: createOpts, - SchedulerHints: schedulerHints, - } - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - // If a block_device is used, use the bootfromvolume.Create function as it allows an empty ImageRef. - // Otherwise, use the normal servers.Create function. - var server *servers.Server - if _, ok := d.GetOk("block_device"); ok { - server, err = bootfromvolume.Create(computeClient, createOpts).Extract() - } else { - server, err = servers.Create(computeClient, createOpts).Extract() - } - - if err != nil { - return fmt.Errorf("Error creating OpenStack server: %s", err) - } - log.Printf("[INFO] Instance ID: %s", server.ID) - - // Store the ID now - d.SetId(server.ID) - - // Wait for the instance to become running so we can get some attributes - // that aren't available until later. - log.Printf( - "[DEBUG] Waiting for instance (%s) to become running", - server.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD"}, - Target: []string{"ACTIVE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, server.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to become ready: %s", - server.ID, err) - } - - // Now that the instance has been created, we need to do an early read on the - // networks in order to associate floating IPs - _, err = getInstanceNetworksAndAddresses(computeClient, d) - - // If floating IPs were specified, associate them after the instance has launched. 
- err = associateFloatingIPsToInstance(computeClient, d) - if err != nil { - return err - } - - // if volumes were specified, attach them after the instance has launched. - if v, ok := d.GetOk("volume"); ok { - vols := v.(*schema.Set).List() - if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { - return fmt.Errorf("Error creating OpenStack block storage client: %s", err) - } else { - if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), vols); err != nil { - return err - } - } - } - - return resourceComputeInstanceV2Read(d, meta) -} - -func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - server, err := servers.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "server") - } - - log.Printf("[DEBUG] Retrieved Server %s: %+v", d.Id(), server) - - d.Set("name", server.Name) - - // Get the instance network and address information - networks, err := getInstanceNetworksAndAddresses(computeClient, d) - if err != nil { - return err - } - - // Determine the best IPv4 and IPv6 addresses to access the instance with - hostv4, hostv6 := getInstanceAccessAddresses(d, networks) - - if server.AccessIPv4 != "" && hostv4 == "" { - hostv4 = server.AccessIPv4 - } - - if server.AccessIPv6 != "" && hostv6 == "" { - hostv6 = server.AccessIPv6 - } - - d.Set("network", networks) - d.Set("access_ip_v4", hostv4) - d.Set("access_ip_v6", hostv6) - - // Determine the best IP address to use for SSH connectivity. - // Prefer IPv4 over IPv6. 
- preferredSSHAddress := "" - if hostv4 != "" { - preferredSSHAddress = hostv4 - } else if hostv6 != "" { - preferredSSHAddress = hostv6 - } - - if preferredSSHAddress != "" { - // Initialize the connection info - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": preferredSSHAddress, - }) - } - - d.Set("all_metadata", server.Metadata) - - secGrpNames := []string{} - for _, sg := range server.SecurityGroups { - secGrpNames = append(secGrpNames, sg["name"].(string)) - } - d.Set("security_groups", secGrpNames) - - flavorId, ok := server.Flavor["id"].(string) - if !ok { - return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor) - } - d.Set("flavor_id", flavorId) - - flavor, err := flavors.Get(computeClient, flavorId).Extract() - if err != nil { - return err - } - d.Set("flavor_name", flavor.Name) - - // Set the instance's image information appropriately - if err := setImageInformation(computeClient, server, d); err != nil { - return err - } - - // volume attachments - if err := getVolumeAttachments(computeClient, d); err != nil { - return err - } - - // Build a custom struct for the availability zone extension - var serverWithAZ struct { - servers.Server - availabilityzones.ServerExt - } - - // Do another Get so the above work is not disturbed. 
- err = servers.Get(computeClient, d.Id()).ExtractInto(&serverWithAZ) - if err != nil { - return CheckDeleted(d, err, "server") - } - - // Set the availability zone - d.Set("availability_zone", serverWithAZ.AvailabilityZone) - - return nil -} - -func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - var updateOpts servers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - - if updateOpts != (servers.UpdateOpts{}) { - _, err := servers.Update(computeClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack server: %s", err) - } - } - - if d.HasChange("metadata") { - oldMetadata, newMetadata := d.GetChange("metadata") - var metadataToDelete []string - - // Determine if any metadata keys were removed from the configuration. - // Then request those keys to be deleted. - for oldKey, _ := range oldMetadata.(map[string]interface{}) { - var found bool - for newKey, _ := range newMetadata.(map[string]interface{}) { - if oldKey == newKey { - found = true - } - } - - if !found { - metadataToDelete = append(metadataToDelete, oldKey) - } - } - - for _, key := range metadataToDelete { - err := servers.DeleteMetadatum(computeClient, d.Id(), key).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting metadata (%s) from server (%s): %s", key, d.Id(), err) - } - } - - // Update existing metadata and add any new metadata. 
- metadataOpts := make(servers.MetadataOpts) - for k, v := range newMetadata.(map[string]interface{}) { - metadataOpts[k] = v.(string) - } - - _, err := servers.UpdateMetadata(computeClient, d.Id(), metadataOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack server (%s) metadata: %s", d.Id(), err) - } - } - - if d.HasChange("security_groups") { - oldSGRaw, newSGRaw := d.GetChange("security_groups") - oldSGSet := oldSGRaw.(*schema.Set) - newSGSet := newSGRaw.(*schema.Set) - secgroupsToAdd := newSGSet.Difference(oldSGSet) - secgroupsToRemove := oldSGSet.Difference(newSGSet) - - log.Printf("[DEBUG] Security groups to add: %v", secgroupsToAdd) - - log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) - - for _, g := range secgroupsToRemove.List() { - err := secgroups.RemoveServer(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil && err.Error() != "EOF" { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - - return fmt.Errorf("Error removing security group (%s) from OpenStack server (%s): %s", g, d.Id(), err) - } else { - log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g, d.Id()) - } - } - - for _, g := range secgroupsToAdd.List() { - err := secgroups.AddServer(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil && err.Error() != "EOF" { - return fmt.Errorf("Error adding security group (%s) to OpenStack server (%s): %s", g, d.Id(), err) - } - log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g, d.Id()) - } - } - - if d.HasChange("admin_pass") { - if newPwd, ok := d.Get("admin_pass").(string); ok { - err := servers.ChangeAdminPassword(computeClient, d.Id(), newPwd).ExtractErr() - if err != nil { - return fmt.Errorf("Error changing admin password of OpenStack server (%s): %s", d.Id(), err) - } - } - } - - if d.HasChange("floating_ip") { - oldFIP, newFIP := d.GetChange("floating_ip") - log.Printf("[DEBUG] Old Floating IP: %v", oldFIP) - 
log.Printf("[DEBUG] New Floating IP: %v", newFIP) - if oldFIP.(string) != "" { - log.Printf("[DEBUG] Attempting to disassociate %s from %s", oldFIP, d.Id()) - if err := disassociateFloatingIPFromInstance(computeClient, oldFIP.(string), d.Id(), ""); err != nil { - return fmt.Errorf("Error disassociating Floating IP during update: %s", err) - } - } - - if newFIP.(string) != "" { - log.Printf("[DEBUG] Attempting to associate %s to %s", newFIP, d.Id()) - if err := associateFloatingIPToInstance(computeClient, newFIP.(string), d.Id(), ""); err != nil { - return fmt.Errorf("Error associating Floating IP during update: %s", err) - } - } - } - - if d.HasChange("network") { - oldNetworks, newNetworks := d.GetChange("network") - oldNetworkList := oldNetworks.([]interface{}) - newNetworkList := newNetworks.([]interface{}) - for i, oldNet := range oldNetworkList { - var oldFIP, newFIP string - var oldFixedIP, newFixedIP string - - if oldNetRaw, ok := oldNet.(map[string]interface{}); ok { - oldFIP = oldNetRaw["floating_ip"].(string) - oldFixedIP = oldNetRaw["fixed_ip_v4"].(string) - } - - if len(newNetworkList) > i { - if newNetRaw, ok := newNetworkList[i].(map[string]interface{}); ok { - newFIP = newNetRaw["floating_ip"].(string) - newFixedIP = newNetRaw["fixed_ip_v4"].(string) - } - } - - // Only changes to the floating IP are supported - if oldFIP != "" && oldFIP != newFIP { - log.Printf("[DEBUG] Attempting to disassociate %s from %s", oldFIP, d.Id()) - if err := disassociateFloatingIPFromInstance(computeClient, oldFIP, d.Id(), oldFixedIP); err != nil { - return fmt.Errorf("Error disassociating Floating IP during update: %s", err) - } - } - - if newFIP != "" && oldFIP != newFIP { - log.Printf("[DEBUG] Attempting to associate %s to %s", newFIP, d.Id()) - if err := associateFloatingIPToInstance(computeClient, newFIP, d.Id(), newFixedIP); err != nil { - return fmt.Errorf("Error associating Floating IP during update: %s", err) - } - } - } - } - - if d.HasChange("volume") { - // 
old attachments and new attachments - oldAttachments, newAttachments := d.GetChange("volume") - // for each old attachment, detach the volume - oldAttachmentSet := oldAttachments.(*schema.Set).List() - - log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", oldAttachmentSet) - if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { - return err - } else { - if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), oldAttachmentSet); err != nil { - return err - } - } - - // for each new attachment, attach the volume - newAttachmentSet := newAttachments.(*schema.Set).List() - if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { - return err - } else { - if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), newAttachmentSet); err != nil { - return err - } - } - - d.SetPartial("volume") - } - - if d.HasChange("flavor_id") || d.HasChange("flavor_name") { - var newFlavorId string - var err error - if d.HasChange("flavor_id") { - newFlavorId = d.Get("flavor_id").(string) - } else { - newFlavorName := d.Get("flavor_name").(string) - newFlavorId, err = flavors.IDFromName(computeClient, newFlavorName) - if err != nil { - return err - } - } - - resizeOpts := &servers.ResizeOpts{ - FlavorRef: newFlavorId, - } - log.Printf("[DEBUG] Resize configuration: %#v", resizeOpts) - err = servers.Resize(computeClient, d.Id(), resizeOpts).ExtractErr() - if err != nil { - return fmt.Errorf("Error resizing OpenStack server: %s", err) - } - - // Wait for the instance to finish resizing. 
- log.Printf("[DEBUG] Waiting for instance (%s) to finish resizing", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"RESIZE"}, - Target: []string{"VERIFY_RESIZE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to resize: %s", d.Id(), err) - } - - // Confirm resize. - log.Printf("[DEBUG] Confirming resize") - err = servers.ConfirmResize(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error confirming resize of OpenStack server: %s", err) - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"VERIFY_RESIZE"}, - Target: []string{"ACTIVE"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance (%s) to confirm resize: %s", d.Id(), err) - } - } - - return resourceComputeInstanceV2Read(d, meta) -} - -func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - // Make sure all volumes are detached before deleting - volumes := d.Get("volume") - if volumeSet, ok := volumes.(*schema.Set); ok { - volumeList := volumeSet.List() - if len(volumeList) > 0 { - log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", volumeList) - if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { - return err - } else { - if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), volumeList); err != nil { - return err - } - } - } - } - 
- if d.Get("stop_before_destroy").(bool) { - err = startstop.Stop(computeClient, d.Id()).ExtractErr() - if err != nil { - log.Printf("[WARN] Error stopping OpenStack instance: %s", err) - } else { - stopStateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"SHUTOFF"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: 3 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - log.Printf("[DEBUG] Waiting for instance (%s) to stop", d.Id()) - _, err = stopStateConf.WaitForState() - if err != nil { - log.Printf("[WARN] Error waiting for instance (%s) to stop: %s, proceeding to delete", d.Id(), err) - } - } - } - - if d.Get("force_delete").(bool) { - log.Printf("[DEBUG] Force deleting OpenStack Instance %s", d.Id()) - err = servers.ForceDelete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack server: %s", err) - } - } else { - log.Printf("[DEBUG] Deleting OpenStack Instance %s", d.Id()) - err = servers.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack server: %s", err) - } - } - - // Wait for the instance to delete before moving on. - log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "SHUTOFF"}, - Target: []string{"DELETED", "SOFT_DELETED"}, - Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for instance (%s) to delete: %s", - d.Id(), err) - } - - d.SetId("") - return nil -} - -// ServerV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an OpenStack instance. 
-func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - s, err := servers.Get(client, instanceID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return s, "DELETED", nil - } - return nil, "", err - } - - return s, s.Status, nil - } -} - -func resourceInstanceSecGroupsV2(d *schema.ResourceData) []string { - rawSecGroups := d.Get("security_groups").(*schema.Set).List() - secgroups := make([]string, len(rawSecGroups)) - for i, raw := range rawSecGroups { - secgroups[i] = raw.(string) - } - return secgroups -} - -// getInstanceNetworks collects instance network information from different sources -// and aggregates it all together. -func getInstanceNetworksAndAddresses(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) ([]map[string]interface{}, error) { - server, err := servers.Get(computeClient, d.Id()).Extract() - - if err != nil { - return nil, CheckDeleted(d, err, "server") - } - - networkDetails, err := getInstanceNetworks(computeClient, d) - addresses := getInstanceAddresses(server.Addresses) - if err != nil { - return nil, err - } - - // if there are no networkDetails, make networks at least a length of 1 - networkLength := 1 - if len(networkDetails) > 0 { - networkLength = len(networkDetails) - } - networks := make([]map[string]interface{}, networkLength) - - // Loop through all networks and addresses, - // merge relevant address details. 
- if len(networkDetails) == 0 { - for netName, n := range addresses { - networks[0] = map[string]interface{}{ - "name": netName, - "fixed_ip_v4": n["fixed_ip_v4"], - "fixed_ip_v6": n["fixed_ip_v6"], - "floating_ip": n["floating_ip"], - "mac": n["mac"], - } - } - } else { - for i, net := range networkDetails { - n := addresses[net["name"].(string)] - - networks[i] = map[string]interface{}{ - "uuid": networkDetails[i]["uuid"], - "name": networkDetails[i]["name"], - "port": networkDetails[i]["port"], - "fixed_ip_v4": n["fixed_ip_v4"], - "fixed_ip_v6": n["fixed_ip_v6"], - "floating_ip": n["floating_ip"], - "mac": n["mac"], - "access_network": networkDetails[i]["access_network"], - } - } - } - - log.Printf("[DEBUG] networks: %+v", networks) - - return networks, nil -} - -func getInstanceNetworks(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) ([]map[string]interface{}, error) { - rawNetworks := d.Get("network").([]interface{}) - newNetworks := make([]map[string]interface{}, 0, len(rawNetworks)) - var tenantnet tenantnetworks.Network - - tenantNetworkExt := true - for _, raw := range rawNetworks { - // Not sure what causes this, but it is a possibility (see GH-2323). - // Since we call this function to reconcile what we'll save in the - // state anyways, we just ignore it. 
- if raw == nil { - continue - } - - rawMap := raw.(map[string]interface{}) - - // Both a floating IP and a port cannot be specified - if fip, ok := rawMap["floating_ip"].(string); ok { - if port, ok := rawMap["port"].(string); ok { - if fip != "" && port != "" { - return nil, fmt.Errorf("Only one of a floating IP or port may be specified per network.") - } - } - } - - allPages, err := tenantnetworks.List(computeClient).AllPages() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] os-tenant-networks disabled") - tenantNetworkExt = false - } - - log.Printf("[DEBUG] Err looks like: %+v", err) - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 403 { - log.Printf("[DEBUG] os-tenant-networks disabled.") - tenantNetworkExt = false - } else { - log.Printf("[DEBUG] unexpected os-tenant-networks error: %s", err) - tenantNetworkExt = false - } - } - } - - // In some cases, a call to os-tenant-networks might work, - // but the response is invalid. Catch this during extraction. 
- networkList := []tenantnetworks.Network{} - if tenantNetworkExt { - networkList, err = tenantnetworks.ExtractNetworks(allPages) - if err != nil { - log.Printf("[DEBUG] error extracting os-tenant-networks results: %s", err) - tenantNetworkExt = false - } - } - - networkID := "" - networkName := "" - if tenantNetworkExt { - for _, network := range networkList { - if network.Name == rawMap["name"] { - tenantnet = network - } - if network.ID == rawMap["uuid"] { - tenantnet = network - } - } - - networkID = tenantnet.ID - networkName = tenantnet.Name - } else { - networkID = rawMap["uuid"].(string) - networkName = rawMap["name"].(string) - } - - newNetworks = append(newNetworks, map[string]interface{}{ - "uuid": networkID, - "name": networkName, - "port": rawMap["port"].(string), - "fixed_ip_v4": rawMap["fixed_ip_v4"].(string), - "access_network": rawMap["access_network"].(bool), - }) - } - - log.Printf("[DEBUG] networks: %+v", newNetworks) - return newNetworks, nil -} - -func getInstanceAddresses(addresses map[string]interface{}) map[string]map[string]interface{} { - addrs := make(map[string]map[string]interface{}) - for n, networkAddresses := range addresses { - addrs[n] = make(map[string]interface{}) - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "floating" { - addrs[n]["floating_ip"] = address["addr"] - } else { - if address["version"].(float64) == 4 { - addrs[n]["fixed_ip_v4"] = address["addr"].(string) - } else { - addrs[n]["fixed_ip_v6"] = fmt.Sprintf("[%s]", address["addr"].(string)) - } - } - if mac, ok := address["OS-EXT-IPS-MAC:mac_addr"]; ok { - addrs[n]["mac"] = mac.(string) - } - } - } - - log.Printf("[DEBUG] Addresses: %+v", addresses) - - return addrs -} - -func getInstanceAccessAddresses(d *schema.ResourceData, networks []map[string]interface{}) (string, string) { - var hostv4, hostv6 string - - // Start with a global floating IP - floatingIP := 
d.Get("floating_ip").(string) - if floatingIP != "" { - hostv4 = floatingIP - } - - // Loop through all networks - // If the network has a valid floating, fixed v4, or fixed v6 address - // and hostv4 or hostv6 is not set, set hostv4/hostv6. - // If the network is an "access_network" overwrite hostv4/hostv6. - for _, n := range networks { - var accessNetwork bool - - if an, ok := n["access_network"].(bool); ok && an { - accessNetwork = true - } - - if fixedIPv4, ok := n["fixed_ip_v4"].(string); ok && fixedIPv4 != "" { - if hostv4 == "" || accessNetwork { - hostv4 = fixedIPv4 - } - } - - if floatingIP, ok := n["floating_ip"].(string); ok && floatingIP != "" { - if hostv4 == "" || accessNetwork { - hostv4 = floatingIP - } - } - - if fixedIPv6, ok := n["fixed_ip_v6"].(string); ok && fixedIPv6 != "" { - if hostv6 == "" || accessNetwork { - hostv6 = fixedIPv6 - } - } - } - - log.Printf("[DEBUG] OpenStack Instance Network Access Addresses: %s, %s", hostv4, hostv6) - - return hostv4, hostv6 -} - -func checkInstanceFloatingIPs(d *schema.ResourceData) error { - rawNetworks := d.Get("network").([]interface{}) - floatingIP := d.Get("floating_ip").(string) - - for _, raw := range rawNetworks { - if raw == nil { - continue - } - - rawMap := raw.(map[string]interface{}) - - // Error if a floating IP was specified both globally and in the network block. 
- if floatingIP != "" && rawMap["floating_ip"] != "" { - return fmt.Errorf("Cannot specify a floating IP both globally and in a network block.") - } - } - return nil -} - -func associateFloatingIPsToInstance(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error { - floatingIP := d.Get("floating_ip").(string) - rawNetworks := d.Get("network").([]interface{}) - instanceID := d.Id() - - if floatingIP != "" { - if err := associateFloatingIPToInstance(computeClient, floatingIP, instanceID, ""); err != nil { - return err - } - } else { - for _, raw := range rawNetworks { - if raw == nil { - continue - } - - rawMap := raw.(map[string]interface{}) - if rawMap["floating_ip"].(string) != "" { - floatingIP := rawMap["floating_ip"].(string) - fixedIP := rawMap["fixed_ip_v4"].(string) - if err := associateFloatingIPToInstance(computeClient, floatingIP, instanceID, fixedIP); err != nil { - return err - } - } - } - } - return nil -} - -func associateFloatingIPToInstance(computeClient *gophercloud.ServiceClient, floatingIP string, instanceID string, fixedIP string) error { - associateOpts := floatingips.AssociateOpts{ - FloatingIP: floatingIP, - FixedIP: fixedIP, - } - - if err := floatingips.AssociateInstance(computeClient, instanceID, associateOpts).ExtractErr(); err != nil { - return fmt.Errorf("Error associating floating IP: %s", err) - } - - return nil -} - -func disassociateFloatingIPFromInstance(computeClient *gophercloud.ServiceClient, floatingIP string, instanceID string, fixedIP string) error { - disassociateOpts := floatingips.DisassociateOpts{ - FloatingIP: floatingIP, - } - - if err := floatingips.DisassociateInstance(computeClient, instanceID, disassociateOpts).ExtractErr(); err != nil { - return fmt.Errorf("Error disassociating floating IP: %s", err) - } - - return nil -} - -func resourceInstanceMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { 
- m[key] = val.(string) - } - return m -} - -func resourceInstanceBlockDevicesV2(d *schema.ResourceData, bds []interface{}) ([]bootfromvolume.BlockDevice, error) { - blockDeviceOpts := make([]bootfromvolume.BlockDevice, len(bds)) - for i, bd := range bds { - bdM := bd.(map[string]interface{}) - blockDeviceOpts[i] = bootfromvolume.BlockDevice{ - UUID: bdM["uuid"].(string), - VolumeSize: bdM["volume_size"].(int), - BootIndex: bdM["boot_index"].(int), - DeleteOnTermination: bdM["delete_on_termination"].(bool), - GuestFormat: bdM["guest_format"].(string), - } - - sourceType := bdM["source_type"].(string) - switch sourceType { - case "blank": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceBlank - case "image": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceImage - case "snapshot": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceSnapshot - case "volume": - blockDeviceOpts[i].SourceType = bootfromvolume.SourceVolume - default: - return blockDeviceOpts, fmt.Errorf("unknown block device source type %s", sourceType) - } - - destinationType := bdM["destination_type"].(string) - switch destinationType { - case "local": - blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationLocal - case "volume": - blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationVolume - default: - return blockDeviceOpts, fmt.Errorf("unknown block device destination type %s", destinationType) - } - } - - log.Printf("[DEBUG] Block Device Options: %+v", blockDeviceOpts) - return blockDeviceOpts, nil -} - -func resourceInstanceSchedulerHintsV2(d *schema.ResourceData, schedulerHintsRaw map[string]interface{}) schedulerhints.SchedulerHints { - differentHost := []string{} - if len(schedulerHintsRaw["different_host"].([]interface{})) > 0 { - for _, dh := range schedulerHintsRaw["different_host"].([]interface{}) { - differentHost = append(differentHost, dh.(string)) - } - } - - sameHost := []string{} - if len(schedulerHintsRaw["same_host"].([]interface{})) > 0 { - for _, 
sh := range schedulerHintsRaw["same_host"].([]interface{}) { - sameHost = append(sameHost, sh.(string)) - } - } - - query := make([]interface{}, len(schedulerHintsRaw["query"].([]interface{}))) - if len(schedulerHintsRaw["query"].([]interface{})) > 0 { - for _, q := range schedulerHintsRaw["query"].([]interface{}) { - query = append(query, q.(string)) - } - } - - schedulerHints := schedulerhints.SchedulerHints{ - Group: schedulerHintsRaw["group"].(string), - DifferentHost: differentHost, - SameHost: sameHost, - Query: query, - TargetCell: schedulerHintsRaw["target_cell"].(string), - BuildNearHostIP: schedulerHintsRaw["build_near_host_ip"].(string), - } - - return schedulerHints -} - -func getImageIDFromConfig(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { - // If block_device was used, an Image does not need to be specified, unless an image/local - // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. - if vL, ok := d.GetOk("block_device"); ok { - needImage := false - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - if vM["source_type"] == "image" && vM["destination_type"] == "local" { - needImage = true - } - } - if !needImage { - return "", nil - } - } - - if imageId := d.Get("image_id").(string); imageId != "" { - return imageId, nil - } else { - // try the OS_IMAGE_ID environment variable - if v := os.Getenv("OS_IMAGE_ID"); v != "" { - return v, nil - } - } - - imageName := d.Get("image_name").(string) - if imageName == "" { - // try the OS_IMAGE_NAME environment variable - if v := os.Getenv("OS_IMAGE_NAME"); v != "" { - imageName = v - } - } - - if imageName != "" { - imageId, err := images.IDFromName(computeClient, imageName) - if err != nil { - return "", err - } - return imageId, nil - } - - return "", fmt.Errorf("Neither a boot device, image ID, or image name were able to be determined.") -} - -func setImageInformation(computeClient 
*gophercloud.ServiceClient, server *servers.Server, d *schema.ResourceData) error { - // If block_device was used, an Image does not need to be specified, unless an image/local - // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. - if vL, ok := d.GetOk("block_device"); ok { - needImage := false - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - if vM["source_type"] == "image" && vM["destination_type"] == "local" { - needImage = true - } - } - if !needImage { - d.Set("image_id", "Attempt to boot from volume - no image supplied") - return nil - } - } - - imageId := server.Image["id"].(string) - if imageId != "" { - d.Set("image_id", imageId) - if image, err := images.Get(computeClient, imageId).Extract(); err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - // If the image name can't be found, set the value to "Image not found". - // The most likely scenario is that the image no longer exists in the Image Service - // but the instance still has a record from when it existed. 
- d.Set("image_name", "Image not found") - return nil - } - return err - } else { - d.Set("image_name", image.Name) - } - } - - return nil -} - -func getFlavorID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { - flavorId := d.Get("flavor_id").(string) - - if flavorId != "" { - return flavorId, nil - } - - flavorName := d.Get("flavor_name").(string) - return flavors.IDFromName(client, flavorName) -} - -func resourceComputeVolumeAttachmentHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["volume_id"].(string))) - - return hashcode.String(buf.String()) -} - -func resourceComputeSchedulerHintsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if m["group"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) - } - - if m["target_cell"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["target_cell"].(string))) - } - - if m["build_host_near_ip"] != nil { - buf.WriteString(fmt.Sprintf("%s-", m["build_host_near_ip"].(string))) - } - - buf.WriteString(fmt.Sprintf("%s-", m["different_host"].([]interface{}))) - buf.WriteString(fmt.Sprintf("%s-", m["same_host"].([]interface{}))) - buf.WriteString(fmt.Sprintf("%s-", m["query"].([]interface{}))) - - return hashcode.String(buf.String()) -} - -func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { - for _, v := range vols { - va := v.(map[string]interface{}) - volumeId := va["volume_id"].(string) - device := va["device"].(string) - - s := "" - if serverId != "" { - s = serverId - } else if va["server_id"] != "" { - s = va["server_id"].(string) - } else { - return fmt.Errorf("Unable to determine server ID to attach volume.") - } - - vaOpts := &volumeattach.CreateOpts{ - Device: device, - VolumeID: volumeId, - } - - if _, err := volumeattach.Create(computeClient, s, vaOpts).Extract(); 
err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"attaching", "available"}, - Target: []string{"in-use"}, - Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), - Timeout: 30 * time.Minute, - Delay: 5 * time.Second, - MinTimeout: 2 * time.Second, - } - - if _, err := stateConf.WaitForState(); err != nil { - return err - } - - log.Printf("[INFO] Attached volume %s to instance %s", volumeId, serverId) - } - return nil -} - -func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { - for _, v := range vols { - va := v.(map[string]interface{}) - aId := va["id"].(string) - - log.Printf("[INFO] Attempting to detach volume %s", va["volume_id"]) - if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"detaching", "in-use"}, - Target: []string{"available"}, - Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), - Timeout: 30 * time.Minute, - Delay: 5 * time.Second, - MinTimeout: 2 * time.Second, - } - - if _, err := stateConf.WaitForState(); err != nil { - return err - } - log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId) - } - - return nil -} - -func getVolumeAttachments(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error { - var vols []map[string]interface{} - - allPages, err := volumeattach.List(computeClient, d.Id()).AllPages() - if err != nil { - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 403 { - log.Printf("[DEBUG] os-volume_attachments disabled.") - return nil - } else { - return err - } - } - } - - allVolumeAttachments, err := volumeattach.ExtractVolumeAttachments(allPages) - if err != nil { - return err - } - - if v, ok := d.GetOk("volume"); ok { - volumes := 
v.(*schema.Set).List() - for _, volume := range volumes { - if volumeMap, ok := volume.(map[string]interface{}); ok { - if v, ok := volumeMap["volume_id"].(string); ok { - for _, volumeAttachment := range allVolumeAttachments { - if v == volumeAttachment.ID { - vol := make(map[string]interface{}) - vol["id"] = volumeAttachment.ID - vol["volume_id"] = volumeAttachment.VolumeID - vol["device"] = volumeAttachment.Device - vols = append(vols, vol) - } - } - } - } - } - } - - log.Printf("[INFO] Volume attachments: %v", vols) - d.Set("volume", vols) - - return nil -} - -func checkBlockDeviceConfig(d *schema.ResourceData) error { - if vL, ok := d.GetOk("block_device"); ok { - for _, v := range vL.([]interface{}) { - vM := v.(map[string]interface{}) - - if vM["source_type"] != "blank" && vM["uuid"] == "" { - return fmt.Errorf("You must specify a uuid for %s block device types", vM["source_type"]) - } - - if vM["source_type"] == "image" && vM["destination_type"] == "volume" { - if vM["volume_size"] == 0 { - return fmt.Errorf("You must specify a volume_size when creating a volume from an image") - } - } - - if vM["source_type"] == "blank" && vM["destination_type"] == "local" { - if vM["volume_size"] == 0 { - return fmt.Errorf("You must specify a volume_size when creating a blank block device") - } - } - } - } - - return nil -} - -func resourceComputeInstancePersonalityHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["file"].(string))) - - return hashcode.String(buf.String()) -} - -func resourceInstancePersonalityV2(d *schema.ResourceData) servers.Personality { - var personalities servers.Personality - - if v := d.Get("personality"); v != nil { - personalityList := v.(*schema.Set).List() - if len(personalityList) > 0 { - for _, p := range personalityList { - rawPersonality := p.(map[string]interface{}) - file := servers.File{ - Path: rawPersonality["file"].(string), - Contents: 
[]byte(rawPersonality["content"].(string)), - } - - log.Printf("[DEBUG] OpenStack Compute Instance Personality: %+v", file) - - personalities = append(personalities, &file) - } - } - } - - return personalities -} diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go deleted file mode 100644 index c8d4b9046..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go +++ /dev/null @@ -1,1635 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/pagination" -) - -func TestAccComputeV2Instance_basic(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceMetadata(&instance, "foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "all_metadata.foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", 
"availability_zone", "nova"), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeAttach(t *testing.T) { - var instance servers.Server - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttach, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.vol_1", &volume), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeAttachPostCreation(t *testing.T) { - var instance servers.Server - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachPostCreation_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachPostCreation_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.vol_1", &volume), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) { - var instance servers.Server - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeDetachPostCreation_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.vol_1", &volume), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_volumeDetachPostCreation_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists("openstack_blockstorage_volume_v1.vol_1", &volume), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumesDetached(&instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation(t *testing.T) { - var instance servers.Server - var volume_1 volumes.Volume - var volume_2 volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.root_volume", &volume_1), - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.additional_volume", &volume_2), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_2), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation_2, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.root_volume", &volume_1), - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.additional_volume", &volume_2), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeDetached( - &instance, "openstack_blockstorage_volume_v1.additional_volume"), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeAttachInstanceDelete(t *testing.T) { - var instance servers.Server - var volume_1 volumes.Volume - var volume_2 volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachInstanceDelete_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.root_volume", &volume_1), - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.additional_volume", &volume_2), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_1), - testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume_2), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachInstanceDelete_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.root_volume", &volume_1), - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.additional_volume", &volume_2), - testAccCheckComputeV2InstanceDoesNotExist( - "openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceVolumeDetached( - &instance, 
"openstack_blockstorage_volume_v1.root_volume"), - testAccCheckComputeV2InstanceVolumeDetached( - &instance, "openstack_blockstorage_volume_v1.additional_volume"), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_volumeAttachToNewInstance(t *testing.T) { - var instance_1 servers.Server - var instance_2 servers.Server - var volume_1 volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachToNewInstance_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.volume_1", &volume_1), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance_1), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_2", &instance_2), - testAccCheckComputeV2InstanceVolumeDetached( - &instance_2, "openstack_blockstorage_volume_v1.volume_1"), - testAccCheckComputeV2InstanceVolumeAttachment(&instance_1, &volume_1), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_volumeAttachToNewInstance_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.volume_1", &volume_1), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance_1), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_2", &instance_2), - testAccCheckComputeV2InstanceVolumeDetached( - &instance_1, "openstack_blockstorage_volume_v1.volume_1"), - testAccCheckComputeV2InstanceVolumeAttachment(&instance_2, &volume_1), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_floatingIPAttachGlobally, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_floatingIPAttachToNetwork(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_floatingIPAttachToNetwork, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_floatingIPAttachToNetworkAndChange(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_floatingIPAttachToNetworkAndChange_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - 
resource.TestStep{ - Config: testAccComputeV2Instance_floatingIPAttachToNetworkAndChange_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2FloatingIPExists("openstack_compute_floatingip_v2.fip_2", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_secgroupMulti(t *testing.T) { - var instance_1 servers.Server - var secgroup_1 secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_secgroupMulti, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists( - "openstack_compute_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance_1), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_secgroupMultiUpdate(t *testing.T) { - var instance_1 servers.Server - var secgroup_1, secgroup_2 secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_secgroupMultiUpdate_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists( - "openstack_compute_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckComputeV2SecGroupExists( - "openstack_compute_secgroup_v2.secgroup_2", &secgroup_2), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance_1), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_secgroupMultiUpdate_2, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckComputeV2SecGroupExists( - "openstack_compute_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckComputeV2SecGroupExists( - "openstack_compute_secgroup_v2.secgroup_2", &secgroup_2), - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance_1), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_bootFromVolumeImage(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_bootFromVolumeImage, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceBootVolumeAttachment(&instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_bootFromVolumeVolume(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_bootFromVolumeVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - 
testAccCheckComputeV2InstanceBootVolumeAttachment(&instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_bootFromVolumeForceNew(t *testing.T) { - var instance1_1 servers.Server - var instance1_2 servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_bootFromVolumeForceNew_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance1_1), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_bootFromVolumeForceNew_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance1_2), - testAccCheckComputeV2InstanceInstanceIDsDoNotMatch(&instance1_1, &instance1_2), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_blockDeviceNewVolume(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_blockDeviceNewVolume, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_blockDeviceExistingVolume(t *testing.T) { - var instance servers.Server - var volume volumes.Volume - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_blockDeviceExistingVolume, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckBlockStorageV1VolumeExists( - "openstack_blockstorage_volume_v1.volume_1", &volume), - ), - }, - }, - }) -} - -// TODO: verify the personality really exists on the instance. -func TestAccComputeV2Instance_personality(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_personality, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_multiEphemeral(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_multiEphemeral, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_accessIPv4(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_accessIPv4, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "access_ip_v4", "192.168.1.100"), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_changeFixedIP(t 
*testing.T) { - var instance1_1 servers.Server - var instance1_2 servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_changeFixedIP_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance1_1), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_changeFixedIP_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists( - "openstack_compute_instance_v2.instance_1", &instance1_2), - testAccCheckComputeV2InstanceInstanceIDsDoNotMatch(&instance1_1, &instance1_2), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_stopBeforeDestroy(t *testing.T) { - var instance servers.Server - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_stopBeforeDestroy, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_metadataRemove(t *testing.T) { - var instance servers.Server - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_metadataRemove_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeV2InstanceMetadata(&instance, "abc", "def"), - 
resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "all_metadata.foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "all_metadata.abc", "def"), - ), - }, - resource.TestStep{ - Config: testAccComputeV2Instance_metadataRemove_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeV2InstanceMetadata(&instance, "ghi", "jkl"), - testAccCheckComputeV2InstanceNoMetadataKey(&instance, "abc"), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "all_metadata.foo", "bar"), - resource.TestCheckResourceAttr( - "openstack_compute_instance_v2.instance_1", "all_metadata.ghi", "jkl"), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_forceDelete(t *testing.T) { - var instance servers.Server - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_forceDelete, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_timeout(t *testing.T) { - var instance servers.Server - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func TestAccComputeV2Instance_networkNameToID(t *testing.T) { - var instance servers.Server - var network 
networks.Network - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2InstanceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Instance_networkNameToID, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - resource.TestCheckResourceAttrPtr( - "openstack_compute_instance_v2.instance_1", "network.1.uuid", &network.ID), - ), - }, - }, - }) -} - -func testAccCheckComputeV2InstanceDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_instance_v2" { - continue - } - - server, err := servers.Get(computeClient, rs.Primary.ID).Extract() - if err == nil { - if server.Status != "SOFT_DELETED" { - return fmt.Errorf("Instance still exists") - } - } - } - - return nil -} - -func testAccCheckComputeV2InstanceExists(n string, instance *servers.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - found, err := servers.Get(computeClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Instance not found") - } - - *instance = *found - - return nil - } -} - 
-func testAccCheckComputeV2InstanceDoesNotExist(n string, instance *servers.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - _, err = servers.Get(computeClient, instance.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - return err - } - - return fmt.Errorf("Instance still exists") - } -} - -func testAccCheckComputeV2InstanceMetadata( - instance *servers.Server, k string, v string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance.Metadata == nil { - return fmt.Errorf("No metadata") - } - - for key, value := range instance.Metadata { - if k != key { - continue - } - - if v == value { - return nil - } - - return fmt.Errorf("Bad value for %s: %s", k, value) - } - - return fmt.Errorf("Metadata not found: %s", k) - } -} - -func testAccCheckComputeV2InstanceNoMetadataKey( - instance *servers.Server, k string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance.Metadata == nil { - return nil - } - - for key, _ := range instance.Metadata { - if k == key { - return fmt.Errorf("Metadata found: %s", k) - } - } - - return nil - } -} - -func testAccCheckComputeV2InstanceVolumeAttachment( - instance *servers.Server, volume *volumes.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - var attachments []volumeattach.VolumeAttachment - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return err - } - - err = volumeattach.List(computeClient, instance.ID).EachPage( - func(page pagination.Page) (bool, error) { - - actual, err := volumeattach.ExtractVolumeAttachments(page) - if err != nil { - return false, fmt.Errorf("Unable to lookup attachment: 
%s", err) - } - - attachments = actual - return true, nil - }) - - for _, attachment := range attachments { - if attachment.VolumeID == volume.ID { - return nil - } - } - - return fmt.Errorf("Volume not found: %s", volume.ID) - } -} - -func testAccCheckComputeV2InstanceVolumesDetached(instance *servers.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - var attachments []volumeattach.VolumeAttachment - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return err - } - - err = volumeattach.List(computeClient, instance.ID).EachPage( - func(page pagination.Page) (bool, error) { - - actual, err := volumeattach.ExtractVolumeAttachments(page) - if err != nil { - return false, fmt.Errorf("Unable to lookup attachment: %s", err) - } - - attachments = actual - return true, nil - }) - - if len(attachments) > 0 { - return fmt.Errorf("Volumes are still attached.") - } - - return nil - } -} - -func testAccCheckComputeV2InstanceBootVolumeAttachment( - instance *servers.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - var attachments []volumeattach.VolumeAttachment - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return err - } - - err = volumeattach.List(computeClient, instance.ID).EachPage( - func(page pagination.Page) (bool, error) { - - actual, err := volumeattach.ExtractVolumeAttachments(page) - if err != nil { - return false, fmt.Errorf("Unable to lookup attachment: %s", err) - } - - attachments = actual - return true, nil - }) - - if len(attachments) == 1 { - return nil - } - - return fmt.Errorf("No attached volume found.") - } -} - -func testAccCheckComputeV2InstanceFloatingIPAttach( - instance *servers.Server, fip *floatingips.FloatingIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - if fip.InstanceID == instance.ID { - return nil - } - - 
return fmt.Errorf("Floating IP %s was not attached to instance %s", fip.ID, instance.ID) - } -} - -func testAccCheckComputeV2InstanceInstanceIDsDoNotMatch( - instance1, instance2 *servers.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - if instance1.ID == instance2.ID { - return fmt.Errorf("Instance was not recreated.") - } - - return nil - } -} - -func testAccCheckComputeV2InstanceVolumeDetached(instance *servers.Server, volume_id string) resource.TestCheckFunc { - return func(s *terraform.State) error { - var attachments []volumeattach.VolumeAttachment - - rs, ok := s.RootModule().Resources[volume_id] - if !ok { - return fmt.Errorf("Not found: %s", volume_id) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return err - } - - err = volumeattach.List(computeClient, instance.ID).EachPage( - func(page pagination.Page) (bool, error) { - actual, err := volumeattach.ExtractVolumeAttachments(page) - if err != nil { - return false, fmt.Errorf("Unable to lookup attachment: %s", err) - } - - attachments = actual - return true, nil - }) - - for _, attachment := range attachments { - if attachment.VolumeID == rs.Primary.ID { - return fmt.Errorf("Volume is still attached.") - } - } - - return nil - } -} - -const testAccComputeV2Instance_basic = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - metadata { - foo = "bar" - } -} -` - -const testAccComputeV2Instance_volumeAttach = ` -resource "openstack_blockstorage_volume_v1" "vol_1" { - name = "vol_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - volume { - volume_id = "${openstack_blockstorage_volume_v1.vol_1.id}" - } -} -` - -const testAccComputeV2Instance_volumeAttachPostCreation_1 = ` -resource 
"openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} -` - -const testAccComputeV2Instance_volumeAttachPostCreation_2 = ` -resource "openstack_blockstorage_volume_v1" "vol_1" { - name = "vol_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - volume { - volume_id = "${openstack_blockstorage_volume_v1.vol_1.id}" - } -} -` - -const testAccComputeV2Instance_volumeDetachPostCreation_1 = ` -resource "openstack_blockstorage_volume_v1" "vol_1" { - name = "vol_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - volume { - volume_id = "${openstack_blockstorage_volume_v1.vol_1.id}" - } -} -` - -const testAccComputeV2Instance_volumeDetachPostCreation_2 = ` -resource "openstack_blockstorage_volume_v1" "vol_1" { - name = "vol_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} -` - -var testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation_1 = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "root_volume" { - name = "root_volume" - size = 1 - image_id = "%s" -} - -resource "openstack_blockstorage_volume_v1" "additional_volume" { - name = "additional_volume" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - block_device { - uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" - source_type = "volume" - boot_index = 0 - destination_type = "volume" - delete_on_termination = false - } - - volume { - volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}" - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_volumeDetachAdditionalVolumePostCreation_2 = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "root_volume" { - name = "root_volume" - size = 1 - image_id = 
"%s" -} - -resource "openstack_blockstorage_volume_v1" "additional_volume" { - name = "additional_volume" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - block_device { - uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" - source_type = "volume" - boot_index = 0 - destination_type = "volume" - delete_on_termination = false - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_volumeAttachInstanceDelete_1 = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "root_volume" { - name = "root_volume" - size = 1 - image_id = "%s" -} - -resource "openstack_blockstorage_volume_v1" "additional_volume" { - name = "additional_volume" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - block_device { - uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" - source_type = "volume" - boot_index = 0 - destination_type = "volume" - delete_on_termination = false - } - - volume { - volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}" - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_volumeAttachInstanceDelete_2 = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "root_volume" { - name = "root_volume" - size = 1 - image_id = "%s" -} - -resource "openstack_blockstorage_volume_v1" "additional_volume" { - name = "additional_volume" - size = 1 -} -`, OS_IMAGE_ID) - -const testAccComputeV2Instance_volumeAttachToNewInstance_1 = ` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - volume { - volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" - } -} - -resource "openstack_compute_instance_v2" "instance_2" { - depends_on = ["openstack_compute_instance_v2.instance_1"] - name = "instance_2" - security_groups = 
["default"] -} -` - -const testAccComputeV2Instance_volumeAttachToNewInstance_2 = ` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_compute_instance_v2" "instance_2" { - depends_on = ["openstack_compute_instance_v2.instance_1"] - name = "instance_2" - security_groups = ["default"] - - volume { - volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" - } -} - ` - -const testAccComputeV2Instance_floatingIPAttachGlobally = ` -resource "openstack_compute_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - floating_ip = "${openstack_compute_floatingip_v2.fip_1.address}" -} -` - -var testAccComputeV2Instance_floatingIPAttachToNetwork = fmt.Sprintf(` -resource "openstack_compute_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - network { - uuid = "%s" - floating_ip = "${openstack_compute_floatingip_v2.fip_1.address}" - access_network = true - } -} -`, OS_NETWORK_ID) - -var testAccComputeV2Instance_floatingIPAttachToNetworkAndChange_1 = fmt.Sprintf(` -resource "openstack_compute_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_v2" "fip_2" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - network { - uuid = "%s" - floating_ip = "${openstack_compute_floatingip_v2.fip_1.address}" - access_network = true - } -} -`, OS_NETWORK_ID) - -var testAccComputeV2Instance_floatingIPAttachToNetworkAndChange_2 = fmt.Sprintf(` -resource "openstack_compute_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_floatingip_v2" "fip_2" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - 
security_groups = ["default"] - - network { - uuid = "%s" - floating_ip = "${openstack_compute_floatingip_v2.fip_2.address}" - access_network = true - } -} -`, OS_NETWORK_ID) - -const testAccComputeV2Instance_secgroupMulti = ` -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "a security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] -} -` - -const testAccComputeV2Instance_secgroupMultiUpdate_1 = ` -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "a security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "another security group" - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} -` - -const testAccComputeV2Instance_secgroupMultiUpdate_2 = ` -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "a security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "another security group" - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}", "${openstack_compute_secgroup_v2.secgroup_2.name}"] -} -` - -var testAccComputeV2Instance_bootFromVolumeImage = fmt.Sprintf(` -resource 
"openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - volume_size = 5 - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - volume_size = 2 - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } - - volume { - volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_bootFromVolumeVolume = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "vol_1" { - name = "vol_1" - size = 5 - image_id = "%s" -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "${openstack_blockstorage_volume_v1.vol_1.id}" - source_type = "volume" - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_bootFromVolumeForceNew_1 = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - volume_size = 5 - boot_index = 0 - destination_type = "volume" - delete_on_termination = true - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_bootFromVolumeForceNew_2 = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - volume_size = 4 - boot_index = 0 - destination_type = "volume" - delete_on_termination = 
true - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_blockDeviceNewVolume = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - destination_type = "local" - boot_index = 0 - delete_on_termination = true - } - block_device { - source_type = "blank" - destination_type = "volume" - volume_size = 1 - boot_index = 1 - delete_on_termination = true - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_blockDeviceExistingVolume = fmt.Sprintf(` -resource "openstack_blockstorage_volume_v1" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - block_device { - uuid = "%s" - source_type = "image" - destination_type = "local" - boot_index = 0 - delete_on_termination = true - } - block_device { - uuid = "${openstack_blockstorage_volume_v1.volume_1.id}" - source_type = "volume" - destination_type = "volume" - boot_index = 1 - delete_on_termination = true - } -} -`, OS_IMAGE_ID) - -const testAccComputeV2Instance_personality = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - personality { - file = "/tmp/foobar.txt" - content = "happy" - } - personality { - file = "/tmp/barfoo.txt" - content = "angry" - } -} -` - -var testAccComputeV2Instance_multiEphemeral = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "terraform-test" - security_groups = ["default"] - block_device { - boot_index = 0 - delete_on_termination = true - destination_type = "local" - source_type = "image" - uuid = "%s" - } - block_device { - boot_index = -1 - delete_on_termination = true - destination_type = "local" - source_type = "blank" - volume_size = 1 - } - block_device { - boot_index = -1 - delete_on_termination = true - destination_type = "local" - source_type = "blank" 
- volume_size = 1 - } -} -`, OS_IMAGE_ID) - -var testAccComputeV2Instance_accessIPv4 = fmt.Sprintf(` -resource "openstack_compute_floatingip_v2" "myip" { -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.1.0/24" - ip_version = 4 - enable_dhcp = true - no_gateway = true -} - -resource "openstack_compute_instance_v2" "instance_1" { - depends_on = ["openstack_networking_subnet_v2.subnet_1"] - - name = "instance_1" - security_groups = ["default"] - floating_ip = "${openstack_compute_floatingip_v2.myip.address}" - - network { - uuid = "%s" - } - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - fixed_ip_v4 = "192.168.1.100" - access_network = true - } -} -`, OS_NETWORK_ID) - -var testAccComputeV2Instance_changeFixedIP_1 = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - network { - uuid = "%s" - fixed_ip_v4 = "10.0.0.24" - } -} -`, OS_NETWORK_ID) - -var testAccComputeV2Instance_changeFixedIP_2 = fmt.Sprintf(` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - network { - uuid = "%s" - fixed_ip_v4 = "10.0.0.25" - } -} -`, OS_NETWORK_ID) - -const testAccComputeV2Instance_stopBeforeDestroy = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - stop_before_destroy = true -} -` - -const testAccComputeV2Instance_metadataRemove_1 = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - metadata { - foo = "bar" - abc = "def" - } -} -` - -const testAccComputeV2Instance_metadataRemove_2 = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - 
metadata { - foo = "bar" - ghi = "jkl" - } -} -` - -const testAccComputeV2Instance_forceDelete = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - force_delete = true -} -` - -const testAccComputeV2Instance_timeout = ` -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - - timeouts { - create = "10m" - } -} -` - -var testAccComputeV2Instance_networkNameToID = fmt.Sprintf(` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.1.0/24" - ip_version = 4 - enable_dhcp = true - no_gateway = true -} - -resource "openstack_compute_instance_v2" "instance_1" { - depends_on = ["openstack_networking_subnet_v2.subnet_1"] - - name = "instance_1" - security_groups = ["default"] - - network { - uuid = "%s" - } - - network { - name = "${openstack_networking_network_v2.network_1.name}" - } - -} -`, OS_NETWORK_ID) diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go deleted file mode 100644 index 7537d3bda..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go +++ /dev/null @@ -1,104 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeKeypairV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeKeypairV2Create, - Read: resourceComputeKeypairV2Read, - Delete: resourceComputeKeypairV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - 
Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "public_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeKeypairV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := KeyPairCreateOpts{ - keypairs.CreateOpts{ - Name: d.Get("name").(string), - PublicKey: d.Get("public_key").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - kp, err := keypairs.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack keypair: %s", err) - } - - d.SetId(kp.Name) - - return resourceComputeKeypairV2Read(d, meta) -} - -func resourceComputeKeypairV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - kp, err := keypairs.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "keypair") - } - - d.Set("name", kp.Name) - d.Set("public_key", kp.PublicKey) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeKeypairV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - err = keypairs.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return 
fmt.Errorf("Error deleting OpenStack keypair: %s", err) - } - d.SetId("") - return nil -} diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go deleted file mode 100644 index 912806689..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" -) - -func TestAccComputeV2Keypair_basic(t *testing.T) { - var keypair keypairs.KeyPair - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2KeypairDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2Keypair_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2KeypairExists("openstack_compute_keypair_v2.kp_1", &keypair), - ), - }, - }, - }) -} - -func testAccCheckComputeV2KeypairDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_keypair_v2" { - continue - } - - _, err := keypairs.Get(computeClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Keypair still exists") - } - } - - return nil -} - -func testAccCheckComputeV2KeypairExists(n string, kp *keypairs.KeyPair) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - 
config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - found, err := keypairs.Get(computeClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Keypair not found") - } - - *kp = *found - - return nil - } -} - -const testAccComputeV2Keypair_basic = ` -resource "openstack_compute_keypair_v2" "kp_1" { - name = "kp_1" - public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLo1BCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAT9+OfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZquwhvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TAIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIFuu1p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB jrp-hp-pc" -} -` diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go deleted file mode 100644 index 99887a2da..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go +++ /dev/null @@ -1,397 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeSecGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSecGroupV2Create, - Read: resourceComputeSecGroupV2Read, - Update: resourceComputeSecGroupV2Update, - Delete: resourceComputeSecGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - 
Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "rule": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "from_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "to_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "ip_protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - StateFunc: func(v interface{}) string { - return strings.ToLower(v.(string)) - }, - }, - "from_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: false, - }, - }, - }, - Set: secgroupRuleV2Hash, - }, - }, - } -} - -func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - // Before creating the security group, make sure all rules are valid. - if err := checkSecGroupV2RulesForErrors(d); err != nil { - return err - } - - // If all rules are valid, proceed with creating the security gruop. 
- createOpts := secgroups.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - sg, err := secgroups.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack security group: %s", err) - } - - d.SetId(sg.ID) - - // Now that the security group has been created, iterate through each rule and create it - createRuleOptsList := resourceSecGroupRulesV2(d) - for _, createRuleOpts := range createRuleOptsList { - _, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack security group rule: %s", err) - } - } - - return resourceComputeSecGroupV2Read(d, meta) -} - -func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - sg, err := secgroups.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "security group") - } - - d.Set("name", sg.Name) - d.Set("description", sg.Description) - - rtm, err := rulesToMap(computeClient, d, sg.Rules) - if err != nil { - return err - } - log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) - d.Set("rule", rtm) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - updateOpts := secgroups.UpdateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - log.Printf("[DEBUG] Updating Security Group (%s) with options: %+v", d.Id(), updateOpts) - - _, err = 
secgroups.Update(computeClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack security group (%s): %s", d.Id(), err) - } - - if d.HasChange("rule") { - oldSGRaw, newSGRaw := d.GetChange("rule") - oldSGRSet, newSGRSet := oldSGRaw.(*schema.Set), newSGRaw.(*schema.Set) - secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet) - secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet) - - log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd) - log.Printf("[DEBUG] Security groups rules to remove: %v", secgrouprulesToRemove) - - for _, rawRule := range secgrouprulesToAdd.List() { - createRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule) - rule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() - if err != nil { - return fmt.Errorf("Error adding rule to OpenStack security group (%s): %s", d.Id(), err) - } - log.Printf("[DEBUG] Added rule (%s) to OpenStack security group (%s) ", rule.ID, d.Id()) - } - - for _, r := range secgrouprulesToRemove.List() { - rule := resourceSecGroupRuleV2(d, r) - err := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - continue - } - - return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s)", rule.ID, d.Id()) - } else { - log.Printf("[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err) - } - } - } - - return resourceComputeSecGroupV2Read(d, meta) -} - -func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: SecGroupV2StateRefreshFunc(computeClient, d), - Timeout: 
d.Timeout(schema.TimeoutDelete), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack security group: %s", err) - } - - d.SetId("") - return nil -} - -func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts { - rawRules := d.Get("rule").(*schema.Set).List() - createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules)) - for i, rawRule := range rawRules { - createRuleOptsList[i] = resourceSecGroupRuleCreateOptsV2(d, rawRule) - } - return createRuleOptsList -} - -func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{}) secgroups.CreateRuleOpts { - rawRuleMap := rawRule.(map[string]interface{}) - groupId := rawRuleMap["from_group_id"].(string) - if rawRuleMap["self"].(bool) { - groupId = d.Id() - } - return secgroups.CreateRuleOpts{ - ParentGroupID: d.Id(), - FromPort: rawRuleMap["from_port"].(int), - ToPort: rawRuleMap["to_port"].(int), - IPProtocol: rawRuleMap["ip_protocol"].(string), - CIDR: rawRuleMap["cidr"].(string), - FromGroupID: groupId, - } -} - -func checkSecGroupV2RulesForErrors(d *schema.ResourceData) error { - rawRules := d.Get("rule").(*schema.Set).List() - for _, rawRule := range rawRules { - rawRuleMap := rawRule.(map[string]interface{}) - - // only one of cidr, from_group_id, or self can be set - cidr := rawRuleMap["cidr"].(string) - groupId := rawRuleMap["from_group_id"].(string) - self := rawRuleMap["self"].(bool) - errorMessage := fmt.Errorf("Only one of cidr, from_group_id, or self can be set.") - - // if cidr is set, from_group_id and self cannot be set - if cidr != "" { - if groupId != "" || self { - return errorMessage - } - } - - // if from_group_id is set, cidr and self cannot be set - if groupId != "" { - if cidr != "" || self { - return errorMessage - } - } - - // if self is set, cidr and from_group_id cannot be set - if self { - if cidr != "" || groupId != "" { - return 
errorMessage - } - } - } - - return nil -} - -func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule { - rawRuleMap := rawRule.(map[string]interface{}) - return secgroups.Rule{ - ID: rawRuleMap["id"].(string), - ParentGroupID: d.Id(), - FromPort: rawRuleMap["from_port"].(int), - ToPort: rawRuleMap["to_port"].(int), - IPProtocol: rawRuleMap["ip_protocol"].(string), - IPRange: secgroups.IPRange{CIDR: rawRuleMap["cidr"].(string)}, - } -} - -func rulesToMap(computeClient *gophercloud.ServiceClient, d *schema.ResourceData, sgrs []secgroups.Rule) ([]map[string]interface{}, error) { - sgrMap := make([]map[string]interface{}, len(sgrs)) - for i, sgr := range sgrs { - groupId := "" - self := false - if sgr.Group.Name != "" { - if sgr.Group.Name == d.Get("name").(string) { - self = true - } else { - // Since Nova only returns the secgroup Name (and not the ID) for the group attribute, - // we need to look up all security groups and match the name. - // Nevermind that Nova wants the ID when setting the Group *and* that multiple groups - // with the same name can exist... 
- allPages, err := secgroups.List(computeClient).AllPages() - if err != nil { - return nil, err - } - securityGroups, err := secgroups.ExtractSecurityGroups(allPages) - if err != nil { - return nil, err - } - - for _, sg := range securityGroups { - if sg.Name == sgr.Group.Name { - groupId = sg.ID - } - } - } - } - - sgrMap[i] = map[string]interface{}{ - "id": sgr.ID, - "from_port": sgr.FromPort, - "to_port": sgr.ToPort, - "ip_protocol": sgr.IPProtocol, - "cidr": sgr.IPRange.CIDR, - "self": self, - "from_group_id": groupId, - } - } - return sgrMap, nil -} - -func secgroupRuleV2Hash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string)))) - buf.WriteString(fmt.Sprintf("%s-", m["from_group_id"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) - - return hashcode.String(buf.String()) -} - -func SecGroupV2StateRefreshFunc(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete Security Group %s.\n", d.Id()) - - err := secgroups.Delete(computeClient, d.Id()).ExtractErr() - if err != nil { - return nil, "", err - } - - s, err := secgroups.Get(computeClient, d.Id()).Extract() - if err != nil { - err = CheckDeleted(d, err, "Security Group") - if err != nil { - return s, "", err - } else { - log.Printf("[DEBUG] Successfully deleted Security Group %s", d.Id()) - return s, "DELETED", nil - } - } - - log.Printf("[DEBUG] Security Group %s still active.\n", d.Id()) - return s, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go 
deleted file mode 100644 index f4a0d3ddc..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go +++ /dev/null @@ -1,410 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" -) - -func TestAccComputeV2SecGroup_basic(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_basic_orig, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_update(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_basic_orig, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - ), - }, - resource.TestStep{ - Config: testAccComputeV2SecGroup_basic_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - testAccCheckComputeV2SecGroupRuleCount(&secgroup, 2), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_groupID(t *testing.T) { - var secgroup1, secgroup2, secgroup3 secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_groupID_orig, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup1), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_2", &secgroup2), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_3", &secgroup3), - testAccCheckComputeV2SecGroupGroupIDMatch(&secgroup1, &secgroup3), - ), - }, - resource.TestStep{ - Config: testAccComputeV2SecGroup_groupID_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup1), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_2", &secgroup2), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_3", &secgroup3), - testAccCheckComputeV2SecGroupGroupIDMatch(&secgroup2, &secgroup3), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_self(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_self, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - testAccCheckComputeV2SecGroupGroupIDMatch(&secgroup, &secgroup), - resource.TestCheckResourceAttr( - "openstack_compute_secgroup_v2.sg_1", "rule.3170486100.self", "true"), - resource.TestCheckResourceAttr( - "openstack_compute_secgroup_v2.sg_1", "rule.3170486100.from_group_id", ""), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_icmpZero(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_icmpZero, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_lowerCaseCIDR(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_lowerCaseCIDR, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - resource.TestCheckResourceAttr( - "openstack_compute_secgroup_v2.sg_1", "rule.3862435458.cidr", "2001:558:fc00::/39"), - ), - }, - }, - }) -} - -func TestAccComputeV2SecGroup_timeout(t *testing.T) { - var secgroup secgroups.SecurityGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2SecGroup_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.sg_1", &secgroup), - ), - }, - }, - }) -} - -func testAccCheckComputeV2SecGroupDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_secgroup_v2" { - continue - } - - _, err := secgroups.Get(computeClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Security group still exists") - } - } - - return nil -} - -func 
testAccCheckComputeV2SecGroupExists(n string, secgroup *secgroups.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - found, err := secgroups.Get(computeClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Security group not found") - } - - *secgroup = *found - - return nil - } -} - -func testAccCheckComputeV2SecGroupRuleCount(secgroup *secgroups.SecurityGroup, count int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(secgroup.Rules) != count { - return fmt.Errorf("Security group rule count does not match. 
Expected %d, got %d", count, len(secgroup.Rules)) - } - - return nil - } -} - -func testAccCheckComputeV2SecGroupGroupIDMatch(sg1, sg2 *secgroups.SecurityGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(sg2.Rules) == 1 { - if sg1.Name != sg2.Rules[0].Group.Name || sg1.TenantID != sg2.Rules[0].Group.TenantID { - return fmt.Errorf("%s was not correctly applied to %s", sg1.Name, sg2.Name) - } - } else { - return fmt.Errorf("%s rule count is incorrect", sg2.Name) - } - - return nil - } -} - -const testAccComputeV2SecGroup_basic_orig = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } - rule { - from_port = 1 - to_port = 65535 - ip_protocol = "udp" - cidr = "0.0.0.0/0" - } - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } -} -` - -const testAccComputeV2SecGroup_basic_update = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 2200 - to_port = 2200 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } -} -` - -const testAccComputeV2SecGroup_groupID_orig = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "sg_2" { - name = "sg_2" - description = "second test security group" - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "sg_3" { - name = "sg_3" - description = "third test security group" - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - from_group_id = 
"${openstack_compute_secgroup_v2.sg_1.id}" - } -} -` - -const testAccComputeV2SecGroup_groupID_update = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "sg_2" { - name = "sg_2" - description = "second test security group" - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_secgroup_v2" "sg_3" { - name = "sg_3" - description = "third test security group" - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - from_group_id = "${openstack_compute_secgroup_v2.sg_2.id}" - } -} -` - -const testAccComputeV2SecGroup_self = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - self = true - } -} -` - -const testAccComputeV2SecGroup_icmpZero = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 0 - to_port = 0 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } -} -` - -const testAccComputeV2SecGroup_lowerCaseCIDR = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 0 - to_port = 0 - ip_protocol = "icmp" - cidr = "2001:558:FC00::/39" - } -} -` - -const testAccComputeV2SecGroup_timeout = ` -resource "openstack_compute_secgroup_v2" "sg_1" { - name = "sg_1" - description = "first test security group" - rule { - from_port = 0 - to_port = 0 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } - - timeouts { - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go deleted file mode 100644 index 
70b31eec6..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go +++ /dev/null @@ -1,137 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeServerGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeServerGroupV2Create, - Read: resourceComputeServerGroupV2Read, - Update: nil, - Delete: resourceComputeServerGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "policies": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "members": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - createOpts := ServerGroupCreateOpts{ - servergroups.CreateOpts{ - Name: d.Get("name").(string), - Policies: resourceServerGroupPoliciesV2(d), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newSG, err := servergroups.Create(computeClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating ServerGroup: %s", err) - } - - d.SetId(newSG.ID) - - return 
resourceComputeServerGroupV2Read(d, meta) -} - -func resourceComputeServerGroupV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - sg, err := servergroups.Get(computeClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "server group") - } - - log.Printf("[DEBUG] Retrieved ServerGroup %s: %+v", d.Id(), sg) - - // Set the name - d.Set("name", sg.Name) - - // Set the policies - policies := []string{} - for _, p := range sg.Policies { - policies = append(policies, p) - } - d.Set("policies", policies) - - // Set the members - members := []string{} - for _, m := range sg.Members { - members = append(members, m) - } - d.Set("members", members) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeServerGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - log.Printf("[DEBUG] Deleting ServerGroup %s", d.Id()) - if err := servergroups.Delete(computeClient, d.Id()).ExtractErr(); err != nil { - return fmt.Errorf("Error deleting ServerGroup: %s", err) - } - - return nil -} - -func resourceServerGroupPoliciesV2(d *schema.ResourceData) []string { - rawPolicies := d.Get("policies").([]interface{}) - policies := make([]string, len(rawPolicies)) - for i, raw := range rawPolicies { - policies[i] = raw.(string) - } - return policies -} diff --git a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_servergroup_v2_test.go deleted file mode 100644 index 0fa85a703..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2_test.go +++ /dev/null @@ -1,140 
+0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" -) - -func TestAccComputeV2ServerGroup_basic(t *testing.T) { - var sg servergroups.ServerGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2ServerGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2ServerGroup_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2ServerGroupExists("openstack_compute_servergroup_v2.sg_1", &sg), - ), - }, - }, - }) -} - -func TestAccComputeV2ServerGroup_affinity(t *testing.T) { - var instance servers.Server - var sg servergroups.ServerGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2ServerGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2ServerGroup_affinity, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2ServerGroupExists("openstack_compute_servergroup_v2.sg_1", &sg), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckComputeV2InstanceInServerGroup(&instance, &sg), - ), - }, - }, - }) -} - -func testAccCheckComputeV2ServerGroupDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_servergroup_v2" { - continue - } - - _, err := servergroups.Get(computeClient, rs.Primary.ID).Extract() - if 
err == nil { - return fmt.Errorf("ServerGroup still exists") - } - } - - return nil -} - -func testAccCheckComputeV2ServerGroupExists(n string, kp *servergroups.ServerGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - found, err := servergroups.Get(computeClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("ServerGroup not found") - } - - *kp = *found - - return nil - } -} - -func testAccCheckComputeV2InstanceInServerGroup(instance *servers.Server, sg *servergroups.ServerGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(sg.Members) > 0 { - for _, m := range sg.Members { - if m == instance.ID { - return nil - } - } - } - - return fmt.Errorf("Instance %s is not part of Server Group %s", instance.ID, sg.ID) - } -} - -const testAccComputeV2ServerGroup_basic = ` -resource "openstack_compute_servergroup_v2" "sg_1" { - name = "sg_1" - policies = ["affinity"] -} -` - -const testAccComputeV2ServerGroup_affinity = ` -resource "openstack_compute_servergroup_v2" "sg_1" { - name = "sg_1" - policies = ["affinity"] -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - scheduler_hints { - group = "${openstack_compute_servergroup_v2.sg_1.id}" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2.go b/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2.go deleted file mode 100644 index 4fa6cb812..000000000 --- 
a/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2.go +++ /dev/null @@ -1,222 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceComputeVolumeAttachV2() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeVolumeAttachV2Create, - Read: resourceComputeVolumeAttachV2Read, - Delete: resourceComputeVolumeAttachV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "instance_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "volume_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "device": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func resourceComputeVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId := d.Get("instance_id").(string) - volumeId := d.Get("volume_id").(string) - - var device string - if v, ok := d.GetOk("device"); ok { - device = v.(string) - } - - attachOpts := volumeattach.CreateOpts{ - Device: device, - VolumeID: volumeId, - } - - log.Printf("[DEBUG] Creating volume attachment: %#v", attachOpts) - - attachment, err := 
volumeattach.Create(computeClient, instanceId, attachOpts).Extract() - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ATTACHING"}, - Target: []string{"ATTACHED"}, - Refresh: resourceComputeVolumeAttachV2AttachFunc(computeClient, instanceId, attachment.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 30 * time.Second, - MinTimeout: 15 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error attaching OpenStack volume: %s", err) - } - - log.Printf("[DEBUG] Created volume attachment: %#v", attachment) - - // Use the instance ID and attachment ID as the resource ID. - // This is because an attachment cannot be retrieved just by its ID alone. - id := fmt.Sprintf("%s/%s", instanceId, attachment.ID) - - d.SetId(id) - - return resourceComputeVolumeAttachV2Read(d, meta) -} - -func resourceComputeVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) - if err != nil { - return err - } - - attachment, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - return CheckDeleted(d, err, "compute_volume_attach") - } - - log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) - - d.Set("instance_id", attachment.ServerID) - d.Set("volume_id", attachment.VolumeID) - d.Set("device", attachment.Device) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceComputeVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - computeClient, err := config.computeV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId, attachmentId, err := 
parseComputeVolumeAttachmentId(d.Id()) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{""}, - Target: []string{"DETACHED"}, - Refresh: resourceComputeVolumeAttachV2DetachFunc(computeClient, instanceId, attachmentId), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 15 * time.Second, - MinTimeout: 15 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error detaching OpenStack volume: %s", err) - } - - return nil -} - -func resourceComputeVolumeAttachV2AttachFunc( - computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "ATTACHING", nil - } - return va, "", err - } - - return va, "ATTACHED", nil - } -} - -func resourceComputeVolumeAttachV2DetachFunc( - computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to detach OpenStack volume %s from instance %s", - attachmentId, instanceId) - - va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "DETACHED", nil - } - return va, "", err - } - - err = volumeattach.Delete(computeClient, instanceId, attachmentId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return va, "DETACHED", nil - } - - if _, ok := err.(gophercloud.ErrDefault400); ok { - return nil, "", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Volume Attachment (%s) is still active.", attachmentId) - return nil, "", nil - } -} - -func parseComputeVolumeAttachmentId(id string) (string, string, error) { - idParts := 
strings.Split(id, "/") - if len(idParts) < 2 { - return "", "", fmt.Errorf("Unable to determine volume attachment ID") - } - - instanceId := idParts[0] - attachmentId := idParts[1] - - return instanceId, attachmentId, nil -} diff --git a/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2_test.go deleted file mode 100644 index fb5b6baa3..000000000 --- a/builtin/providers/openstack/resource_openstack_compute_volume_attach_v2_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" -) - -func TestAccComputeV2VolumeAttach_basic(t *testing.T) { - var va volumeattach.VolumeAttachment - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2VolumeAttachDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2VolumeAttach_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2VolumeAttachExists("openstack_compute_volume_attach_v2.va_1", &va), - ), - }, - }, - }) -} - -func TestAccComputeV2VolumeAttach_device(t *testing.T) { - var va volumeattach.VolumeAttachment - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2VolumeAttachDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2VolumeAttach_device, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2VolumeAttachExists("openstack_compute_volume_attach_v2.va_1", &va), - testAccCheckComputeV2VolumeAttachDevice(&va, "/dev/vdc"), - ), - }, - }, - }) -} - -func TestAccComputeV2VolumeAttach_timeout(t *testing.T) { - var va 
volumeattach.VolumeAttachment - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeV2VolumeAttachDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeV2VolumeAttach_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeV2VolumeAttachExists("openstack_compute_volume_attach_v2.va_1", &va), - ), - }, - }, - }) -} - -func testAccCheckComputeV2VolumeAttachDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_compute_volume_attach_v2" { - continue - } - - instanceId, volumeId, err := parseComputeVolumeAttachmentId(rs.Primary.ID) - if err != nil { - return err - } - - _, err = volumeattach.Get(computeClient, instanceId, volumeId).Extract() - if err == nil { - return fmt.Errorf("Volume attachment still exists") - } - } - - return nil -} - -func testAccCheckComputeV2VolumeAttachExists(n string, va *volumeattach.VolumeAttachment) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - computeClient, err := config.computeV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack compute client: %s", err) - } - - instanceId, volumeId, err := parseComputeVolumeAttachmentId(rs.Primary.ID) - if err != nil { - return err - } - - found, err := volumeattach.Get(computeClient, instanceId, volumeId).Extract() - if err != nil { - return err - } - - if found.ServerID != instanceId || found.VolumeID != volumeId { - return 
fmt.Errorf("VolumeAttach not found") - } - - *va = *found - - return nil - } -} - -func testAccCheckComputeV2VolumeAttachDevice( - va *volumeattach.VolumeAttachment, device string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if va.Device != device { - return fmt.Errorf("Requested device of volume attachment (%s) does not match: %s", - device, va.Device) - } - - return nil - } -} - -const testAccComputeV2VolumeAttach_basic = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_compute_volume_attach_v2" "va_1" { - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}" -} -` - -const testAccComputeV2VolumeAttach_device = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_compute_volume_attach_v2" "va_1" { - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}" - device = "/dev/vdc" -} -` - -const testAccComputeV2VolumeAttach_timeout = ` -resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - size = 1 -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] -} - -resource "openstack_compute_volume_attach_v2" "va_1" { - instance_id = "${openstack_compute_instance_v2.instance_1.id}" - volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_dns_recordset_v2.go b/builtin/providers/openstack/resource_openstack_dns_recordset_v2.go deleted file mode 
100644 index cf911cd4a..000000000 --- a/builtin/providers/openstack/resource_openstack_dns_recordset_v2.go +++ /dev/null @@ -1,276 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDNSRecordSetV2() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSRecordSetV2Create, - Read: resourceDNSRecordSetV2Read, - Update: resourceDNSRecordSetV2Update, - Delete: resourceDNSRecordSetV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "zone_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "records": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDNSRecordSetV2Create(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - recordsraw := d.Get("records").([]interface{}) - records := make([]string, len(recordsraw)) - for i, recordraw := range recordsraw { - records[i] = recordraw.(string) - } - - createOpts := RecordSetCreateOpts{ - recordsets.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Records: records, - TTL: d.Get("ttl").(int), - Type: d.Get("type").(string), - }, - MapValueSpecs(d), - } - - zoneID := d.Get("zone_id").(string) - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - id := fmt.Sprintf("%s/%s", zoneID, n.ID) - d.SetId(id) - - log.Printf("[DEBUG] Created OpenStack DNS record set %s: %#v", n.ID, n) - return resourceDNSRecordSetV2Read(d, meta) -} - -func resourceDNSRecordSetV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil { - return err - } - - n, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract() - if err != nil { - return CheckDeleted(d, err, "record_set") - } - - log.Printf("[DEBUG] Retrieved record set 
%s: %#v", recordsetID, n) - - d.Set("name", n.Name) - d.Set("description", n.Description) - d.Set("ttl", n.TTL) - d.Set("type", n.Type) - d.Set("records", n.Records) - d.Set("region", GetRegion(d)) - d.Set("zone_id", zoneID) - - return nil -} - -func resourceDNSRecordSetV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - var updateOpts recordsets.UpdateOpts - if d.HasChange("ttl") { - updateOpts.TTL = d.Get("ttl").(int) - } - - if d.HasChange("records") { - recordsraw := d.Get("records").([]interface{}) - records := make([]string, len(recordsraw)) - for i, recordraw := range recordsraw { - records[i] = recordraw.(string) - } - updateOpts.Records = records - } - - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating record set %s with options: %#v", recordsetID, updateOpts) - - _, err = recordsets.Update(dnsClient, zoneID, recordsetID, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to update", recordsetID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceDNSRecordSetV2Read(d, meta) -} - -func resourceDNSRecordSetV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return 
fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - // Obtain relevant info from parsing the ID - zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) - if err != nil { - return err - } - - err = recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() - if err != nil { - return fmt.Errorf("Error deleting OpenStack DNS record set: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS record set (%s) to be deleted", recordsetID) - stateConf := &resource.StateChangeConf{ - Target: []string{"DELETED"}, - Pending: []string{"ACTIVE", "PENDING"}, - Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId("") - return nil -} - -func waitForDNSRecordSet(dnsClient *gophercloud.ServiceClient, zoneID, recordsetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - recordset, err := recordsets.Get(dnsClient, zoneID, recordsetId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return recordset, "DELETED", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack DNS record set (%s) current status: %s", recordset.ID, recordset.Status) - return recordset, recordset.Status, nil - } -} - -func parseDNSV2RecordSetID(id string) (string, string, error) { - idParts := strings.Split(id, "/") - if len(idParts) != 2 { - return "", "", fmt.Errorf("Unable to determine DNS record set ID from raw ID: %s", id) - } - - zoneID := idParts[0] - recordsetID := idParts[1] - - return zoneID, recordsetID, nil -} diff --git a/builtin/providers/openstack/resource_openstack_dns_recordset_v2_test.go b/builtin/providers/openstack/resource_openstack_dns_recordset_v2_test.go deleted file mode 100644 index 051e88411..000000000 --- a/builtin/providers/openstack/resource_openstack_dns_recordset_v2_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package 
openstack - -import ( - "fmt" - "os" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" -) - -func randomZoneName() string { - return fmt.Sprintf("ACPTTEST-zone-%s.com.", acctest.RandString(5)) -} - -func TestAccDNSV2RecordSet_basic(t *testing.T) { - var recordset recordsets.RecordSet - zoneName := randomZoneName() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSRecordSetV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2RecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2RecordSet_basic(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2RecordSetExists("openstack_dns_recordset_v2.recordset_1", &recordset), - resource.TestCheckResourceAttr( - "openstack_dns_recordset_v2.recordset_1", "description", "a record set"), - resource.TestCheckResourceAttr( - "openstack_dns_recordset_v2.recordset_1", "records.0", "10.1.0.0"), - ), - }, - resource.TestStep{ - Config: testAccDNSV2RecordSet_update(zoneName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_dns_recordset_v2.recordset_1", "name", zoneName), - resource.TestCheckResourceAttr("openstack_dns_recordset_v2.recordset_1", "ttl", "6000"), - resource.TestCheckResourceAttr("openstack_dns_recordset_v2.recordset_1", "type", "A"), - resource.TestCheckResourceAttr( - "openstack_dns_recordset_v2.recordset_1", "description", "an updated record set"), - resource.TestCheckResourceAttr( - "openstack_dns_recordset_v2.recordset_1", "records.0", "10.1.0.1"), - ), - }, - }, - }) -} - -func TestAccDNSV2RecordSet_readTTL(t *testing.T) { - var recordset recordsets.RecordSet - zoneName := randomZoneName() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSRecordSetV2(t) }, - 
Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2RecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2RecordSet_readTTL(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2RecordSetExists("openstack_dns_recordset_v2.recordset_1", &recordset), - resource.TestMatchResourceAttr( - "openstack_dns_recordset_v2.recordset_1", "ttl", regexp.MustCompile("^[0-9]+$")), - ), - }, - }, - }) -} - -func TestAccDNSV2RecordSet_timeout(t *testing.T) { - var recordset recordsets.RecordSet - zoneName := randomZoneName() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSRecordSetV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2RecordSetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2RecordSet_timeout(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2RecordSetExists("openstack_dns_recordset_v2.recordset_1", &recordset), - ), - }, - }, - }) -} - -func testAccCheckDNSV2RecordSetDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - dnsClient, err := config.dnsV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_dns_recordset_v2" { - continue - } - - zoneID, recordsetID, err := parseDNSV2RecordSetID(rs.Primary.ID) - if err != nil { - return err - } - - _, err = recordsets.Get(dnsClient, zoneID, recordsetID).Extract() - if err == nil { - return fmt.Errorf("Record set still exists") - } - } - - return nil -} - -func testAccCheckDNSV2RecordSetExists(n string, recordset *recordsets.RecordSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := 
testAccProvider.Meta().(*Config) - dnsClient, err := config.dnsV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - zoneID, recordsetID, err := parseDNSV2RecordSetID(rs.Primary.ID) - if err != nil { - return err - } - - found, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract() - if err != nil { - return err - } - - if found.ID != recordsetID { - return fmt.Errorf("Record set not found") - } - - *recordset = *found - - return nil - } -} - -func testAccPreCheckDNSRecordSetV2(t *testing.T) { - if os.Getenv("OS_AUTH_URL") == "" { - t.Fatal("OS_AUTH_URL must be set for acceptance tests") - } -} - -func testAccDNSV2RecordSet_basic(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email2@example.com" - description = "a zone" - ttl = 6000 - type = "PRIMARY" - } - - resource "openstack_dns_recordset_v2" "recordset_1" { - zone_id = "${openstack_dns_zone_v2.zone_1.id}" - name = "%s" - type = "A" - description = "a record set" - ttl = 3000 - records = ["10.1.0.0"] - } - `, zoneName, zoneName) -} - -func testAccDNSV2RecordSet_update(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email2@example.com" - description = "an updated zone" - ttl = 6000 - type = "PRIMARY" - } - - resource "openstack_dns_recordset_v2" "recordset_1" { - zone_id = "${openstack_dns_zone_v2.zone_1.id}" - name = "%s" - type = "A" - description = "an updated record set" - ttl = 6000 - records = ["10.1.0.1"] - } - `, zoneName, zoneName) -} - -func testAccDNSV2RecordSet_readTTL(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email2@example.com" - description = "an updated zone" - ttl = 6000 - type = "PRIMARY" - } - - resource "openstack_dns_recordset_v2" "recordset_1" { - zone_id = "${openstack_dns_zone_v2.zone_1.id}" - 
name = "%s" - type = "A" - records = ["10.1.0.2"] - } - `, zoneName, zoneName) -} - -func testAccDNSV2RecordSet_timeout(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email2@example.com" - description = "an updated zone" - ttl = 6000 - type = "PRIMARY" - } - - resource "openstack_dns_recordset_v2" "recordset_1" { - zone_id = "${openstack_dns_zone_v2.zone_1.id}" - name = "%s" - type = "A" - ttl = 3000 - records = ["10.1.0.3"] - - timeouts { - create = "5m" - update = "5m" - delete = "5m" - } - } - `, zoneName, zoneName) -} diff --git a/builtin/providers/openstack/resource_openstack_dns_zone_v2.go b/builtin/providers/openstack/resource_openstack_dns_zone_v2.go deleted file mode 100644 index 2b4b7995b..000000000 --- a/builtin/providers/openstack/resource_openstack_dns_zone_v2.go +++ /dev/null @@ -1,276 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDNSZoneV2() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSZoneV2Create, - Read: resourceDNSZoneV2Read, - Update: resourceDNSZoneV2Update, - Delete: resourceDNSZoneV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "email": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - ForceNew: false, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: resourceDNSZoneV2ValidType, - }, - "attributes": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "masters": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceDNSZoneV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - mastersraw := d.Get("masters").(*schema.Set).List() - masters := make([]string, len(mastersraw)) - for i, masterraw := range mastersraw { - masters[i] = masterraw.(string) - } - - attrsraw := d.Get("attributes").(map[string]interface{}) - attrs := make(map[string]string, len(attrsraw)) - for k, v := range attrsraw { - attrs[k] = v.(string) - } - - createOpts := ZoneCreateOpts{ - zones.CreateOpts{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Attributes: attrs, - TTL: d.Get("ttl").(int), - Email: d.Get("email").(string), - Description: d.Get("description").(string), - Masters: masters, - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := zones.Create(dnsClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Target: 
[]string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSZone(dnsClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - log.Printf("[DEBUG] Created OpenStack DNS Zone %s: %#v", n.ID, n) - return resourceDNSZoneV2Read(d, meta) -} - -func resourceDNSZoneV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - n, err := zones.Get(dnsClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "zone") - } - - log.Printf("[DEBUG] Retrieved Zone %s: %#v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("email", n.Email) - d.Set("description", n.Description) - d.Set("ttl", n.TTL) - d.Set("type", n.Type) - d.Set("attributes", n.Attributes) - d.Set("masters", n.Masters) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceDNSZoneV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - var updateOpts zones.UpdateOpts - if d.HasChange("email") { - updateOpts.Email = d.Get("email").(string) - } - if d.HasChange("ttl") { - updateOpts.TTL = d.Get("ttl").(int) - } - if d.HasChange("masters") { - mastersraw := d.Get("masters").(*schema.Set).List() - masters := make([]string, len(mastersraw)) - for i, masterraw := range mastersraw { - masters[i] = masterraw.(string) - } - updateOpts.Masters = masters - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - - log.Printf("[DEBUG] Updating Zone %s with options: %#v", d.Id(), updateOpts) - - _, err = zones.Update(dnsClient, d.Id(), updateOpts).Extract() - if err != nil { - return 
fmt.Errorf("Error updating OpenStack DNS Zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to update", d.Id()) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Pending: []string{"PENDING"}, - Refresh: waitForDNSZone(dnsClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceDNSZoneV2Read(d, meta) -} - -func resourceDNSZoneV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - dnsClient, err := config.dnsV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - _, err = zones.Delete(dnsClient, d.Id()).Extract() - if err != nil { - return fmt.Errorf("Error deleting OpenStack DNS Zone: %s", err) - } - - log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", d.Id()) - stateConf := &resource.StateChangeConf{ - Target: []string{"DELETED"}, - Pending: []string{"ACTIVE", "PENDING"}, - Refresh: waitForDNSZone(dnsClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId("") - return nil -} - -func resourceDNSZoneV2ValidType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validTypes := []string{ - "PRIMARY", - "SECONDARY", - } - - for _, v := range validTypes { - if value == v { - return - } - } - - err := fmt.Errorf("%s must be one of %s", k, validTypes) - errors = append(errors, err) - return -} - -func waitForDNSZone(dnsClient *gophercloud.ServiceClient, zoneId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - zone, err := zones.Get(dnsClient, zoneId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return zone, "DELETED", nil - } - - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack 
DNS Zone (%s) current status: %s", zone.ID, zone.Status) - return zone, zone.Status, nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_dns_zone_v2_test.go b/builtin/providers/openstack/resource_openstack_dns_zone_v2_test.go deleted file mode 100644 index 8b06d28e4..000000000 --- a/builtin/providers/openstack/resource_openstack_dns_zone_v2_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package openstack - -import ( - "fmt" - "os" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" -) - -func TestAccDNSV2Zone_basic(t *testing.T) { - var zone zones.Zone - var zoneName = fmt.Sprintf("ACPTTEST%s.com.", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSZoneV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2Zone_basic(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2ZoneExists("openstack_dns_zone_v2.zone_1", &zone), - resource.TestCheckResourceAttr( - "openstack_dns_zone_v2.zone_1", "description", "a zone"), - ), - }, - resource.TestStep{ - Config: testAccDNSV2Zone_update(zoneName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_dns_zone_v2.zone_1", "name", zoneName), - resource.TestCheckResourceAttr("openstack_dns_zone_v2.zone_1", "email", "email2@example.com"), - resource.TestCheckResourceAttr("openstack_dns_zone_v2.zone_1", "ttl", "6000"), - resource.TestCheckResourceAttr("openstack_dns_zone_v2.zone_1", "type", "PRIMARY"), - resource.TestCheckResourceAttr( - "openstack_dns_zone_v2.zone_1", "description", "an updated zone"), - ), - }, - }, - }) -} - -func TestAccDNSV2Zone_readTTL(t *testing.T) { - var zone zones.Zone - var zoneName = 
fmt.Sprintf("ACPTTEST%s.com.", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSZoneV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2Zone_readTTL(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2ZoneExists("openstack_dns_zone_v2.zone_1", &zone), - resource.TestCheckResourceAttr("openstack_dns_zone_v2.zone_1", "type", "PRIMARY"), - resource.TestMatchResourceAttr( - "openstack_dns_zone_v2.zone_1", "ttl", regexp.MustCompile("^[0-9]+$")), - ), - }, - }, - }) -} - -func TestAccDNSV2Zone_timeout(t *testing.T) { - var zone zones.Zone - var zoneName = fmt.Sprintf("ACPTTEST%s.com.", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheckDNSZoneV2(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDNSV2ZoneDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccDNSV2Zone_timeout(zoneName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDNSV2ZoneExists("openstack_dns_zone_v2.zone_1", &zone), - ), - }, - }, - }) -} - -func testAccCheckDNSV2ZoneDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - dnsClient, err := config.dnsV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_dns_zone_v2" { - continue - } - - _, err := zones.Get(dnsClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Zone still exists") - } - } - - return nil -} - -func testAccCheckDNSV2ZoneExists(n string, zone *zones.Zone) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config 
:= testAccProvider.Meta().(*Config) - dnsClient, err := config.dnsV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack DNS client: %s", err) - } - - found, err := zones.Get(dnsClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Zone not found") - } - - *zone = *found - - return nil - } -} - -func testAccPreCheckDNSZoneV2(t *testing.T) { - v := os.Getenv("OS_AUTH_URL") - if v == "" { - t.Fatal("OS_AUTH_URL must be set for acceptance tests") - } -} - -func testAccDNSV2Zone_basic(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email1@example.com" - description = "a zone" - ttl = 3000 - type = "PRIMARY" - } - `, zoneName) -} - -func testAccDNSV2Zone_update(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email2@example.com" - description = "an updated zone" - ttl = 6000 - type = "PRIMARY" - } - `, zoneName) -} - -func testAccDNSV2Zone_readTTL(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email1@example.com" - } - `, zoneName) -} - -func testAccDNSV2Zone_timeout(zoneName string) string { - return fmt.Sprintf(` - resource "openstack_dns_zone_v2" "zone_1" { - name = "%s" - email = "email@example.com" - ttl = 3000 - - timeouts { - create = "5m" - update = "5m" - delete = "5m" - } - } - `, zoneName) -} diff --git a/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go b/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go deleted file mode 100644 index 66601b998..000000000 --- a/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go +++ /dev/null @@ -1,324 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFWFirewallV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWFirewallV1Create, - Read: resourceFWFirewallV1Read, - Update: resourceFWFirewallV1Update, - Delete: resourceFWFirewallV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "policy_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "associated_routers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ConflictsWith: []string{"no_routers"}, - }, - "no_routers": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"associated_routers"}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWFirewallV1Create(d *schema.ResourceData, meta interface{}) error { - - config := 
meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var createOpts firewalls.CreateOptsBuilder - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts = &firewalls.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - PolicyID: d.Get("policy_id").(string), - AdminStateUp: &adminStateUp, - TenantID: d.Get("tenant_id").(string), - } - - associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() - if len(associatedRoutersRaw) > 0 { - log.Printf("[DEBUG] Will attempt to associate Firewall with router(s): %+v", associatedRoutersRaw) - - var routerIds []string - for _, v := range associatedRoutersRaw { - routerIds = append(routerIds, v.(string)) - } - - createOpts = &routerinsertion.CreateOptsExt{ - CreateOptsBuilder: createOpts, - RouterIDs: routerIds, - } - } - - if d.Get("no_routers").(bool) { - routerIds := make([]string, 0) - log.Println("[DEBUG] No routers specified. 
Setting to empty slice") - createOpts = &routerinsertion.CreateOptsExt{ - CreateOptsBuilder: createOpts, - RouterIDs: routerIds, - } - } - - createOpts = &FirewallCreateOpts{ - createOpts, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create firewall: %#v", createOpts) - - firewall, err := firewalls.Create(networkingClient, createOpts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall created: %#v", firewall) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, firewall.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - log.Printf("[DEBUG] Firewall (%s) is active.", firewall.ID) - - d.SetId(firewall.ID) - - return resourceFWFirewallV1Read(d, meta) -} - -func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var firewall Firewall - err = firewalls.Get(networkingClient, d.Id()).ExtractInto(&firewall) - if err != nil { - return CheckDeleted(d, err, "firewall") - } - - log.Printf("[DEBUG] Read OpenStack Firewall %s: %#v", d.Id(), firewall) - - d.Set("name", firewall.Name) - d.Set("description", firewall.Description) - d.Set("policy_id", firewall.PolicyID) - d.Set("admin_state_up", firewall.AdminStateUp) - d.Set("tenant_id", firewall.TenantID) - d.Set("region", GetRegion(d)) - d.Set("associated_routers", firewall.RouterIDs) - - return nil -} - -func resourceFWFirewallV1Update(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return 
fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // PolicyID is required - opts := firewalls.UpdateOpts{ - PolicyID: d.Get("policy_id").(string), - } - - if d.HasChange("name") { - opts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - opts.Description = d.Get("description").(string) - } - - if d.HasChange("admin_state_up") { - adminStateUp := d.Get("admin_state_up").(bool) - opts.AdminStateUp = &adminStateUp - } - - var updateOpts firewalls.UpdateOptsBuilder - var routerIds []string - if d.HasChange("associated_routers") || d.HasChange("no_routers") { - // 'no_routers' = true means 'associated_routers' will be empty... - if d.Get("no_routers").(bool) { - log.Printf("[DEBUG] 'no_routers' is true.") - routerIds = make([]string, 0) - } else { - associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() - for _, v := range associatedRoutersRaw { - routerIds = append(routerIds, v.(string)) - } - } - - updateOpts = routerinsertion.UpdateOptsExt{ - UpdateOptsBuilder: opts, - RouterIDs: routerIds, - } - } else { - updateOpts = opts - } - - log.Printf("[DEBUG] Updating firewall with id %s: %#v", d.Id(), updateOpts) - - err = firewalls.Update(networkingClient, d.Id(), updateOpts).Err - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - return resourceFWFirewallV1Read(d, meta) -} - -func resourceFWFirewallV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // 
Ensure the firewall was fully created/updated before being deleted. - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForFirewallActive(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - err = firewalls.Delete(networkingClient, d.Id()).Err - - if err != nil { - return err - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DELETED"}, - Refresh: waitForFirewallDeletion(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - _, err = stateConf.WaitForState() - - return err -} - -func waitForFirewallActive(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - var fw Firewall - - err := firewalls.Get(networkingClient, id).ExtractInto(&fw) - if err != nil { - return nil, "", err - } - return fw, fw.Status, nil - } -} - -func waitForFirewallDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - - return func() (interface{}, string, error) { - fw, err := firewalls.Get(networkingClient, id).Extract() - log.Printf("[DEBUG] Got firewall %s => %#v", id, fw) - - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Firewall %s is actually deleted", id) - return "", "DELETED", nil - } - return nil, "", fmt.Errorf("Unexpected error: %s", err) - } - - log.Printf("[DEBUG] Firewall %s deletion is pending", id) - return fw, "DELETING", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go b/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go deleted file mode 100644 index f3d41aa01..000000000 --- 
a/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccFWFirewallV1_basic(t *testing.T) { - var policyID *string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_basic_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1("openstack_fw_firewall_v1.fw_1", "", "", policyID), - ), - }, - resource.TestStep{ - Config: testAccFWFirewallV1_basic_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1( - "openstack_fw_firewall_v1.fw_1", "fw_1", "terraform acceptance test", policyID), - ), - }, - }, - }) -} - -func TestAccFWFirewallV1_timeout(t *testing.T) { - var policyID *string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1("openstack_fw_firewall_v1.fw_1", "", "", policyID), - ), - }, - }, - }) -} - -func TestAccFWFirewallV1_router(t *testing.T) { - var firewall Firewall - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_router, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - 
testAccCheckFWFirewallRouterCount(&firewall, 1), - ), - }, - }, - }) -} - -func TestAccFWFirewallV1_no_router(t *testing.T) { - var firewall Firewall - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_no_router, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - resource.TestCheckResourceAttr("openstack_fw_firewall_v1.fw_1", "description", "firewall router test"), - testAccCheckFWFirewallRouterCount(&firewall, 0), - ), - }, - }, - }) -} - -func TestAccFWFirewallV1_router_update(t *testing.T) { - var firewall Firewall - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_router, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - testAccCheckFWFirewallRouterCount(&firewall, 1), - ), - }, - resource.TestStep{ - Config: testAccFWFirewallV1_router_add, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - testAccCheckFWFirewallRouterCount(&firewall, 2), - ), - }, - }, - }) -} - -func TestAccFWFirewallV1_router_remove(t *testing.T) { - var firewall Firewall - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWFirewallV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWFirewallV1_router, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - testAccCheckFWFirewallRouterCount(&firewall, 1), - ), - }, - 
resource.TestStep{ - Config: testAccFWFirewallV1_router_remove, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.fw_1", &firewall), - testAccCheckFWFirewallRouterCount(&firewall, 0), - ), - }, - }, - }) -} - -func testAccCheckFWFirewallV1Destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_firewall" { - continue - } - - _, err = firewalls.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Firewall (%s) still exists.", rs.Primary.ID) - } - if _, ok := err.(gophercloud.ErrDefault404); !ok { - return err - } - } - return nil -} - -func testAccCheckFWFirewallV1Exists(n string, firewall *Firewall) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Exists) Error creating OpenStack networking client: %s", err) - } - - var found Firewall - err = firewalls.Get(networkingClient, rs.Primary.ID).ExtractInto(&found) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Firewall not found") - } - - *firewall = found - - return nil - } -} - -func testAccCheckFWFirewallRouterCount(firewall *Firewall, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(firewall.RouterIDs) != expected { - return fmt.Errorf("Expected %d Routers, got %d", expected, len(firewall.RouterIDs)) - } - - return nil - } -} - -func testAccCheckFWFirewallV1(n, 
expectedName, expectedDescription string, policyID *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Exists) Error creating OpenStack networking client: %s", err) - } - - var found *firewalls.Firewall - for i := 0; i < 5; i++ { - // Firewall creation is asynchronous. Retry some times - // if we get a 404 error. Fail on any other error. - found, err = firewalls.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - time.Sleep(time.Second) - continue - } - return err - } - break - } - - switch { - case found.Name != expectedName: - err = fmt.Errorf("Expected Name to be <%s> but found <%s>", expectedName, found.Name) - case found.Description != expectedDescription: - err = fmt.Errorf("Expected Description to be <%s> but found <%s>", - expectedDescription, found.Description) - case found.PolicyID == "": - err = fmt.Errorf("Policy should not be empty") - case policyID != nil && found.PolicyID == *policyID: - err = fmt.Errorf("Policy had not been correctly updated. 
Went from <%s> to <%s>", - expectedName, found.Name) - } - - if err != nil { - return err - } - - policyID = &found.PolicyID - - return nil - } -} - -const testAccFWFirewallV1_basic_1 = ` -resource "openstack_fw_firewall_v1" "fw_1" { - policy_id = "${openstack_fw_policy_v1.policy_1.id}" -} - -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} -` - -const testAccFWFirewallV1_basic_2 = ` -resource "openstack_fw_firewall_v1" "fw_1" { - name = "fw_1" - description = "terraform acceptance test" - policy_id = "${openstack_fw_policy_v1.policy_2.id}" - admin_state_up = true -} - -resource "openstack_fw_policy_v1" "policy_2" { - name = "policy_2" -} -` - -const testAccFWFirewallV1_timeout = ` -resource "openstack_fw_firewall_v1" "fw_1" { - policy_id = "${openstack_fw_policy_v1.policy_1.id}" - - timeouts { - create = "5m" - update = "5m" - delete = "5m" - } -} - -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} -` - -const testAccFWFirewallV1_router = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" - distributed = "false" -} - -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} - -resource "openstack_fw_firewall_v1" "fw_1" { - name = "firewall_1" - description = "firewall router test" - policy_id = "${openstack_fw_policy_v1.policy_1.id}" - associated_routers = ["${openstack_networking_router_v2.router_1.id}"] -} -` - -const testAccFWFirewallV1_router_add = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" - distributed = "false" -} - -resource "openstack_networking_router_v2" "router_2" { - name = "router_2" - admin_state_up = "true" - distributed = "false" -} - -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} - -resource "openstack_fw_firewall_v1" "fw_1" { - name = "firewall_1" - description = "firewall router test" - policy_id = "${openstack_fw_policy_v1.policy_1.id}" - 
associated_routers = [ - "${openstack_networking_router_v2.router_1.id}", - "${openstack_networking_router_v2.router_2.id}" - ] -} -` - -const testAccFWFirewallV1_router_remove = ` -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} - -resource "openstack_fw_firewall_v1" "fw_1" { - name = "firewall_1" - description = "firewall router test" - policy_id = "${openstack_fw_policy_v1.policy_1.id}" - no_routers = true -} -` - -const testAccFWFirewallV1_no_router = ` -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" -} - -resource "openstack_fw_firewall_v1" "fw_1" { - name = "firewall_1" - description = "firewall router test" - policy_id = "${openstack_fw_policy_v1.policy_1.id}" - no_routers = true -} -` diff --git a/builtin/providers/openstack/resource_openstack_fw_policy_v1.go b/builtin/providers/openstack/resource_openstack_fw_policy_v1.go deleted file mode 100644 index a810e360e..000000000 --- a/builtin/providers/openstack/resource_openstack_fw_policy_v1.go +++ /dev/null @@ -1,231 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFWPolicyV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWPolicyV1Create, - Read: resourceFWPolicyV1Read, - Update: resourceFWPolicyV1Update, - Delete: resourceFWPolicyV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - 
"description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "audited": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "shared": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "rules": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWPolicyV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - v := d.Get("rules").([]interface{}) - - log.Printf("[DEBUG] Rules found : %#v", v) - log.Printf("[DEBUG] Rules count : %d", len(v)) - - rules := make([]string, len(v)) - for i, v := range v { - rules[i] = v.(string) - } - - audited := d.Get("audited").(bool) - - opts := PolicyCreateOpts{ - policies.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Audited: &audited, - TenantID: d.Get("tenant_id").(string), - Rules: rules, - }, - MapValueSpecs(d), - } - - if r, ok := d.GetOk("shared"); ok { - shared := r.(bool) - opts.Shared = &shared - } - - log.Printf("[DEBUG] Create firewall policy: %#v", opts) - - policy, err := policies.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall policy created: %#v", policy) - - d.SetId(policy.ID) - - return resourceFWPolicyV1Read(d, meta) -} - -func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall policy: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := 
config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - policy, err := policies.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "FW policy") - } - - log.Printf("[DEBUG] Read OpenStack Firewall Policy %s: %#v", d.Id(), policy) - - d.Set("name", policy.Name) - d.Set("description", policy.Description) - d.Set("shared", policy.Shared) - d.Set("audited", policy.Audited) - d.Set("tenant_id", policy.TenantID) - d.Set("rules", policy.Rules) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceFWPolicyV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - opts := policies.UpdateOpts{} - - if d.HasChange("name") { - opts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - opts.Description = d.Get("description").(string) - } - - if d.HasChange("rules") { - v := d.Get("rules").([]interface{}) - - log.Printf("[DEBUG] Rules found : %#v", v) - log.Printf("[DEBUG] Rules count : %d", len(v)) - - rules := make([]string, len(v)) - for i, v := range v { - rules[i] = v.(string) - } - opts.Rules = rules - } - - log.Printf("[DEBUG] Updating firewall policy with id %s: %#v", d.Id(), opts) - - err = policies.Update(networkingClient, d.Id(), opts).Err - if err != nil { - return err - } - - return resourceFWPolicyV1Read(d, meta) -} - -func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall policy: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - 
Target: []string{"DELETED"}, - Refresh: waitForFirewallPolicyDeletion(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 0, - MinTimeout: 2 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return err - } - - return nil -} - -func waitForFirewallPolicyDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - err := policies.Delete(networkingClient, id).Err - if err == nil { - return "", "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - // This error usually means that the policy is attached - // to a firewall. At this point, the firewall is probably - // being delete. So, we retry a few times. - return nil, "ACTIVE", nil - } - } - - return nil, "ACTIVE", err - } -} diff --git a/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go b/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go deleted file mode 100644 index 7302db3e3..000000000 --- a/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccFWPolicyV1_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWPolicyV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWPolicyV1_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWPolicyV1Exists( - "openstack_fw_policy_v1.policy_1", "", "", 0), - ), - }, - }, - }) -} - -func TestAccFWPolicyV1_addRules(t *testing.T) { - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWPolicyV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWPolicyV1_addRules, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWPolicyV1Exists( - "openstack_fw_policy_v1.policy_1", "policy_1", "terraform acceptance test", 2), - ), - }, - }, - }) -} - -func TestAccFWPolicyV1_deleteRules(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWPolicyV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWPolicyV1_deleteRules, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWPolicyV1Exists( - "openstack_fw_policy_v1.policy_1", "policy_1", "terraform acceptance test", 1), - ), - }, - }, - }) -} - -func TestAccFWPolicyV1_timeout(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWPolicyV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWPolicyV1_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWPolicyV1Exists( - "openstack_fw_policy_v1.policy_1", "", "", 0), - ), - }, - }, - }) -} - -func testAccCheckFWPolicyV1Destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_fw_policy_v1" { - continue - } - _, err = policies.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Firewall policy (%s) still exists.", rs.Primary.ID) - } - if _, ok := err.(gophercloud.ErrDefault404); !ok { - return err - } - } - return nil -} - -func 
testAccCheckFWPolicyV1Exists(n, name, description string, ruleCount int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var found *policies.Policy - for i := 0; i < 5; i++ { - // Firewall policy creation is asynchronous. Retry some times - // if we get a 404 error. Fail on any other error. - found, err = policies.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - time.Sleep(time.Second) - continue - } - return err - } - break - } - - switch { - case name != found.Name: - err = fmt.Errorf("Expected name <%s>, but found <%s>", name, found.Name) - case description != found.Description: - err = fmt.Errorf("Expected description <%s>, but found <%s>", description, found.Description) - case ruleCount != len(found.Rules): - err = fmt.Errorf("Expected rule count <%d>, but found <%d>", ruleCount, len(found.Rules)) - } - - if err != nil { - return err - } - - return nil - } -} - -const testAccFWPolicyV1_basic = ` -resource "openstack_fw_policy_v1" "policy_1" { -} -` - -const testAccFWPolicyV1_addRules = ` -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" - description = "terraform acceptance test" - rules = [ - "${openstack_fw_rule_v1.udp_deny.id}", - "${openstack_fw_rule_v1.tcp_allow.id}" - ] -} - -resource "openstack_fw_rule_v1" "tcp_allow" { - protocol = "tcp" - action = "allow" -} - -resource "openstack_fw_rule_v1" "udp_deny" { - protocol = "udp" - action = "deny" -} -` - -const testAccFWPolicyV1_deleteRules = ` -resource "openstack_fw_policy_v1" "policy_1" { - name = "policy_1" - description = 
"terraform acceptance test" - rules = [ - "${openstack_fw_rule_v1.udp_deny.id}" - ] -} - -resource "openstack_fw_rule_v1" "udp_deny" { - protocol = "udp" - action = "deny" -} -` - -const testAccFWPolicyV1_timeout = ` -resource "openstack_fw_policy_v1" "policy_1" { - timeouts { - create = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_fw_rule_v1.go b/builtin/providers/openstack/resource_openstack_fw_rule_v1.go deleted file mode 100644 index afde64f93..000000000 --- a/builtin/providers/openstack/resource_openstack_fw_rule_v1.go +++ /dev/null @@ -1,288 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceFWRuleV1() *schema.Resource { - return &schema.Resource{ - Create: resourceFWRuleV1Create, - Read: resourceFWRuleV1Read, - Update: resourceFWRuleV1Update, - Delete: resourceFWRuleV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_version": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - }, - "source_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "destination_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "source_port": 
&schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "destination_port": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "enabled": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceFWRuleV1Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - enabled := d.Get("enabled").(bool) - ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int)) - protocol := resourceFWRuleV1DetermineProtocol(d.Get("protocol").(string)) - - ruleConfiguration := RuleCreateOpts{ - rules.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Protocol: protocol, - Action: d.Get("action").(string), - IPVersion: ipVersion, - SourceIPAddress: d.Get("source_ip_address").(string), - DestinationIPAddress: d.Get("destination_ip_address").(string), - SourcePort: d.Get("source_port").(string), - DestinationPort: d.Get("destination_port").(string), - Enabled: &enabled, - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create firewall rule: %#v", ruleConfiguration) - - rule, err := rules.Create(networkingClient, ruleConfiguration).Extract() - - if err != nil { - return err - } - - log.Printf("[DEBUG] Firewall rule with id %s : %#v", rule.ID, rule) - - d.SetId(rule.ID) - - return resourceFWRuleV1Read(d, meta) -} - -func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about firewall rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := 
config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - rule, err := rules.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "FW rule") - } - - log.Printf("[DEBUG] Read OpenStack Firewall Rule %s: %#v", d.Id(), rule) - - d.Set("action", rule.Action) - d.Set("name", rule.Name) - d.Set("description", rule.Description) - d.Set("ip_version", rule.IPVersion) - d.Set("source_ip_address", rule.SourceIPAddress) - d.Set("destination_ip_address", rule.DestinationIPAddress) - d.Set("source_port", rule.SourcePort) - d.Set("destination_port", rule.DestinationPort) - d.Set("enabled", rule.Enabled) - - if rule.Protocol == "" { - d.Set("protocol", "any") - } else { - d.Set("protocol", rule.Protocol) - } - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceFWRuleV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - opts := rules.UpdateOpts{} - - if d.HasChange("name") { - v := d.Get("name").(string) - opts.Name = &v - } - - if d.HasChange("description") { - v := d.Get("description").(string) - opts.Description = &v - } - - if d.HasChange("protocol") { - v := d.Get("protocol").(string) - opts.Protocol = &v - } - - if d.HasChange("action") { - v := d.Get("action").(string) - opts.Action = &v - } - - if d.HasChange("ip_version") { - v := d.Get("ip_version").(int) - ipVersion := resourceFWRuleV1DetermineIPVersion(v) - opts.IPVersion = &ipVersion - } - - if d.HasChange("source_ip_address") { - v := d.Get("source_ip_address").(string) - opts.SourceIPAddress = &v - } - - if d.HasChange("destination_ip_address") { - v := d.Get("destination_ip_address").(string) - opts.DestinationIPAddress = &v - } - - if d.HasChange("source_port") { - v := 
d.Get("source_port").(string) - opts.SourcePort = &v - } - - if d.HasChange("destination_port") { - v := d.Get("destination_port").(string) - opts.DestinationPort = &v - } - - if d.HasChange("enabled") { - v := d.Get("enabled").(bool) - opts.Enabled = &v - } - - log.Printf("[DEBUG] Updating firewall rules: %#v", opts) - - err = rules.Update(networkingClient, d.Id(), opts).Err - if err != nil { - return err - } - - return resourceFWRuleV1Read(d, meta) -} - -func resourceFWRuleV1Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy firewall rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - rule, err := rules.Get(networkingClient, d.Id()).Extract() - if err != nil { - return err - } - - if rule.PolicyID != "" { - _, err := policies.RemoveRule(networkingClient, rule.PolicyID, rule.ID).Extract() - if err != nil { - return err - } - } - - return rules.Delete(networkingClient, d.Id()).Err -} - -func resourceFWRuleV1DetermineIPVersion(ipv int) gophercloud.IPVersion { - // Determine the IP Version - var ipVersion gophercloud.IPVersion - switch ipv { - case 4: - ipVersion = gophercloud.IPv4 - case 6: - ipVersion = gophercloud.IPv6 - } - - return ipVersion -} - -func resourceFWRuleV1DetermineProtocol(p string) rules.Protocol { - var protocol rules.Protocol - switch p { - case "any": - protocol = rules.ProtocolAny - case "icmp": - protocol = rules.ProtocolICMP - case "tcp": - protocol = rules.ProtocolTCP - case "udp": - protocol = rules.ProtocolUDP - } - - return protocol -} diff --git a/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go b/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go deleted file mode 100644 index 1e023ade0..000000000 --- a/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package 
openstack - -import ( - "fmt" - "reflect" - "testing" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccFWRuleV1_basic(t *testing.T) { - rule1 := &rules.Rule{ - Name: "rule_1", - Protocol: "udp", - Action: "deny", - IPVersion: 4, - Enabled: true, - } - - rule2 := &rules.Rule{ - Name: "rule_1", - Protocol: "udp", - Action: "deny", - Description: "Terraform accept test", - IPVersion: 4, - SourceIPAddress: "1.2.3.4", - DestinationIPAddress: "4.3.2.0/24", - SourcePort: "444", - DestinationPort: "555", - Enabled: true, - } - - rule3 := &rules.Rule{ - Name: "rule_1", - Protocol: "tcp", - Action: "allow", - Description: "Terraform accept test updated", - IPVersion: 4, - SourceIPAddress: "1.2.3.0/24", - DestinationIPAddress: "4.3.2.8", - SourcePort: "666", - DestinationPort: "777", - Enabled: false, - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWRuleV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWRuleV1_basic_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWRuleV1Exists("openstack_fw_rule_v1.rule_1", rule1), - ), - }, - resource.TestStep{ - Config: testAccFWRuleV1_basic_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWRuleV1Exists("openstack_fw_rule_v1.rule_1", rule2), - ), - }, - resource.TestStep{ - Config: testAccFWRuleV1_basic_3, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWRuleV1Exists("openstack_fw_rule_v1.rule_1", rule3), - ), - }, - }, - }) -} - -func TestAccFWRuleV1_anyProtocol(t *testing.T) { - rule := &rules.Rule{ - Name: "rule_1", - Description: "Allow any protocol", - Protocol: "", - Action: "allow", - IPVersion: 4, - SourceIPAddress: "192.168.199.0/24", - Enabled: true, - } - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckFWRuleV1Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccFWRuleV1_anyProtocol, - Check: resource.ComposeTestCheckFunc( - testAccCheckFWRuleV1Exists("openstack_fw_rule_v1.rule_1", rule), - ), - }, - }, - }) -} - -func testAccCheckFWRuleV1Destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_firewall_rule" { - continue - } - _, err = rules.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Firewall rule (%s) still exists.", rs.Primary.ID) - } - if _, ok := err.(gophercloud.ErrDefault404); !ok { - return err - } - } - return nil -} - -func testAccCheckFWRuleV1Exists(n string, expected *rules.Rule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var found *rules.Rule - for i := 0; i < 5; i++ { - // Firewall rule creation is asynchronous. Retry some times - // if we get a 404 error. Fail on any other error. 
- found, err = rules.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - time.Sleep(time.Second) - continue - } - return err - } - break - } - - expected.ID = found.ID - // Erase the tenant id because we don't want to compare - // it as long it is not present in the expected - found.TenantID = "" - - if !reflect.DeepEqual(expected, found) { - return fmt.Errorf("Expected:\n%#v\nFound:\n%#v", expected, found) - } - - return nil - } -} - -const testAccFWRuleV1_basic_1 = ` -resource "openstack_fw_rule_v1" "rule_1" { - name = "rule_1" - protocol = "udp" - action = "deny" -} -` - -const testAccFWRuleV1_basic_2 = ` -resource "openstack_fw_rule_v1" "rule_1" { - name = "rule_1" - description = "Terraform accept test" - protocol = "udp" - action = "deny" - ip_version = 4 - source_ip_address = "1.2.3.4" - destination_ip_address = "4.3.2.0/24" - source_port = "444" - destination_port = "555" - enabled = true -} -` - -const testAccFWRuleV1_basic_3 = ` -resource "openstack_fw_rule_v1" "rule_1" { - name = "rule_1" - description = "Terraform accept test updated" - protocol = "tcp" - action = "allow" - ip_version = 4 - source_ip_address = "1.2.3.0/24" - destination_ip_address = "4.3.2.8" - source_port = "666" - destination_port = "777" - enabled = false -} -` - -const testAccFWRuleV1_anyProtocol = ` -resource "openstack_fw_rule_v1" "rule_1" { - name = "rule_1" - description = "Allow any protocol" - protocol = "any" - action = "allow" - ip_version = 4 - source_ip_address = "192.168.199.0/24" - enabled = true -} -` diff --git a/builtin/providers/openstack/resource_openstack_images_image_v2.go b/builtin/providers/openstack/resource_openstack_images_image_v2.go deleted file mode 100644 index 483494334..000000000 --- a/builtin/providers/openstack/resource_openstack_images_image_v2.go +++ /dev/null @@ -1,501 +0,0 @@ -package openstack - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "log" - "net/http" - "os" - 
"path/filepath" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceImagesImageV2() *schema.Resource { - return &schema.Resource{ - Create: resourceImagesImageV2Create, - Read: resourceImagesImageV2Read, - Update: resourceImagesImageV2Update, - Delete: resourceImagesImageV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "checksum": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "container_format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourceImagesImageV2ValidateContainerFormat, - }, - - "created_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "disk_format": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourceImagesImageV2ValidateDiskFormat, - }, - - "file": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "image_cache_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: fmt.Sprintf("%s/.terraform/image_cache", os.Getenv("HOME")), - }, - - "image_source_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"local_file_path"}, - }, - - "local_file_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"image_source_url"}, - }, - - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Computed: true, - }, - - "min_disk_gb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: 
validatePositiveInt, - Default: 0, - }, - - "min_ram_mb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validatePositiveInt, - Default: 0, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "owner": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "protected": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "schema": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "size_bytes": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "update_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "visibility": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - ValidateFunc: resourceImagesImageV2ValidateVisibility, - Default: "private", - }, - }, - } -} - -func resourceImagesImageV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - protected := d.Get("protected").(bool) - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - createOpts := &images.CreateOpts{ - Name: d.Get("name").(string), - ContainerFormat: d.Get("container_format").(string), - DiskFormat: d.Get("disk_format").(string), - MinDisk: d.Get("min_disk_gb").(int), - MinRAM: d.Get("min_ram_mb").(int), - Protected: &protected, - Visibility: &visibility, - 
} - - if v, ok := d.GetOk("tags"); ok { - tags := v.(*schema.Set).List() - createOpts.Tags = resourceImagesImageV2BuildTags(tags) - } - - d.Partial(true) - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - newImg, err := images.Create(imageClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating Image: %s", err) - } - - d.SetId(newImg.ID) - - // downloading/getting image file props - imgFilePath, err := resourceImagesImageV2File(d) - if err != nil { - return fmt.Errorf("Error opening file for Image: %s", err) - - } - fileSize, fileChecksum, err := resourceImagesImageV2FileProps(imgFilePath) - if err != nil { - return fmt.Errorf("Error getting file props: %s", err) - } - - // upload - imgFile, err := os.Open(imgFilePath) - if err != nil { - return fmt.Errorf("Error opening file %q: %s", imgFilePath, err) - } - defer imgFile.Close() - log.Printf("[WARN] Uploading image %s (%d bytes). This can be pretty long.", d.Id(), fileSize) - - res := imagedata.Upload(imageClient, d.Id(), imgFile) - if res.Err != nil { - return fmt.Errorf("Error while uploading file %q: %s", imgFilePath, res.Err) - } - - //wait for active - stateConf := &resource.StateChangeConf{ - Pending: []string{string(images.ImageStatusQueued), string(images.ImageStatusSaving)}, - Target: []string{string(images.ImageStatusActive)}, - Refresh: resourceImagesImageV2RefreshFunc(imageClient, d.Id(), fileSize, fileChecksum), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - if _, err = stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Image: %s", err) - } - - d.Partial(false) - - return resourceImagesImageV2Read(d, meta) -} - -func resourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - img, err 
:= images.Get(imageClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "image") - } - - log.Printf("[DEBUG] Retrieved Image %s: %#v", d.Id(), img) - - d.Set("owner", img.Owner) - d.Set("status", img.Status) - d.Set("file", img.File) - d.Set("schema", img.Schema) - d.Set("checksum", img.Checksum) - d.Set("size_bytes", img.SizeBytes) - d.Set("metadata", img.Metadata) - d.Set("created_at", img.CreatedAt) - d.Set("update_at", img.UpdatedAt) - d.Set("container_format", img.ContainerFormat) - d.Set("disk_format", img.DiskFormat) - d.Set("min_disk_gb", img.MinDiskGigabytes) - d.Set("min_ram_mb", img.MinRAMMegabytes) - d.Set("file", img.File) - d.Set("name", img.Name) - d.Set("protected", img.Protected) - d.Set("size_bytes", img.SizeBytes) - d.Set("tags", img.Tags) - d.Set("visibility", img.Visibility) - return nil -} - -func resourceImagesImageV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - updateOpts := make(images.UpdateOpts, 0) - - if d.HasChange("visibility") { - visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) - v := images.UpdateVisibility{Visibility: visibility} - updateOpts = append(updateOpts, v) - } - - if d.HasChange("name") { - v := images.ReplaceImageName{NewName: d.Get("name").(string)} - updateOpts = append(updateOpts, v) - } - - if d.HasChange("tags") { - tags := d.Get("tags").(*schema.Set).List() - v := images.ReplaceImageTags{ - NewTags: resourceImagesImageV2BuildTags(tags), - } - updateOpts = append(updateOpts, v) - } - - log.Printf("[DEBUG] Update Options: %#v", updateOpts) - - _, err = images.Update(imageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating image: %s", err) - } - - return resourceImagesImageV2Read(d, meta) -} - -func resourceImagesImageV2Delete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - imageClient, err := config.imageV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack image client: %s", err) - } - - log.Printf("[DEBUG] Deleting Image %s", d.Id()) - if err := images.Delete(imageClient, d.Id()).Err; err != nil { - return fmt.Errorf("Error deleting Image: %s", err) - } - - d.SetId("") - return nil -} - -func resourceImagesImageV2ValidateVisibility(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - validVisibilities := []string{ - "public", - "private", - "shared", - "community", - } - - for _, v := range validVisibilities { - if value == v { - return - } - } - - err := fmt.Errorf("%s must be one of %s", k, validVisibilities) - errors = append(errors, err) - return -} - -func validatePositiveInt(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value > 0 { - return - } - errors = append(errors, fmt.Errorf("%q must be a positive integer", k)) - return -} - -var DiskFormats = [9]string{"ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vdi", "iso"} - -func resourceImagesImageV2ValidateDiskFormat(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - for i := range DiskFormats { - if value == DiskFormats[i] { - return - } - } - errors = append(errors, fmt.Errorf("%q must be one of %v", k, DiskFormats)) - return -} - -var ContainerFormats = [9]string{"ami", "ari", "aki", "bare", "ovf"} - -func resourceImagesImageV2ValidateContainerFormat(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - for i := range ContainerFormats { - if value == ContainerFormats[i] { - return - } - } - errors = append(errors, fmt.Errorf("%q must be one of %v", k, ContainerFormats)) - return -} - -func resourceImagesImageV2VisibilityFromString(v string) images.ImageVisibility { - switch v { - case "public": - return images.ImageVisibilityPublic - case 
"private": - return images.ImageVisibilityPrivate - case "shared": - return images.ImageVisibilityShared - case "community": - return images.ImageVisibilityCommunity - } - - return "" -} - -func fileMD5Checksum(f *os.File) (string, error) { - hash := md5.New() - if _, err := io.Copy(hash, f); err != nil { - return "", err - } - return hex.EncodeToString(hash.Sum(nil)), nil -} - -func resourceImagesImageV2FileProps(filename string) (int64, string, error) { - var filesize int64 - var filechecksum string - - file, err := os.Open(filename) - if err != nil { - return -1, "", fmt.Errorf("Error opening file for Image: %s", err) - - } - defer file.Close() - - fstat, err := file.Stat() - if err != nil { - return -1, "", fmt.Errorf("Error reading image file %q: %s", file.Name(), err) - } - - filesize = fstat.Size() - filechecksum, err = fileMD5Checksum(file) - - if err != nil { - return -1, "", fmt.Errorf("Error computing image file %q checksum: %s", file.Name(), err) - } - - return filesize, filechecksum, nil -} - -func resourceImagesImageV2File(d *schema.ResourceData) (string, error) { - if filename := d.Get("local_file_path").(string); filename != "" { - return filename, nil - } else if furl := d.Get("image_source_url").(string); furl != "" { - dir := d.Get("image_cache_path").(string) - os.MkdirAll(dir, 0700) - filename := filepath.Join(dir, fmt.Sprintf("%x.img", md5.Sum([]byte(furl)))) - - if _, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", fmt.Errorf("Error while trying to access file %q: %s", filename, err) - } - log.Printf("[DEBUG] File doens't exists %s. 
will download from %s", filename, furl) - file, err := os.Create(filename) - if err != nil { - return "", fmt.Errorf("Error creating file %q: %s", filename, err) - } - defer file.Close() - resp, err := http.Get(furl) - if err != nil { - return "", fmt.Errorf("Error downloading image from %q", furl) - } - defer resp.Body.Close() - - if _, err = io.Copy(file, resp.Body); err != nil { - return "", fmt.Errorf("Error downloading image %q to file %q: %s", furl, filename, err) - } - return filename, nil - } else { - log.Printf("[DEBUG] File exists %s", filename) - return filename, nil - } - } else { - return "", fmt.Errorf("Error in config. no file specified") - } -} - -func resourceImagesImageV2RefreshFunc(client *gophercloud.ServiceClient, id string, fileSize int64, checksum string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - img, err := images.Get(client, id).Extract() - if err != nil { - return nil, "", err - } - log.Printf("[DEBUG] OpenStack image status is: %s", img.Status) - - if img.Checksum != checksum || int64(img.SizeBytes) != fileSize { - return img, fmt.Sprintf("%s", img.Status), fmt.Errorf("Error wrong size %v or checksum %q", img.SizeBytes, img.Checksum) - } - - return img, fmt.Sprintf("%s", img.Status), nil - } -} - -func resourceImagesImageV2BuildTags(v []interface{}) []string { - var tags []string - for _, tag := range v { - tags = append(tags, tag.(string)) - } - - return tags -} diff --git a/builtin/providers/openstack/resource_openstack_images_image_v2_test.go b/builtin/providers/openstack/resource_openstack_images_image_v2_test.go deleted file mode 100644 index b1201040e..000000000 --- a/builtin/providers/openstack/resource_openstack_images_image_v2_test.go +++ /dev/null @@ -1,358 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestAccImagesImageV2_basic(t *testing.T) { - var image images.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "name", "Rancher TerraformAccTest"), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "container_format", "bare"), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "disk_format", "qcow2"), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "schema", "/v2/schemas/image"), - ), - }, - }, - }) -} - -func TestAccImagesImageV2_name(t *testing.T) { - var image images.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_name_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "name", "Rancher TerraformAccTest"), - ), - }, - resource.TestStep{ - Config: testAccImagesImageV2_name_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "name", "TerraformAccTest Rancher"), - ), - }, - }, - }) -} - -func TestAccImagesImageV2_tags(t *testing.T) { - var image images.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, 
- Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_tags_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "foo"), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "bar"), - testAccCheckImagesImageV2TagCount("openstack_images_image_v2.image_1", 2), - ), - }, - resource.TestStep{ - Config: testAccImagesImageV2_tags_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "foo"), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "bar"), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "baz"), - testAccCheckImagesImageV2TagCount("openstack_images_image_v2.image_1", 3), - ), - }, - resource.TestStep{ - Config: testAccImagesImageV2_tags_3, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "foo"), - testAccCheckImagesImageV2HasTag("openstack_images_image_v2.image_1", "baz"), - testAccCheckImagesImageV2TagCount("openstack_images_image_v2.image_1", 2), - ), - }, - }, - }) -} - -func TestAccImagesImageV2_visibility(t *testing.T) { - var image images.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPreCheckAdminOnly(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_visibility_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "visibility", "private"), - ), - 
}, - resource.TestStep{ - Config: testAccImagesImageV2_visibility_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - resource.TestCheckResourceAttr( - "openstack_images_image_v2.image_1", "visibility", "public"), - ), - }, - }, - }) -} - -func TestAccImagesImageV2_timeout(t *testing.T) { - var image images.Image - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckImagesImageV2Destroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccImagesImageV2_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckImagesImageV2Exists("openstack_images_image_v2.image_1", &image), - ), - }, - }, - }) -} - -func testAccCheckImagesImageV2Destroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - imageClient, err := config.imageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack Image: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_images_image_v2" { - continue - } - - _, err := images.Get(imageClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Image still exists") - } - } - - return nil -} - -func testAccCheckImagesImageV2Exists(n string, image *images.Image) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - imageClient, err := config.imageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack Image: %s", err) - } - - found, err := images.Get(imageClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Image not found") - } - - *image = *found - 
- return nil - } -} - -func testAccCheckImagesImageV2HasTag(n, tag string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - imageClient, err := config.imageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack Image: %s", err) - } - - found, err := images.Get(imageClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Image not found") - } - - for _, v := range found.Tags { - if tag == v { - return nil - } - } - - return fmt.Errorf("Tag not found: %s", tag) - } -} - -func testAccCheckImagesImageV2TagCount(n string, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - imageClient, err := config.imageV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack Image: %s", err) - } - - found, err := images.Get(imageClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Image not found") - } - - if len(found.Tags) != expected { - return fmt.Errorf("Expecting %d tags, found %d", expected, len(found.Tags)) - } - - return nil - } -} - -var testAccImagesImageV2_basic = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - }` - -var testAccImagesImageV2_name_1 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher 
TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - }` - -var testAccImagesImageV2_name_2 = ` - resource "openstack_images_image_v2" "image_1" { - name = "TerraformAccTest Rancher" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - }` - -var testAccImagesImageV2_tags_1 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - tags = ["foo","bar"] - }` - -var testAccImagesImageV2_tags_2 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - tags = ["foo","bar","baz"] - }` - -var testAccImagesImageV2_tags_3 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - tags = ["foo","baz"] - }` - -var testAccImagesImageV2_visibility_1 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - visibility = "private" - }` - -var testAccImagesImageV2_visibility_2 = ` - resource "openstack_images_image_v2" "image_1" { - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - visibility = "public" - }` - -var testAccImagesImageV2_timeout = ` - resource "openstack_images_image_v2" "image_1" 
{ - name = "Rancher TerraformAccTest" - image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img" - container_format = "bare" - disk_format = "qcow2" - - timeouts { - create = "10m" - } - }` diff --git a/builtin/providers/openstack/resource_openstack_lb_listener_v2.go b/builtin/providers/openstack/resource_openstack_lb_listener_v2.go deleted file mode 100644 index c426f2be6..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_listener_v2.go +++ /dev/null @@ -1,316 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" -) - -func resourceListenerV2() *schema.Resource { - return &schema.Resource{ - Create: resourceListenerV2Create, - Read: resourceListenerV2Read, - Update: resourceListenerV2Update, - Delete: resourceListenerV2Delete, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "TCP" && value != "HTTP" && value != "HTTPS" { - errors = append(errors, fmt.Errorf( - "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) - } - return - }, - }, - - "protocol_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "loadbalancer_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - Computed: true, - }, - - "default_pool_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "connection_limit": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "default_tls_container_ref": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "sni_container_refs": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceListenerV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - connLimit := d.Get("connection_limit").(int) - var sniContainerRefs []string - if raw, ok := d.GetOk("sni_container_refs"); ok { - for _, v := range raw.([]interface{}) { - sniContainerRefs = append(sniContainerRefs, v.(string)) - } - } - createOpts := listeners.CreateOpts{ - Protocol: listeners.Protocol(d.Get("protocol").(string)), - ProtocolPort: d.Get("protocol_port").(int), - TenantID: d.Get("tenant_id").(string), - LoadbalancerID: d.Get("loadbalancer_id").(string), - Name: d.Get("name").(string), - DefaultPoolID: d.Get("default_pool_id").(string), - Description: d.Get("description").(string), - ConnLimit: &connLimit, - DefaultTlsContainerRef: d.Get("default_tls_container_ref").(string), - SniContainerRefs: sniContainerRefs, - AdminStateUp: &adminStateUp, - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - listener, err := listeners.Create(networkingClient, 
createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LBaaSV2 listener: %s", err) - } - log.Printf("[INFO] Listener ID: %s", listener.ID) - - log.Printf("[DEBUG] Waiting for Openstack LBaaSV2 listener (%s) to become available.", listener.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForListenerActive(networkingClient, listener.ID), - Timeout: 2 * time.Minute, - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(listener.ID) - - return resourceListenerV2Read(d, meta) -} - -func resourceListenerV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - listener, err := listeners.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LBV2 listener") - } - - log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 listener %s: %+v", d.Id(), listener) - - d.Set("id", listener.ID) - d.Set("name", listener.Name) - d.Set("protocol", listener.Protocol) - d.Set("tenant_id", listener.TenantID) - d.Set("description", listener.Description) - d.Set("protocol_port", listener.ProtocolPort) - d.Set("admin_state_up", listener.AdminStateUp) - d.Set("default_pool_id", listener.DefaultPoolID) - d.Set("connection_limit", listener.ConnLimit) - d.Set("sni_container_refs", listener.SniContainerRefs) - d.Set("default_tls_container_ref", listener.DefaultTlsContainerRef) - - return nil -} - -func resourceListenerV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var 
updateOpts listeners.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("connection_limit") { - connLimit := d.Get("connection_limit").(int) - updateOpts.ConnLimit = &connLimit - } - if d.HasChange("default_tls_container_ref") { - updateOpts.DefaultTlsContainerRef = d.Get("default_tls_container_ref").(string) - } - if d.HasChange("sni_container_refs") { - var sniContainerRefs []string - if raw, ok := d.GetOk("sni_container_refs"); ok { - for _, v := range raw.([]interface{}) { - sniContainerRefs = append(sniContainerRefs, v.(string)) - } - } - updateOpts.SniContainerRefs = sniContainerRefs - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Listener %s with options: %+v", d.Id(), updateOpts) - - _, err = listeners.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LBaaSV2 Listener: %s", err) - } - - return resourceListenerV2Read(d, meta) - -} - -func resourceListenerV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForListenerDelete(networkingClient, d.Id()), - Timeout: 2 * time.Minute, - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LBaaSV2 listener: %s", err) - } - - d.SetId("") - return nil -} - -func waitForListenerActive(networkingClient *gophercloud.ServiceClient, listenerID string) 
resource.StateRefreshFunc { - return func() (interface{}, string, error) { - listener, err := listeners.Get(networkingClient, listenerID).Extract() - if err != nil { - return nil, "", err - } - - // The listener resource has no Status attribute, so a successful Get is the best we can do - log.Printf("[DEBUG] OpenStack LBaaSV2 listener: %+v", listener) - return listener, "ACTIVE", nil - } -} - -func waitForListenerDelete(networkingClient *gophercloud.ServiceClient, listenerID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 listener %s", listenerID) - - listener, err := listeners.Get(networkingClient, listenerID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 listener %s", listenerID) - return listener, "DELETED", nil - } - return listener, "ACTIVE", err - } - - log.Printf("[DEBUG] Openstack LBaaSV2 listener: %+v", listener) - err = listeners.Delete(networkingClient, listenerID).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 listener %s", listenerID) - return listener, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 listener (%s) is still in use.", listenerID) - return listener, "ACTIVE", nil - } - } - - return listener, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 listener %s still active.", listenerID) - return listener, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_lb_listener_v2_test.go b/builtin/providers/openstack/resource_openstack_lb_listener_v2_test.go deleted file mode 100644 index 06af14ede..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_listener_v2_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package 
openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccLBV2Listener_basic(t *testing.T) { - var listener listeners.Listener - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2ListenerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2ListenerConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2ListenerExists("openstack_lb_listener_v2.listener_1", &listener), - ), - }, - resource.TestStep{ - Config: TestAccLBV2ListenerConfig_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_lb_listener_v2.listener_1", "name", "listener_1_updated"), - resource.TestCheckResourceAttr( - "openstack_lb_listener_v2.listener_1", "connection_limit", "100"), - ), - }, - }, - }) -} - -func testAccCheckLBV2ListenerDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_listener_v2" { - continue - } - - _, err := listeners.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Listener still exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckLBV2ListenerExists(n string, listener *listeners.Listener) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := 
config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := listeners.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Member not found") - } - - *listener = *found - - return nil - } -} - -const TestAccLBV2ListenerConfig_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} -` - -const TestAccLBV2ListenerConfig_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1_updated" - protocol = "HTTP" - protocol_port = 8080 - connection_limit = 100 - admin_state_up = "true" - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2.go b/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2.go deleted file mode 
100644 index c4e17995f..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2.go +++ /dev/null @@ -1,337 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func resourceLoadBalancerV2() *schema.Resource { - return &schema.Resource{ - Create: resourceLoadBalancerV2Create, - Read: resourceLoadBalancerV2Read, - Update: resourceLoadBalancerV2Update, - Delete: resourceLoadBalancerV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "vip_subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "vip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "vip_port_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "flavor": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "provider": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - 
Deprecated: "Please use loadbalancer_provider", - }, - - "loadbalancer_provider": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceLoadBalancerV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var lbProvider string - if v, ok := d.GetOk("loadbalancer_provider"); ok { - lbProvider = v.(string) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := loadbalancers.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - VipSubnetID: d.Get("vip_subnet_id").(string), - TenantID: d.Get("tenant_id").(string), - VipAddress: d.Get("vip_address").(string), - AdminStateUp: &adminStateUp, - Flavor: d.Get("flavor").(string), - Provider: lbProvider, - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - lb, err := loadbalancers.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LoadBalancer: %s", err) - } - log.Printf("[INFO] LoadBalancer ID: %s", lb.ID) - - log.Printf("[DEBUG] Waiting for Openstack LoadBalancer (%s) to become available.", lb.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLoadBalancerActive(networkingClient, lb.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - // Once the loadbalancer has been created, apply any requested security groups - // to the port that was 
created behind the scenes. - if err := resourceLoadBalancerV2SecurityGroups(networkingClient, lb.VipPortID, d); err != nil { - return err - } - - // If all has been successful, set the ID on the resource - d.SetId(lb.ID) - - return resourceLoadBalancerV2Read(d, meta) -} - -func resourceLoadBalancerV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - lb, err := loadbalancers.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LoadBalancerV2") - } - - log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 LoadBalancer %s: %+v", d.Id(), lb) - - d.Set("name", lb.Name) - d.Set("description", lb.Description) - d.Set("vip_subnet_id", lb.VipSubnetID) - d.Set("tenant_id", lb.TenantID) - d.Set("vip_address", lb.VipAddress) - d.Set("vip_port_id", lb.VipPortID) - d.Set("admin_state_up", lb.AdminStateUp) - d.Set("flavor", lb.Flavor) - d.Set("loadbalancer_provider", lb.Provider) - - // Get any security groups on the VIP Port - if lb.VipPortID != "" { - port, err := ports.Get(networkingClient, lb.VipPortID).Extract() - if err != nil { - return err - } - - d.Set("security_group_ids", port.SecurityGroups) - } - - return nil -} - -func resourceLoadBalancerV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts loadbalancers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating 
OpenStack LBaaSV2 LoadBalancer %s with options: %+v", d.Id(), updateOpts) - - _, err = loadbalancers.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LBaaSV2 LoadBalancer: %s", err) - } - - // Security Groups get updated separately - if d.HasChange("security_group_ids") { - vipPortID := d.Get("vip_port_id").(string) - if err := resourceLoadBalancerV2SecurityGroups(networkingClient, vipPortID, d); err != nil { - return err - } - } - - return resourceLoadBalancerV2Read(d, meta) -} - -func resourceLoadBalancerV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLoadBalancerDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LBaaSV2 LoadBalancer: %s", err) - } - - d.SetId("") - return nil -} - -func resourceLoadBalancerV2SecurityGroups(networkingClient *gophercloud.ServiceClient, vipPortID string, d *schema.ResourceData) error { - if vipPortID != "" { - if _, ok := d.GetOk("security_group_ids"); ok { - updateOpts := ports.UpdateOpts{ - SecurityGroups: resourcePortSecurityGroupsV2(d), - } - - log.Printf("[DEBUG] Adding security groups to OpenStack LoadBalancer "+ - "VIP Port (%s): %#v", vipPortID, updateOpts) - - _, err := ports.Update(networkingClient, vipPortID, updateOpts).Extract() - if err != nil { - return err - } - } - } - - return nil -} - -func waitForLoadBalancerActive(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc { - return func() 
(interface{}, string, error) { - lb, err := loadbalancers.Get(networkingClient, lbID).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer: %+v", lb) - if lb.ProvisioningStatus == "ACTIVE" { - return lb, "ACTIVE", nil - } - - return lb, lb.ProvisioningStatus, nil - } -} - -func waitForLoadBalancerDelete(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 LoadBalancer %s", lbID) - - lb, err := loadbalancers.Get(networkingClient, lbID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 LoadBalancer %s", lbID) - return lb, "DELETED", nil - } - return lb, "ACTIVE", err - } - - log.Printf("[DEBUG] Openstack LoadBalancerV2: %+v", lb) - err = loadbalancers.Delete(networkingClient, lbID).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 LoadBalancer %s", lbID) - return lb, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer (%s) is still in use.", lbID) - return lb, "ACTIVE", nil - } - } - - return lb, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer (%s) still active.", lbID) - return lb, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2_test.go b/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2_test.go deleted file mode 100644 index 0b157b16a..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_loadbalancer_v2_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package openstack - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func TestAccLBV2LoadBalancer_basic(t *testing.T) { - var lb loadbalancers.LoadBalancer - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2LoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV2LoadBalancerConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2LoadBalancerExists("openstack_lb_loadbalancer_v2.loadbalancer_1", &lb), - ), - }, - resource.TestStep{ - Config: testAccLBV2LoadBalancerConfig_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_lb_loadbalancer_v2.loadbalancer_1", "name", "loadbalancer_1_updated"), - resource.TestMatchResourceAttr( - "openstack_lb_loadbalancer_v2.loadbalancer_1", "vip_port_id", - regexp.MustCompile("^[a-f0-9-]+")), - ), - }, - }, - }) -} - -func TestAccLBV2LoadBalancer_secGroup(t *testing.T) { - var lb loadbalancers.LoadBalancer - var sg_1, sg_2 groups.SecGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2LoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV2LoadBalancer_secGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2LoadBalancerExists( - "openstack_lb_loadbalancer_v2.loadbalancer_1", &lb), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &sg_1), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &sg_2), - resource.TestCheckResourceAttr( - "openstack_lb_loadbalancer_v2.loadbalancer_1", 
"security_group_ids.#", "1"), - testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_1), - ), - }, - resource.TestStep{ - Config: testAccLBV2LoadBalancer_secGroup_update1, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2LoadBalancerExists( - "openstack_lb_loadbalancer_v2.loadbalancer_1", &lb), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &sg_1), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &sg_2), - resource.TestCheckResourceAttr( - "openstack_lb_loadbalancer_v2.loadbalancer_1", "security_group_ids.#", "2"), - testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_1), - testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_2), - ), - }, - resource.TestStep{ - Config: testAccLBV2LoadBalancer_secGroup_update2, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2LoadBalancerExists( - "openstack_lb_loadbalancer_v2.loadbalancer_1", &lb), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &sg_1), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &sg_2), - resource.TestCheckResourceAttr( - "openstack_lb_loadbalancer_v2.loadbalancer_1", "security_group_ids.#", "1"), - testAccCheckLBV2LoadBalancerHasSecGroup(&lb, &sg_2), - ), - }, - }, - }) -} - -func TestAccLBV2LoadBalancer_timeout(t *testing.T) { - var lb loadbalancers.LoadBalancer - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2LoadBalancerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV2LoadBalancerConfig_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2LoadBalancerExists("openstack_lb_loadbalancer_v2.loadbalancer_1", &lb), - ), - }, - }, - }) -} - -func testAccCheckLBV2LoadBalancerDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := 
config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_loadbalancer_v2" { - continue - } - - _, err := loadbalancers.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("LoadBalancer still exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckLBV2LoadBalancerExists( - n string, lb *loadbalancers.LoadBalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := loadbalancers.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Member not found") - } - - *lb = *found - - return nil - } -} - -func testAccCheckLBV2LoadBalancerHasSecGroup( - lb *loadbalancers.LoadBalancer, sg *groups.SecGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - port, err := ports.Get(networkingClient, lb.VipPortID).Extract() - if err != nil { - return err - } - - for _, p := range port.SecurityGroups { - if p == sg.ID { - return nil - } - } - - return fmt.Errorf("LoadBalancer does not have the security group") - } -} - -const testAccLBV2LoadBalancerConfig_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = 
"true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - loadbalancer_provider = "haproxy" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const testAccLBV2LoadBalancerConfig_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1_updated" - loadbalancer_provider = "haproxy" - admin_state_up = "true" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const testAccLBV2LoadBalancer_secGroup = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "secgroup_1" -} - -resource "openstack_networking_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "secgroup_2" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.199.0/24" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - security_group_ids = [ - "${openstack_networking_secgroup_v2.secgroup_1.id}" - ] -} -` - -const testAccLBV2LoadBalancer_secGroup_update1 = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "secgroup_1" -} - -resource "openstack_networking_secgroup_v2" 
"secgroup_2" { - name = "secgroup_2" - description = "secgroup_2" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.199.0/24" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - security_group_ids = [ - "${openstack_networking_secgroup_v2.secgroup_1.id}", - "${openstack_networking_secgroup_v2.secgroup_2.id}" - ] -} -` - -const testAccLBV2LoadBalancer_secGroup_update2 = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "secgroup_1" -} - -resource "openstack_networking_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "secgroup_2" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.199.0/24" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - security_group_ids = [ - "${openstack_networking_secgroup_v2.secgroup_2.id}" - ] - depends_on = ["openstack_networking_secgroup_v2.secgroup_1"] -} -` - -const testAccLBV2LoadBalancerConfig_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - loadbalancer_provider = "haproxy" 
- vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_member_v1.go b/builtin/providers/openstack/resource_openstack_lb_member_v1.go deleted file mode 100644 index e6dc3da9f..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_member_v1.go +++ /dev/null @@ -1,236 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" -) - -func resourceLBMemberV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBMemberV1Create, - Read: resourceLBMemberV1Read, - Update: resourceLBMemberV1Update, - Delete: resourceLBMemberV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - }, - } -} - -func resourceLBMemberV1Create(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := members.CreateOpts{ - TenantID: d.Get("tenant_id").(string), - PoolID: d.Get("pool_id").(string), - Address: d.Get("address").(string), - ProtocolPort: d.Get("port").(int), - } - - log.Printf("[DEBUG] OpenStack LB Member Create Options: %#v", createOpts) - m, err := members.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB member: %s", err) - } - log.Printf("[INFO] LB member ID: %s", m.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB member (%s) to become available.", m.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE", "INACTIVE", "CREATED", "DOWN"}, - Refresh: waitForLBMemberActive(networkingClient, m.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(m.ID) - - // Due to the way Gophercloud is currently set up, AdminStateUp must be set post-create - asu := d.Get("admin_state_up").(bool) - updateOpts := members.UpdateOpts{ - AdminStateUp: &asu, - } - - log.Printf("[DEBUG] OpenStack LB Member Update Options: %#v", createOpts) - m, err = members.Update(networkingClient, m.ID, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB member: %s", err) - } - - return resourceLBMemberV1Read(d, meta) -} - -func resourceLBMemberV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - m, err := members.Get(networkingClient, 
d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB member") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB member %s: %+v", d.Id(), m) - - d.Set("address", m.Address) - d.Set("pool_id", m.PoolID) - d.Set("port", m.ProtocolPort) - d.Set("weight", m.Weight) - d.Set("admin_state_up", m.AdminStateUp) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceLBMemberV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts members.UpdateOpts - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating LB member %s with options: %+v", d.Id(), updateOpts) - - _, err = members.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB member: %s", err) - } - - return resourceLBMemberV1Read(d, meta) -} - -func resourceLBMemberV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - err = members.Delete(networkingClient, d.Id()).ExtractErr() - if err != nil { - CheckDeleted(d, err, "LB member") - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBMemberDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB member: %s", err) - } - - d.SetId("") - return nil -} - -func waitForLBMemberActive(networkingClient 
*gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - m, err := members.Get(networkingClient, memberId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB member: %+v", m) - if m.Status == "ACTIVE" { - return m, "ACTIVE", nil - } - - return m, m.Status, nil - } -} - -func waitForLBMemberDelete(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB member %s", memberId) - - m, err := members.Get(networkingClient, memberId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB member %s", memberId) - return m, "DELETED", nil - } - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB member %s still active.", memberId) - return m, "ACTIVE", nil - } - -} diff --git a/builtin/providers/openstack/resource_openstack_lb_member_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_member_v1_test.go deleted file mode 100644 index af840a5b5..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_member_v1_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccLBV1Member_basic(t *testing.T) { - var member members.Member - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MemberDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Member_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1MemberExists("openstack_lb_member_v1.member_1", &member), - ), - }, - 
resource.TestStep{ - Config: testAccLBV1Member_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_member_v1.member_1", "admin_state_up", "false"), - ), - }, - }, - }) -} - -func TestAccLBV1Member_timeout(t *testing.T) { - var member members.Member - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MemberDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Member_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1MemberExists("openstack_lb_member_v1.member_1", &member), - ), - }, - }, - }) -} - -func testAccCheckLBV1MemberDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_member_v1" { - continue - } - - _, err := members.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("LB Member still exists") - } - } - - return nil -} - -func testAccCheckLBV1MemberExists(n string, member *members.Member) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := members.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Member not found") - } - - *member = *found - - return nil - } -} - -const 
testAccLBV1Member_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_member_v1" "member_1" { - address = "192.168.199.10" - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" -} -` - -const testAccLBV1Member_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_member_v1" "member_1" { - address = "192.168.199.10" - port = 80 - admin_state_up = false - pool_id = "${openstack_lb_pool_v1.pool_1.id}" -} -` - -const testAccLBV1Member_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_member_v1" "member_1" { - address = "192.168.199.10" - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - - timeouts { - create 
= "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_member_v2.go b/builtin/providers/openstack/resource_openstack_lb_member_v2.go deleted file mode 100644 index 61326bac3..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_member_v2.go +++ /dev/null @@ -1,305 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" -) - -func resourceMemberV2() *schema.Resource { - return &schema.Resource{ - Create: resourceMemberV2Create, - Read: resourceMemberV2Read, - Update: resourceMemberV2Update, - Delete: resourceMemberV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "protocol_port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 { - errors = append(errors, fmt.Errorf( - "Only numbers greater than 0 are supported values for 'weight'")) - } - return - }, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - 
"admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceMemberV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := pools.CreateMemberOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - Address: d.Get("address").(string), - ProtocolPort: d.Get("protocol_port").(int), - Weight: d.Get("weight").(int), - AdminStateUp: &adminStateUp, - } - - // Must omit if not set - if v, ok := d.GetOk("subnet_id"); ok { - createOpts.SubnetID = v.(string) - } - - poolID := d.Get("pool_id").(string) - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - var member *pools.Member - err = resource.Retry(10*time.Minute, func() *resource.RetryError { - var err error - log.Printf("[DEBUG] Attempting to create LBaaSV2 member") - member, err = pools.CreateMember(networkingClient, poolID, createOpts).Extract() - if err != nil { - switch errCode := err.(type) { - case gophercloud.ErrDefault500: - log.Printf("[DEBUG] OpenStack LBaaSV2 member is still creating.") - return resource.RetryableError(err) - case gophercloud.ErrUnexpectedResponseCode: - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 member is still creating.") - return resource.RetryableError(err) - } - - default: - return resource.NonRetryableError(err) - } - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating OpenStack LBaaSV2 member: %s", err) - } - log.Printf("[INFO] member ID: %s", member.ID) - - log.Printf("[DEBUG] Waiting for 
Openstack LBaaSV2 member (%s) to become available.", member.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForMemberActive(networkingClient, poolID, member.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(member.ID) - - return resourceMemberV2Read(d, meta) -} - -func resourceMemberV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - member, err := pools.GetMember(networkingClient, d.Get("pool_id").(string), d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LBV2 Member") - } - - log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Member %s: %+v", d.Id(), member) - - d.Set("name", member.Name) - d.Set("weight", member.Weight) - d.Set("admin_state_up", member.AdminStateUp) - d.Set("tenant_id", member.TenantID) - d.Set("subnet_id", member.SubnetID) - d.Set("address", member.Address) - d.Set("protocol_port", member.ProtocolPort) - d.Set("id", member.ID) - - return nil -} - -func resourceMemberV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts pools.UpdateMemberOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("weight") { - updateOpts.Weight = d.Get("weight").(int) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Member %s with options: %+v", d.Id(), 
updateOpts) - - _, err = pools.UpdateMember(networkingClient, d.Get("pool_id").(string), d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LBaaSV2 Member: %s", err) - } - - return resourceMemberV2Read(d, meta) -} - -func resourceMemberV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForMemberDelete(networkingClient, d.Get("pool_id").(string), d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LBaaSV2 Member: %s", err) - } - - d.SetId("") - return nil -} - -func waitForMemberActive(networkingClient *gophercloud.ServiceClient, poolID string, memberID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() - if err != nil { - return nil, "", err - } - - // The member resource has no Status attribute, so a successful Get is the best we can do - log.Printf("[DEBUG] OpenStack LBaaSV2 Member: %+v", member) - return member, "ACTIVE", nil - } -} - -func waitForMemberDelete(networkingClient *gophercloud.ServiceClient, poolID string, memberID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Member %s", memberID) - - member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Member %s", 
memberID) - return member, "DELETED", nil - } - return member, "ACTIVE", err - } - - log.Printf("[DEBUG] Openstack LBaaSV2 Member: %+v", member) - err = pools.DeleteMember(networkingClient, poolID, memberID).ExtractErr() - if err != nil { - switch errCode := err.(type) { - case gophercloud.ErrDefault404: - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Member %s", memberID) - return member, "DELETED", nil - case gophercloud.ErrDefault500: - log.Printf("[DEBUG] OpenStack LBaaSV2 Member (%s) is still in use.", memberID) - return member, "PENDING_DELETE", nil - case gophercloud.ErrUnexpectedResponseCode: - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 Member (%s) is still in use.", memberID) - return member, "PENDING_DELETE", nil - } - - default: - return member, "ACTIVE", err - } - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 Member %s still active.", memberID) - return member, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_lb_member_v2_test.go b/builtin/providers/openstack/resource_openstack_lb_member_v2_test.go deleted file mode 100644 index 488b9fefc..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_member_v2_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccLBV2Member_basic(t *testing.T) { - var member pools.Member - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2MemberDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2MemberConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2MemberExists("openstack_lb_member_v2.member_1", &member), - ), - }, - resource.TestStep{ - Config: 
TestAccLBV2MemberConfig_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_member_v2.member_1", "weight", "10"), - ), - }, - }, - }) -} - -func TestAccLBV2Member_timeout(t *testing.T) { - var member pools.Member - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2MemberDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2MemberConfig_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2MemberExists("openstack_lb_member_v2.member_1", &member), - ), - }, - }, - }) -} - -func testAccCheckLBV2MemberDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_member_v2" { - continue - } - - poolId := rs.Primary.Attributes["pool_id"] - _, err := pools.GetMember(networkingClient, poolId, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Member still exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckLBV2MemberExists(n string, member *pools.Member) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - poolId := rs.Primary.Attributes["pool_id"] - found, err := pools.GetMember(networkingClient, poolId, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return 
fmt.Errorf("Member not found") - } - - *member = *found - - return nil - } -} - -const TestAccLBV2MemberConfig_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.199.0/24" - ip_version = 4 -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_member_v2" "member_1" { - address = "192.168.199.10" - protocol_port = 8080 - pool_id = "${openstack_lb_pool_v2.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const TestAccLBV2MemberConfig_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = 
"${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_member_v2" "member_1" { - address = "192.168.199.10" - protocol_port = 8080 - weight = 10 - admin_state_up = "true" - pool_id = "${openstack_lb_pool_v2.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const TestAccLBV2MemberConfig_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - network_id = "${openstack_networking_network_v2.network_1.id}" - cidr = "192.168.199.0/24" - ip_version = 4 -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_member_v2" "member_1" { - address = "192.168.199.10" - protocol_port = 8080 - pool_id = "${openstack_lb_pool_v2.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go deleted file mode 100644 index 26066cbea..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go +++ /dev/null @@ -1,310 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" -) - -func resourceLBMonitorV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBMonitorV1Create, - Read: resourceLBMonitorV1Read, - Update: resourceLBMonitorV1Update, - Delete: resourceLBMonitorV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "delay": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "max_retries": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: false, - }, - "url_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "expected_codes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - }, - } -} - -func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := monitors.CreateOpts{ - TenantID: 
d.Get("tenant_id").(string), - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - ExpectedCodes: d.Get("expected_codes").(string), - HTTPMethod: d.Get("http_method").(string), - } - - if v, ok := d.GetOk("type"); ok { - monitorType := resourceLBMonitorV1DetermineType(v.(string)) - createOpts.Type = monitorType - } - - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - createOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - m, err := monitors.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB Monitor: %s", err) - } - log.Printf("[INFO] LB Monitor ID: %s", m.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBMonitorActive(networkingClient, m.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(m.ID) - - return resourceLBMonitorV1Read(d, meta) -} - -func resourceLBMonitorV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - m, err := monitors.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB monitor") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB Monitor %s: %+v", d.Id(), m) - - d.Set("type", m.Type) - d.Set("delay", m.Delay) - d.Set("timeout", m.Timeout) - 
d.Set("max_retries", m.MaxRetries) - d.Set("tenant_id", m.TenantID) - d.Set("url_path", m.URLPath) - d.Set("http_method", m.HTTPMethod) - d.Set("expected_codes", m.ExpectedCodes) - d.Set("admin_state_up", strconv.FormatBool(m.AdminStateUp)) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceLBMonitorV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - updateOpts := monitors.UpdateOpts{ - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - HTTPMethod: d.Get("http_method").(string), - ExpectedCodes: d.Get("expected_codes").(string), - } - - if d.HasChange("admin_state_up") { - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - updateOpts.AdminStateUp = &asu - } - } - - log.Printf("[DEBUG] Updating OpenStack LB Monitor %s with options: %+v", d.Id(), updateOpts) - - _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB Monitor: %s", err) - } - - return resourceLBMonitorV1Read(d, meta) -} - -func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBMonitorDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 
3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) - } - - d.SetId("") - return nil -} - -func resourceLBMonitorV1DetermineType(t string) monitors.MonitorType { - var monitorType monitors.MonitorType - switch t { - case "PING": - monitorType = monitors.TypePING - case "TCP": - monitorType = monitors.TypeTCP - case "HTTP": - monitorType = monitors.TypeHTTP - case "HTTPS": - monitorType = monitors.TypeHTTPS - } - - return monitorType -} - -func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - m, err := monitors.Get(networkingClient, monitorId).Extract() - if err != nil { - return nil, "", err - } - - // The monitor resource has no Status attribute, so a successful Get is the best we can do - log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) - return m, "ACTIVE", nil - } -} - -func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId) - - m, err := monitors.Get(networkingClient, monitorId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) - return m, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) - return m, "PENDING", nil - } - } - - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) - err = monitors.Delete(networkingClient, monitorId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB 
Monitor %s", monitorId) - return m, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) - return m, "PENDING", nil - } - } - - return m, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId) - return m, "ACTIVE", nil - } - -} diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go deleted file mode 100644 index 3da3b6623..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" -) - -func TestAccLBV1Monitor_basic(t *testing.T) { - var monitor monitors.Monitor - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Monitor_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor), - ), - }, - resource.TestStep{ - Config: testAccLBV1Monitor_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_monitor_v1.monitor_1", "delay", "20"), - ), - }, - }, - }) -} - -func TestAccLBV1Monitor_timeout(t *testing.T) { - var monitor monitors.Monitor - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1MonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Monitor_timeout, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor), - ), - }, - }, - }) -} - -func testAccCheckLBV1MonitorDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_monitor_v1" { - continue - } - - _, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("LB monitor still exists") - } - } - - return nil -} - -func testAccCheckLBV1MonitorExists(n string, monitor *monitors.Monitor) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Monitor not found") - } - - *monitor = *found - - return nil - } -} - -const testAccLBV1Monitor_basic = ` -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "PING" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} -` - -const testAccLBV1Monitor_update = ` -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "PING" - delay = 20 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} -` - -const testAccLBV1Monitor_timeout = ` -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "PING" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" - - timeouts { - create = 
"5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v2.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v2.go deleted file mode 100644 index 061c270e5..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_monitor_v2.go +++ /dev/null @@ -1,294 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" -) - -func resourceMonitorV2() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitorV2Create, - Read: resourceMonitorV2Read, - Update: resourceMonitorV2Update, - Delete: resourceMonitorV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "delay": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "max_retries": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "url_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - 
"expected_codes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceMonitorV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - createOpts := monitors.CreateOpts{ - PoolID: d.Get("pool_id").(string), - TenantID: d.Get("tenant_id").(string), - Type: d.Get("type").(string), - Delay: d.Get("delay").(int), - Timeout: d.Get("timeout").(int), - MaxRetries: d.Get("max_retries").(int), - URLPath: d.Get("url_path").(string), - HTTPMethod: d.Get("http_method").(string), - ExpectedCodes: d.Get("expected_codes").(string), - Name: d.Get("name").(string), - AdminStateUp: &adminStateUp, - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - monitor, err := monitors.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LBaaSV2 monitor: %s", err) - } - log.Printf("[INFO] monitor ID: %s", monitor.ID) - - log.Printf("[DEBUG] Waiting for Openstack LBaaSV2 monitor (%s) to become available.", monitor.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForMonitorActive(networkingClient, monitor.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(monitor.ID) - - return resourceMonitorV2Read(d, meta) -} - -func resourceMonitorV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - monitor, err := monitors.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LBV2 Monitor") - } - - log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Monitor %s: %+v", d.Id(), monitor) - - d.Set("id", monitor.ID) - d.Set("tenant_id", monitor.TenantID) - d.Set("type", monitor.Type) - d.Set("delay", monitor.Delay) - d.Set("timeout", monitor.Timeout) - d.Set("max_retries", monitor.MaxRetries) - d.Set("url_path", monitor.URLPath) - d.Set("http_method", monitor.HTTPMethod) - d.Set("expected_codes", monitor.ExpectedCodes) - d.Set("admin_state_up", monitor.AdminStateUp) - d.Set("name", monitor.Name) - - return nil -} - -func resourceMonitorV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts monitors.UpdateOpts - if d.HasChange("url_path") { - updateOpts.URLPath = d.Get("url_path").(string) - } - if d.HasChange("expected_codes") { - updateOpts.ExpectedCodes = d.Get("expected_codes").(string) - } - if d.HasChange("delay") { - updateOpts.Delay = d.Get("delay").(int) - } - if d.HasChange("timeout") { - updateOpts.Timeout = d.Get("timeout").(int) - } - if d.HasChange("max_retries") { - updateOpts.MaxRetries = d.Get("max_retries").(int) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("http_method") { - updateOpts.HTTPMethod = d.Get("http_method").(string) - } - - log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Monitor %s with options: %+v", d.Id(), updateOpts) - - _, err = monitors.Update(networkingClient, 
d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LBaaSV2 Monitor: %s", err) - } - - return resourceMonitorV2Read(d, meta) -} - -func resourceMonitorV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForMonitorDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LBaaSV2 Monitor: %s", err) - } - - d.SetId("") - return nil -} - -func waitForMonitorActive(networkingClient *gophercloud.ServiceClient, monitorID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - monitor, err := monitors.Get(networkingClient, monitorID).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor: %+v", monitor) - return monitor, "ACTIVE", nil - } -} - -func waitForMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Monitor %s", monitorID) - - monitor, err := monitors.Get(networkingClient, monitorID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Monitor %s", monitorID) - return monitor, "DELETED", nil - } - return monitor, "ACTIVE", err - } - - log.Printf("[DEBUG] Openstack LBaaSV2 Monitor: %+v", monitor) - err = monitors.Delete(networkingClient, monitorID).ExtractErr() - if err != nil { - if _, ok := 
err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Monitor %s", monitorID) - return monitor, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor (%s) is still in use.", monitorID) - return monitor, "ACTIVE", nil - } - } - - return monitor, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor %s still active.", monitorID) - return monitor, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v2_test.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v2_test.go deleted file mode 100644 index a7f095301..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_monitor_v2_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccLBV2Monitor_basic(t *testing.T) { - var monitor monitors.Monitor - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2MonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2MonitorConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2MonitorExists(t, "openstack_lb_monitor_v2.monitor_1", &monitor), - ), - }, - resource.TestStep{ - Config: TestAccLBV2MonitorConfig_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_lb_monitor_v2.monitor_1", "name", "monitor_1_updated"), - resource.TestCheckResourceAttr("openstack_lb_monitor_v2.monitor_1", "delay", "30"), - resource.TestCheckResourceAttr("openstack_lb_monitor_v2.monitor_1", "timeout", "15"), - ), - }, - }, - }) -} - -func 
TestAccLBV2Monitor_timeout(t *testing.T) { - var monitor monitors.Monitor - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2MonitorDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2MonitorConfig_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2MonitorExists(t, "openstack_lb_monitor_v2.monitor_1", &monitor), - ), - }, - }, - }) -} - -func testAccCheckLBV2MonitorDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_monitor_v2" { - continue - } - - _, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Monitor still exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckLBV2MonitorExists(t *testing.T, n string, monitor *monitors.Monitor) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Monitor not found") - } - - *monitor = *found - - return nil - } -} - -const TestAccLBV2MonitorConfig_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource 
"openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_monitor_v2" "monitor_1" { - name = "monitor_1" - type = "PING" - delay = 20 - timeout = 10 - max_retries = 5 - pool_id = "${openstack_lb_pool_v2.pool_1.id}" -} -` - -const TestAccLBV2MonitorConfig_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_monitor_v2" "monitor_1" { - name = "monitor_1_updated" - type = "PING" - delay = 30 - timeout = 15 - max_retries = 10 - admin_state_up = "true" - pool_id = "${openstack_lb_pool_v2.pool_1.id}" -} -` - -const 
TestAccLBV2MonitorConfig_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} - -resource "openstack_lb_monitor_v2" "monitor_1" { - name = "monitor_1" - type = "PING" - delay = 20 - timeout = 10 - max_retries = 5 - pool_id = "${openstack_lb_pool_v2.pool_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go deleted file mode 100644 index 7cf796c6f..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go +++ /dev/null @@ -1,467 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" - "github.com/gophercloud/gophercloud/pagination" -) - -func resourceLBPoolV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBPoolV1Create, - Read: 
resourceLBPoolV1Read, - Update: resourceLBPoolV1Update, - Delete: resourceLBPoolV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "lb_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "lb_provider": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "member": &schema.Schema{ - Type: schema.TypeSet, - Deprecated: "Use openstack_lb_member_v1 instead. 
This attribute will be removed in a future version.", - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: false, - }, - }, - }, - Set: resourceLBMemberV1Hash, - }, - "monitor_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := pools.CreateOpts{ - Name: d.Get("name").(string), - SubnetID: d.Get("subnet_id").(string), - TenantID: d.Get("tenant_id").(string), - Provider: d.Get("lb_provider").(string), - } - - if v, ok := d.GetOk("protocol"); ok { - protocol := resourceLBPoolV1DetermineProtocol(v.(string)) - createOpts.Protocol = protocol - } - - if v, ok := d.GetOk("lb_method"); ok { - lbMethod := resourceLBPoolV1DetermineLBMethod(v.(string)) - createOpts.LBMethod = lbMethod - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := pools.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB pool: %s", err) - } - log.Printf("[INFO] LB Pool ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", 
p.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBPoolActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(p.ID) - - if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil { - for _, mID := range mIDs { - _, err := pools.AssociateMonitor(networkingClient, p.ID, mID).Extract() - if err != nil { - return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", mID, p.ID, err) - } - } - } - - if memberOpts := resourcePoolMembersV1(d); memberOpts != nil { - for _, memberOpt := range memberOpts { - _, err := members.Create(networkingClient, memberOpt).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack LB member: %s", err) - } - } - } - - return resourceLBPoolV1Read(d, meta) -} - -func resourceLBPoolV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := pools.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB pool") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB Pool %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("protocol", p.Protocol) - d.Set("subnet_id", p.SubnetID) - d.Set("lb_method", p.LBMethod) - d.Set("lb_provider", p.Provider) - d.Set("tenant_id", p.TenantID) - d.Set("monitor_ids", p.MonitorIDs) - d.Set("member_ids", p.MemberIDs) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack 
networking client: %s", err) - } - - var updateOpts pools.UpdateOpts - // If either option changed, update both. - // Gophercloud complains if one is empty. - if d.HasChange("name") || d.HasChange("lb_method") { - updateOpts.Name = d.Get("name").(string) - - lbMethod := resourceLBPoolV1DetermineLBMethod(d.Get("lb_method").(string)) - updateOpts.LBMethod = lbMethod - } - - log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts) - - _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB Pool: %s", err) - } - - if d.HasChange("monitor_ids") { - oldMIDsRaw, newMIDsRaw := d.GetChange("monitor_ids") - oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set) - monitorsToAdd := newMIDsSet.Difference(oldMIDsSet) - monitorsToRemove := oldMIDsSet.Difference(newMIDsSet) - - log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd) - - log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove) - - for _, m := range monitorsToAdd.List() { - _, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract() - if err != nil { - return fmt.Errorf("Error associating monitor (%s) with OpenStack server (%s): %s", m.(string), d.Id(), err) - } - log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id()) - } - - for _, m := range monitorsToRemove.List() { - _, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract() - if err != nil { - return fmt.Errorf("Error disassociating monitor (%s) from OpenStack server (%s): %s", m.(string), d.Id(), err) - } - log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id()) - } - } - - if d.HasChange("member") { - oldMembersRaw, newMembersRaw := d.GetChange("member") - oldMembersSet, newMembersSet := oldMembersRaw.(*schema.Set), newMembersRaw.(*schema.Set) - membersToAdd := newMembersSet.Difference(oldMembersSet) - membersToRemove := 
oldMembersSet.Difference(newMembersSet) - - log.Printf("[DEBUG] Members to add: %v", membersToAdd) - - log.Printf("[DEBUG] Members to remove: %v", membersToRemove) - - for _, m := range membersToRemove.List() { - oldMember := resourcePoolMemberV1(d, m) - listOpts := members.ListOpts{ - PoolID: d.Id(), - Address: oldMember.Address, - ProtocolPort: oldMember.ProtocolPort, - } - err = members.List(networkingClient, listOpts).EachPage(func(page pagination.Page) (bool, error) { - extractedMembers, err := members.ExtractMembers(page) - if err != nil { - return false, err - } - for _, member := range extractedMembers { - err := members.Delete(networkingClient, member.ID).ExtractErr() - if err != nil { - return false, fmt.Errorf("Error deleting member (%s) from OpenStack LB pool (%s): %s", member.ID, d.Id(), err) - } - log.Printf("[DEBUG] Deleted member (%s) from pool (%s)", member.ID, d.Id()) - } - return true, nil - }) - } - - for _, m := range membersToAdd.List() { - createOpts := resourcePoolMemberV1(d, m) - newMember, err := members.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating LB member: %s", err) - } - log.Printf("[DEBUG] Created member (%s) in OpenStack LB pool (%s)", newMember.ID, d.Id()) - } - } - - return resourceLBPoolV1Read(d, meta) -} - -func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Make sure all monitors are disassociated first - if v, ok := d.GetOk("monitor_ids"); ok { - if monitorIDList, ok := v.([]interface{}); ok { - for _, monitorID := range monitorIDList { - mID := monitorID.(string) - log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id()) - if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil { - return 
fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), err) - } - } - } - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBPoolDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err) - } - - d.SetId("") - return nil -} - -func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string { - mIDsRaw := d.Get("monitor_ids").(*schema.Set) - mIDs := make([]string, mIDsRaw.Len()) - for i, raw := range mIDsRaw.List() { - mIDs[i] = raw.(string) - } - return mIDs -} - -func resourcePoolMembersV1(d *schema.ResourceData) []members.CreateOpts { - memberOptsRaw := d.Get("member").(*schema.Set) - memberOpts := make([]members.CreateOpts, memberOptsRaw.Len()) - for i, raw := range memberOptsRaw.List() { - rawMap := raw.(map[string]interface{}) - memberOpts[i] = members.CreateOpts{ - TenantID: rawMap["tenant_id"].(string), - Address: rawMap["address"].(string), - ProtocolPort: rawMap["port"].(int), - PoolID: d.Id(), - } - } - return memberOpts -} - -func resourcePoolMemberV1(d *schema.ResourceData, raw interface{}) members.CreateOpts { - rawMap := raw.(map[string]interface{}) - return members.CreateOpts{ - TenantID: rawMap["tenant_id"].(string), - Address: rawMap["address"].(string), - ProtocolPort: rawMap["port"].(int), - PoolID: d.Id(), - } -} - -func resourceLBMemberV1Hash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["region"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["tenant_id"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["address"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) - - return hashcode.String(buf.String()) -} - -func 
resourceLBPoolV1DetermineProtocol(v string) pools.LBProtocol { - var protocol pools.LBProtocol - switch v { - case "TCP": - protocol = pools.ProtocolTCP - case "HTTP": - protocol = pools.ProtocolHTTP - case "HTTPS": - protocol = pools.ProtocolHTTPS - } - - return protocol -} - -func resourceLBPoolV1DetermineLBMethod(v string) pools.LBMethod { - var lbMethod pools.LBMethod - switch v { - case "ROUND_ROBIN": - lbMethod = pools.LBMethodRoundRobin - case "LEAST_CONNECTIONS": - lbMethod = pools.LBMethodLeastConnections - } - - return lbMethod -} - -func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := pools.Get(networkingClient, poolId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) - if p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId) - - p, err := pools.Get(networkingClient, poolId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) - err = pools.Delete(networkingClient, poolId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId) - return p, "ACTIVE", nil - } - -} diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go 
b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go deleted file mode 100644 index 72e905406..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go +++ /dev/null @@ -1,514 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccLBV1Pool_basic(t *testing.T) { - var pool pools.Pool - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Pool_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - resource.TestCheckResourceAttr("openstack_lb_pool_v1.pool_1", "lb_provider", "haproxy"), - ), - }, - resource.TestStep{ - Config: testAccLBV1Pool_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_pool_v1.pool_1", "name", "pool_1"), - ), - }, - }, - }) -} - -func TestAccLBV1Pool_fullstack(t *testing.T) { - var instance1, instance2 servers.Server - var monitor monitors.Monitor - var network networks.Network - var pool pools.Pool - var secgroup secgroups.SecurityGroup - var subnet subnets.Subnet - var vip vips.VirtualIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) 
}, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Pool_fullstack_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.secgroup_1", &secgroup), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance1), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_2", &instance2), - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor), - testAccCheckLBV1VIPExists("openstack_lb_vip_v1.vip_1", &vip), - ), - }, - resource.TestStep{ - Config: testAccLBV1Pool_fullstack_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.secgroup_1", &secgroup), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance1), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_2", &instance2), - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor), - testAccCheckLBV1VIPExists("openstack_lb_vip_v1.vip_1", &vip), - ), - }, - }, - }) -} - -func TestAccLBV1Pool_timeout(t *testing.T) { - var pool pools.Pool - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccLBV1Pool_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - resource.TestCheckResourceAttr("openstack_lb_pool_v1.pool_1", "lb_provider", "haproxy"), - ), - }, - }, - }) -} - -func TestAccLBV1Pool_updateMonitor(t *testing.T) { - var monitor_1 monitors.Monitor - var monitor_2 monitors.Monitor - var network networks.Network - var pool pools.Pool - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1Pool_updateMonitor_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor_1), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_2", &monitor_2), - ), - }, - resource.TestStep{ - Config: testAccLBV1Pool_updateMonitor_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckLBV1PoolExists("openstack_lb_pool_v1.pool_1", &pool), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_1", &monitor_1), - testAccCheckLBV1MonitorExists("openstack_lb_monitor_v1.monitor_2", &monitor_2), - ), - }, - }, - }) -} - -func testAccCheckLBV1PoolDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - 
for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_pool_v1" { - continue - } - - _, err := pools.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("LB Pool still exists") - } - } - - return nil -} - -func testAccCheckLBV1PoolExists(n string, pool *pools.Pool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := pools.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Pool not found") - } - - *pool = *found - - return nil - } -} - -const testAccLBV1Pool_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - lb_provider = "haproxy" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const testAccLBV1Pool_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} 
-` - -const testAccLBV1Pool_fullstack_1 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "Rules for secgroup_1" - - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } - - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - } -} - -resource "openstack_compute_instance_v2" "instance_2" { - name = "instance_2" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - } -} - -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"] - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_member_v1" "member_1" { - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}" -} - -resource "openstack_lb_member_v1" "member_2" { - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}" -} - -resource "openstack_lb_vip_v1" "vip_1" { - name = "vip_1" - protocol = "TCP" - port = 
80 - admin_state_up = true - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - pool_id = "${openstack_lb_pool_v1.pool_1.id}" -} -` - -const testAccLBV1Pool_fullstack_2 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "Rules for secgroup_1" - - rule { - from_port = -1 - to_port = -1 - ip_protocol = "icmp" - cidr = "0.0.0.0/0" - } - - rule { - from_port = 80 - to_port = 80 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - } -} - -resource "openstack_compute_instance_v2" "instance_2" { - name = "instance_2" - security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] - user_data = "#cloud-config\ndisable_root: false" - - network { - uuid = "${openstack_networking_network_v2.network_1.id}" - } -} - -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"] - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_member_v1" "member_1" { - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}" -} - -resource "openstack_lb_member_v1" "member_2" { - port = 80 - admin_state_up = true - pool_id = 
"${openstack_lb_pool_v1.pool_1.id}" - address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}" -} - -resource "openstack_lb_vip_v1" "vip_1" { - name = "vip_1" - protocol = "TCP" - port = 80 - admin_state_up = true - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - pool_id = "${openstack_lb_pool_v1.pool_1.id}" -} -` - -const testAccLBV1Pool_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - lb_provider = "haproxy" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` - -const testAccLBV1Pool_updateMonitor_1 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_monitor_v1" "monitor_2" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"] - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const testAccLBV1Pool_updateMonitor_2 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" 
- ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_monitor_v1" "monitor_1" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_monitor_v1" "monitor_2" { - type = "TCP" - delay = 30 - timeout = 5 - max_retries = 3 - admin_state_up = "true" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "TCP" - lb_method = "ROUND_ROBIN" - monitor_ids = ["${openstack_lb_monitor_v1.monitor_2.id}"] - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v2.go b/builtin/providers/openstack/resource_openstack_lb_pool_v2.go deleted file mode 100644 index d1a602f53..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v2.go +++ /dev/null @@ -1,350 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" -) - -func resourcePoolV2() *schema.Resource { - return &schema.Resource{ - Create: resourcePoolV2Create, - Read: resourcePoolV2Read, - Update: resourcePoolV2Update, - Delete: resourcePoolV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
}, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "TCP" && value != "HTTP" && value != "HTTPS" { - errors = append(errors, fmt.Errorf( - "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) - } - return - }, - }, - - // One of loadbalancer_id or listener_id must be provided - "loadbalancer_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - // One of loadbalancer_id or listener_id must be provided - "listener_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "lb_method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "ROUND_ROBIN" && value != "LEAST_CONNECTIONS" && value != "SOURCE_IP" { - errors = append(errors, fmt.Errorf( - "Only 'ROUND_ROBIN', 'LEAST_CONNECTIONS', and 'SOURCE_IP' are supported values for 'lb_method'")) - } - return - }, - }, - - "persistence": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "SOURCE_IP" && value != "HTTP_COOKIE" && value != "APP_COOKIE" { - errors = append(errors, fmt.Errorf( - "Only 'SOURCE_IP', 'HTTP_COOKIE', and 'APP_COOKIE' are supported values for 'persistence'")) - } - return - }, - }, - - "cookie_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Default: true, - Optional: true, - }, - - "id": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourcePoolV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - adminStateUp := d.Get("admin_state_up").(bool) - var persistence pools.SessionPersistence - if p, ok := d.GetOk("persistence"); ok { - pV := (p.([]interface{}))[0].(map[string]interface{}) - - persistence = pools.SessionPersistence{ - Type: pV["type"].(string), - CookieName: pV["cookie_name"].(string), - } - } - createOpts := pools.CreateOpts{ - TenantID: d.Get("tenant_id").(string), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Protocol: pools.Protocol(d.Get("protocol").(string)), - LoadbalancerID: d.Get("loadbalancer_id").(string), - ListenerID: d.Get("listener_id").(string), - LBMethod: pools.LBMethod(d.Get("lb_method").(string)), - AdminStateUp: &adminStateUp, - } - // Must omit if not set - if persistence != (pools.SessionPersistence{}) { - createOpts.Persistence = &persistence - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - - var pool *pools.Pool - err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { - var err error - log.Printf("[DEBUG] Attempting to create LBaaSV2 pool") - pool, err = pools.Create(networkingClient, createOpts).Extract() - if err != nil { - switch errCode := err.(type) { - case gophercloud.ErrDefault500: - log.Printf("[DEBUG] OpenStack LBaaSV2 pool is still creating.") - return resource.RetryableError(err) - case gophercloud.ErrUnexpectedResponseCode: - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 pool is still creating.") - return resource.RetryableError(err) - } - default: - return resource.NonRetryableError(err) - } - } - return nil - }) - - if err != nil { - return fmt.Errorf("Error creating 
OpenStack LBaaSV2 pool: %s", err) - } - - log.Printf("[INFO] pool ID: %s", pool.ID) - - log.Printf("[DEBUG] Waiting for Openstack LBaaSV2 pool (%s) to become available.", pool.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForPoolActive(networkingClient, pool.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetId(pool.ID) - - return resourcePoolV2Read(d, meta) -} - -func resourcePoolV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - pool, err := pools.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LBV2 Pool") - } - - log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Pool %s: %+v", d.Id(), pool) - - d.Set("lb_method", pool.LBMethod) - d.Set("protocol", pool.Protocol) - d.Set("description", pool.Description) - d.Set("tenant_id", pool.TenantID) - d.Set("admin_state_up", pool.AdminStateUp) - d.Set("name", pool.Name) - d.Set("id", pool.ID) - d.Set("persistence", pool.Persistence) - - return nil -} - -func resourcePoolV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts pools.UpdateOpts - if d.HasChange("lb_method") { - updateOpts.LBMethod = pools.LBMethod(d.Get("lb_method").(string)) - } - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("description") { - updateOpts.Description = d.Get("description").(string) - } - if d.HasChange("admin_state_up") { - asu := 
d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Pool %s with options: %+v", d.Id(), updateOpts) - - _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LBaaSV2 Pool: %s", err) - } - - return resourcePoolV2Read(d, meta) -} - -func resourcePoolV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForPoolDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LBaaSV2 Pool: %s", err) - } - - d.SetId("") - return nil -} - -func waitForPoolActive(networkingClient *gophercloud.ServiceClient, poolID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - pool, err := pools.Get(networkingClient, poolID).Extract() - if err != nil { - return nil, "", err - } - - // The pool resource has no Status attribute, so a successful Get is the best we can do - log.Printf("[DEBUG] OpenStack LBaaSV2 Pool: %+v", pool) - return pool, "ACTIVE", nil - } -} - -func waitForPoolDelete(networkingClient *gophercloud.ServiceClient, poolID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Pool %s", poolID) - - pool, err := pools.Get(networkingClient, poolID).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Pool %s", poolID) - 
return pool, "DELETED", nil - } - return pool, "ACTIVE", err - } - - log.Printf("[DEBUG] Openstack LBaaSV2 Pool: %+v", pool) - err = pools.Delete(networkingClient, poolID).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Pool %s", poolID) - return pool, "DELETED", nil - } - - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] OpenStack LBaaSV2 Pool (%s) is still in use.", poolID) - return pool, "ACTIVE", nil - } - } - - return pool, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LBaaSV2 Pool %s still active.", poolID) - return pool, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v2_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v2_test.go deleted file mode 100644 index 6af15374a..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v2_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccLBV2Pool_basic(t *testing.T) { - var pool pools.Pool - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2PoolConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2PoolExists("openstack_lb_pool_v2.pool_1", &pool), - ), - }, - resource.TestStep{ - Config: TestAccLBV2PoolConfig_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_pool_v2.pool_1", "name", "pool_1_updated"), - ), - }, - }, - }) -} - -func TestAccLBV2Pool_timeout(t *testing.T) { - var pool pools.Pool - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV2PoolDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: TestAccLBV2PoolConfig_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV2PoolExists("openstack_lb_pool_v2.pool_1", &pool), - ), - }, - }, - }) -} - -func testAccCheckLBV2PoolDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_pool_v2" { - continue - } - - _, err := pools.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Pool still exists: %s", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckLBV2PoolExists(n string, pool *pools.Pool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := pools.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Member not found") - } - - *pool = *found - - return nil - } -} - -const TestAccLBV2PoolConfig_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" 
-} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} -` - -const TestAccLBV2PoolConfig_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1_updated" - protocol = "HTTP" - lb_method = "LEAST_CONNECTIONS" - admin_state_up = "true" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" -} -` - -const TestAccLBV2PoolConfig_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_loadbalancer_v2" "loadbalancer_1" { - name = "loadbalancer_1" - vip_subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_listener_v2" "listener_1" { - 
name = "listener_1" - protocol = "HTTP" - protocol_port = 8080 - loadbalancer_id = "${openstack_lb_loadbalancer_v2.loadbalancer_1.id}" -} - -resource "openstack_lb_pool_v2" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - listener_id = "${openstack_lb_listener_v2.listener_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go deleted file mode 100644 index 6e6d46d89..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go +++ /dev/null @@ -1,401 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceLBVipV1() *schema.Resource { - return &schema.Resource{ - Create: resourceLBVipV1Create, - Read: resourceLBVipV1Read, - Update: resourceLBVipV1Update, - Delete: resourceLBVipV1Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ - Type: schema.TypeInt, - 
Required: true, - ForceNew: true, - }, - "pool_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: false, - }, - "persistence": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - "conn_limit": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - ForceNew: false, - }, - "floating_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: false, - }, - }, - } -} - -func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := vips.CreateOpts{ - Name: d.Get("name").(string), - SubnetID: d.Get("subnet_id").(string), - Protocol: d.Get("protocol").(string), - ProtocolPort: d.Get("port").(int), - PoolID: d.Get("pool_id").(string), - TenantID: d.Get("tenant_id").(string), - Address: d.Get("address").(string), - Description: d.Get("description").(string), - Persistence: resourceVipPersistenceV1(d), - ConnLimit: gophercloud.MaybeInt(d.Get("conn_limit").(int)), - } - - asu := d.Get("admin_state_up").(bool) - createOpts.AdminStateUp = &asu - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := vips.Create(networkingClient, createOpts).Extract() - if err != nil { - return 
fmt.Errorf("Error creating OpenStack LB VIP: %s", err) - } - log.Printf("[INFO] LB VIP ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"PENDING_CREATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForLBVIPActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - floatingIP := d.Get("floating_ip").(string) - if floatingIP != "" { - lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient) - } - - d.SetId(p.ID) - - return resourceLBVipV1Read(d, meta) -} - -func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := vips.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "LB VIP") - } - - log.Printf("[DEBUG] Retrieved OpenStack LB VIP %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("subnet_id", p.SubnetID) - d.Set("protocol", p.Protocol) - d.Set("port", p.ProtocolPort) - d.Set("pool_id", p.PoolID) - d.Set("port_id", p.PortID) - d.Set("tenant_id", p.TenantID) - d.Set("address", p.Address) - d.Set("description", p.Description) - d.Set("conn_limit", p.ConnLimit) - d.Set("admin_state_up", p.AdminStateUp) - - // Set the persistence method being used - persistence := make(map[string]interface{}) - if p.Persistence.Type != "" { - persistence["type"] = p.Persistence.Type - } - if p.Persistence.CookieName != "" { - persistence["cookie_name"] = p.Persistence.CookieName - } - d.Set("persistence", persistence) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceLBVipV1Update(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts vips.UpdateOpts - if d.HasChange("name") { - v := d.Get("name").(string) - updateOpts.Name = &v - } - - if d.HasChange("pool_id") { - v := d.Get("pool_id").(string) - updateOpts.PoolID = &v - } - - if d.HasChange("description") { - v := d.Get("description").(string) - updateOpts.Description = &v - } - - if d.HasChange("conn_limit") { - updateOpts.ConnLimit = gophercloud.MaybeInt(d.Get("conn_limit").(int)) - } - - if d.HasChange("floating_ip") { - portID := d.Get("port_id").(string) - - // Searching for a floating IP assigned to the VIP - listOpts := floatingips.ListOpts{ - PortID: portID, - } - page, err := floatingips.List(networkingClient, listOpts).AllPages() - if err != nil { - return err - } - - fips, err := floatingips.ExtractFloatingIPs(page) - if err != nil { - return err - } - - // If a floating IP is found we unassign it - if len(fips) == 1 { - portID := "" - updateOpts := floatingips.UpdateOpts{ - PortID: &portID, - } - if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { - return err - } - } - - // Assign the updated floating IP - floatingIP := d.Get("floating_ip").(string) - if floatingIP != "" { - lbVipV1AssignFloatingIP(floatingIP, portID, networkingClient) - } - } - - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - - // Persistence has to be included, even if it hasn't changed. 
- updateOpts.Persistence = resourceVipPersistenceV1(d) - - log.Printf("[DEBUG] Updating OpenStack LB VIP %s with options: %+v", d.Id(), updateOpts) - - _, err = vips.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack LB VIP: %s", err) - } - - return resourceLBVipV1Read(d, meta) -} - -func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE", "PENDING_DELETE"}, - Target: []string{"DELETED"}, - Refresh: waitForLBVIPDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err) - } - - d.SetId("") - return nil -} - -func resourceVipPersistenceV1(d *schema.ResourceData) *vips.SessionPersistence { - rawP := d.Get("persistence").(interface{}) - rawMap := rawP.(map[string]interface{}) - if len(rawMap) != 0 { - p := vips.SessionPersistence{} - if t, ok := rawMap["type"]; ok { - p.Type = t.(string) - } - if c, ok := rawMap["cookie_name"]; ok { - p.CookieName = c.(string) - } - return &p - } - return nil -} - -func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gophercloud.ServiceClient) error { - log.Printf("[DEBUG] Assigning floating IP %s to VIP %s", floatingIP, portID) - - listOpts := floatingips.ListOpts{ - FloatingIP: floatingIP, - } - page, err := floatingips.List(networkingClient, listOpts).AllPages() - if err != nil { - return err - } - - fips, err := floatingips.ExtractFloatingIPs(page) - if err != nil { - return err - } - if len(fips) != 1 { - return fmt.Errorf("Unable to retrieve floating IP '%s'", 
floatingIP) - } - - updateOpts := floatingips.UpdateOpts{ - PortID: &portID, - } - if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { - return err - } - - return nil -} - -func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := vips.Get(networkingClient, vipId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) - if p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId) - - p, err := vips.Get(networkingClient, vipId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) - err = vips.Delete(networkingClient, vipId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId) - return p, "ACTIVE", nil - } - -} diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go deleted file mode 100644 index 8fda99c83..000000000 --- a/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" -) - -func TestAccLBV1VIP_basic(t *testing.T) { - var vip vips.VirtualIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1VIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1VIP_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1VIPExists("openstack_lb_vip_v1.vip_1", &vip), - ), - }, - resource.TestStep{ - Config: testAccLBV1VIP_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_lb_vip_v1.vip_1", "name", "vip_1_updated"), - ), - }, - }, - }) -} - -func TestAccLBV1VIP_timeout(t *testing.T) { - var vip vips.VirtualIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLBV1VIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccLBV1VIP_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckLBV1VIPExists("openstack_lb_vip_v1.vip_1", &vip), - ), - }, - }, - }) -} - -func testAccCheckLBV1VIPDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_lb_vip_v1" { - continue - } - - _, err := vips.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("LB VIP still exists") - } - } - - return nil -} - -func testAccCheckLBV1VIPExists(n string, vip *vips.VirtualIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := 
testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := vips.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("VIP not found") - } - - *vip = *found - - return nil - } -} - -const testAccLBV1VIP_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_vip_v1" "vip_1" { - name = "vip_1" - protocol = "HTTP" - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - persistence { - type = "SOURCE_IP" - } -} -` - -const testAccLBV1VIP_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_vip_v1" "vip_1" { - name = "vip_1_updated" - protocol = "HTTP" - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - persistence { - type = "SOURCE_IP" - } -} -` - -const testAccLBV1VIP_timeout = ` 
-resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_lb_pool_v1" "pool_1" { - name = "pool_1" - protocol = "HTTP" - lb_method = "ROUND_ROBIN" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_lb_vip_v1" "vip_1" { - name = "vip_1" - protocol = "HTTP" - port = 80 - admin_state_up = true - pool_id = "${openstack_lb_pool_v1.pool_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - - persistence { - type = "SOURCE_IP" - } - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go deleted file mode 100644 index 9712dd156..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go +++ /dev/null @@ -1,298 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/pagination" -) - -func resourceNetworkingFloatingIPV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkFloatingIPV2Create, - Read: resourceNetworkFloatingIPV2Read, - Update: resourceNetworkFloatingIPV2Update, - Delete: resourceNetworkFloatingIPV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: 
schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "pool": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - poolID, err := getNetworkID(d, meta, d.Get("pool").(string)) - if err != nil { - return fmt.Errorf("Error retrieving floating IP pool name: %s", err) - } - if len(poolID) == 0 { - return fmt.Errorf("No network found with name: %s", d.Get("pool").(string)) - } - createOpts := FloatingIPCreateOpts{ - floatingips.CreateOpts{ - FloatingNetworkID: poolID, - PortID: d.Get("port_id").(string), - TenantID: d.Get("tenant_id").(string), - FixedIP: d.Get("fixed_ip").(string), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - floatingIP, err := floatingips.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error allocating floating IP: %s", err) - } - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Floating IP (%s) to become available.", 
floatingIP.ID) - - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForFloatingIPActive(networkingClient, floatingIP.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(floatingIP.ID) - - return resourceNetworkFloatingIPV2Read(d, meta) -} - -func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - floatingIP, err := floatingips.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "floating IP") - } - - d.Set("address", floatingIP.FloatingIP) - d.Set("port_id", floatingIP.PortID) - d.Set("fixed_ip", floatingIP.FixedIP) - poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) - if err != nil { - return fmt.Errorf("Error retrieving floating IP pool name: %s", err) - } - d.Set("pool", poolName) - d.Set("tenant_id", floatingIP.TenantID) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - var updateOpts floatingips.UpdateOpts - - if d.HasChange("port_id") { - portID := d.Get("port_id").(string) - updateOpts.PortID = &portID - } - - log.Printf("[DEBUG] Update Options: %#v", updateOpts) - - _, err = floatingips.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating floating IP: %s", err) - } - - return resourceNetworkFloatingIPV2Read(d, meta) -} - -func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForFloatingIPDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Floating IP: %s", err) - } - - d.SetId("") - return nil -} - -func getNetworkID(d *schema.ResourceData, meta interface{}, networkName string) (string, error) { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return "", fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - opts := networks.ListOpts{Name: networkName} - pager := networks.List(networkingClient, opts) - networkID := "" - - err = pager.EachPage(func(page pagination.Page) (bool, error) { - networkList, err := networks.ExtractNetworks(page) - if err != nil { - return false, err - } - - for _, n := range networkList { - if n.Name == networkName { - networkID = n.ID - return false, nil - } - } - - return true, nil - }) - - return networkID, err -} - -func getNetworkName(d *schema.ResourceData, meta interface{}, networkID string) (string, error) { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return "", fmt.Errorf("Error creating OpenStack network client: %s", err) - } - - opts := networks.ListOpts{ID: networkID} - pager := networks.List(networkingClient, opts) - networkName := "" - - err = pager.EachPage(func(page pagination.Page) (bool, error) { - networkList, err := networks.ExtractNetworks(page) - if err != nil { - return false, err - } - - for _, n := range networkList { - if n.ID == 
networkID { - networkName = n.Name - return false, nil - } - } - - return true, nil - }) - - return networkName, err -} - -func waitForFloatingIPActive(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - f, err := floatingips.Get(networkingClient, fId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Floating IP: %+v", f) - if f.Status == "DOWN" || f.Status == "ACTIVE" { - return f, "ACTIVE", nil - } - - return f, "", nil - } -} - -func waitForFloatingIPDelete(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Floating IP %s.\n", fId) - - f, err := floatingips.Get(networkingClient, fId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) - return f, "DELETED", nil - } - return f, "ACTIVE", err - } - - err = floatingips.Delete(networkingClient, fId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) - return f, "DELETED", nil - } - return f, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Floating IP %s still active.\n", fId) - return f, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go deleted file mode 100644 index 1eefea90b..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - 
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" -) - -func TestAccNetworkingV2FloatingIP_basic(t *testing.T) { - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2FloatingIP_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - ), - }, - }, - }) -} - -func TestAccNetworkingV2FloatingIP_attach(t *testing.T) { - var instance servers.Server - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkV2FloatingIP_attach, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - testAccCheckNetworkingV2InstanceFloatingIPAttach(&instance, &fip), - ), - }, - }, - }) -} - -func TestAccNetworkingV2FloatingIP_fixedip_bind(t *testing.T) { - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2FloatingIP_fixedip_bind, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - testAccCheckNetworkingV2FloatingIPBoundToCorrectIP(&fip, "192.168.199.20"), - ), - }, - 
}, - }) -} - -func TestAccNetworkingV2FloatingIP_timeout(t *testing.T) { - var fip floatingips.FloatingIP - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2FloatingIP_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2FloatingIPExists("openstack_networking_floatingip_v2.fip_1", &fip), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2FloatingIPDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack floating IP: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_floatingip_v2" { - continue - } - - _, err := floatingips.Get(networkClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("FloatingIP still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2FloatingIPExists(n string, kp *floatingips.FloatingIP) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := floatingips.Get(networkClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("FloatingIP not found") - } - - *kp = *found - - return nil - } -} - -func testAccCheckNetworkingV2FloatingIPBoundToCorrectIP(fip *floatingips.FloatingIP, fixed_ip string) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - if fip.FixedIP != fixed_ip { - return fmt.Errorf("Floating ip associated with wrong fixed ip") - } - - return nil - } -} - -func testAccCheckNetworkingV2InstanceFloatingIPAttach( - instance *servers.Server, fip *floatingips.FloatingIP) resource.TestCheckFunc { - - // When Neutron is used, the Instance sometimes does not know its floating IP until some time - // after the attachment happened. This can be anywhere from 2-20 seconds. Because of that delay, - // the test usually completes with failure. - // However, the Fixed IP is known on both sides immediately, so that can be used as a bridge - // to ensure the two are now related. - // I think a better option is to introduce some state changing config in the actual resource. - return func(s *terraform.State) error { - for _, networkAddresses := range instance.Addresses { - for _, element := range networkAddresses.([]interface{}) { - address := element.(map[string]interface{}) - if address["OS-EXT-IPS:type"] == "fixed" && address["addr"] == fip.FixedIP { - return nil - } - } - } - return fmt.Errorf("Floating IP %+v was not attached to instance %+v", fip, instance) - } -} - -const testAccNetworkingV2FloatingIP_basic = ` -resource "openstack_networking_floatingip_v2" "fip_1" { -} -` - -var testAccNetworkV2FloatingIP_attach = fmt.Sprintf(` -resource "openstack_networking_floatingip_v2" "fip_1" { -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["default"] - floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}" - - network { - uuid = "%s" - } -} -`, OS_NETWORK_ID) - -var testAccNetworkingV2FloatingIP_fixedip_bind = fmt.Sprintf(` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = 
"${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_router_interface_v2" "router_interface_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} - -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - external_gateway = "%s" -} - -resource "openstack_networking_port_v2" "port_1" { - admin_state_up = "true" - network_id = "${openstack_networking_subnet_v2.subnet_1.network_id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.10" - } - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.20" - } -} - -resource "openstack_networking_floatingip_v2" "fip_1" { - pool = "%s" - port_id = "${openstack_networking_port_v2.port_1.id}" - fixed_ip = "${openstack_networking_port_v2.port_1.fixed_ip.1.ip_address}" -} -`, OS_EXTGW_ID, OS_POOL_NAME) - -const testAccNetworkingV2FloatingIP_timeout = ` -resource "openstack_networking_floatingip_v2" "fip_1" { - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2.go b/builtin/providers/openstack/resource_openstack_networking_network_v2.go deleted file mode 100644 index 81e8b6379..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_network_v2.go +++ /dev/null @@ -1,326 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" -) - -func resourceNetworkingNetworkV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingNetworkV2Create, - Read: 
resourceNetworkingNetworkV2Read, - Update: resourceNetworkingNetworkV2Update, - Delete: resourceNetworkingNetworkV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "shared": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "segments": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "physical_network": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "network_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "segmentation_id": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := NetworkCreateOpts{ - networks.CreateOpts{ - Name: d.Get("name").(string), - TenantID: 
d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - createOpts.AdminStateUp = &asu - } - - sharedRaw := d.Get("shared").(string) - if sharedRaw != "" { - shared, err := strconv.ParseBool(sharedRaw) - if err != nil { - return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) - } - createOpts.Shared = &shared - } - - segments := resourceNetworkingNetworkV2Segments(d) - - n := &networks.Network{} - if len(segments) > 0 { - providerCreateOpts := provider.CreateOptsExt{ - CreateOptsBuilder: createOpts, - Segments: segments, - } - log.Printf("[DEBUG] Create Options: %#v", providerCreateOpts) - n, err = networks.Create(networkingClient, providerCreateOpts).Extract() - } else { - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err = networks.Create(networkingClient, createOpts).Extract() - } - - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) - } - - log.Printf("[INFO] Network ID: %s", n.ID) - - log.Printf("[DEBUG] Waiting for Network (%s) to become available", n.ID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD"}, - Target: []string{"ACTIVE"}, - Refresh: waitForNetworkActive(networkingClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - return resourceNetworkingNetworkV2Read(d, meta) -} - -func resourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := networks.Get(networkingClient, d.Id()).Extract() - 
if err != nil { - return CheckDeleted(d, err, "network") - } - - log.Printf("[DEBUG] Retrieved Network %s: %+v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp)) - d.Set("shared", strconv.FormatBool(n.Shared)) - d.Set("tenant_id", n.TenantID) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceNetworkingNetworkV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts networks.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("admin_state_up") { - asuRaw := d.Get("admin_state_up").(string) - if asuRaw != "" { - asu, err := strconv.ParseBool(asuRaw) - if err != nil { - return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") - } - updateOpts.AdminStateUp = &asu - } - } - if d.HasChange("shared") { - sharedRaw := d.Get("shared").(string) - if sharedRaw != "" { - shared, err := strconv.ParseBool(sharedRaw) - if err != nil { - return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) - } - updateOpts.Shared = &shared - } - } - - log.Printf("[DEBUG] Updating Network %s with options: %+v", d.Id(), updateOpts) - - _, err = networks.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) - } - - return resourceNetworkingNetworkV2Read(d, meta) -} - -func resourceNetworkingNetworkV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: 
[]string{"DELETED"}, - Refresh: waitForNetworkDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) - } - - d.SetId("") - return nil -} - -func resourceNetworkingNetworkV2Segments(d *schema.ResourceData) (providerSegments []provider.Segment) { - segments := d.Get("segments").([]interface{}) - for _, v := range segments { - var segment provider.Segment - segmentMap := v.(map[string]interface{}) - - if v, ok := segmentMap["physical_network"].(string); ok { - segment.PhysicalNetwork = v - } - - if v, ok := segmentMap["network_type"].(string); ok { - segment.NetworkType = v - } - - if v, ok := segmentMap["segmentation_id"].(int); ok { - segment.SegmentationID = v - } - - providerSegments = append(providerSegments, segment) - } - return -} - -func waitForNetworkActive(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - n, err := networks.Get(networkingClient, networkId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Network: %+v", n) - if n.Status == "DOWN" || n.Status == "ACTIVE" { - return n, "ACTIVE", nil - } - - return n, n.Status, nil - } -} - -func waitForNetworkDelete(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Network %s.\n", networkId) - - n, err := networks.Get(networkingClient, networkId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) - return n, "DELETED", nil - } - return n, "ACTIVE", err - } - - err = networks.Delete(networkingClient, networkId).ExtractErr() - 
if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) - return n, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return n, "ACTIVE", nil - } - } - return n, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Network %s still active.\n", networkId) - return n, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go deleted file mode 100644 index b19048cfc..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccNetworkingV2Network_basic(t *testing.T) { - var network networks.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Network_update, - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr( - "openstack_networking_network_v2.network_1", "name", "network_2"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Network_netstack(t *testing.T) { - var network networks.Network - var subnet subnets.Subnet - var router routers.Router - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_netstack, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - testAccCheckNetworkingV2RouterInterfaceExists( - "openstack_networking_router_interface_v2.ri_1"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Network_fullstack(t *testing.T) { - var instance servers.Server - var network networks.Network - var port ports.Port - var secgroup secgroups.SecurityGroup - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_fullstack, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckComputeV2SecGroupExists("openstack_compute_secgroup_v2.secgroup_1", &secgroup), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckComputeV2InstanceExists("openstack_compute_instance_v2.instance_1", &instance), - ), - }, - }, - }) -} - -func 
TestAccNetworkingV2Network_timeout(t *testing.T) { - var network networks.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Network_with_multiple_segment_mappings(t *testing.T) { - var network networks.Network - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Network_with_multiple_segment_mappings, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2NetworkDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_network_v2" { - continue - } - - _, err := networks.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Network still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2NetworkExists(n string, network *networks.Network) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - 
networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := networks.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Network not found") - } - - *network = *found - - return nil - } -} - -const testAccNetworkingV2Network_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} -` - -const testAccNetworkingV2Network_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_2" - admin_state_up = "true" -} -` - -const testAccNetworkingV2Network_netstack = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.10.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" -} - -resource "openstack_networking_router_interface_v2" "ri_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" -} -` - -const testAccNetworkingV2Network_fullstack = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_compute_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "a security group" - rule { - from_port = 22 - to_port = 22 - ip_protocol = "tcp" - cidr = "0.0.0.0/0" - } -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - 
security_group_ids = ["${openstack_compute_secgroup_v2.secgroup_1.id}"] - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - "subnet_id" = "${openstack_networking_subnet_v2.subnet_1.id}" - "ip_address" = "192.168.199.23" - } -} - -resource "openstack_compute_instance_v2" "instance_1" { - name = "instance_1" - security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.name}"] - - network { - port = "${openstack_networking_port_v2.port_1.id}" - } -} -` - -const testAccNetworkingV2Network_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" - - timeouts { - create = "5m" - delete = "5m" - } -} -` - -const testAccNetworkingV2Network_with_multiple_segment_mappings = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - segments =[ - { - segmentation_id = 2, - network_type = "vxlan" - } - ], - admin_state_up = "true" -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2.go b/builtin/providers/openstack/resource_openstack_networking_port_v2.go deleted file mode 100644 index 4be432935..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_port_v2.go +++ /dev/null @@ -1,404 +0,0 @@ -package openstack - -import ( - "bytes" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func resourceNetworkingPortV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingPortV2Create, - Read: resourceNetworkingPortV2Read, - Update: resourceNetworkingPortV2Update, - Delete: resourceNetworkingPortV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: 
schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - "mac_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "device_owner": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "device_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "fixed_ip": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "allowed_address_pairs": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Computed: true, - Set: allowedAddressPairsHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "mac_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - 
Computed: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - "all_fixed_ips": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func resourceNetworkingPortV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := PortCreateOpts{ - ports.CreateOpts{ - Name: d.Get("name").(string), - AdminStateUp: resourcePortAdminStateUpV2(d), - NetworkID: d.Get("network_id").(string), - MACAddress: d.Get("mac_address").(string), - TenantID: d.Get("tenant_id").(string), - DeviceOwner: d.Get("device_owner").(string), - SecurityGroups: resourcePortSecurityGroupsV2(d), - DeviceID: d.Get("device_id").(string), - FixedIPs: resourcePortFixedIpsV2(d), - AllowedAddressPairs: resourceAllowedAddressPairsV2(d), - }, - MapValueSpecs(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - p, err := ports.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) - } - log.Printf("[INFO] Network ID: %s", p.ID) - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Port (%s) to become available.", p.ID) - - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForNetworkPortActive(networkingClient, p.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(p.ID) - - return resourceNetworkingPortV2Read(d, meta) -} - -func resourceNetworkingPortV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return 
fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - p, err := ports.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "port") - } - - log.Printf("[DEBUG] Retrieved Port %s: %+v", d.Id(), p) - - d.Set("name", p.Name) - d.Set("admin_state_up", p.AdminStateUp) - d.Set("network_id", p.NetworkID) - d.Set("mac_address", p.MACAddress) - d.Set("tenant_id", p.TenantID) - d.Set("device_owner", p.DeviceOwner) - d.Set("security_group_ids", p.SecurityGroups) - d.Set("device_id", p.DeviceID) - - // Create a slice of all returned Fixed IPs. - // This will be in the order returned by the API, - // which is usually alpha-numeric. - var ips []string - for _, ipObject := range p.FixedIPs { - ips = append(ips, ipObject.IPAddress) - } - d.Set("all_fixed_ips", ips) - - // Convert AllowedAddressPairs to list of map - var pairs []map[string]interface{} - for _, pairObject := range p.AllowedAddressPairs { - pair := make(map[string]interface{}) - pair["ip_address"] = pairObject.IPAddress - pair["mac_address"] = pairObject.MACAddress - pairs = append(pairs, pair) - } - d.Set("allowed_address_pairs", pairs) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // security_group_ids and allowed_address_pairs are able to send empty arrays - // to denote the removal of each. But their default zero-value is translated - // to "null", which has been reported to cause problems in vendor-modified - // OpenStack clouds. Therefore, we must set them in each request update. 
- updateOpts := ports.UpdateOpts{ - AllowedAddressPairs: resourceAllowedAddressPairsV2(d), - SecurityGroups: resourcePortSecurityGroupsV2(d), - } - - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("admin_state_up") { - updateOpts.AdminStateUp = resourcePortAdminStateUpV2(d) - } - - if d.HasChange("device_owner") { - updateOpts.DeviceOwner = d.Get("device_owner").(string) - } - - if d.HasChange("device_id") { - updateOpts.DeviceID = d.Get("device_id").(string) - } - - if d.HasChange("fixed_ip") { - updateOpts.FixedIPs = resourcePortFixedIpsV2(d) - } - - log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts) - - _, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) - } - - return resourceNetworkingPortV2Read(d, meta) -} - -func resourceNetworkingPortV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForNetworkPortDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) - } - - d.SetId("") - return nil -} - -func resourcePortSecurityGroupsV2(d *schema.ResourceData) []string { - rawSecurityGroups := d.Get("security_group_ids").(*schema.Set) - groups := make([]string, rawSecurityGroups.Len()) - for i, raw := range rawSecurityGroups.List() { - groups[i] = raw.(string) - } - return groups -} - -func resourcePortFixedIpsV2(d *schema.ResourceData) interface{} { - rawIP := 
d.Get("fixed_ip").([]interface{}) - - if len(rawIP) == 0 { - return nil - } - - ip := make([]ports.IP, len(rawIP)) - for i, raw := range rawIP { - rawMap := raw.(map[string]interface{}) - ip[i] = ports.IP{ - SubnetID: rawMap["subnet_id"].(string), - IPAddress: rawMap["ip_address"].(string), - } - } - return ip -} - -func resourceAllowedAddressPairsV2(d *schema.ResourceData) []ports.AddressPair { - // ports.AddressPair - rawPairs := d.Get("allowed_address_pairs").(*schema.Set).List() - - pairs := make([]ports.AddressPair, len(rawPairs)) - for i, raw := range rawPairs { - rawMap := raw.(map[string]interface{}) - pairs[i] = ports.AddressPair{ - IPAddress: rawMap["ip_address"].(string), - MACAddress: rawMap["mac_address"].(string), - } - } - return pairs -} - -func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool { - value := false - - if raw, ok := d.GetOk("admin_state_up"); ok && raw == true { - value = true - } - - return &value -} - -func allowedAddressPairsHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s", m["ip_address"].(string))) - - return hashcode.String(buf.String()) -} - -func waitForNetworkPortActive(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - p, err := ports.Get(networkingClient, portId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Port: %+v", p) - if p.Status == "DOWN" || p.Status == "ACTIVE" { - return p, "ACTIVE", nil - } - - return p, p.Status, nil - } -} - -func waitForNetworkPortDelete(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Neutron Port %s", portId) - - p, err := ports.Get(networkingClient, portId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - 
log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - err = ports.Delete(networkingClient, portId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) - return p, "DELETED", nil - } - return p, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Port %s still active.\n", portId) - return p, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go deleted file mode 100644 index 4a47cd0fd..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go +++ /dev/null @@ -1,667 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccNetworkingV2Port_basic(t *testing.T) { - var network networks.Network - var port ports.Port - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - ), - }, - }, 
- }) -} - -func TestAccNetworkingV2Port_noip(t *testing.T) { - var network networks.Network - var port ports.Port - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_noip, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2PortCountFixedIPs(&port, 1), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_multipleNoIP(t *testing.T) { - var network networks.Network - var port ports.Port - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_multipleNoIP, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2PortCountFixedIPs(&port, 3), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_allowedAddressPairs(t *testing.T) { - var network networks.Network - var subnet subnets.Subnet - var vrrp_port_1, vrrp_port_2, instance_port ports.Port - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccNetworkingV2Port_allowedAddressPairs, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.vrrp_subnet", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.vrrp_network", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.vrrp_port_1", &vrrp_port_1), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.vrrp_port_2", &vrrp_port_2), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.instance_port", &instance_port), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_multipleFixedIPs(t *testing.T) { - var network networks.Network - var port ports.Port - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_multipleFixedIPs, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2PortCountFixedIPs(&port, 3), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_timeout(t *testing.T) { - var network networks.Network - var port ports.Port - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - 
testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_fixedIPs(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_fixedIPs, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_networking_port_v2.port_1", "all_fixed_ips.0", "192.168.199.23"), - resource.TestCheckResourceAttr( - "openstack_networking_port_v2.port_1", "all_fixed_ips.1", "192.168.199.24"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Port_updateSecurityGroups(t *testing.T) { - var network networks.Network - var port ports.Port - var security_group groups.SecGroup - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2PortDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Port_updateSecurityGroups_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2PortCountSecurityGroups(&port, 1), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Port_updateSecurityGroups_2, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - 
testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2PortCountSecurityGroups(&port, 1), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Port_updateSecurityGroups_3, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2PortCountSecurityGroups(&port, 1), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Port_updateSecurityGroups_4, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2PortCountSecurityGroups(&port, 0), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2PortDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_port_v2" { - continue - } - - _, err := ports.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - 
return fmt.Errorf("Port still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2PortExists(n string, port *ports.Port) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := ports.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Port not found") - } - - *port = *found - - return nil - } -} - -func testAccCheckNetworkingV2PortCountFixedIPs(port *ports.Port, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(port.FixedIPs) != expected { - return fmt.Errorf("Expected %d Fixed IPs, got %d", expected, len(port.FixedIPs)) - } - - return nil - } -} - -func testAccCheckNetworkingV2PortCountSecurityGroups(port *ports.Port, expected int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(port.SecurityGroups) != expected { - return fmt.Errorf("Expected %d Security Groups, got %d", expected, len(port.SecurityGroups)) - } - - return nil - } -} - -const testAccNetworkingV2Port_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = 
"${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` - -const testAccNetworkingV2Port_noip = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - } -} -` - -const testAccNetworkingV2Port_multipleNoIP = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - } - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - } - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - } -} -` - -const testAccNetworkingV2Port_allowedAddressPairs = ` -resource "openstack_networking_network_v2" "vrrp_network" { - name = "vrrp_network" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "vrrp_subnet" { - name = "vrrp_subnet" - cidr = "10.0.0.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.vrrp_network.id}" - - allocation_pools { - start = "10.0.0.2" - end = "10.0.0.200" - } -} - -resource "openstack_networking_router_v2" "vrrp_router" { - name = "vrrp_router" -} - -resource 
"openstack_networking_router_interface_v2" "vrrp_interface" { - router_id = "${openstack_networking_router_v2.vrrp_router.id}" - subnet_id = "${openstack_networking_subnet_v2.vrrp_subnet.id}" -} - -resource "openstack_networking_port_v2" "vrrp_port_1" { - name = "vrrp_port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.vrrp_network.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.vrrp_subnet.id}" - ip_address = "10.0.0.202" - } -} - -resource "openstack_networking_port_v2" "vrrp_port_2" { - name = "vrrp_port_2" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.vrrp_network.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.vrrp_subnet.id}" - ip_address = "10.0.0.201" - } -} - -resource "openstack_networking_port_v2" "instance_port" { - name = "instance_port" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.vrrp_network.id}" - - allowed_address_pairs { - ip_address = "${openstack_networking_port_v2.vrrp_port_1.fixed_ip.0.ip_address}" - mac_address = "${openstack_networking_port_v2.vrrp_port_1.mac_address}" - } - - allowed_address_pairs { - ip_address = "${openstack_networking_port_v2.vrrp_port_2.fixed_ip.0.ip_address}" - mac_address = "${openstack_networking_port_v2.vrrp_port_2.mac_address}" - } -} -` - -const testAccNetworkingV2Port_multipleFixedIPs = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } - - fixed_ip { - subnet_id = 
"${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.20" - } - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.40" - } -} -` - -const testAccNetworkingV2Port_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } - - timeouts { - create = "5m" - delete = "5m" - } -} -` - -const testAccNetworkingV2Port_fixedIPs = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.24" - } - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` - -const testAccNetworkingV2Port_updateSecurityGroups_1 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource 
"openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group" - description = "terraform security group acceptance test" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` - -const testAccNetworkingV2Port_updateSecurityGroups_2 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group" - description = "terraform security group acceptance test" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - security_group_ids = ["${openstack_networking_secgroup_v2.secgroup_1.id}"] - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` - -const testAccNetworkingV2Port_updateSecurityGroups_3 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group_1" - description = "terraform security group acceptance test" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - security_group_ids = 
["${openstack_networking_secgroup_v2.secgroup_1.id}"] - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` - -const testAccNetworkingV2Port_updateSecurityGroups_4 = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group" - description = "terraform security group acceptance test" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - security_group_ids = [] - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.23" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go deleted file mode 100644 index 4a4ae8685..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" -) - -func resourceNetworkingRouterInterfaceV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterInterfaceV2Create, - Read: resourceNetworkingRouterInterfaceV2Read, - Delete: resourceNetworkingRouterInterfaceV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: 
schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "router_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "port_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := routers.AddInterfaceOpts{ - SubnetID: d.Get("subnet_id").(string), - PortID: d.Get("port_id").(string), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := routers.AddInterface(networkingClient, d.Get("router_id").(string), createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron router interface: %s", err) - } - log.Printf("[INFO] Router interface Port ID: %s", n.PortID) - - log.Printf("[DEBUG] Waiting for Router Interface (%s) to become available", n.PortID) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForRouterInterfaceActive(networkingClient, n.PortID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.PortID) - - return resourceNetworkingRouterInterfaceV2Read(d, meta) -} - -func resourceNetworkingRouterInterfaceV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := ports.Get(networkingClient, d.Id()).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router Interface %s: %+v", d.Id(), n) - - return nil -} - -func resourceNetworkingRouterInterfaceV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForRouterInterfaceDelete(networkingClient, d), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Router Interface: %s", err) - } - - d.SetId("") - return nil -} - -func waitForRouterInterfaceActive(networkingClient *gophercloud.ServiceClient, rId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r, err := ports.Get(networkingClient, rId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Router Interface: %+v", r) - return r, r.Status, nil - } -} - -func waitForRouterInterfaceDelete(networkingClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - routerId := d.Get("router_id").(string) - routerInterfaceId := d.Id() - - log.Printf("[DEBUG] Attempting to delete OpenStack Router Interface %s.", routerInterfaceId) - - removeOpts := 
routers.RemoveInterfaceOpts{ - SubnetID: d.Get("subnet_id").(string), - PortID: d.Get("port_id").(string), - } - - r, err := ports.Get(networkingClient, routerInterfaceId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s", routerInterfaceId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - _, err = routers.RemoveInterface(networkingClient, routerId, removeOpts).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s.", routerInterfaceId) - return r, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - log.Printf("[DEBUG] Router Interface %s is still in use.", routerInterfaceId) - return r, "ACTIVE", nil - } - } - - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Router Interface %s is still active.", routerInterfaceId) - return r, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go deleted file mode 100644 index c6289050c..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccNetworkingV2RouterInterface_basic_subnet(t *testing.T) { - var network networks.Network - var router 
routers.Router - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2RouterInterface_basic_subnet, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - testAccCheckNetworkingV2RouterInterfaceExists("openstack_networking_router_interface_v2.int_1"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2RouterInterface_basic_port(t *testing.T) { - var network networks.Network - var port ports.Port - var router routers.Router - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2RouterInterface_basic_port, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - testAccCheckNetworkingV2PortExists("openstack_networking_port_v2.port_1", &port), - testAccCheckNetworkingV2RouterInterfaceExists("openstack_networking_router_interface_v2.int_1"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2RouterInterface_timeout(t *testing.T) { - var network networks.Network - var router routers.Router - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) 
}, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2RouterInterface_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2NetworkExists("openstack_networking_network_v2.network_1", &network), - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - testAccCheckNetworkingV2RouterInterfaceExists("openstack_networking_router_interface_v2.int_1"), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2RouterInterfaceDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_router_interface_v2" { - continue - } - - _, err := ports.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Router interface still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2RouterInterfaceExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := ports.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Router interface not found") - } - - return nil - } -} - -const 
testAccNetworkingV2RouterInterface_basic_subnet = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_router_interface_v2" "int_1" { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - router_id = "${openstack_networking_router_v2.router_1.id}" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} -` - -const testAccNetworkingV2RouterInterface_basic_port = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_router_interface_v2" "int_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_1.id}" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.1" - } -} -` - -const testAccNetworkingV2RouterInterface_timeout = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_router_interface_v2" "int_1" { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - router_id = "${openstack_networking_router_v2.router_1.id}" - - timeouts { - create = "5m" - delete = "5m" - } -} - -resource "openstack_networking_network_v2" 
"network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_router_route_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_route_v2.go deleted file mode 100644 index 332017ac6..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_router_route_v2.go +++ /dev/null @@ -1,202 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" -) - -func resourceNetworkingRouterRouteV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterRouteV2Create, - Read: resourceNetworkingRouterRouteV2Read, - Delete: resourceNetworkingRouterRouteV2Delete, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "router_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "destination_cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "next_hop": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterRouteV2Create(d *schema.ResourceData, meta interface{}) error { - - routerId := d.Get("router_id").(string) - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - var destCidr string = d.Get("destination_cidr").(string) - var nextHop string = d.Get("next_hop").(string) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error 
creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - var updateOpts routers.UpdateOpts - var routeExists bool = false - - var rts []routers.Route = n.Routes - for _, r := range rts { - - if r.DestinationCIDR == destCidr && r.NextHop == nextHop { - routeExists = true - break - } - } - - if !routeExists { - - if destCidr != "" && nextHop != "" { - r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} - log.Printf( - "[INFO] Adding route %s", r) - rts = append(rts, r) - } - - updateOpts.Routes = rts - - log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) - - _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - d.SetId(fmt.Sprintf("%s-route-%s-%s", routerId, destCidr, nextHop)) - - } else { - log.Printf("[DEBUG] Router %s has route already", routerId) - } - - return resourceNetworkingRouterRouteV2Read(d, meta) -} - -func resourceNetworkingRouterRouteV2Read(d *schema.ResourceData, meta interface{}) error { - - routerId := d.Get("router_id").(string) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router %s: %+v", routerId, n) - - var destCidr string = d.Get("destination_cidr").(string) - var nextHop string = d.Get("next_hop").(string) - - d.Set("next_hop", 
"") - d.Set("destination_cidr", "") - - for _, r := range n.Routes { - - if r.DestinationCIDR == destCidr && r.NextHop == nextHop { - d.Set("destination_cidr", destCidr) - d.Set("next_hop", nextHop) - break - } - } - - return nil -} - -func resourceNetworkingRouterRouteV2Delete(d *schema.ResourceData, meta interface{}) error { - - routerId := d.Get("router_id").(string) - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - config := meta.(*Config) - - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - return nil - } - - return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - var updateOpts routers.UpdateOpts - - var destCidr string = d.Get("destination_cidr").(string) - var nextHop string = d.Get("next_hop").(string) - - var oldRts []routers.Route = n.Routes - var newRts []routers.Route - - for _, r := range oldRts { - - if r.DestinationCIDR != destCidr || r.NextHop != nextHop { - newRts = append(newRts, r) - } - } - - if len(oldRts) != len(newRts) { - r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} - log.Printf( - "[INFO] Deleting route %s", r) - updateOpts.Routes = newRts - - log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) - - _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - } else { - return fmt.Errorf("Route did not exist already") - } - - return nil -} diff --git a/builtin/providers/openstack/resource_openstack_networking_router_route_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_route_v2_test.go deleted file mode 100644 index e5965c20e..000000000 --- 
a/builtin/providers/openstack/resource_openstack_networking_router_route_v2_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccNetworkingV2RouterRoute_basic(t *testing.T) { - var router routers.Router - var network [2]networks.Network - var subnet [2]subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2RouterRoute_create, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - testAccCheckNetworkingV2NetworkExists( - "openstack_networking_network_v2.network_1", &network[0]), - testAccCheckNetworkingV2SubnetExists( - "openstack_networking_subnet_v2.subnet_1", &subnet[0]), - testAccCheckNetworkingV2NetworkExists( - "openstack_networking_network_v2.network_1", &network[1]), - testAccCheckNetworkingV2SubnetExists( - "openstack_networking_subnet_v2.subnet_1", &subnet[1]), - testAccCheckNetworkingV2RouterInterfaceExists( - "openstack_networking_router_interface_v2.int_1"), - testAccCheckNetworkingV2RouterInterfaceExists( - "openstack_networking_router_interface_v2.int_2"), - testAccCheckNetworkingV2RouterRouteExists( - "openstack_networking_router_route_v2.router_route_1"), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2RouterRoute_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2RouterRouteExists( - "openstack_networking_router_route_v2.router_route_1"), - testAccCheckNetworkingV2RouterRouteExists( - 
"openstack_networking_router_route_v2.router_route_2"), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2RouterRoute_destroy, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2RouterRouteEmpty("openstack_networking_router_v2.router_1"), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2RouterRouteEmpty(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - router, err := routers.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if router.ID != rs.Primary.ID { - return fmt.Errorf("Router not found") - } - - if len(router.Routes) != 0 { - return fmt.Errorf("Invalid number of route entries: %d", len(router.Routes)) - } - - return nil - } -} - -func testAccCheckNetworkingV2RouterRouteExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - router, err := routers.Get(networkingClient, rs.Primary.Attributes["router_id"]).Extract() - if err != nil { - return err - } - - if router.ID != rs.Primary.Attributes["router_id"] { - return fmt.Errorf("Router for route not found") - } - - var found bool = false - for _, r := range router.Routes { - if r.DestinationCIDR == rs.Primary.Attributes["destination_cidr"] && 
r.NextHop == rs.Primary.Attributes["next_hop"] { - found = true - } - } - if !found { - return fmt.Errorf("Could not find route for destination CIDR: %s, next hop: %s", rs.Primary.Attributes["destination_cidr"], rs.Primary.Attributes["next_hop"]) - } - - return nil - } -} - -const testAccNetworkingV2RouterRoute_create = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_1.id}" -} - -resource "openstack_networking_network_v2" "network_2" { - name = "network_2" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_2" { - cidr = "192.168.200.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_2.id}" -} - -resource "openstack_networking_port_v2" "port_2" { - name = "port_2" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_2.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_2.id}" - ip_address = "192.168.200.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_2" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_2.id}" -} - -resource "openstack_networking_router_route_v2" "router_route_1" { - 
destination_cidr = "10.0.1.0/24" - next_hop = "192.168.199.254" - - depends_on = ["openstack_networking_router_interface_v2.int_1"] - router_id = "${openstack_networking_router_v2.router_1.id}" -} -` - -const testAccNetworkingV2RouterRoute_update = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_1.id}" -} - -resource "openstack_networking_network_v2" "network_2" { - name = "network_2" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_2" { - cidr = "192.168.200.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_2.id}" -} - -resource "openstack_networking_port_v2" "port_2" { - name = "port_2" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_2.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_2.id}" - ip_address = "192.168.200.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_2" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_2.id}" -} - -resource "openstack_networking_router_route_v2" "router_route_1" { - destination_cidr = "10.0.1.0/24" - next_hop = "192.168.199.254" - - depends_on = 
["openstack_networking_router_interface_v2.int_1"] - router_id = "${openstack_networking_router_v2.router_1.id}" -} - -resource "openstack_networking_router_route_v2" "router_route_2" { - destination_cidr = "10.0.2.0/24" - next_hop = "192.168.200.254" - - depends_on = ["openstack_networking_router_interface_v2.int_2"] - router_id = "${openstack_networking_router_v2.router_1.id}" -} -` - -const testAccNetworkingV2RouterRoute_destroy = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" -} - -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - ip_version = 4 - network_id = "${openstack_networking_network_v2.network_1.id}" -} - -resource "openstack_networking_port_v2" "port_1" { - name = "port_1" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_1.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" - ip_address = "192.168.199.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_1" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = "${openstack_networking_port_v2.port_1.id}" -} - -resource "openstack_networking_network_v2" "network_2" { - name = "network_2" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_2" { - ip_version = 4 - cidr = "192.168.200.0/24" - network_id = "${openstack_networking_network_v2.network_2.id}" -} - -resource "openstack_networking_port_v2" "port_2" { - name = "port_2" - admin_state_up = "true" - network_id = "${openstack_networking_network_v2.network_2.id}" - - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.subnet_2.id}" - ip_address = "192.168.200.1" - } -} - -resource "openstack_networking_router_interface_v2" "int_2" { - router_id = "${openstack_networking_router_v2.router_1.id}" - port_id = 
"${openstack_networking_port_v2.port_2.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_v2.go deleted file mode 100644 index d979a53e6..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_router_v2.go +++ /dev/null @@ -1,257 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" -) - -func resourceNetworkingRouterV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingRouterV2Create, - Read: resourceNetworkingRouterV2Read, - Update: resourceNetworkingRouterV2Update, - Delete: resourceNetworkingRouterV2Delete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "admin_state_up": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Computed: true, - }, - "distributed": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - }, - "external_gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingRouterV2Create(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := RouterCreateOpts{ - routers.CreateOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - }, - MapValueSpecs(d), - } - - if asuRaw, ok := d.GetOk("admin_state_up"); ok { - asu := asuRaw.(bool) - createOpts.AdminStateUp = &asu - } - - if dRaw, ok := d.GetOk("distributed"); ok { - d := dRaw.(bool) - createOpts.Distributed = &d - } - - externalGateway := d.Get("external_gateway").(string) - if externalGateway != "" { - gatewayInfo := routers.GatewayInfo{ - NetworkID: externalGateway, - } - createOpts.GatewayInfo = &gatewayInfo - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - n, err := routers.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron router: %s", err) - } - log.Printf("[INFO] Router ID: %s", n.ID) - - log.Printf("[DEBUG] Waiting for OpenStack Neutron Router (%s) to become available", n.ID) - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"}, - Target: []string{"ACTIVE"}, - Refresh: waitForRouterActive(networkingClient, n.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(n.ID) - - return resourceNetworkingRouterV2Read(d, meta) -} - -func resourceNetworkingRouterV2Read(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - n, err := routers.Get(networkingClient, d.Id()).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - 
- return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) - } - - log.Printf("[DEBUG] Retrieved Router %s: %+v", d.Id(), n) - - d.Set("name", n.Name) - d.Set("admin_state_up", n.AdminStateUp) - d.Set("distributed", n.Distributed) - d.Set("tenant_id", n.TenantID) - d.Set("external_gateway", n.GatewayInfo.NetworkID) - - return nil -} - -func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) error { - routerId := d.Id() - osMutexKV.Lock(routerId) - defer osMutexKV.Unlock(routerId) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var updateOpts routers.UpdateOpts - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - if d.HasChange("admin_state_up") { - asu := d.Get("admin_state_up").(bool) - updateOpts.AdminStateUp = &asu - } - if d.HasChange("external_gateway") { - externalGateway := d.Get("external_gateway").(string) - if externalGateway != "" { - gatewayInfo := routers.GatewayInfo{ - NetworkID: externalGateway, - } - updateOpts.GatewayInfo = &gatewayInfo - } - } - - log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts) - - _, err = routers.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) - } - - return resourceNetworkingRouterV2Read(d, meta) -} - -func resourceNetworkingRouterV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForRouterDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * 
time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Router: %s", err) - } - - d.SetId("") - return nil -} - -func waitForRouterActive(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - return nil, r.Status, err - } - - log.Printf("[DEBUG] OpenStack Neutron Router: %+v", r) - return r, r.Status, nil - } -} - -func waitForRouterDelete(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Router %s.\n", routerId) - - r, err := routers.Get(networkingClient, routerId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = routers.Delete(networkingClient, routerId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Router %s still active.\n", routerId) - return r, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go deleted file mode 100644 index 2c08f9b92..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" -) - -func TestAccNetworkingV2Router_basic(t *testing.T) { - var router routers.Router - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Router_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Router_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_networking_router_v2.router_1", "name", "router_2"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Router_update_external_gw(t *testing.T) { - var router routers.Router - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Router_update_external_gw_1, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Router_update_external_gw_2, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_networking_router_v2.router_1", "external_gateway", OS_EXTGW_ID), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Router_timeout(t *testing.T) { - var router routers.Router - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2RouterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Router_timeout, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckNetworkingV2RouterExists("openstack_networking_router_v2.router_1", &router), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_router_v2" { - continue - } - - _, err := routers.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Router still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2RouterExists(n string, router *routers.Router) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := routers.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Router not found") - } - - *router = *found - - return nil - } -} - -const testAccNetworkingV2Router_basic = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" - distributed = "false" -} -` - -const testAccNetworkingV2Router_update = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_2" - admin_state_up = "true" - distributed = "false" -} -` - -const testAccNetworkingV2Router_update_external_gw_1 = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router" - admin_state_up = "true" - distributed = "false" -} -` - -var 
testAccNetworkingV2Router_update_external_gw_2 = fmt.Sprintf(` -resource "openstack_networking_router_v2" "router_1" { - name = "router" - admin_state_up = "true" - distributed = "false" - external_gateway = "%s" -} -`, OS_EXTGW_ID) - -const testAccNetworkingV2Router_timeout = ` -resource "openstack_networking_router_v2" "router_1" { - name = "router_1" - admin_state_up = "true" - distributed = "false" - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go deleted file mode 100644 index 6f5464fed..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go +++ /dev/null @@ -1,316 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" -) - -func resourceNetworkingSecGroupRuleV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSecGroupRuleV2Create, - Read: resourceNetworkingSecGroupRuleV2Read, - Delete: resourceNetworkingSecGroupRuleV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "direction": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "ethertype": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port_range_min": &schema.Schema{ - Type: schema.TypeInt, - Optional: 
true, - ForceNew: true, - Computed: true, - }, - "port_range_max": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - "protocol": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "remote_group_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "remote_ip_prefix": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - StateFunc: func(v interface{}) string { - return strings.ToLower(v.(string)) - }, - }, - "security_group_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - }, - } -} - -func resourceNetworkingSecGroupRuleV2Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - portRangeMin := d.Get("port_range_min").(int) - portRangeMax := d.Get("port_range_max").(int) - protocol := d.Get("protocol").(string) - - if protocol == "" { - if portRangeMin != 0 || portRangeMax != 0 { - return fmt.Errorf("A protocol must be specified when using port_range_min and port_range_max") - } - } - - opts := rules.CreateOpts{ - SecGroupID: d.Get("security_group_id").(string), - PortRangeMin: d.Get("port_range_min").(int), - PortRangeMax: d.Get("port_range_max").(int), - RemoteGroupID: d.Get("remote_group_id").(string), - RemoteIPPrefix: d.Get("remote_ip_prefix").(string), - TenantID: d.Get("tenant_id").(string), - } - - if v, ok := d.GetOk("direction"); ok { - direction := resourceNetworkingSecGroupRuleV2DetermineDirection(v.(string)) - opts.Direction = direction - } - - if v, ok := d.GetOk("ethertype"); ok { - ethertype := 
resourceNetworkingSecGroupRuleV2DetermineEtherType(v.(string)) - opts.EtherType = ethertype - } - - if v, ok := d.GetOk("protocol"); ok { - protocol := resourceNetworkingSecGroupRuleV2DetermineProtocol(v.(string)) - opts.Protocol = protocol - } - - log.Printf("[DEBUG] Create OpenStack Neutron security group: %#v", opts) - - security_group_rule, err := rules.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group Rule created: %#v", security_group_rule) - - d.SetId(security_group_rule.ID) - - return resourceNetworkingSecGroupRuleV2Read(d, meta) -} - -func resourceNetworkingSecGroupRuleV2Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about security group rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - security_group_rule, err := rules.Get(networkingClient, d.Id()).Extract() - - if err != nil { - return CheckDeleted(d, err, "OpenStack Security Group Rule") - } - - d.Set("direction", security_group_rule.Direction) - d.Set("ethertype", security_group_rule.EtherType) - d.Set("protocol", security_group_rule.Protocol) - d.Set("port_range_min", security_group_rule.PortRangeMin) - d.Set("port_range_max", security_group_rule.PortRangeMax) - d.Set("remote_group_id", security_group_rule.RemoteGroupID) - d.Set("remote_ip_prefix", security_group_rule.RemoteIPPrefix) - d.Set("security_group_id", security_group_rule.SecGroupID) - d.Set("tenant_id", security_group_rule.TenantID) - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceNetworkingSecGroupRuleV2Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy security group rule: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != 
nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSecGroupRuleDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Security Group Rule: %s", err) - } - - d.SetId("") - return err -} - -func resourceNetworkingSecGroupRuleV2DetermineDirection(v string) rules.RuleDirection { - var direction rules.RuleDirection - switch v { - case "ingress": - direction = rules.DirIngress - case "egress": - direction = rules.DirEgress - } - - return direction -} - -func resourceNetworkingSecGroupRuleV2DetermineEtherType(v string) rules.RuleEtherType { - var etherType rules.RuleEtherType - switch v { - case "IPv4": - etherType = rules.EtherType4 - case "IPv6": - etherType = rules.EtherType6 - } - - return etherType -} - -func resourceNetworkingSecGroupRuleV2DetermineProtocol(v string) rules.RuleProtocol { - var protocol rules.RuleProtocol - - // Check and see if the requested protocol matched a list of known protocol names. 
- switch v { - case "tcp": - protocol = rules.ProtocolTCP - case "udp": - protocol = rules.ProtocolUDP - case "icmp": - protocol = rules.ProtocolICMP - case "ah": - protocol = rules.ProtocolAH - case "dccp": - protocol = rules.ProtocolDCCP - case "egp": - protocol = rules.ProtocolEGP - case "esp": - protocol = rules.ProtocolESP - case "gre": - protocol = rules.ProtocolGRE - case "igmp": - protocol = rules.ProtocolIGMP - case "ipv6-encap": - protocol = rules.ProtocolIPv6Encap - case "ipv6-frag": - protocol = rules.ProtocolIPv6Frag - case "ipv6-icmp": - protocol = rules.ProtocolIPv6ICMP - case "ipv6-nonxt": - protocol = rules.ProtocolIPv6NoNxt - case "ipv6-opts": - protocol = rules.ProtocolIPv6Opts - case "ipv6-route": - protocol = rules.ProtocolIPv6Route - case "ospf": - protocol = rules.ProtocolOSPF - case "pgm": - protocol = rules.ProtocolPGM - case "rsvp": - protocol = rules.ProtocolRSVP - case "sctp": - protocol = rules.ProtocolSCTP - case "udplite": - protocol = rules.ProtocolUDPLite - case "vrrp": - protocol = rules.ProtocolVRRP - } - - // If the protocol wasn't matched above, see if it's an integer. 
- if protocol == "" { - _, err := strconv.Atoi(v) - if err == nil { - protocol = rules.RuleProtocol(v) - } - } - - return protocol -} - -func waitForSecGroupRuleDelete(networkingClient *gophercloud.ServiceClient, secGroupRuleId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Security Group Rule %s.\n", secGroupRuleId) - - r, err := rules.Get(networkingClient, secGroupRuleId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = rules.Delete(networkingClient, secGroupRuleId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group Rule %s still active.\n", secGroupRuleId) - return r, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2_test.go deleted file mode 100644 index 5cbe3b8e2..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2_test.go +++ /dev/null @@ -1,529 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" -) - -func TestAccNetworkingV2SecGroupRule_basic(t *testing.T) { - var secgroup_1 groups.SecGroup - var secgroup_2 groups.SecGroup - var secgroup_rule_1 rules.SecGroupRule 
- var secgroup_rule_2 rules.SecGroupRule - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &secgroup_2), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_1", &secgroup_rule_1), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_2", &secgroup_rule_2), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroupRule_lowerCaseCIDR(t *testing.T) { - var secgroup_1 groups.SecGroup - var secgroup_rule_1 rules.SecGroupRule - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_lowerCaseCIDR, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_1", &secgroup_rule_1), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_1", "remote_ip_prefix", "2001:558:fc00::/39"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroupRule_timeout(t *testing.T) { - var secgroup_1 groups.SecGroup - var secgroup_2 groups.SecGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - 
Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_2", &secgroup_2), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroupRule_protocols(t *testing.T) { - var secgroup_1 groups.SecGroup - var secgroup_rule_ah rules.SecGroupRule - var secgroup_rule_dccp rules.SecGroupRule - var secgroup_rule_egp rules.SecGroupRule - var secgroup_rule_esp rules.SecGroupRule - var secgroup_rule_gre rules.SecGroupRule - var secgroup_rule_igmp rules.SecGroupRule - var secgroup_rule_ipv6_encap rules.SecGroupRule - var secgroup_rule_ipv6_frag rules.SecGroupRule - var secgroup_rule_ipv6_icmp rules.SecGroupRule - var secgroup_rule_ipv6_nonxt rules.SecGroupRule - var secgroup_rule_ipv6_opts rules.SecGroupRule - var secgroup_rule_ipv6_route rules.SecGroupRule - var secgroup_rule_ospf rules.SecGroupRule - var secgroup_rule_pgm rules.SecGroupRule - var secgroup_rule_rsvp rules.SecGroupRule - var secgroup_rule_sctp rules.SecGroupRule - var secgroup_rule_udplite rules.SecGroupRule - var secgroup_rule_vrrp rules.SecGroupRule - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_protocols, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ah", &secgroup_rule_ah), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_dccp", &secgroup_rule_dccp), - 
testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_egp", &secgroup_rule_egp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_esp", &secgroup_rule_esp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_gre", &secgroup_rule_gre), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_igmp", &secgroup_rule_igmp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_encap", &secgroup_rule_ipv6_encap), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_frag", &secgroup_rule_ipv6_frag), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_icmp", &secgroup_rule_ipv6_icmp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_nonxt", &secgroup_rule_ipv6_nonxt), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_opts", &secgroup_rule_ipv6_opts), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_route", &secgroup_rule_ipv6_route), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ospf", &secgroup_rule_ospf), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_pgm", &secgroup_rule_pgm), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_rsvp", &secgroup_rule_rsvp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_sctp", &secgroup_rule_sctp), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_udplite", 
&secgroup_rule_udplite), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_vrrp", &secgroup_rule_vrrp), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ah", "protocol", "ah"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_dccp", "protocol", "dccp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_egp", "protocol", "egp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_esp", "protocol", "esp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_gre", "protocol", "gre"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_igmp", "protocol", "igmp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_encap", "protocol", "ipv6-encap"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_frag", "protocol", "ipv6-frag"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_icmp", "protocol", "ipv6-icmp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_nonxt", "protocol", "ipv6-nonxt"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_opts", "protocol", "ipv6-opts"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ipv6_route", "protocol", "ipv6-route"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_ospf", "protocol", "ospf"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_pgm", "protocol", "pgm"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_rsvp", "protocol", "rsvp"), - resource.TestCheckResourceAttr( - 
"openstack_networking_secgroup_rule_v2.secgroup_rule_sctp", "protocol", "sctp"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_udplite", "protocol", "udplite"), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_vrrp", "protocol", "vrrp"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroupRule_numericProtocol(t *testing.T) { - var secgroup_1 groups.SecGroup - var secgroup_rule_1 rules.SecGroupRule - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroupRule_numericProtocol, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &secgroup_1), - testAccCheckNetworkingV2SecGroupRuleExists( - "openstack_networking_secgroup_rule_v2.secgroup_rule_1", &secgroup_rule_1), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_rule_v2.secgroup_rule_1", "protocol", "115"), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2SecGroupRuleDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_secgroup_rule_v2" { - continue - } - - _, err := rules.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Security group rule still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2SecGroupRuleExists(n string, security_group_rule *rules.SecGroupRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return 
fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := rules.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Security group rule not found") - } - - *security_group_rule = *found - - return nil - } -} - -const testAccNetworkingV2SecGroupRule_basic = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_1" { - direction = "ingress" - ethertype = "IPv4" - port_range_max = 22 - port_range_min = 22 - protocol = "tcp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_2" { - direction = "ingress" - ethertype = "IPv4" - port_range_max = 80 - port_range_min = 80 - protocol = "tcp" - remote_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_2.id}" -} -` - -const testAccNetworkingV2SecGroupRule_lowerCaseCIDR = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_1" { - direction = "ingress" - ethertype = "IPv6" - port_range_max = 22 - port_range_min = 22 - protocol = "tcp" - remote_ip_prefix = "2001:558:FC00::/39" - security_group_id = 
"${openstack_networking_secgroup_v2.secgroup_1.id}" -} -` - -const testAccNetworkingV2SecGroupRule_timeout = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_v2" "secgroup_2" { - name = "secgroup_2" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_1" { - direction = "ingress" - ethertype = "IPv4" - port_range_max = 22 - port_range_min = 22 - protocol = "tcp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" - - timeouts { - delete = "5m" - } -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_2" { - direction = "ingress" - ethertype = "IPv4" - port_range_max = 80 - port_range_min = 80 - protocol = "tcp" - remote_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_2.id}" - - timeouts { - delete = "5m" - } -} -` - -const testAccNetworkingV2SecGroupRule_protocols = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ah" { - direction = "ingress" - ethertype = "IPv4" - protocol = "ah" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_dccp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "dccp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_egp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "egp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = 
"${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_esp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "esp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_gre" { - direction = "ingress" - ethertype = "IPv4" - protocol = "gre" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_igmp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "igmp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_encap" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-encap" - remote_ip_prefix = "::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_frag" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-frag" - remote_ip_prefix = "::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_icmp" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-icmp" - remote_ip_prefix = "::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_nonxt" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-nonxt" - remote_ip_prefix = "::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_opts" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-opts" - remote_ip_prefix = 
"::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ipv6_route" { - direction = "ingress" - ethertype = "IPv6" - protocol = "ipv6-route" - remote_ip_prefix = "::/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ospf" { - direction = "ingress" - ethertype = "IPv4" - protocol = "ospf" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_pgm" { - direction = "ingress" - ethertype = "IPv4" - protocol = "pgm" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_rsvp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "rsvp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_sctp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "sctp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_udplite" { - direction = "ingress" - ethertype = "IPv4" - protocol = "udplite" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_vrrp" { - direction = "ingress" - ethertype = "IPv4" - protocol = "vrrp" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} -` - -const testAccNetworkingV2SecGroupRule_numericProtocol = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "secgroup_1" - 
description = "terraform security group rule acceptance test" -} - -resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_1" { - direction = "ingress" - ethertype = "IPv4" - port_range_max = 22 - port_range_min = 22 - protocol = "115" - remote_ip_prefix = "0.0.0.0/0" - security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}" -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go deleted file mode 100644 index effe0e746..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go +++ /dev/null @@ -1,211 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" -) - -func resourceNetworkingSecGroupV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSecGroupV2Create, - Read: resourceNetworkingSecGroupV2Read, - Update: resourceNetworkingSecGroupV2Update, - Delete: resourceNetworkingSecGroupV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - 
"delete_default_rules": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceNetworkingSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - opts := groups.CreateOpts{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - TenantID: d.Get("tenant_id").(string), - } - - log.Printf("[DEBUG] Create OpenStack Neutron Security Group: %#v", opts) - - security_group, err := groups.Create(networkingClient, opts).Extract() - if err != nil { - return err - } - - // Delete the default security group rules if it has been requested. - deleteDefaultRules := d.Get("delete_default_rules").(bool) - if deleteDefaultRules { - for _, rule := range security_group.Rules { - if err := rules.Delete(networkingClient, rule.ID).ExtractErr(); err != nil { - return fmt.Errorf( - "There was a problem deleting a default security group rule: %s", err) - } - } - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group created: %#v", security_group) - - d.SetId(security_group.ID) - - return resourceNetworkingSecGroupV2Read(d, meta) -} - -func resourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Retrieve information about security group: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - security_group, err := groups.Get(networkingClient, d.Id()).Extract() - - if err != nil { - return CheckDeleted(d, err, "OpenStack Neutron Security group") - } - - d.Set("description", security_group.Description) - d.Set("tenant_id", security_group.TenantID) - d.Set("name", security_group.Name) - d.Set("region", 
GetRegion(d)) - - return nil -} - -func resourceNetworkingSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - var update bool - var updateOpts groups.UpdateOpts - - if d.HasChange("name") { - update = true - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("description") { - update = true - updateOpts.Name = d.Get("description").(string) - } - - if update { - log.Printf("[DEBUG] Updating SecGroup %s with options: %#v", d.Id(), updateOpts) - _, err = groups.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack SecGroup: %s", err) - } - } - - return resourceNetworkingSecGroupV2Read(d, meta) -} - -func resourceNetworkingSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Destroy security group: %s", d.Id()) - - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSecGroupDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Security Group: %s", err) - } - - d.SetId("") - return err -} - -func waitForSecGroupDelete(networkingClient *gophercloud.ServiceClient, secGroupId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Security Group %s.\n", secGroupId) - - r, err := groups.Get(networkingClient, secGroupId).Extract() 
- if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) - return r, "DELETED", nil - } - return r, "ACTIVE", err - } - - err = groups.Delete(networkingClient, secGroupId).ExtractErr() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) - return r, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return r, "ACTIVE", nil - } - } - return r, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Neutron Security Group %s still active.\n", secGroupId) - return r, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2_test.go deleted file mode 100644 index 09b77a197..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" -) - -func TestAccNetworkingV2SecGroup_basic(t *testing.T) { - var security_group groups.SecGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroup_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2SecGroupRuleCount(&security_group, 2), - ), - }, - resource.TestStep{ - Config: 
testAccNetworkingV2SecGroup_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPtr( - "openstack_networking_secgroup_v2.secgroup_1", "id", &security_group.ID), - resource.TestCheckResourceAttr( - "openstack_networking_secgroup_v2.secgroup_1", "name", "security_group_2"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroup_noDefaultRules(t *testing.T) { - var security_group groups.SecGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroup_noDefaultRules, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - testAccCheckNetworkingV2SecGroupRuleCount(&security_group, 0), - ), - }, - }, - }) -} - -func TestAccNetworkingV2SecGroup_timeout(t *testing.T) { - var security_group groups.SecGroup - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SecGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2SecGroup_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SecGroupExists( - "openstack_networking_secgroup_v2.secgroup_1", &security_group), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2SecGroupDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_secgroup_v2" { - continue - } - - _, err := groups.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Security 
group still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2SecGroupExists(n string, security_group *groups.SecGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := groups.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Security group not found") - } - - *security_group = *found - - return nil - } -} - -func testAccCheckNetworkingV2SecGroupRuleCount( - sg *groups.SecGroup, count int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len(sg.Rules) == count { - return nil - } - - return fmt.Errorf("Unexpected number of rules in group %s. 
Expected %d, got %d", - sg.ID, count, len(sg.Rules)) - } -} - -const testAccNetworkingV2SecGroup_basic = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group" - description = "terraform security group acceptance test" -} -` - -const testAccNetworkingV2SecGroup_update = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group_2" - description = "terraform security group acceptance test" -} -` - -const testAccNetworkingV2SecGroup_noDefaultRules = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group_1" - description = "terraform security group acceptance test" - delete_default_rules = true -} -` - -const testAccNetworkingV2SecGroup_timeout = ` -resource "openstack_networking_secgroup_v2" "secgroup_1" { - name = "security_group" - description = "terraform security group acceptance test" - - timeouts { - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go deleted file mode 100644 index be19338aa..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go +++ /dev/null @@ -1,423 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func resourceNetworkingSubnetV2() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkingSubnetV2Create, - Read: resourceNetworkingSubnetV2Read, - Update: resourceNetworkingSubnetV2Update, - Delete: resourceNetworkingSubnetV2Delete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * 
time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "tenant_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "allocation_pools": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "gateway_ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - }, - "no_gateway": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - }, - "ip_version": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 4, - ForceNew: true, - }, - "enable_dhcp": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: false, - Default: true, - }, - "dns_nameservers": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ForceNew: false, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "host_routes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: false, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_cidr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "next_hop": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "value_specs": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - 
ForceNew: true, - }, - }, - } -} - -func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - createOpts := SubnetCreateOpts{ - subnets.CreateOpts{ - NetworkID: d.Get("network_id").(string), - CIDR: d.Get("cidr").(string), - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), - AllocationPools: resourceSubnetAllocationPoolsV2(d), - DNSNameservers: resourceSubnetDNSNameserversV2(d), - HostRoutes: resourceSubnetHostRoutesV2(d), - EnableDHCP: nil, - }, - MapValueSpecs(d), - } - - noGateway := d.Get("no_gateway").(bool) - gatewayIP := d.Get("gateway_ip").(string) - - if gatewayIP != "" && noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") - } - - if gatewayIP != "" { - createOpts.GatewayIP = &gatewayIP - } - - if noGateway { - disableGateway := "" - createOpts.GatewayIP = &disableGateway - } - - enableDHCP := d.Get("enable_dhcp").(bool) - createOpts.EnableDHCP = &enableDHCP - - if v, ok := d.GetOk("ip_version"); ok { - ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int)) - createOpts.IPVersion = ipVersion - } - - s, err := subnets.Create(networkingClient, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err) - } - - log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{"ACTIVE"}, - Refresh: waitForSubnetActive(networkingClient, s.ID), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - - d.SetId(s.ID) - - log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s) - return resourceNetworkingSubnetV2Read(d, meta) -} - -func resourceNetworkingSubnetV2Read(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - s, err := subnets.Get(networkingClient, d.Id()).Extract() - if err != nil { - return CheckDeleted(d, err, "subnet") - } - - log.Printf("[DEBUG] Retrieved Subnet %s: %#v", d.Id(), s) - - d.Set("network_id", s.NetworkID) - d.Set("cidr", s.CIDR) - d.Set("ip_version", s.IPVersion) - d.Set("name", s.Name) - d.Set("tenant_id", s.TenantID) - d.Set("gateway_ip", s.GatewayIP) - d.Set("dns_nameservers", s.DNSNameservers) - d.Set("host_routes", s.HostRoutes) - d.Set("enable_dhcp", s.EnableDHCP) - d.Set("network_id", s.NetworkID) - - // Set the allocation_pools - var allocationPools []map[string]interface{} - for _, v := range s.AllocationPools { - pool := make(map[string]interface{}) - pool["start"] = v.Start - pool["end"] = v.End - - allocationPools = append(allocationPools, pool) - } - d.Set("allocation_pools", allocationPools) - - d.Set("region", GetRegion(d)) - - return nil -} - -func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - // Check if both gateway_ip and no_gateway are set - if _, ok := d.GetOk("gateway_ip"); ok { - noGateway := d.Get("no_gateway").(bool) - if noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.") - } - } - - var updateOpts subnets.UpdateOpts - - noGateway := d.Get("no_gateway").(bool) - gatewayIP := d.Get("gateway_ip").(string) - - if gatewayIP != "" && noGateway { - return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") - } - - if d.HasChange("name") { - updateOpts.Name = d.Get("name").(string) - } - - if d.HasChange("gateway_ip") { - 
updateOpts.GatewayIP = nil - if v, ok := d.GetOk("gateway_ip"); ok { - gatewayIP := v.(string) - updateOpts.GatewayIP = &gatewayIP - } - } - - if d.HasChange("no_gateway") { - if d.Get("no_gateway").(bool) { - gatewayIP := "" - updateOpts.GatewayIP = &gatewayIP - } - } - - if d.HasChange("dns_nameservers") { - updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d) - } - - if d.HasChange("host_routes") { - updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d) - } - - if d.HasChange("enable_dhcp") { - v := d.Get("enable_dhcp").(bool) - updateOpts.EnableDHCP = &v - } - - if d.HasChange("allocation_pools") { - updateOpts.AllocationPools = resourceSubnetAllocationPoolsV2(d) - } - - log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts) - - _, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err) - } - - return resourceNetworkingSubnetV2Read(d, meta) -} - -func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - networkingClient, err := config.networkingV2Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"ACTIVE"}, - Target: []string{"DELETED"}, - Refresh: waitForSubnetDelete(networkingClient, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err) - } - - d.SetId("") - return nil -} - -func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool { - rawAPs := d.Get("allocation_pools").([]interface{}) - aps := make([]subnets.AllocationPool, len(rawAPs)) - for i, raw := range rawAPs { - rawMap := raw.(map[string]interface{}) - aps[i] = 
subnets.AllocationPool{ - Start: rawMap["start"].(string), - End: rawMap["end"].(string), - } - } - return aps -} - -func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string { - rawDNSN := d.Get("dns_nameservers").(*schema.Set) - dnsn := make([]string, rawDNSN.Len()) - for i, raw := range rawDNSN.List() { - dnsn[i] = raw.(string) - } - return dnsn -} - -func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute { - rawHR := d.Get("host_routes").([]interface{}) - hr := make([]subnets.HostRoute, len(rawHR)) - for i, raw := range rawHR { - rawMap := raw.(map[string]interface{}) - hr[i] = subnets.HostRoute{ - DestinationCIDR: rawMap["destination_cidr"].(string), - NextHop: rawMap["next_hop"].(string), - } - } - return hr -} - -func resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion { - var ipVersion gophercloud.IPVersion - switch v { - case 4: - ipVersion = gophercloud.IPv4 - case 6: - ipVersion = gophercloud.IPv6 - } - - return ipVersion -} - -func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - s, err := subnets.Get(networkingClient, subnetId).Extract() - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", s) - return s, "ACTIVE", nil - } -} - -func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId) - - s, err := subnets.Get(networkingClient, subnetId).Extract() - if err != nil { - if _, ok := err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) - return s, "DELETED", nil - } - return s, "ACTIVE", err - } - - err = subnets.Delete(networkingClient, subnetId).ExtractErr() - if err != nil { - if _, ok := 
err.(gophercloud.ErrDefault404); ok { - log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) - return s, "DELETED", nil - } - if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { - if errCode.Actual == 409 { - return s, "ACTIVE", nil - } - } - return s, "ACTIVE", err - } - - log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId) - return s, "ACTIVE", nil - } -} diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go deleted file mode 100644 index fe0a6b08a..000000000 --- a/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -func TestAccNetworkingV2Subnet_basic(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "allocation_pools.0.start", "192.168.199.100"), - ), - }, - resource.TestStep{ - Config: testAccNetworkingV2Subnet_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "name", "subnet_1"), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "gateway_ip", "192.168.199.1"), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "enable_dhcp", 
"true"), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "allocation_pools.0.start", "192.168.199.150"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Subnet_enableDHCP(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_enableDHCP, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "enable_dhcp", "true"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Subnet_disableDHCP(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_disableDHCP, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "enable_dhcp", "false"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Subnet_noGateway(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_noGateway, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "gateway_ip", ""), - ), - }, - }, - }) -} 
- -func TestAccNetworkingV2Subnet_impliedGateway(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_impliedGateway, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - resource.TestCheckResourceAttr( - "openstack_networking_subnet_v2.subnet_1", "gateway_ip", "192.168.199.1"), - ), - }, - }, - }) -} - -func TestAccNetworkingV2Subnet_timeout(t *testing.T) { - var subnet subnets.Subnet - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccNetworkingV2Subnet_timeout, - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkingV2SubnetExists("openstack_networking_subnet_v2.subnet_1", &subnet), - ), - }, - }, - }) -} - -func testAccCheckNetworkingV2SubnetDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_networking_subnet_v2" { - continue - } - - _, err := subnets.Get(networkingClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Subnet still exists") - } - } - - return nil -} - -func testAccCheckNetworkingV2SubnetExists(n string, subnet *subnets.Subnet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") 
- } - - config := testAccProvider.Meta().(*Config) - networkingClient, err := config.networkingV2Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack networking client: %s", err) - } - - found, err := subnets.Get(networkingClient, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Subnet not found") - } - - *subnet = *found - - return nil - } -} - -const testAccNetworkingV2Subnet_basic = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - network_id = "${openstack_networking_network_v2.network_1.id}" - - allocation_pools { - start = "192.168.199.100" - end = "192.168.199.200" - } -} -` - -const testAccNetworkingV2Subnet_update = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - gateway_ip = "192.168.199.1" - network_id = "${openstack_networking_network_v2.network_1.id}" - - allocation_pools { - start = "192.168.199.150" - end = "192.168.199.200" - } -} -` - -const testAccNetworkingV2Subnet_enableDHCP = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - gateway_ip = "192.168.199.1" - enable_dhcp = true - network_id = "${openstack_networking_network_v2.network_1.id}" -} -` - -const testAccNetworkingV2Subnet_disableDHCP = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - enable_dhcp = false - network_id = 
"${openstack_networking_network_v2.network_1.id}" -} -` - -const testAccNetworkingV2Subnet_noGateway = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - no_gateway = true - network_id = "${openstack_networking_network_v2.network_1.id}" -} -` - -const testAccNetworkingV2Subnet_impliedGateway = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} -resource "openstack_networking_subnet_v2" "subnet_1" { - name = "subnet_1" - cidr = "192.168.199.0/24" - network_id = "${openstack_networking_network_v2.network_1.id}" -} -` - -const testAccNetworkingV2Subnet_timeout = ` -resource "openstack_networking_network_v2" "network_1" { - name = "network_1" - admin_state_up = "true" -} - -resource "openstack_networking_subnet_v2" "subnet_1" { - cidr = "192.168.199.0/24" - network_id = "${openstack_networking_network_v2.network_1.id}" - - allocation_pools { - start = "192.168.199.100" - end = "192.168.199.200" - } - - timeouts { - create = "5m" - delete = "5m" - } -} -` diff --git a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go deleted file mode 100644 index 27fb7ae0c..000000000 --- a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go +++ /dev/null @@ -1,148 +0,0 @@ -package openstack - -import ( - "fmt" - "log" - - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceObjectStorageContainerV1() *schema.Resource { - return &schema.Resource{ - Create: resourceObjectStorageContainerV1Create, - Read: resourceObjectStorageContainerV1Read, - Update: resourceObjectStorageContainerV1Update, - Delete: resourceObjectStorageContainerV1Delete, - - 
Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "container_read": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_sync_to": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_sync_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "container_write": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "content_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: false, - }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: false, - }, - }, - } -} - -func resourceObjectStorageContainerV1Create(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - cn := d.Get("name").(string) - - createOpts := &containers.CreateOpts{ - ContainerRead: d.Get("container_read").(string), - ContainerSyncTo: d.Get("container_sync_to").(string), - ContainerSyncKey: d.Get("container_sync_key").(string), - ContainerWrite: d.Get("container_write").(string), - ContentType: d.Get("content_type").(string), - Metadata: resourceContainerMetadataV2(d), - } - - log.Printf("[DEBUG] Create Options: %#v", createOpts) - _, err = containers.Create(objectStorageClient, cn, createOpts).Extract() - if err != nil { - return fmt.Errorf("Error creating OpenStack container: %s", err) - } - log.Printf("[INFO] Container ID: %s", cn) - - // Store the ID now - d.SetId(cn) - - return resourceObjectStorageContainerV1Read(d, meta) -} - -func 
resourceObjectStorageContainerV1Read(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func resourceObjectStorageContainerV1Update(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - updateOpts := containers.UpdateOpts{ - ContainerRead: d.Get("container_read").(string), - ContainerSyncTo: d.Get("container_sync_to").(string), - ContainerSyncKey: d.Get("container_sync_key").(string), - ContainerWrite: d.Get("container_write").(string), - ContentType: d.Get("content_type").(string), - } - - if d.HasChange("metadata") { - updateOpts.Metadata = resourceContainerMetadataV2(d) - } - - _, err = containers.Update(objectStorageClient, d.Id(), updateOpts).Extract() - if err != nil { - return fmt.Errorf("Error updating OpenStack container: %s", err) - } - - return resourceObjectStorageContainerV1Read(d, meta) -} - -func resourceObjectStorageContainerV1Delete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - _, err = containers.Delete(objectStorageClient, d.Id()).Extract() - if err != nil { - return fmt.Errorf("Error deleting OpenStack container: %s", err) - } - - d.SetId("") - return nil -} - -func resourceContainerMetadataV2(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("metadata").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} diff --git a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go deleted file mode 100644 index e344dd115..000000000 --- 
a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package openstack - -import ( - "fmt" - "testing" - - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccObjectStorageV1Container_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckObjectStorageV1ContainerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccObjectStorageV1Container_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_objectstorage_container_v1.container_1", "name", "container_1"), - resource.TestCheckResourceAttr( - "openstack_objectstorage_container_v1.container_1", "content_type", "application/json"), - ), - }, - resource.TestStep{ - Config: testAccObjectStorageV1Container_update, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "openstack_objectstorage_container_v1.container_1", "content_type", "text/plain"), - ), - }, - }, - }) -} - -func testAccCheckObjectStorageV1ContainerDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - objectStorageClient, err := config.objectStorageV1Client(OS_REGION_NAME) - if err != nil { - return fmt.Errorf("Error creating OpenStack object storage client: %s", err) - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "openstack_objectstorage_container_v1" { - continue - } - - _, err := containers.Get(objectStorageClient, rs.Primary.ID).Extract() - if err == nil { - return fmt.Errorf("Container still exists") - } - } - - return nil -} - -const testAccObjectStorageV1Container_basic = ` -resource "openstack_objectstorage_container_v1" "container_1" { - name = "container_1" - metadata { - test = "true" - } - content_type = 
"application/json" -} -` - -const testAccObjectStorageV1Container_update = ` -resource "openstack_objectstorage_container_v1" "container_1" { - name = "container_1" - metadata { - test = "true" - } - content_type = "text/plain" -} -` diff --git a/builtin/providers/openstack/types.go b/builtin/providers/openstack/types.go deleted file mode 100644 index fd4fca56f..000000000 --- a/builtin/providers/openstack/types.go +++ /dev/null @@ -1,358 +0,0 @@ -package openstack - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" - "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" - "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" -) - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default http client RoundTripper to allow for logging. -type LogRoundTripper struct { - Rt http.RoundTripper - OsDebug bool -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. 
-func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - defer func() { - if request.Body != nil { - request.Body.Close() - } - }() - - // for future reference, this is how to access the Transport struct: - //tlsconfig := lrt.Rt.(*http.Transport).TLSClientConfig - - var err error - - if lrt.OsDebug { - log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL) - log.Printf("[DEBUG] Openstack Request Headers:\n%s", FormatHeaders(request.Header, "\n")) - - if request.Body != nil { - request.Body, err = lrt.logRequest(request.Body, request.Header.Get("Content-Type")) - if err != nil { - return nil, err - } - } - } - - response, err := lrt.Rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if lrt.OsDebug { - log.Printf("[DEBUG] Openstack Response Code: %d", response.StatusCode) - log.Printf("[DEBUG] Openstack Response Headers:\n%s", FormatHeaders(response.Header, "\n")) - - response.Body, err = lrt.logResponse(response.Body, response.Header.Get("Content-Type")) - } - - return response, err -} - -// logRequest will log the HTTP Request details. -// If the body is JSON, it will attempt to be pretty-formatted. -func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, contentType string) (io.ReadCloser, error) { - defer original.Close() - - var bs bytes.Buffer - _, err := io.Copy(&bs, original) - if err != nil { - return nil, err - } - - // Handle request contentType - if strings.HasPrefix(contentType, "application/json") { - debugInfo := lrt.formatJSON(bs.Bytes()) - log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) - } else { - log.Printf("[DEBUG] OpenStack Request Body: %s", bs.String()) - } - - return ioutil.NopCloser(strings.NewReader(bs.String())), nil -} - -// logResponse will log the HTTP Response details. -// If the body is JSON, it will attempt to be pretty-formatted. 
-func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, contentType string) (io.ReadCloser, error) { - if strings.HasPrefix(contentType, "application/json") { - var bs bytes.Buffer - defer original.Close() - _, err := io.Copy(&bs, original) - if err != nil { - return nil, err - } - debugInfo := lrt.formatJSON(bs.Bytes()) - if debugInfo != "" { - log.Printf("[DEBUG] OpenStack Response Body: %s", debugInfo) - } - return ioutil.NopCloser(strings.NewReader(bs.String())), nil - } - - log.Printf("[DEBUG] Not logging because OpenStack response body isn't JSON") - return original, nil -} - -// formatJSON will try to pretty-format a JSON body. -// It will also mask known fields which contain sensitive information. -func (lrt *LogRoundTripper) formatJSON(raw []byte) string { - var data map[string]interface{} - - err := json.Unmarshal(raw, &data) - if err != nil { - log.Printf("[DEBUG] Unable to parse OpenStack JSON: %s", err) - return string(raw) - } - - // Mask known password fields - if v, ok := data["auth"].(map[string]interface{}); ok { - if v, ok := v["identity"].(map[string]interface{}); ok { - if v, ok := v["password"].(map[string]interface{}); ok { - if v, ok := v["user"].(map[string]interface{}); ok { - v["password"] = "***" - } - } - } - } - - // Ignore the catalog - if v, ok := data["token"].(map[string]interface{}); ok { - if _, ok := v["catalog"]; ok { - return "" - } - } - - pretty, err := json.MarshalIndent(data, "", " ") - if err != nil { - log.Printf("[DEBUG] Unable to re-marshal OpenStack JSON: %s", err) - return string(raw) - } - - return string(pretty) -} - -// Firewall is an OpenStack firewall. -type Firewall struct { - firewalls.Firewall - routerinsertion.FirewallExt -} - -// FirewallCreateOpts represents the attributes used when creating a new firewall. -type FirewallCreateOpts struct { - firewalls.CreateOptsBuilder - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToFirewallCreateMap casts a CreateOptsExt struct to a map. 
-// It overrides firewalls.ToFirewallCreateMap to add the ValueSpecs field. -func (opts FirewallCreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { - body, err := opts.CreateOptsBuilder.ToFirewallCreateMap() - if err != nil { - return nil, err - } - - return AddValueSpecs(body), nil -} - -//FirewallUpdateOpts -type FirewallUpdateOpts struct { - firewalls.UpdateOptsBuilder -} - -func (opts FirewallUpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "firewall") -} - -// FloatingIPCreateOpts represents the attributes used when creating a new floating ip. -type FloatingIPCreateOpts struct { - floatingips.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToFloatingIPCreateMap casts a CreateOpts struct to a map. -// It overrides floatingips.ToFloatingIPCreateMap to add the ValueSpecs field. -func (opts FloatingIPCreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "floatingip") -} - -// KeyPairCreateOpts represents the attributes used when creating a new keypair. -type KeyPairCreateOpts struct { - keypairs.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToKeyPairCreateMap casts a CreateOpts struct to a map. -// It overrides keypairs.ToKeyPairCreateMap to add the ValueSpecs field. -func (opts KeyPairCreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "keypair") -} - -// NetworkCreateOpts represents the attributes used when creating a new network. -type NetworkCreateOpts struct { - networks.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToNetworkCreateMap casts a CreateOpts struct to a map. -// It overrides networks.ToNetworkCreateMap to add the ValueSpecs field. 
-func (opts NetworkCreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "network") -} - -// PolicyCreateOpts represents the attributes used when creating a new firewall policy. -type PolicyCreateOpts struct { - policies.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToPolicyCreateMap casts a CreateOpts struct to a map. -// It overrides policies.ToFirewallPolicyCreateMap to add the ValueSpecs field. -func (opts PolicyCreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "firewall_policy") -} - -// PortCreateOpts represents the attributes used when creating a new port. -type PortCreateOpts struct { - ports.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToPortCreateMap casts a CreateOpts struct to a map. -// It overrides ports.ToPortCreateMap to add the ValueSpecs field. -func (opts PortCreateOpts) ToPortCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "port") -} - -// RecordSetCreateOpts represents the attributes used when creating a new DNS record set. -type RecordSetCreateOpts struct { - recordsets.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRecordSetCreateMap casts a CreateOpts struct to a map. -// It overrides recordsets.ToRecordSetCreateMap to add the ValueSpecs field. -func (opts RecordSetCreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "") - if err != nil { - return nil, err - } - - if m, ok := b[""].(map[string]interface{}); ok { - return m, nil - } - - return nil, fmt.Errorf("Expected map but got %T", b[""]) -} - -// RouterCreateOpts represents the attributes used when creating a new router. -type RouterCreateOpts struct { - routers.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRouterCreateMap casts a CreateOpts struct to a map. 
-// It overrides routers.ToRouterCreateMap to add the ValueSpecs field. -func (opts RouterCreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "router") -} - -// RuleCreateOpts represents the attributes used when creating a new firewall rule. -type RuleCreateOpts struct { - rules.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToRuleCreateMap casts a CreateOpts struct to a map. -// It overrides rules.ToRuleCreateMap to add the ValueSpecs field. -func (opts RuleCreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "firewall_rule") - if err != nil { - return nil, err - } - - if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { - m["protocol"] = nil - } - - return b, nil -} - -// ServerGroupCreateOpts represents the attributes used when creating a new router. -type ServerGroupCreateOpts struct { - servergroups.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToServerGroupCreateMap casts a CreateOpts struct to a map. -// It overrides routers.ToServerGroupCreateMap to add the ValueSpecs field. -func (opts ServerGroupCreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { - return BuildRequest(opts, "server_group") -} - -// SubnetCreateOpts represents the attributes used when creating a new subnet. -type SubnetCreateOpts struct { - subnets.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToSubnetCreateMap casts a CreateOpts struct to a map. -// It overrides subnets.ToSubnetCreateMap to add the ValueSpecs field. 
-func (opts SubnetCreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "subnet") - if err != nil { - return nil, err - } - - if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { - m["gateway_ip"] = nil - } - - return b, nil -} - -// ZoneCreateOpts represents the attributes used when creating a new DNS zone. -type ZoneCreateOpts struct { - zones.CreateOpts - ValueSpecs map[string]string `json:"value_specs,omitempty"` -} - -// ToZoneCreateMap casts a CreateOpts struct to a map. -// It overrides zones.ToZoneCreateMap to add the ValueSpecs field. -func (opts ZoneCreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { - b, err := BuildRequest(opts, "") - if err != nil { - return nil, err - } - - if m, ok := b[""].(map[string]interface{}); ok { - if opts.TTL > 0 { - m["ttl"] = opts.TTL - } - - return m, nil - } - - return nil, fmt.Errorf("Expected map but got %T", b[""]) -} diff --git a/builtin/providers/openstack/util.go b/builtin/providers/openstack/util.go deleted file mode 100644 index 0c879c9ea..000000000 --- a/builtin/providers/openstack/util.go +++ /dev/null @@ -1,100 +0,0 @@ -package openstack - -import ( - "fmt" - "net/http" - "os" - "sort" - "strings" - - "github.com/Unknwon/com" - "github.com/gophercloud/gophercloud" - "github.com/hashicorp/terraform/helper/schema" -) - -// BuildRequest takes an opts struct and builds a request body for -// Gophercloud to execute -func BuildRequest(opts interface{}, parent string) (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "") - if err != nil { - return nil, err - } - - b = AddValueSpecs(b) - - return map[string]interface{}{parent: b}, nil -} - -// CheckDeleted checks the error to see if it's a 404 (Not Found) and, if so, -// sets the resource ID to the empty string instead of throwing an error. 
-func CheckDeleted(d *schema.ResourceData, err error, msg string) error { - if _, ok := err.(gophercloud.ErrDefault404); ok { - d.SetId("") - return nil - } - - return fmt.Errorf("%s: %s", msg, err) -} - -// GetRegion returns the region from either d.Get("region") or OS_REGION_NAME -func GetRegion(d *schema.ResourceData) string { - if v, ok := d.GetOk("region"); ok { - return v.(string) - } - - if v := os.Getenv("OS_REGION_NAME"); v != "" { - return v - } - - return "" -} - -// AddValueSpecs expands the 'value_specs' object and removes 'value_specs' -// from the reqeust body. -func AddValueSpecs(body map[string]interface{}) map[string]interface{} { - if body["value_specs"] != nil { - for k, v := range body["value_specs"].(map[string]interface{}) { - body[k] = v - } - delete(body, "value_specs") - } - - return body -} - -// MapValueSpecs converts ResourceData into a map -func MapValueSpecs(d *schema.ResourceData) map[string]string { - m := make(map[string]string) - for key, val := range d.Get("value_specs").(map[string]interface{}) { - m[key] = val.(string) - } - return m -} - -// List of headers that need to be redacted -var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token", - "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2", - "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie", - "x-subject-token"} - -// RedactHeaders processes a headers object, returning a redacted list -func RedactHeaders(headers http.Header) (processedHeaders []string) { - for name, header := range headers { - for _, v := range header { - if com.IsSliceContainsStr(REDACT_HEADERS, name) { - processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***")) - } else { - processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v)) - } - } - } - return -} - -// FormatHeaders processes a headers object plus a deliminator, returning a string -func FormatHeaders(headers http.Header, 
seperator string) string { - redactedHeaders := RedactHeaders(headers) - sort.Strings(redactedHeaders) - - return strings.Join(redactedHeaders, seperator) -} diff --git a/builtin/providers/opsgenie/config.go b/builtin/providers/opsgenie/config.go deleted file mode 100644 index d8a36dbb3..000000000 --- a/builtin/providers/opsgenie/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package opsgenie - -import ( - "log" - - "golang.org/x/net/context" - - "github.com/opsgenie/opsgenie-go-sdk/client" -) - -type OpsGenieClient struct { - apiKey string - - StopContext context.Context - - teams client.OpsGenieTeamClient - users client.OpsGenieUserClient -} - -// Config defines the configuration options for the OpsGenie client -type Config struct { - ApiKey string -} - -// Client returns a new OpsGenie client -func (c *Config) Client() (*OpsGenieClient, error) { - opsGenie := new(client.OpsGenieClient) - opsGenie.SetAPIKey(c.ApiKey) - client := OpsGenieClient{} - - log.Printf("[INFO] OpsGenie client configured") - - teamsClient, err := opsGenie.Team() - if err != nil { - return nil, err - } - client.teams = *teamsClient - - usersClient, err := opsGenie.User() - if err != nil { - return nil, err - } - client.users = *usersClient - - return &client, nil -} diff --git a/builtin/providers/opsgenie/data_source_opsgenie_user.go b/builtin/providers/opsgenie/data_source_opsgenie_user.go deleted file mode 100644 index 310c7b22c..000000000 --- a/builtin/providers/opsgenie/data_source_opsgenie_user.go +++ /dev/null @@ -1,66 +0,0 @@ -package opsgenie - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/opsgenie/opsgenie-go-sdk/user" -) - -func dataSourceOpsGenieUser() *schema.Resource { - return &schema.Resource{ - Read: dataSourceOpsGenieUserRead, - - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - }, - "full_name": { - Type: schema.TypeString, - Computed: true, - }, - "role": { - Type: schema.TypeString, 
- Computed: true, - }, - }, - } -} - -func dataSourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).users - - username := d.Get("username").(string) - - log.Printf("[INFO] Reading OpsGenie user '%s'", username) - - o := user.ListUsersRequest{} - resp, err := client.List(o) - if err != nil { - return nil - } - - var found *user.GetUserResponse - - if len(resp.Users) > 0 { - for _, user := range resp.Users { - if user.Username == username { - found = &user - break - } - } - } - - if found == nil { - return fmt.Errorf("Unable to locate any user with the username: %s", username) - } - - d.SetId(found.Id) - d.Set("username", found.Username) - d.Set("full_name", found.Fullname) - d.Set("role", found.Role) - - return nil -} diff --git a/builtin/providers/opsgenie/data_source_opsgenie_user_test.go b/builtin/providers/opsgenie/data_source_opsgenie_user_test.go deleted file mode 100644 index a0bac0650..000000000 --- a/builtin/providers/opsgenie/data_source_opsgenie_user_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package opsgenie - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourceOpsGenieUser_Basic(t *testing.T) { - ri := acctest.RandInt() - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceOpsGenieUserConfig(ri), - Check: resource.ComposeTestCheckFunc( - testAccDataSourceOpsGenieUser("opsgenie_user.test", "data.opsgenie_user.by_username"), - ), - }, - }, - }) -} - -func testAccDataSourceOpsGenieUser(src, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - srcR := s.RootModule().Resources[src] - srcA := srcR.Primary.Attributes - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return 
fmt.Errorf("Expected to get a user ID from OpsGenie") - } - - testAtts := []string{"username", "full_name", "role"} - - for _, att := range testAtts { - if a[att] != srcA[att] { - return fmt.Errorf("Expected the user %s to be: %s, but got: %s", att, srcA[att], a[att]) - } - } - - return nil - } -} - -func testAccDataSourceOpsGenieUserConfig(ri int) string { - return fmt.Sprintf(` -resource "opsgenie_user" "test" { - username = "acctest-%d@example.tld" - full_name = "Acceptance Test User" - role = "User" -} - -data "opsgenie_user" "by_username" { - username = "${opsgenie_user.test.username}" -} -`, ri) -} diff --git a/builtin/providers/opsgenie/import_opsgenie_team_test.go b/builtin/providers/opsgenie/import_opsgenie_team_test.go deleted file mode 100644 index b77a02b03..000000000 --- a/builtin/providers/opsgenie/import_opsgenie_team_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package opsgenie - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccOpsGenieTeam_importBasic(t *testing.T) { - resourceName := "opsgenie_team.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccOpsGenieTeam_importWithEmptyDescription(t *testing.T) { - resourceName := "opsgenie_team.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withEmptyDescription, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: 
resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccOpsGenieTeam_importWithUser(t *testing.T) { - resourceName := "opsgenie_team.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withUser, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccOpsGenieTeam_importWithUserComplete(t *testing.T) { - resourceName := "opsgenie_team.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/opsgenie/import_opsgenie_user_test.go b/builtin/providers/opsgenie/import_opsgenie_user_test.go deleted file mode 100644 index 3219b0c1b..000000000 --- a/builtin/providers/opsgenie/import_opsgenie_user_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package opsgenie - -import ( - "testing" - - "fmt" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccOpsGenieUser_importBasic(t *testing.T) { - resourceName := "opsgenie_user.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieUser_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieUserDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - 
ImportStateVerify: true, - }, - }, - }) -} - -func TestAccOpsGenieUser_importComplete(t *testing.T) { - resourceName := "opsgenie_user.test" - - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieUser_complete, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieUserDestroy, - Steps: []resource.TestStep{ - { - Config: config, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/opsgenie/provider.go b/builtin/providers/opsgenie/provider.go deleted file mode 100644 index c3510013f..000000000 --- a/builtin/providers/opsgenie/provider.go +++ /dev/null @@ -1,42 +0,0 @@ -package opsgenie - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider represents a resource provider in Terraform -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_key": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OPSGENIE_API_KEY", nil), - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "opsgenie_user": dataSourceOpsGenieUser(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "opsgenie_team": resourceOpsGenieTeam(), - "opsgenie_user": resourceOpsGenieUser(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(data *schema.ResourceData) (interface{}, error) { - log.Println("[INFO] Initializing OpsGenie client") - - config := Config{ - ApiKey: data.Get("api_key").(string), - } - - return config.Client() -} diff --git a/builtin/providers/opsgenie/provider_test.go b/builtin/providers/opsgenie/provider_test.go deleted file mode 100644 index 14b63b7e7..000000000 --- a/builtin/providers/opsgenie/provider_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package opsgenie - -import ( - "os" - "testing" - 
- "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "opsgenie": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - apiKey := os.Getenv("OPSGENIE_API_KEY") - - if apiKey == "" { - t.Fatal("OPSGENIE_API_KEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/opsgenie/resource_opsgenie_team.go b/builtin/providers/opsgenie/resource_opsgenie_team.go deleted file mode 100644 index 14606fc33..000000000 --- a/builtin/providers/opsgenie/resource_opsgenie_team.go +++ /dev/null @@ -1,241 +0,0 @@ -package opsgenie - -import ( - "log" - - "fmt" - "strings" - - "regexp" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/opsgenie/opsgenie-go-sdk/team" -) - -func resourceOpsGenieTeam() *schema.Resource { - return &schema.Resource{ - Create: resourceOpsGenieTeamCreate, - Read: resourceOpsGenieTeamRead, - Update: resourceOpsGenieTeamUpdate, - Delete: resourceOpsGenieTeamDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateOpsGenieTeamName, - }, - "description": { - Type: schema.TypeString, - Optional: true, - }, - "member": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - }, - - "role": { - Type: schema.TypeString, - Optional: true, - Default: "user", - 
ValidateFunc: validateOpsGenieTeamRole, - }, - }, - }, - }, - }, - } -} - -func resourceOpsGenieTeamCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).teams - - name := d.Get("name").(string) - description := d.Get("description").(string) - - createRequest := team.CreateTeamRequest{ - Name: name, - Description: description, - Members: expandOpsGenieTeamMembers(d), - } - - log.Printf("[INFO] Creating OpsGenie team '%s'", name) - - createResponse, err := client.Create(createRequest) - if err != nil { - return err - } - - err = checkOpsGenieResponse(createResponse.Code, createResponse.Status) - if err != nil { - return err - } - - getRequest := team.GetTeamRequest{ - Name: name, - } - - getResponse, err := client.Get(getRequest) - if err != nil { - return err - } - - d.SetId(getResponse.Id) - - return resourceOpsGenieTeamRead(d, meta) -} - -func resourceOpsGenieTeamRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).teams - - listRequest := team.ListTeamsRequest{} - listResponse, err := client.List(listRequest) - if err != nil { - return err - } - - var found *team.GetTeamResponse - for _, team := range listResponse.Teams { - if team.Id == d.Id() { - found = &team - break - } - } - - if found == nil { - d.SetId("") - log.Printf("[INFO] Team %q not found. 
Removing from state", d.Get("name").(string)) - return nil - } - - getRequest := team.GetTeamRequest{ - Id: d.Id(), - } - - getResponse, err := client.Get(getRequest) - if err != nil { - return err - } - - d.Set("name", getResponse.Name) - d.Set("description", getResponse.Description) - d.Set("member", flattenOpsGenieTeamMembers(getResponse.Members)) - - return nil -} - -func resourceOpsGenieTeamUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).teams - name := d.Get("name").(string) - description := d.Get("description").(string) - - updateRequest := team.UpdateTeamRequest{ - Id: d.Id(), - Name: name, - Description: description, - Members: expandOpsGenieTeamMembers(d), - } - - log.Printf("[INFO] Updating OpsGenie team '%s'", name) - - updateResponse, err := client.Update(updateRequest) - if err != nil { - return err - } - - err = checkOpsGenieResponse(updateResponse.Code, updateResponse.Status) - if err != nil { - return err - } - - return nil -} - -func resourceOpsGenieTeamDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting OpsGenie team '%s'", d.Get("name").(string)) - client := meta.(*OpsGenieClient).teams - - deleteRequest := team.DeleteTeamRequest{ - Id: d.Id(), - } - - _, err := client.Delete(deleteRequest) - if err != nil { - return err - } - - return nil -} - -func flattenOpsGenieTeamMembers(input []team.Member) []interface{} { - members := make([]interface{}, 0, len(input)) - for _, inputMember := range input { - outputMember := make(map[string]interface{}) - outputMember["username"] = inputMember.User - outputMember["role"] = inputMember.Role - - members = append(members, outputMember) - } - - return members -} - -func expandOpsGenieTeamMembers(d *schema.ResourceData) []team.Member { - input := d.Get("member").([]interface{}) - - members := make([]team.Member, 0, len(input)) - if input == nil { - return members - } - - for _, v := range input { - config := 
v.(map[string]interface{}) - - username := config["username"].(string) - role := config["role"].(string) - - member := team.Member{ - User: username, - Role: role, - } - - members = append(members, member) - } - - return members -} - -func validateOpsGenieTeamName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[a-zA-Z0-9_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alpha numeric characters and underscores are allowed in %q: %q", k, value)) - } - - if len(value) >= 100 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, value, len(value))) - } - - return -} - -func validateOpsGenieTeamRole(v interface{}, k string) (ws []string, errors []error) { - value := strings.ToLower(v.(string)) - families := map[string]bool{ - "admin": true, - "user": true, - } - - if !families[value] { - errors = append(errors, fmt.Errorf("OpsGenie Team Role can only be 'Admin' or 'User'")) - } - - return -} diff --git a/builtin/providers/opsgenie/resource_opsgenie_team_test.go b/builtin/providers/opsgenie/resource_opsgenie_team_test.go deleted file mode 100644 index a357c1eaf..000000000 --- a/builtin/providers/opsgenie/resource_opsgenie_team_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package opsgenie - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/opsgenie/opsgenie-go-sdk/team" -) - -func TestAccOpsGenieTeamName_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "hello-world", - ErrCount: 1, - }, - { - Value: "hello_world", - ErrCount: 0, - }, - { - Value: "helloWorld", - ErrCount: 0, - }, - { - Value: "helloworld12", - ErrCount: 0, - }, - { - Value: "hello@world", - ErrCount: 1, - }, - { - Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd3324120", - ErrCount: 0, - }, - { - Value: 
"qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202", - ErrCount: 0, - }, - { - Value: "qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpsGenieTeamName(tc.Value, "opsgenie_team") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the OpsGenie Team Name to trigger a validation error: %v", errors) - } - } -} - -func TestAccOpsGenieTeamRole_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "admin", - ErrCount: 0, - }, - { - Value: "user", - ErrCount: 0, - }, - { - Value: "custom", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpsGenieTeamRole(tc.Value, "opsgenie_team") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the OpsGenie Team Role to trigger a validation error") - } - } -} - -func TestAccOpsGenieTeam_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieTeamExists("opsgenie_team.test"), - ), - }, - }, - }) -} - -func TestAccOpsGenieTeam_withEmptyDescription(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withEmptyDescription, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieTeamExists("opsgenie_team.test"), - ), - }, - }, - }) -} - -func TestAccOpsGenieTeam_withUser(t *testing.T) { - ri := acctest.RandInt() - config 
:= fmt.Sprintf(testAccOpsGenieTeam_withUser, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieTeamExists("opsgenie_team.test"), - ), - }, - }, - }) -} - -func TestAccOpsGenieTeam_withUserComplete(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieTeamExists("opsgenie_team.test"), - ), - }, - }, - }) -} - -func TestAccOpsGenieTeam_withMultipleUsers(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieTeam_withMultipleUsers, ri, ri, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieTeamDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieTeamExists("opsgenie_team.test"), - ), - }, - }, - }) -} - -func testCheckOpsGenieTeamDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*OpsGenieClient).teams - - for _, rs := range s.RootModule().Resources { - if rs.Type != "opsgenie_team" { - continue - } - - req := team.GetTeamRequest{ - Id: rs.Primary.Attributes["id"], - } - - result, _ := client.Get(req) - if result != nil { - return fmt.Errorf("Team still exists:\n%#v", result) - } - } - - return nil -} - -func testCheckOpsGenieTeamExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := 
s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - id := rs.Primary.Attributes["id"] - name := rs.Primary.Attributes["name"] - - client := testAccProvider.Meta().(*OpsGenieClient).teams - - req := team.GetTeamRequest{ - Id: rs.Primary.Attributes["id"], - } - - result, _ := client.Get(req) - if result == nil { - return fmt.Errorf("Bad: Team %q (name: %q) does not exist", id, name) - } - - return nil - } -} - -var testAccOpsGenieTeam_basic = ` -resource "opsgenie_team" "test" { - name = "acctest%d" -} -` - -var testAccOpsGenieTeam_withEmptyDescription = ` -resource "opsgenie_team" "test" { - name = "acctest%d" - description = "" -} -` - -var testAccOpsGenieTeam_withUser = ` -resource "opsgenie_user" "test" { - username = "acctest-%d@example.tld" - full_name = "Acceptance Test User" - role = "User" -} - -resource "opsgenie_team" "test" { - name = "acctest%d" - member { - username = "${opsgenie_user.test.username}" - } -} -` - -var testAccOpsGenieTeam_withUserComplete = ` -resource "opsgenie_user" "test" { - username = "acctest-%d@example.tld" - full_name = "Acceptance Test User" - role = "User" -} - -resource "opsgenie_team" "test" { - name = "acctest%d" - description = "Some exmaple description" - member { - username = "${opsgenie_user.test.username}" - role = "user" - } -} -` - -var testAccOpsGenieTeam_withMultipleUsers = ` -resource "opsgenie_user" "first" { - username = "acctest-1-%d@example.tld" - full_name = "First Acceptance Test User" - role = "User" -} -resource "opsgenie_user" "second" { - username = "acctest-2-%d@example.tld" - full_name = "Second Acceptance Test User" - role = "User" -} - -resource "opsgenie_team" "test" { - name = "acctest%d" - description = "Some exmaple description" - member { - username = "${opsgenie_user.first.username}" - } - member { - username = "${opsgenie_user.second.username}" - } -} -` diff --git a/builtin/providers/opsgenie/resource_opsgenie_user.go 
b/builtin/providers/opsgenie/resource_opsgenie_user.go deleted file mode 100644 index d6636ff95..000000000 --- a/builtin/providers/opsgenie/resource_opsgenie_user.go +++ /dev/null @@ -1,211 +0,0 @@ -package opsgenie - -import ( - "log" - - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/opsgenie/opsgenie-go-sdk/user" -) - -func resourceOpsGenieUser() *schema.Resource { - return &schema.Resource{ - Create: resourceOpsGenieUserCreate, - Read: resourceOpsGenieUserRead, - Update: resourceOpsGenieUserUpdate, - Delete: resourceOpsGenieUserDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: validateOpsGenieUserUsername, - }, - "full_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateOpsGenieUserFullName, - }, - "role": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateOpsGenieUserRole, - }, - "locale": { - Type: schema.TypeString, - Optional: true, - Default: "en_US", - }, - "timezone": { - Type: schema.TypeString, - Optional: true, - Default: "America/New_York", - }, - }, - } -} - -func resourceOpsGenieUserCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).users - - username := d.Get("username").(string) - fullName := d.Get("full_name").(string) - role := d.Get("role").(string) - locale := d.Get("locale").(string) - timeZone := d.Get("timezone").(string) - - createRequest := user.CreateUserRequest{ - Username: username, - Fullname: fullName, - Role: role, - Locale: locale, - Timezone: timeZone, - } - - log.Printf("[INFO] Creating OpsGenie user '%s'", username) - createResponse, err := client.Create(createRequest) - if err != nil { - return err - } - - err = checkOpsGenieResponse(createResponse.Code, createResponse.Status) - if err != nil { - return err - } - - getRequest := 
user.GetUserRequest{ - Username: username, - } - - getResponse, err := client.Get(getRequest) - if err != nil { - return err - } - - d.SetId(getResponse.Id) - - return resourceOpsGenieUserRead(d, meta) -} - -func resourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).users - - listRequest := user.ListUsersRequest{} - listResponse, err := client.List(listRequest) - if err != nil { - return err - } - - var found *user.GetUserResponse - for _, user := range listResponse.Users { - if user.Id == d.Id() { - found = &user - break - } - } - - if found == nil { - d.SetId("") - log.Printf("[INFO] User %q not found. Removing from state", d.Get("username").(string)) - return nil - } - - getRequest := user.GetUserRequest{ - Id: d.Id(), - } - - getResponse, err := client.Get(getRequest) - if err != nil { - return err - } - - d.Set("username", getResponse.Username) - d.Set("full_name", getResponse.Fullname) - d.Set("role", getResponse.Role) - d.Set("locale", getResponse.Locale) - d.Set("timezone", getResponse.Timezone) - - return nil -} - -func resourceOpsGenieUserUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*OpsGenieClient).users - - username := d.Get("username").(string) - fullName := d.Get("full_name").(string) - role := d.Get("role").(string) - locale := d.Get("locale").(string) - timeZone := d.Get("timezone").(string) - - log.Printf("[INFO] Updating OpsGenie user '%s'", username) - - updateRequest := user.UpdateUserRequest{ - Id: d.Id(), - Fullname: fullName, - Role: role, - Locale: locale, - Timezone: timeZone, - } - - updateResponse, err := client.Update(updateRequest) - if err != nil { - return err - } - - err = checkOpsGenieResponse(updateResponse.Code, updateResponse.Status) - if err != nil { - return err - } - - return nil -} - -func resourceOpsGenieUserDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting OpsGenie user '%s'", 
d.Get("username").(string)) - client := meta.(*OpsGenieClient).users - - deleteRequest := user.DeleteUserRequest{ - Id: d.Id(), - } - - _, err := client.Delete(deleteRequest) - if err != nil { - return err - } - - return nil -} - -func validateOpsGenieUserUsername(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) >= 100 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, value, len(value))) - } - - return -} - -func validateOpsGenieUserFullName(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) >= 512 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, value, len(value))) - } - - return -} - -func validateOpsGenieUserRole(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) >= 512 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, value, len(value))) - } - - return -} diff --git a/builtin/providers/opsgenie/resource_opsgenie_user_test.go b/builtin/providers/opsgenie/resource_opsgenie_user_test.go deleted file mode 100644 index a4b67b5cf..000000000 --- a/builtin/providers/opsgenie/resource_opsgenie_user_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package opsgenie - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/opsgenie/opsgenie-go-sdk/user" -) - -func TestAccOpsGenieUserUsername_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "hello", - ErrCount: 0, - }, - { - Value: acctest.RandString(99), - ErrCount: 0, - }, - { - Value: acctest.RandString(100), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpsGenieUserUsername(tc.Value, "opsgenie_team") - - if len(errors) != tc.ErrCount { - 
t.Fatalf("Expected the OpsGenie User Username Validation to trigger a validation error: %v", errors) - } - } -} - -func TestAccOpsGenieUserFullName_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "hello", - ErrCount: 0, - }, - { - Value: acctest.RandString(100), - ErrCount: 0, - }, - { - Value: acctest.RandString(511), - ErrCount: 0, - }, - { - Value: acctest.RandString(512), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpsGenieUserFullName(tc.Value, "opsgenie_team") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the OpsGenie User Full Name Validation to trigger a validation error: %v", errors) - } - } -} - -func TestAccOpsGenieUserRole_validation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "hello", - ErrCount: 0, - }, - { - Value: acctest.RandString(100), - ErrCount: 0, - }, - { - Value: acctest.RandString(511), - ErrCount: 0, - }, - { - Value: acctest.RandString(512), - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateOpsGenieUserRole(tc.Value, "opsgenie_team") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the OpsGenie User Role Validation to trigger a validation error: %v", errors) - } - } -} - -func TestAccOpsGenieUser_basic(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieUser_basic, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckOpsGenieUserDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieUserExists("opsgenie_user.test"), - ), - }, - }, - }) -} - -func TestAccOpsGenieUser_complete(t *testing.T) { - ri := acctest.RandInt() - config := fmt.Sprintf(testAccOpsGenieUser_complete, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
CheckDestroy: testCheckOpsGenieUserDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckOpsGenieUserExists("opsgenie_user.test"), - ), - }, - }, - }) -} - -func testCheckOpsGenieUserDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*OpsGenieClient).users - - for _, rs := range s.RootModule().Resources { - if rs.Type != "opsgenie_user" { - continue - } - - req := user.GetUserRequest{ - Id: rs.Primary.Attributes["id"], - } - - result, _ := client.Get(req) - if result != nil { - return fmt.Errorf("User still exists:\n%#v", result) - } - } - - return nil -} - -func testCheckOpsGenieUserExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - id := rs.Primary.Attributes["id"] - username := rs.Primary.Attributes["username"] - - client := testAccProvider.Meta().(*OpsGenieClient).users - - req := user.GetUserRequest{ - Id: rs.Primary.Attributes["id"], - } - - result, _ := client.Get(req) - if result == nil { - return fmt.Errorf("Bad: User %q (username: %q) does not exist", id, username) - } - - return nil - } -} - -var testAccOpsGenieUser_basic = ` -resource "opsgenie_user" "test" { - username = "acctest-%d@example.tld" - full_name = "Acceptance Test User" - role = "User" -} -` - -var testAccOpsGenieUser_complete = ` -resource "opsgenie_user" "test" { - username = "acctest-%d@example.tld" - full_name = "Acceptance Test User" - role = "User" - locale = "en_GB" - timezone = "Etc/GMT" -} -` diff --git a/builtin/providers/opsgenie/util.go b/builtin/providers/opsgenie/util.go deleted file mode 100644 index f8b9c2940..000000000 --- a/builtin/providers/opsgenie/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package opsgenie - -import ( - "fmt" - "net/http" -) - -func checkOpsGenieResponse(code int, status 
string) error { - if code == http.StatusOK { - return nil - } - - return fmt.Errorf("Unexpected Status Code '%d', Response '%s'", code, status) -} diff --git a/builtin/providers/ovh/config.go b/builtin/providers/ovh/config.go deleted file mode 100644 index 95fbfe768..000000000 --- a/builtin/providers/ovh/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - - "github.com/ovh/go-ovh/ovh" -) - -type Config struct { - Endpoint string - ApplicationKey string - ApplicationSecret string - ConsumerKey string - OVHClient *ovh.Client -} - -/* type used to verify client access to ovh api - */ -type PartialMe struct { - Firstname string `json:"firstname"` -} - -func clientDefault(c *Config) (*ovh.Client, error) { - client, err := ovh.NewClient( - c.Endpoint, - c.ApplicationKey, - c.ApplicationSecret, - c.ConsumerKey, - ) - if err != nil { - return nil, err - } - return client, nil -} - -func (c *Config) loadAndValidate() error { - validEndpoint := false - - ovhEndpoints := [2]string{ovh.OvhEU, ovh.OvhCA} - - for _, e := range ovhEndpoints { - if ovh.Endpoints[c.Endpoint] == e { - validEndpoint = true - } - } - - if !validEndpoint { - return fmt.Errorf("%s must be one of %#v endpoints\n", c.Endpoint, ovh.Endpoints) - } - - targetClient, err := clientDefault(c) - if err != nil { - return fmt.Errorf("Error getting ovh client: %q\n", err) - } - - var me PartialMe - err = targetClient.Get("/me", &me) - if err != nil { - return fmt.Errorf("OVH client seems to be misconfigured: %q\n", err) - } - - log.Printf("[DEBUG] Logged in on OVH API as %s!", me.Firstname) - c.OVHClient = targetClient - - return nil -} diff --git a/builtin/providers/ovh/data_source_ovh_publiccloud_region.go b/builtin/providers/ovh/data_source_ovh_publiccloud_region.go deleted file mode 100644 index 9f324b94a..000000000 --- a/builtin/providers/ovh/data_source_ovh_publiccloud_region.go +++ /dev/null @@ -1,99 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - - 
"github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePublicCloudRegion() *schema.Resource { - return &schema.Resource{ - Read: dataSourcePublicCloudRegionRead, - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", nil), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "services": &schema.Schema{ - Type: schema.TypeSet, - Set: publicCloudServiceHash, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "continentCode": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "datacenterLocation": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourcePublicCloudRegionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - projectId := d.Get("project_id").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Will read public cloud region %s for project: %s", name, projectId) - d.Partial(true) - - response := &PublicCloudRegionResponse{} - endpoint := fmt.Sprintf("/cloud/project/%s/region/%s", projectId, name) - err := config.OVHClient.Get(endpoint, response) - - if err != nil { - return fmt.Errorf("Error calling %s:\n\t %q", endpoint, err) - } - - d.Set("datacenterLocation", response.DatacenterLocation) - d.Set("continentCode", response.ContinentCode) - - services := &schema.Set{ - F: publicCloudServiceHash, - } - for i := range response.Services { - service := map[string]interface{}{ - "name": response.Services[i].Name, - "status": response.Services[i].Status, - } - services.Add(service) - } - - d.Set("services", services) - - 
d.Partial(false) - d.SetId(fmt.Sprintf("%s_%s", projectId, name)) - - return nil -} - -func publicCloudServiceHash(v interface{}) int { - r := v.(map[string]interface{}) - return hashcode.String(r["name"].(string)) -} diff --git a/builtin/providers/ovh/data_source_ovh_publiccloud_region_test.go b/builtin/providers/ovh/data_source_ovh_publiccloud_region_test.go deleted file mode 100644 index 963b21211..000000000 --- a/builtin/providers/ovh/data_source_ovh_publiccloud_region_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPublicCloudRegionDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicCloudRegionDatasourceConfig, - Check: resource.ComposeTestCheckFunc( - testAccPublicCloudRegionDatasource("data.ovh_publiccloud_region.region_attr.0"), - testAccPublicCloudRegionDatasource("data.ovh_publiccloud_region.region_attr.1"), - testAccPublicCloudRegionDatasource("data.ovh_publiccloud_region.region_attr.2"), - ), - }, - }, - }) -} - -func testAccPublicCloudRegionDatasource(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Can't find regions data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Cannot find region attributes for project %s and region %s", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["region"]) - } - - return nil - } -} - -var testAccPublicCloudRegionDatasourceConfig = fmt.Sprintf(` -data "ovh_publiccloud_regions" "regions" { - project_id = "%s" -} - -data "ovh_publiccloud_region" "region_attr" { - count = 3 - project_id = "${data.ovh_publiccloud_regions.regions.project_id}" - name = 
"${element(data.ovh_publiccloud_regions.regions.names, count.index)}" -} -`, os.Getenv("OVH_PUBLIC_CLOUD")) diff --git a/builtin/providers/ovh/data_source_ovh_publiccloud_regions.go b/builtin/providers/ovh/data_source_ovh_publiccloud_regions.go deleted file mode 100644 index 726ad3e35..000000000 --- a/builtin/providers/ovh/data_source_ovh_publiccloud_regions.go +++ /dev/null @@ -1,51 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePublicCloudRegions() *schema.Resource { - return &schema.Resource{ - Read: dataSourcePublicCloudRegionsRead, - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", nil), - }, - "names": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func dataSourcePublicCloudRegionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - projectId := d.Get("project_id").(string) - - log.Printf("[DEBUG] Will read public cloud regions for project: %s", projectId) - d.Partial(true) - - endpoint := fmt.Sprintf("/cloud/project/%s/region", projectId) - names := make([]string, 0) - err := config.OVHClient.Get(endpoint, &names) - - if err != nil { - return fmt.Errorf("Error calling %s:\n\t %q", endpoint, err) - } - - d.Set("names", names) - d.Partial(false) - d.SetId(projectId) - - log.Printf("[DEBUG] Read Public Cloud Regions %s", names) - return nil -} diff --git a/builtin/providers/ovh/data_source_ovh_publiccloud_regions_test.go b/builtin/providers/ovh/data_source_ovh_publiccloud_regions_test.go deleted file mode 100644 index 5030bcd35..000000000 --- a/builtin/providers/ovh/data_source_ovh_publiccloud_regions_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPublicCloudRegionsDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicCloudRegionsDatasourceConfig, - Check: resource.ComposeTestCheckFunc( - testAccPublicCloudRegionsDatasource("data.ovh_publiccloud_regions.regions"), - ), - }, - }, - }) -} - -func testAccPublicCloudRegionsDatasource(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Can't find regions data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Cannot find regions for project %s", rs.Primary.Attributes["project_id"]) - } - - return nil - } -} - -var testAccPublicCloudRegionsDatasourceConfig = fmt.Sprintf(` -data "ovh_publiccloud_regions" "regions" { - project_id = "%s" -} -`, os.Getenv("OVH_PUBLIC_CLOUD")) diff --git a/builtin/providers/ovh/provider.go b/builtin/providers/ovh/provider.go deleted file mode 100644 index 5260a917f..000000000 --- a/builtin/providers/ovh/provider.go +++ /dev/null @@ -1,80 +0,0 @@ -package ovh - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a schema.Provider for OVH. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_ENDPOINT", nil), - Description: descriptions["endpoint"], - }, - "application_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_APPLICATION_KEY", ""), - Description: descriptions["application_key"], - }, - "application_secret": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_APPLICATION_SECRET", ""), - Description: descriptions["application_secret"], - }, - "consumer_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_CONSUMER_KEY", ""), - Description: descriptions["consumer_key"], - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "ovh_publiccloud_region": dataSourcePublicCloudRegion(), - "ovh_publiccloud_regions": dataSourcePublicCloudRegions(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "ovh_publiccloud_private_network": resourcePublicCloudPrivateNetwork(), - "ovh_publiccloud_private_network_subnet": resourcePublicCloudPrivateNetworkSubnet(), - "ovh_publiccloud_user": resourcePublicCloudUser(), - "ovh_vrack_publiccloud_attachment": resourceVRackPublicCloudAttachment(), - }, - - ConfigureFunc: configureProvider, - } -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "endpoint": "The OVH API endpoint to target (ex: \"ovh-eu\").", - - "application_key": "The OVH API Application Key.", - - "application_secret": "The OVH API Application Secret.", - "consumer_key": "The OVH API Consumer key.", - } -} - -func configureProvider(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Endpoint: d.Get("endpoint").(string), - ApplicationKey: d.Get("application_key").(string), - ApplicationSecret: 
d.Get("application_secret").(string), - ConsumerKey: d.Get("consumer_key").(string), - } - - if err := config.loadAndValidate(); err != nil { - return nil, err - } - - return &config, nil -} diff --git a/builtin/providers/ovh/provider_test.go b/builtin/providers/ovh/provider_test.go deleted file mode 100644 index e2f9adda8..000000000 --- a/builtin/providers/ovh/provider_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/ovh/go-ovh/ovh" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider -var testAccOVHClient *ovh.Client - -func init() { - log.SetOutput(os.Stdout) - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "ovh": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - v := os.Getenv("OVH_ENDPOINT") - if v == "" { - t.Fatal("OVH_ENDPOINT must be set for acceptance tests") - } - - v = os.Getenv("OVH_APPLICATION_KEY") - if v == "" { - t.Fatal("OVH_APPLICATION_KEY must be set for acceptance tests") - } - - v = os.Getenv("OVH_APPLICATION_SECRET") - if v == "" { - t.Fatal("OVH_APPLICATION_SECRET must be set for acceptance tests") - } - - v = os.Getenv("OVH_CONSUMER_KEY") - if v == "" { - t.Fatal("OVH_CONSUMER_KEY must be set for acceptance tests") - } - - v = os.Getenv("OVH_VRACK") - if v == "" { - t.Fatal("OVH_VRACK must be set for acceptance tests") - } - - v = os.Getenv("OVH_PUBLIC_CLOUD") - if v == "" { - t.Fatal("OVH_PUBLIC_CLOUD must be set for acceptance tests") - } - - if testAccOVHClient == nil { - config := Config{ - Endpoint: 
os.Getenv("OVH_ENDPOINT"), - ApplicationKey: os.Getenv("OVH_APPLICATION_KEY"), - ApplicationSecret: os.Getenv("OVH_APPLICATION_SECRET"), - ConsumerKey: os.Getenv("OVH_CONSUMER_KEY"), - } - - if err := config.loadAndValidate(); err != nil { - t.Fatalf("couln't load OVH Client: %s", err) - } else { - testAccOVHClient = config.OVHClient - } - } -} - -func testAccCheckVRackExists(t *testing.T) { - type vrackResponse struct { - Name string `json:"name"` - Description string `json:"description"` - } - - r := vrackResponse{} - - endpoint := fmt.Sprintf("/vrack/%s", os.Getenv("OVH_VRACK")) - - err := testAccOVHClient.Get(endpoint, &r) - if err != nil { - t.Fatalf("Error: %q\n", err) - } - t.Logf("Read VRack %s -> name:'%s', desc:'%s' ", endpoint, r.Name, r.Description) - -} - -func testAccCheckPublicCloudExists(t *testing.T) { - type cloudProjectResponse struct { - ID string `json:"project_id"` - Status string `json:"status"` - Description string `json:"description"` - } - - r := cloudProjectResponse{} - - endpoint := fmt.Sprintf("/cloud/project/%s", os.Getenv("OVH_PUBLIC_CLOUD")) - - err := testAccOVHClient.Get(endpoint, &r) - if err != nil { - t.Fatalf("Error: %q\n", err) - } - t.Logf("Read Cloud Project %s -> status: '%s', desc: '%s'", endpoint, r.Status, r.Description) - -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_private_network.go b/builtin/providers/ovh/resource_ovh_publiccloud_private_network.go deleted file mode 100644 index 9044acf22..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_private_network.go +++ /dev/null @@ -1,297 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/ovh/go-ovh/ovh" -) - -func resourcePublicCloudPrivateNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourcePublicCloudPrivateNetworkCreate, - Read: resourcePublicCloudPrivateNetworkRead, - Update: 
resourcePublicCloudPrivateNetworkUpdate, - Delete: resourcePublicCloudPrivateNetworkDelete, - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - return []*schema.ResourceData{d}, nil - }, - }, - - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", nil), - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - "vlan_id": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 0, - }, - "regions": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "regions_status": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePublicCloudPrivateNetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - params := &PublicCloudPrivateNetworkCreateOpts{ - ProjectId: d.Get("project_id").(string), - VlanId: d.Get("vlan_id").(int), - Name: d.Get("name").(string), - Regions: regionsOptsFromSchema(d), - } - - r := &PublicCloudPrivateNetworkResponse{} - - log.Printf("[DEBUG] Will create public cloud private network: %s", params) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private", params.ProjectId) - - err := config.OVHClient.Post(endpoint, params, r) - if err != nil { - 
return fmt.Errorf("calling %s with params %s:\n\t %q", endpoint, params, err) - } - - log.Printf("[DEBUG] Waiting for Private Network %s:", r) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"BUILDING"}, - Target: []string{"ACTIVE"}, - Refresh: waitForPublicCloudPrivateNetworkActive(config.OVHClient, projectId, r.Id), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("waiting for private network (%s): %s", params, err) - } - log.Printf("[DEBUG] Created Private Network %s", r) - - //set id - d.SetId(r.Id) - - return nil -} - -func resourcePublicCloudPrivateNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - - r := &PublicCloudPrivateNetworkResponse{} - - log.Printf("[DEBUG] Will read public cloud private network for project: %s, id: %s", projectId, d.Id()) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, d.Id()) - - d.Partial(true) - err := config.OVHClient.Get(endpoint, r) - if err != nil { - return fmt.Errorf("Error calling %s:\n\t %q", endpoint, err) - } - - err = readPublicCloudPrivateNetwork(config, d, r) - if err != nil { - return err - } - d.Partial(false) - - log.Printf("[DEBUG] Read Public Cloud Private Network %s", r) - return nil -} - -func resourcePublicCloudPrivateNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - params := &PublicCloudPrivateNetworkUpdateOpts{ - Name: d.Get("name").(string), - } - - log.Printf("[DEBUG] Will update public cloud private network: %s", params) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, d.Id()) - - err := config.OVHClient.Put(endpoint, params, nil) - if err != nil { - return fmt.Errorf("calling %s with params %s:\n\t %q", endpoint, params, err) - } - - 
log.Printf("[DEBUG] Updated Public cloud %s Private Network %s:", projectId, d.Id()) - - return resourcePublicCloudPrivateNetworkRead(d, meta) -} - -func resourcePublicCloudPrivateNetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - id := d.Id() - - log.Printf("[DEBUG] Will delete public cloud private network for project: %s, id: %s", projectId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, id) - - err := config.OVHClient.Delete(endpoint, nil) - if err != nil { - return fmt.Errorf("calling %s:\n\t %q", endpoint, err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"DELETING"}, - Target: []string{"DELETED"}, - Refresh: waitForPublicCloudPrivateNetworkDelete(config.OVHClient, projectId, id), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("deleting for private network (%s): %s", id, err) - } - - d.SetId("") - - log.Printf("[DEBUG] Deleted Public Cloud %s Private Network %s", projectId, id) - return nil -} - -func regionsOptsFromSchema(d *schema.ResourceData) []string { - var regions []string - if v := d.Get("regions"); v != nil { - rs := v.(*schema.Set).List() - if len(rs) > 0 { - for _, v := range v.(*schema.Set).List() { - regions = append(regions, v.(string)) - } - } - } - return regions -} - -func readPublicCloudPrivateNetwork(config *Config, d *schema.ResourceData, r *PublicCloudPrivateNetworkResponse) error { - d.Set("name", r.Name) - d.Set("status", r.Status) - d.Set("type", r.Type) - d.Set("vlan_id", r.Vlanid) - - regions_status := make([]map[string]interface{}, 0) - regions := make([]string, 0) - - for i := range r.Regions { - region := make(map[string]interface{}) - region["region"] = r.Regions[i].Region - region["status"] = r.Regions[i].Status - regions_status = append(regions_status, region) - 
regions = append(regions, fmt.Sprintf(r.Regions[i].Region)) - } - d.Set("regions_status", regions_status) - d.Set("regions", regions) - - d.SetId(r.Id) - return nil -} - -func publicCloudPrivateNetworkExists(projectId, id string, c *ovh.Client) error { - r := &PublicCloudPrivateNetworkResponse{} - - log.Printf("[DEBUG] Will read public cloud private network for project: %s, id: %s", projectId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, id) - - err := c.Get(endpoint, r) - if err != nil { - return fmt.Errorf("calling %s:\n\t %q", endpoint, err) - } - log.Printf("[DEBUG] Read public cloud private network: %s", r) - - return nil -} - -// AttachmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an Attachment Task. -func waitForPublicCloudPrivateNetworkActive(c *ovh.Client, projectId, PublicCloudPrivateNetworkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r := &PublicCloudPrivateNetworkResponse{} - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, PublicCloudPrivateNetworkId) - err := c.Get(endpoint, r) - if err != nil { - return r, "", err - } - - log.Printf("[DEBUG] Pending Private Network: %s", r) - return r, r.Status, nil - } -} - -// AttachmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an Attachment Task. 
-func waitForPublicCloudPrivateNetworkDelete(c *ovh.Client, projectId, PublicCloudPrivateNetworkId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r := &PublicCloudPrivateNetworkResponse{} - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s", projectId, PublicCloudPrivateNetworkId) - err := c.Get(endpoint, r) - if err != nil { - if err.(*ovh.APIError).Code == 404 { - log.Printf("[DEBUG] private network id %s on project %s deleted", PublicCloudPrivateNetworkId, projectId) - return r, "DELETED", nil - } else { - return r, "", err - } - } - log.Printf("[DEBUG] Pending Private Network: %s", r) - return r, r.Status, nil - } -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet.go b/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet.go deleted file mode 100644 index de8d4204c..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet.go +++ /dev/null @@ -1,281 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "net" - - "github.com/ovh/go-ovh/ovh" -) - -func resourcePublicCloudPrivateNetworkSubnet() *schema.Resource { - return &schema.Resource{ - Create: resourcePublicCloudPrivateNetworkSubnetCreate, - Read: resourcePublicCloudPrivateNetworkSubnetRead, - Delete: resourcePublicCloudPrivateNetworkSubnetDelete, - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - return []*schema.ResourceData{d}, nil - }, - }, - - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", ""), - }, - "network_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "dhcp": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - 
"start": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourcePubliccloudPrivateNetworkSubnetValidateIP, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourcePubliccloudPrivateNetworkSubnetValidateIP, - }, - "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resourcePubliccloudPrivateNetworkSubnetValidateNetwork, - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "no_gateway": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - "gateway_ip": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "cidr": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ip_pools": &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "network": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "dhcp": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "start": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func resourcePublicCloudPrivateNetworkSubnetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - networkId := d.Get("network_id").(string) - - params := &PublicCloudPrivateNetworksCreateOpts{ - ProjectId: projectId, - NetworkId: networkId, - Dhcp: d.Get("dhcp").(bool), - NoGateway: d.Get("no_gateway").(bool), - Start: d.Get("start").(string), - End: d.Get("end").(string), - Network: d.Get("network").(string), - Region: d.Get("region").(string), - } - - r := &PublicCloudPrivateNetworksResponse{} - - 
log.Printf("[DEBUG] Will create public cloud private network subnet: %s", params) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s/subnet", projectId, networkId) - - err := config.OVHClient.Post(endpoint, params, r) - if err != nil { - return fmt.Errorf("calling %s with params %s:\n\t %q", endpoint, params, err) - } - - log.Printf("[DEBUG] Created Private Network Subnet %s", r) - - //set id - d.SetId(r.Id) - - return nil -} - -func resourcePublicCloudPrivateNetworkSubnetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - networkId := d.Get("network_id").(string) - - r := []*PublicCloudPrivateNetworksResponse{} - - log.Printf("[DEBUG] Will read public cloud private network subnet for project: %s, network: %s, id: %s", projectId, networkId, d.Id()) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s/subnet", projectId, networkId) - - err := config.OVHClient.Get(endpoint, &r) - if err != nil { - return fmt.Errorf("calling %s:\n\t %q", endpoint, err) - } - - err = readPublicCloudPrivateNetworkSubnet(d, r) - if err != nil { - return err - } - - log.Printf("[DEBUG] Read Public Cloud Private Network %v", r) - return nil -} - -func resourcePublicCloudPrivateNetworkSubnetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - networkId := d.Get("network_id").(string) - id := d.Id() - - log.Printf("[DEBUG] Will delete public cloud private network subnet for project: %s, network: %s, id: %s", projectId, networkId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s/subnet/%s", projectId, id, id) - - err := config.OVHClient.Delete(endpoint, nil) - if err != nil { - return fmt.Errorf("calling %s:\n\t %q", endpoint, err) - } - - d.SetId("") - - log.Printf("[DEBUG] Deleted Public Cloud %s Private Network %s Subnet %s", projectId, networkId, id) - return nil -} - -func 
publicCloudPrivateNetworkSubnetExists(projectId, networkId, id string, c *ovh.Client) error { - r := []*PublicCloudPrivateNetworksResponse{} - - log.Printf("[DEBUG] Will read public cloud private network subnet for project: %s, network: %s, id: %s", projectId, networkId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/network/private/%s/subnet", projectId, networkId) - - err := c.Get(endpoint, &r) - if err != nil { - return fmt.Errorf("calling %s:\n\t %q", endpoint, err) - } - - s := findPublicCloudPrivateNetworkSubnet(r, id) - if s == nil { - return fmt.Errorf("Subnet %s doesn't exists for project %s and network %s", id, projectId, networkId) - } - - return nil -} - -func findPublicCloudPrivateNetworkSubnet(rs []*PublicCloudPrivateNetworksResponse, id string) *PublicCloudPrivateNetworksResponse { - for i := range rs { - if rs[i].Id == id { - return rs[i] - } - } - - return nil -} - -func readPublicCloudPrivateNetworkSubnet(d *schema.ResourceData, rs []*PublicCloudPrivateNetworksResponse) error { - r := findPublicCloudPrivateNetworkSubnet(rs, d.Id()) - if r == nil { - return fmt.Errorf("%s subnet not found", d.Id()) - } - - d.Set("gateway_ip", r.GatewayIp) - d.Set("cidr", r.Cidr) - - ippools := make([]map[string]interface{}, 0) - for i := range r.IPPools { - ippool := make(map[string]interface{}) - ippool["network"] = r.IPPools[i].Network - ippool["region"] = r.IPPools[i].Region - ippool["dhcp"] = r.IPPools[i].Dhcp - ippool["start"] = r.IPPools[i].Start - ippool["end"] = r.IPPools[i].End - ippools = append(ippools, ippool) - } - - d.Set("network", ippools[0]["network"]) - d.Set("region", ippools[0]["region"]) - d.Set("dhcp", ippools[0]["dhcp"]) - d.Set("start", ippools[0]["start"]) - d.Set("end", ippools[0]["end"]) - d.Set("ip_pools", ippools) - - if r.GatewayIp == "" { - d.Set("no_gateway", true) - } else { - d.Set("no_gateway", false) - } - - d.SetId(r.Id) - return nil -} - -func resourcePubliccloudPrivateNetworkSubnetValidateIP(v interface{}, k string) (ws 
[]string, errors []error) { - value := v.(string) - ip := net.ParseIP(value) - if ip == nil { - errors = append(errors, fmt.Errorf("%q must be a valid IP", k)) - } - return -} - -func resourcePubliccloudPrivateNetworkSubnetValidateNetwork(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, _, err := net.ParseCIDR(value) - if err != nil { - errors = append(errors, fmt.Errorf("%q is not a valid network value: %#v", k, err)) - } - return -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet_test.go b/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet_test.go deleted file mode 100644 index 25c7d1164..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_subnet_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var testAccPublicCloudPrivateNetworkSubnetConfig = fmt.Sprintf(` -resource "ovh_vrack_publiccloud_attachment" "attach" { - vrack_id = "%s" - project_id = "%s" -} - -data "ovh_publiccloud_regions" "regions" { - project_id = "${ovh_vrack_publiccloud_attachment.attach.project_id}" -} - -data "ovh_publiccloud_region" "region_attr" { - count = 2 - project_id = "${data.ovh_publiccloud_regions.regions.project_id}" - name = "${element(data.ovh_publiccloud_regions.regions.names, count.index)}" -} - -resource "ovh_publiccloud_private_network" "network" { - project_id = "${ovh_vrack_publiccloud_attachment.attach.project_id}" - vlan_id = 0 - name = "terraform_testacc_private_net" - regions = ["${data.ovh_publiccloud_regions.regions.names}"] -} - -resource "ovh_publiccloud_private_network_subnet" "subnet" { - project_id = "${ovh_publiccloud_private_network.network.project_id}" - network_id = "${ovh_publiccloud_private_network.network.id}" - region = "${element(data.ovh_publiccloud_regions.regions.names, 0)}" - start = 
"192.168.168.100" - end = "192.168.168.200" - network = "192.168.168.0/24" - dhcp = true - no_gateway = false -} -`, os.Getenv("OVH_VRACK"), os.Getenv("OVH_PUBLIC_CLOUD")) - -func TestAccPublicCloudPrivateNetworkSubnet_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccCheckPublicCloudPrivateNetworkSubnetPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPublicCloudPrivateNetworkSubnetDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicCloudPrivateNetworkSubnetConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVRackPublicCloudAttachmentExists("ovh_vrack_publiccloud_attachment.attach", t), - testAccCheckPublicCloudPrivateNetworkExists("ovh_publiccloud_private_network.network", t), - testAccCheckPublicCloudPrivateNetworkSubnetExists("ovh_publiccloud_private_network_subnet.subnet", t), - ), - }, - }, - }) -} - -func testAccCheckPublicCloudPrivateNetworkSubnetPreCheck(t *testing.T) { - testAccPreCheck(t) - testAccCheckPublicCloudExists(t) -} - -func testAccCheckPublicCloudPrivateNetworkSubnetExists(n string, t *testing.T) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.Attributes["project_id"] == "" { - return fmt.Errorf("No Project ID is set") - } - - if rs.Primary.Attributes["network_id"] == "" { - return fmt.Errorf("No Network ID is set") - } - - return publicCloudPrivateNetworkSubnetExists( - rs.Primary.Attributes["project_id"], - rs.Primary.Attributes["network_id"], - rs.Primary.ID, - config.OVHClient, - ) - } -} - -func testAccCheckPublicCloudPrivateNetworkSubnetDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - for _, rs := range s.RootModule().Resources { - if rs.Type != 
"ovh_publiccloud_private_network_subnet" { - continue - } - - err := publicCloudPrivateNetworkSubnetExists( - rs.Primary.Attributes["project_id"], - rs.Primary.Attributes["network_id"], - rs.Primary.ID, - config.OVHClient, - ) - - if err == nil { - return fmt.Errorf("VRack > Public Cloud Private Network Subnet still exists") - } - - } - return nil -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_test.go b/builtin/providers/ovh/resource_ovh_publiccloud_private_network_test.go deleted file mode 100644 index 6cc1a91b6..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_private_network_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var testAccPublicCloudPrivateNetworkConfig = fmt.Sprintf(` -resource "ovh_vrack_publiccloud_attachment" "attach" { - vrack_id = "%s" - project_id = "%s" -} - -data "ovh_publiccloud_regions" "regions" { - project_id = "${ovh_vrack_publiccloud_attachment.attach.project_id}" -} - -data "ovh_publiccloud_region" "region_attr" { - count = 2 - project_id = "${data.ovh_publiccloud_regions.regions.project_id}" - name = "${element(data.ovh_publiccloud_regions.regions.names, count.index)}" -} - -resource "ovh_publiccloud_private_network" "network" { - project_id = "${ovh_vrack_publiccloud_attachment.attach.project_id}" - vlan_id = 0 - name = "terraform_testacc_private_net" - regions = ["${data.ovh_publiccloud_regions.regions.names}"] -} -`, os.Getenv("OVH_VRACK"), os.Getenv("OVH_PUBLIC_CLOUD")) - -func TestAccPublicCloudPrivateNetwork_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccCheckPublicCloudPrivateNetworkPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPublicCloudPrivateNetworkDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicCloudPrivateNetworkConfig, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckVRackPublicCloudAttachmentExists("ovh_vrack_publiccloud_attachment.attach", t), - testAccCheckPublicCloudPrivateNetworkExists("ovh_publiccloud_private_network.network", t), - ), - }, - }, - }) -} - -func testAccCheckPublicCloudPrivateNetworkPreCheck(t *testing.T) { - testAccPreCheck(t) - testAccCheckPublicCloudExists(t) -} - -func testAccCheckPublicCloudPrivateNetworkExists(n string, t *testing.T) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.Attributes["project_id"] == "" { - return fmt.Errorf("No Project ID is set") - } - - return publicCloudPrivateNetworkExists(rs.Primary.Attributes["project_id"], rs.Primary.ID, config.OVHClient) - } -} - -func testAccCheckPublicCloudPrivateNetworkDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - for _, rs := range s.RootModule().Resources { - if rs.Type != "ovh_publiccloud_private_network" { - continue - } - - err := publicCloudPrivateNetworkExists(rs.Primary.Attributes["project_id"], rs.Primary.ID, config.OVHClient) - if err == nil { - return fmt.Errorf("VRack > Public Cloud Private Network still exists") - } - - } - return nil -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_user.go b/builtin/providers/ovh/resource_ovh_publiccloud_user.go deleted file mode 100644 index 73824cccb..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_user.go +++ /dev/null @@ -1,289 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - "regexp" - "strconv" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/ovh/go-ovh/ovh" -) - -func resourcePublicCloudUser() *schema.Resource { - return &schema.Resource{ - Create: 
resourcePublicCloudUserCreate, - Read: resourcePublicCloudUserRead, - Delete: resourcePublicCloudUserDelete, - - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - return []*schema.ResourceData{d}, nil - }, - }, - - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", nil), - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "username": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "password": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - "status": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "creation_date": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "openstack_rc": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourcePublicCloudUserCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - params := &PublicCloudUserCreateOpts{ - ProjectId: projectId, - Description: d.Get("description").(string), - } - - r := &PublicCloudUserResponse{} - - log.Printf("[DEBUG] Will create public cloud user: %s", params) - - // Resource is partial because we will also compute Openstack RC & creds - d.Partial(true) - - endpoint := fmt.Sprintf("/cloud/project/%s/user", params.ProjectId) - - err := config.OVHClient.Post(endpoint, params, r) - if err != nil { - return fmt.Errorf("calling Post %s with params %s:\n\t %q", endpoint, params, err) - } - - log.Printf("[DEBUG] Waiting for User %s:", r) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"ok"}, - Refresh: waitForPublicCloudUserActive(config.OVHClient, projectId, strconv.Itoa(r.Id)), - 
Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("waiting for user (%s): %s", params, err) - } - log.Printf("[DEBUG] Created User %s", r) - - readPublicCloudUser(d, r, true) - - openstackrc := make(map[string]string) - err = publicCloudUserGetOpenstackRC(projectId, d.Id(), config.OVHClient, openstackrc) - if err != nil { - return fmt.Errorf("Creating openstack creds for user %s: %s", d.Id(), err) - } - - d.Set("openstack_rc", &openstackrc) - - d.Partial(false) - - return nil -} - -func resourcePublicCloudUserRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - - d.Partial(true) - r := &PublicCloudUserResponse{} - - log.Printf("[DEBUG] Will read public cloud user %s from project: %s", d.Id(), projectId) - - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s", projectId, d.Id()) - - err := config.OVHClient.Get(endpoint, r) - if err != nil { - return fmt.Errorf("calling Get %s:\n\t %q", endpoint, err) - } - - readPublicCloudUser(d, r, false) - - openstackrc := make(map[string]string) - err = publicCloudUserGetOpenstackRC(projectId, d.Id(), config.OVHClient, openstackrc) - if err != nil { - return fmt.Errorf("Reading openstack creds for user %s: %s", d.Id(), err) - } - - d.Set("openstack_rc", &openstackrc) - d.Partial(false) - log.Printf("[DEBUG] Read Public Cloud User %s", r) - return nil -} - -func resourcePublicCloudUserDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - projectId := d.Get("project_id").(string) - id := d.Id() - - log.Printf("[DEBUG] Will delete public cloud user %s from project: %s", id, projectId) - - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s", projectId, id) - - err := config.OVHClient.Delete(endpoint, nil) - if err != nil { - return fmt.Errorf("calling Delete %s:\n\t %q", endpoint, err) - } - - 
log.Printf("[DEBUG] Deleting Public Cloud User %s from project %s:", id, projectId) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{"deleted"}, - Refresh: waitForPublicCloudUserDelete(config.OVHClient, projectId, id), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Deleting Public Cloud user %s from project %s", id, projectId) - } - log.Printf("[DEBUG] Deleted Public Cloud User %s from project %s", id, projectId) - - d.SetId("") - - return nil -} - -func publicCloudUserExists(projectId, id string, c *ovh.Client) error { - r := &PublicCloudUserResponse{} - - log.Printf("[DEBUG] Will read public cloud user for project: %s, id: %s", projectId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s", projectId, id) - - err := c.Get(endpoint, r) - if err != nil { - return fmt.Errorf("calling Get %s:\n\t %q", endpoint, err) - } - log.Printf("[DEBUG] Read public cloud user: %s", r) - - return nil -} - -var publicCloudUserOSTenantName = regexp.MustCompile("export OS_TENANT_NAME=\"?([[:alnum:]]+)\"?") -var publicCloudUserOSTenantId = regexp.MustCompile("export OS_TENANT_ID=\"??([[:alnum:]]+)\"??") -var publicCloudUserOSAuthURL = regexp.MustCompile("export OS_AUTH_URL=\"??([[:^space:]]+)\"??") -var publicCloudUserOSUsername = regexp.MustCompile("export OS_USERNAME=\"?([[:alnum:]]+)\"?") - -func publicCloudUserGetOpenstackRC(projectId, id string, c *ovh.Client, rc map[string]string) error { - log.Printf("[DEBUG] Will read public cloud user openstack rc for project: %s, id: %s", projectId, id) - - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s/openrc?region=to_be_overriden", projectId, id) - - r := &PublicCloudUserOpenstackRC{} - - err := c.Get(endpoint, r) - if err != nil { - return fmt.Errorf("calling Get %s:\n\t %q", endpoint, err) - } - - authURL := 
publicCloudUserOSAuthURL.FindStringSubmatch(r.Content) - if authURL == nil { - return fmt.Errorf("couln't extract OS_AUTH_URL from content: \n\t%s", r.Content) - } - tenantName := publicCloudUserOSTenantName.FindStringSubmatch(r.Content) - if tenantName == nil { - return fmt.Errorf("couln't extract OS_TENANT_NAME from content: \n\t%s", r.Content) - } - tenantId := publicCloudUserOSTenantId.FindStringSubmatch(r.Content) - if tenantId == nil { - return fmt.Errorf("couln't extract OS_TENANT_ID from content: \n\t%s", r.Content) - } - username := publicCloudUserOSUsername.FindStringSubmatch(r.Content) - if username == nil { - return fmt.Errorf("couln't extract OS_USERNAME from content: \n\t%s", r.Content) - } - - rc["OS_AUTH_URL"] = authURL[1] - rc["OS_TENANT_ID"] = tenantId[1] - rc["OS_TENANT_NAME"] = tenantName[1] - rc["OS_USERNAME"] = username[1] - - return nil -} - -func readPublicCloudUser(d *schema.ResourceData, r *PublicCloudUserResponse, setPassword bool) { - d.Set("description", r.Description) - d.Set("status", r.Status) - d.Set("creation_date", r.CreationDate) - d.Set("username", r.Username) - if setPassword { - d.Set("password", r.Password) - } - d.SetId(strconv.Itoa(r.Id)) -} - -func waitForPublicCloudUserActive(c *ovh.Client, projectId, PublicCloudUserId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r := &PublicCloudUserResponse{} - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s", projectId, PublicCloudUserId) - err := c.Get(endpoint, r) - if err != nil { - return r, "", err - } - - log.Printf("[DEBUG] Pending User: %s", r) - return r, r.Status, nil - } -} - -func waitForPublicCloudUserDelete(c *ovh.Client, projectId, PublicCloudUserId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r := &PublicCloudUserResponse{} - endpoint := fmt.Sprintf("/cloud/project/%s/user/%s", projectId, PublicCloudUserId) - err := c.Get(endpoint, r) - if err != nil { - if err.(*ovh.APIError).Code == 404 
{ - log.Printf("[DEBUG] user id %s on project %s deleted", PublicCloudUserId, projectId) - return r, "deleted", nil - } else { - return r, "", err - } - } - - log.Printf("[DEBUG] Pending User: %s", r) - return r, r.Status, nil - } -} diff --git a/builtin/providers/ovh/resource_ovh_publiccloud_user_test.go b/builtin/providers/ovh/resource_ovh_publiccloud_user_test.go deleted file mode 100644 index 3edac47d2..000000000 --- a/builtin/providers/ovh/resource_ovh_publiccloud_user_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var testAccPublicCloudUserConfig = fmt.Sprintf(` -resource "ovh_publiccloud_user" "user" { - project_id = "%s" - description = "my user for acceptance tests" -} -`, os.Getenv("OVH_PUBLIC_CLOUD")) - -func TestAccPublicCloudUser_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccCheckPublicCloudUserPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPublicCloudUserDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicCloudUserConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPublicCloudUserExists("ovh_publiccloud_user.user", t), - testAccCheckPublicCloudUserOpenRC("ovh_publiccloud_user.user", t), - ), - }, - }, - }) -} - -func testAccCheckPublicCloudUserPreCheck(t *testing.T) { - testAccPreCheck(t) - testAccCheckPublicCloudExists(t) -} - -func testAccCheckPublicCloudUserExists(n string, t *testing.T) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.Attributes["project_id"] == "" { - return fmt.Errorf("No Project ID is set") - } - - return 
publicCloudUserExists(rs.Primary.Attributes["project_id"], rs.Primary.ID, config.OVHClient) - } -} - -func testAccCheckPublicCloudUserOpenRC(n string, t *testing.T) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - if rs.Primary.Attributes["openstack_rc.OS_AUTH_URL"] == "" { - return fmt.Errorf("No openstack_rc.OS_AUTH_URL is set") - } - - if rs.Primary.Attributes["openstack_rc.OS_TENANT_ID"] == "" { - return fmt.Errorf("No openstack_rc.OS_TENANT_ID is set") - } - - if rs.Primary.Attributes["openstack_rc.OS_TENANT_NAME"] == "" { - return fmt.Errorf("No openstack_rc.OS_TENANT_NAME is set") - } - - if rs.Primary.Attributes["openstack_rc.OS_USERNAME"] == "" { - return fmt.Errorf("No openstack_rc.OS_USERNAME is set") - } - - return nil - } -} - -func testAccCheckPublicCloudUserDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - for _, rs := range s.RootModule().Resources { - if rs.Type != "ovh_publiccloud_user" { - continue - } - - err := publicCloudUserExists(rs.Primary.Attributes["project_id"], rs.Primary.ID, config.OVHClient) - if err == nil { - return fmt.Errorf("VRack > Public Cloud User still exists") - } - - } - return nil -} diff --git a/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment.go b/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment.go deleted file mode 100644 index 8ea1db8b4..000000000 --- a/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment.go +++ /dev/null @@ -1,172 +0,0 @@ -package ovh - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/ovh/go-ovh/ovh" -) - -func resourceVRackPublicCloudAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceVRackPublicCloudAttachmentCreate, - 
Read: resourceVRackPublicCloudAttachmentRead, - Delete: resourceVRackPublicCloudAttachmentDelete, - - Schema: map[string]*schema.Schema{ - "vrack_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_VRACK_ID", ""), - }, - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - DefaultFunc: schema.EnvDefaultFunc("OVH_PROJECT_ID", ""), - }, - }, - } -} - -func resourceVRackPublicCloudAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - vrackId := d.Get("vrack_id").(string) - projectId := d.Get("project_id").(string) - - if err := vrackPublicCloudAttachmentExists(vrackId, projectId, config.OVHClient); err == nil { - //set id - d.SetId(fmt.Sprintf("vrack_%s-cloudproject_%s-attach", vrackId, projectId)) - return nil - } - - params := &VRackAttachOpts{Project: projectId} - r := VRackAttachTaskResponse{} - - log.Printf("[DEBUG] Will Attach VRack %s -> PublicCloud %s", vrackId, params.Project) - endpoint := fmt.Sprintf("/vrack/%s/cloudProject", vrackId) - - err := config.OVHClient.Post(endpoint, params, &r) - if err != nil { - return fmt.Errorf("Error calling %s with params %s:\n\t %q", endpoint, params, err) - } - - log.Printf("[DEBUG] Waiting for Attachement Task id %d: VRack %s -> PublicCloud %s", r.Id, vrackId, params.Project) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"init", "todo", "doing"}, - Target: []string{"completed"}, - Refresh: waitForVRackTaskCompleted(config.OVHClient, vrackId, r.Id), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for vrack (%s) to attach to public cloud (%s): %s", vrackId, params.Project, err) - } - log.Printf("[DEBUG] Created Attachement Task id %d: VRack %s -> PublicCloud %s", r.Id, vrackId, params.Project) - - //set id - 
d.SetId(fmt.Sprintf("vrack_%s-cloudproject_%s-attach", vrackId, params.Project)) - - return nil -} - -func resourceVRackPublicCloudAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - vrackId := d.Get("vrack_id").(string) - params := &VRackAttachOpts{Project: d.Get("project_id").(string)} - r := VRackAttachTaskResponse{} - endpoint := fmt.Sprintf("/vrack/%s/cloudProject/%s", vrackId, params.Project) - - err := config.OVHClient.Get(endpoint, &r) - if err != nil { - return err - } - log.Printf("[DEBUG] Read VRack %s -> PublicCloud %s", vrackId, params.Project) - - return nil -} - -func resourceVRackPublicCloudAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - vrackId := d.Get("vrack_id").(string) - params := &VRackAttachOpts{Project: d.Get("project_id").(string)} - - r := VRackAttachTaskResponse{} - endpoint := fmt.Sprintf("/vrack/%s/cloudProject/%s", vrackId, params.Project) - - err := config.OVHClient.Delete(endpoint, &r) - if err != nil { - return err - } - - log.Printf("[DEBUG] Waiting for Attachment Deletion Task id %d: VRack %s -> PublicCloud %s", r.Id, vrackId, params.Project) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"init", "todo", "doing"}, - Target: []string{"completed"}, - Refresh: waitForVRackTaskCompleted(config.OVHClient, vrackId, r.Id), - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for vrack (%s) to attach to public cloud (%s): %s", vrackId, params.Project, err) - } - log.Printf("[DEBUG] Removed Attachement id %d: VRack %s -> PublicCloud %s", r.Id, vrackId, params.Project) - - d.SetId("") - return nil -} - -func vrackPublicCloudAttachmentExists(vrackId, projectId string, c *ovh.Client) error { - type attachResponse struct { - VRack string `json:"vrack"` - Project string `json:"project"` - } - - r := 
attachResponse{} - - endpoint := fmt.Sprintf("/vrack/%s/cloudProject/%s", vrackId, projectId) - - err := c.Get(endpoint, &r) - if err != nil { - return fmt.Errorf("Error while querying %s: %q\n", endpoint, err) - } - log.Printf("[DEBUG] Read Attachment %s -> VRack:%s, Cloud Project: %s", endpoint, r.VRack, r.Project) - - return nil -} - -// AttachmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// an Attachment Task. -func waitForVRackTaskCompleted(c *ovh.Client, serviceName string, taskId int) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - r := VRackAttachTaskResponse{} - endpoint := fmt.Sprintf("/vrack/%s/task/%d", serviceName, taskId) - err := c.Get(endpoint, &r) - if err != nil { - if err.(*ovh.APIError).Code == 404 { - log.Printf("[DEBUG] Task id %d on VRack %s completed", taskId, serviceName) - return taskId, "completed", nil - } else { - return taskId, "", err - } - } - - log.Printf("[DEBUG] Pending Task id %d on VRack %s status: %s", r.Id, serviceName, r.Status) - return taskId, r.Status, nil - } -} diff --git a/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment_test.go b/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment_test.go deleted file mode 100644 index ed687086d..000000000 --- a/builtin/providers/ovh/resource_ovh_vrack_publiccloud_attachment_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package ovh - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var testAccVRackPublicCloudAttachmentConfig = fmt.Sprintf(` -resource "ovh_vrack_publiccloud_attachment" "attach" { - vrack_id = "%s" - project_id = "%s" -} -`, os.Getenv("OVH_VRACK"), os.Getenv("OVH_PUBLIC_CLOUD")) - -func TestAccVRackPublicCloudAttachment_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccCheckVRackPublicCloudAttachmentPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckVRackPublicCloudAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVRackPublicCloudAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckVRackPublicCloudAttachmentExists("ovh_vrack_publiccloud_attachment.attach", t), - ), - }, - }, - }) -} - -func testAccCheckVRackPublicCloudAttachmentPreCheck(t *testing.T) { - testAccPreCheck(t) - testAccCheckVRackExists(t) - testAccCheckPublicCloudExists(t) -} - -func testAccCheckVRackPublicCloudAttachmentExists(n string, t *testing.T) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.Attributes["vrack_id"] == "" { - return fmt.Errorf("No VRack ID is set") - } - - if rs.Primary.Attributes["project_id"] == "" { - return fmt.Errorf("No Project ID is set") - } - - return vrackPublicCloudAttachmentExists(rs.Primary.Attributes["vrack_id"], rs.Primary.Attributes["project_id"], config.OVHClient) - } -} - -func testAccCheckVRackPublicCloudAttachmentDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - for _, rs := range s.RootModule().Resources { - if rs.Type != "ovh_vrack_publiccloud_attachment" { - continue - } - - err := vrackPublicCloudAttachmentExists(rs.Primary.Attributes["vrack_id"], rs.Primary.Attributes["project_id"], config.OVHClient) - if err == nil { - return fmt.Errorf("VRack > Public Cloud Attachment still exists") - } - - } - return nil -} diff --git a/builtin/providers/ovh/types.go b/builtin/providers/ovh/types.go deleted file mode 100644 index 7141323c0..000000000 --- a/builtin/providers/ovh/types.go +++ /dev/null @@ -1,154 +0,0 @@ -package ovh - -import ( - "fmt" - "time" -) - -// Opts -type PublicCloudPrivateNetworkCreateOpts struct { - ProjectId string `json:"serviceName"` - VlanId int `json:"vlanId"` - Name string `json:"name"` - Regions []string 
`json:"regions"` -} - -func (p *PublicCloudPrivateNetworkCreateOpts) String() string { - return fmt.Sprintf("projectId: %s, vlanId:%d, name: %s, regions: %s", p.ProjectId, p.VlanId, p.Name, p.Regions) -} - -// Opts -type PublicCloudPrivateNetworkUpdateOpts struct { - Name string `json:"name"` -} - -type PublicCloudPrivateNetworkRegion struct { - Status string `json:"status"` - Region string `json:"region"` -} - -func (p *PublicCloudPrivateNetworkRegion) String() string { - return fmt.Sprintf("Status:%s, Region: %s", p.Status, p.Region) -} - -type PublicCloudPrivateNetworkResponse struct { - Id string `json:"id"` - Status string `json:"status"` - Vlanid int `json:"vlanId"` - Name string `json:"name"` - Type string `json:"type"` - Regions []*PublicCloudPrivateNetworkRegion `json:"regions"` -} - -func (p *PublicCloudPrivateNetworkResponse) String() string { - return fmt.Sprintf("Id: %s, Status: %s, Name: %s, Vlanid: %d, Type: %s, Regions: %s", p.Id, p.Status, p.Name, p.Vlanid, p.Type, p.Regions) -} - -// Opts -type PublicCloudPrivateNetworksCreateOpts struct { - ProjectId string `json:"serviceName"` - NetworkId string `json:"networkId"` - Dhcp bool `json:"dhcp"` - NoGateway bool `json:"noGateway"` - Start string `json:"start"` - End string `json:"end"` - Network string `json:"network"` - Region string `json:"region"` -} - -func (p *PublicCloudPrivateNetworksCreateOpts) String() string { - return fmt.Sprintf("PCPNSCreateOpts[projectId: %s, networkId:%s, dchp: %v, noGateway: %v, network: %s, start: %s, end: %s, region: %s]", - p.ProjectId, p.NetworkId, p.Dhcp, p.NoGateway, p.Network, p.Start, p.End, p.Region) -} - -type IPPool struct { - Network string `json:"network"` - Region string `json:"region"` - Dhcp bool `json:"dhcp"` - Start string `json:"start"` - End string `json:"end"` -} - -func (p *IPPool) String() string { - return fmt.Sprintf("IPPool[Network: %s, Region: %s, Dhcp: %v, Start: %s, End: %s]", p.Network, p.Region, p.Dhcp, p.Start, p.End) -} - -type 
PublicCloudPrivateNetworksResponse struct { - Id string `json:"id"` - GatewayIp string `json:"gatewayIp"` - Cidr string `json:"cidr"` - IPPools []*IPPool `json:"ipPools"` -} - -func (p *PublicCloudPrivateNetworksResponse) String() string { - return fmt.Sprintf("PCPNSResponse[Id: %s, GatewayIp: %s, Cidr: %s, IPPools: %s]", p.Id, p.GatewayIp, p.Cidr, p.IPPools) -} - -// Opts -type PublicCloudUserCreateOpts struct { - ProjectId string `json:"serviceName"` - Description string `json:"description"` -} - -func (p *PublicCloudUserCreateOpts) String() string { - return fmt.Sprintf("UserOpts[projectId: %s, description:%s]", p.ProjectId, p.Description) -} - -type PublicCloudUserResponse struct { - Id int `json:"id"` - Username string `json:"username"` - Status string `json:"status"` - Description string `json:"description"` - Password string `json:"password"` - CreationDate string `json:"creationDate"` -} - -func (p *PublicCloudUserResponse) String() string { - return fmt.Sprintf("UserResponse[Id: %v, Username: %s, Status: %s, Description: %s, CreationDate: %s]", p.Id, p.Username, p.Status, p.Description, p.CreationDate) -} - -type PublicCloudUserOpenstackRC struct { - Content string `json:"content"` -} - -// Opts -type VRackAttachOpts struct { - Project string `json:"project"` -} - -// Task Opts -type TaskOpts struct { - ServiceName string `json:"serviceName"` - TaskId string `json:"taskId"` -} - -type VRackAttachTaskResponse struct { - Id int `json:"id"` - Function string `json:"function"` - TargetDomain string `json:"targetDomain"` - Status string `json:"status"` - ServiceName string `json:"serviceName"` - OrderId int `json:"orderId"` - LastUpdate time.Time `json:"lastUpdate"` - TodoDate time.Time `json:"TodoDate"` -} - -type PublicCloudRegionResponse struct { - ContinentCode string `json:"continentCode"` - DatacenterLocation string `json:"datacenterLocation"` - Name string `json:"name"` - Services []PublicCloudServiceStatusResponse `json:"services"` -} - -func (r 
*PublicCloudRegionResponse) String() string { - return fmt.Sprintf("Region: %s, Services: %s", r.Name, r.Services) -} - -type PublicCloudServiceStatusResponse struct { - Status string `json:"status"` - Name string `json:"name"` -} - -func (s *PublicCloudServiceStatusResponse) String() string { - return fmt.Sprintf("%s: %s", s.Name, s.Status) -} diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go deleted file mode 100644 index 92d0c22af..000000000 --- a/builtin/providers/packet/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/go-cleanhttp" - "github.com/packethost/packngo" -) - -const ( - consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" -) - -type Config struct { - AuthToken string -} - -// Client() returns a new client for accessing Packet's API. -func (c *Config) Client() *packngo.Client { - return packngo.NewClient(consumerToken, c.AuthToken, cleanhttp.DefaultClient()) -} diff --git a/builtin/providers/packet/errors.go b/builtin/providers/packet/errors.go deleted file mode 100644 index 1c19dc4d9..000000000 --- a/builtin/providers/packet/errors.go +++ /dev/null @@ -1,43 +0,0 @@ -package packet - -import ( - "net/http" - "strings" - - "github.com/packethost/packngo" -) - -func friendlyError(err error) error { - if e, ok := err.(*packngo.ErrorResponse); ok { - return &ErrorResponse{ - StatusCode: e.Response.StatusCode, - Errors: Errors(e.Errors), - } - } - return err -} - -func isForbidden(err error) bool { - if r, ok := err.(*ErrorResponse); ok { - return r.StatusCode == http.StatusForbidden - } - return false -} - -func isNotFound(err error) bool { - if r, ok := err.(*ErrorResponse); ok { - return r.StatusCode == http.StatusNotFound - } - return false -} - -type Errors []string - -func (e Errors) Error() string { - return strings.Join(e, "; ") -} - -type ErrorResponse struct { - StatusCode int - Errors -} diff --git a/builtin/providers/packet/provider.go 
b/builtin/providers/packet/provider.go deleted file mode 100644 index f3a848337..000000000 --- a/builtin/providers/packet/provider.go +++ /dev/null @@ -1,36 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a schema.Provider for managing Packet infrastructure. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "auth_token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), - Description: "The API auth key for API operations.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "packet_device": resourcePacketDevice(), - "packet_ssh_key": resourcePacketSSHKey(), - "packet_project": resourcePacketProject(), - "packet_volume": resourcePacketVolume(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - AuthToken: d.Get("auth_token").(string), - } - return config.Client(), nil -} diff --git a/builtin/providers/packet/provider_test.go b/builtin/providers/packet/provider_test.go deleted file mode 100644 index 5483c4fb0..000000000 --- a/builtin/providers/packet/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package packet - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "packet": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = 
Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("PACKET_AUTH_TOKEN"); v == "" { - t.Fatal("PACKET_AUTH_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/packet/resource_packet_device.go b/builtin/providers/packet/resource_packet_device.go deleted file mode 100644 index adae42575..000000000 --- a/builtin/providers/packet/resource_packet_device.go +++ /dev/null @@ -1,303 +0,0 @@ -package packet - -import ( - "errors" - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketDevice() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketDeviceCreate, - Read: resourcePacketDeviceRead, - Update: resourcePacketDeviceUpdate, - Delete: resourcePacketDeviceDelete, - - Schema: map[string]*schema.Schema{ - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "hostname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "operating_system": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "facility": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "plan": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "billing_cycle": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "state": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "locked": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - - "network": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "gateway": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "family": &schema.Schema{ - Type: 
schema.TypeInt, - Computed: true, - }, - - "cidr": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "public": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.DeviceCreateRequest{ - HostName: d.Get("hostname").(string), - Plan: d.Get("plan").(string), - Facility: d.Get("facility").(string), - OS: d.Get("operating_system").(string), - BillingCycle: d.Get("billing_cycle").(string), - ProjectID: d.Get("project_id").(string), - } - - if attr, ok := d.GetOk("user_data"); ok { - createRequest.UserData = attr.(string) - } - - tags := d.Get("tags.#").(int) - if tags > 0 { - createRequest.Tags = make([]string, 0, tags) - for i := 0; i < tags; i++ { - key := fmt.Sprintf("tags.%d", i) - createRequest.Tags = append(createRequest.Tags, d.Get(key).(string)) - } - } - - newDevice, _, err := client.Devices.Create(createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(newDevice.ID) - - // Wait for the device so we can get the networking attributes that show up after a while. - _, err = waitForDeviceAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) - if err != nil { - if isForbidden(err) { - // If the device doesn't get to the active state, we can't recover it from here. 
- d.SetId("") - - return errors.New("provisioning time limit exceeded; the Packet team will investigate") - } - return err - } - - return resourcePacketDeviceRead(d, meta) -} - -func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - device, _, err := client.Devices.Get(d.Id()) - if err != nil { - err = friendlyError(err) - - // If the device somehow already destroyed, mark as succesfully gone. - if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - d.Set("name", device.Hostname) - d.Set("plan", device.Plan.Slug) - d.Set("facility", device.Facility.Code) - d.Set("operating_system", device.OS.Slug) - d.Set("state", device.State) - d.Set("billing_cycle", device.BillingCycle) - d.Set("locked", device.Locked) - d.Set("created", device.Created) - d.Set("updated", device.Updated) - - tags := make([]string, 0, len(device.Tags)) - for _, tag := range device.Tags { - tags = append(tags, tag) - } - d.Set("tags", tags) - - var ( - host string - networks = make([]map[string]interface{}, 0, 1) - ) - for _, ip := range device.Network { - network := map[string]interface{}{ - "address": ip.Address, - "gateway": ip.Gateway, - "family": ip.AddressFamily, - "cidr": ip.Cidr, - "public": ip.Public, - } - networks = append(networks, network) - - if ip.AddressFamily == 4 && ip.Public == true { - host = ip.Address - } - } - d.Set("network", networks) - - if host != "" { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": host, - }) - } - - return nil -} - -func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - if d.HasChange("locked") { - var action func(string) (*packngo.Response, error) - if d.Get("locked").(bool) { - action = client.Devices.Lock - } else { - action = client.Devices.Unlock - } - if _, err := action(d.Id()); err != nil { - return friendlyError(err) - } - } - - return resourcePacketDeviceRead(d, meta) -} - -func 
resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - if _, err := client.Devices.Delete(d.Id()); err != nil { - return friendlyError(err) - } - - return nil -} - -func waitForDeviceAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newDeviceStateRefreshFunc(d, attribute, meta), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - return stateConf.WaitForState() -} - -func newDeviceStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*packngo.Client) - - return func() (interface{}, string, error) { - if err := resourcePacketDeviceRead(d, meta); err != nil { - return nil, "", err - } - - if attr, ok := d.GetOk(attribute); ok { - device, _, err := client.Devices.Get(d.Id()) - if err != nil { - return nil, "", friendlyError(err) - } - return &device, attr.(string), nil - } - - return nil, "", nil - } -} - -// powerOnAndWait Powers on the device and waits for it to be active. 
-func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - _, err := client.Devices.PowerOn(d.Id()) - if err != nil { - return friendlyError(err) - } - - _, err = waitForDeviceAttribute(d, "active", []string{"off"}, "state", client) - return err -} diff --git a/builtin/providers/packet/resource_packet_project.go b/builtin/providers/packet/resource_packet_project.go deleted file mode 100644 index 05c739b7a..000000000 --- a/builtin/providers/packet/resource_packet_project.go +++ /dev/null @@ -1,117 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketProject() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketProjectCreate, - Read: resourcePacketProjectRead, - Update: resourcePacketProjectUpdate, - Delete: resourcePacketProjectDelete, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "payment_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.ProjectCreateRequest{ - Name: d.Get("name").(string), - PaymentMethod: d.Get("payment_method").(string), - } - - project, _, err := client.Projects.Create(createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(project.ID) - - return resourcePacketProjectRead(d, meta) -} - -func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - key, _, err := client.Projects.Get(d.Id()) - if err != nil { - err = 
friendlyError(err) - - // If the project somehow already destroyed, mark as succesfully gone. - if isNotFound(err) { - d.SetId("") - - return nil - } - - return err - } - - d.Set("id", key.ID) - d.Set("name", key.Name) - d.Set("created", key.Created) - d.Set("updated", key.Updated) - - return nil -} - -func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - updateRequest := &packngo.ProjectUpdateRequest{ - ID: d.Get("id").(string), - Name: d.Get("name").(string), - } - - if attr, ok := d.GetOk("payment_method"); ok { - updateRequest.PaymentMethod = attr.(string) - } - - _, _, err := client.Projects.Update(updateRequest) - if err != nil { - return friendlyError(err) - } - - return resourcePacketProjectRead(d, meta) -} - -func resourcePacketProjectDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - _, err := client.Projects.Delete(d.Id()) - if err != nil { - return friendlyError(err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/packet/resource_packet_project_test.go b/builtin/providers/packet/resource_packet_project_test.go deleted file mode 100644 index 1ba91b1fa..000000000 --- a/builtin/providers/packet/resource_packet_project_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package packet - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/packethost/packngo" -) - -func TestAccPacketProject_Basic(t *testing.T) { - var project packngo.Project - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPacketProjectDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckPacketProjectConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckPacketProjectExists("packet_project.foobar", &project), - 
testAccCheckPacketProjectAttributes(&project), - resource.TestCheckResourceAttr( - "packet_project.foobar", "name", "foobar"), - ), - }, - }, - }) -} - -func testAccCheckPacketProjectDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*packngo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "packet_project" { - continue - } - if _, _, err := client.Projects.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("Project still exists") - } - } - - return nil -} - -func testAccCheckPacketProjectAttributes(project *packngo.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - if project.Name != "foobar" { - return fmt.Errorf("Bad name: %s", project.Name) - } - return nil - } -} - -func testAccCheckPacketProjectExists(n string, project *packngo.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*packngo.Client) - - foundProject, _, err := client.Projects.Get(rs.Primary.ID) - if err != nil { - return err - } - if foundProject.ID != rs.Primary.ID { - return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundProject) - } - - *project = *foundProject - - return nil - } -} - -var testAccCheckPacketProjectConfig_basic = fmt.Sprintf(` -resource "packet_project" "foobar" { - name = "foobar" -}`) diff --git a/builtin/providers/packet/resource_packet_ssh_key.go b/builtin/providers/packet/resource_packet_ssh_key.go deleted file mode 100644 index a70ed78a2..000000000 --- a/builtin/providers/packet/resource_packet_ssh_key.go +++ /dev/null @@ -1,122 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketSSHKey() *schema.Resource { - return &schema.Resource{ - Create: 
resourcePacketSSHKeyCreate, - Read: resourcePacketSSHKeyRead, - Update: resourcePacketSSHKeyUpdate, - Delete: resourcePacketSSHKeyDelete, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "public_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.SSHKeyCreateRequest{ - Label: d.Get("name").(string), - Key: d.Get("public_key").(string), - } - - key, _, err := client.SSHKeys.Create(createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(key.ID) - - return resourcePacketSSHKeyRead(d, meta) -} - -func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - key, _, err := client.SSHKeys.Get(d.Id()) - if err != nil { - err = friendlyError(err) - - // If the key is somehow already destroyed, mark as - // succesfully gone - if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - d.Set("id", key.ID) - d.Set("name", key.Label) - d.Set("public_key", key.Key) - d.Set("fingerprint", key.FingerPrint) - d.Set("created", key.Created) - d.Set("updated", key.Updated) - - return nil -} - -func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - updateRequest := &packngo.SSHKeyUpdateRequest{ - ID: d.Get("id").(string), - Label: d.Get("name").(string), - Key: d.Get("public_key").(string), - } - - _, _, err := client.SSHKeys.Update(updateRequest) 
- if err != nil { - return friendlyError(err) - } - - return resourcePacketSSHKeyRead(d, meta) -} - -func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - _, err := client.SSHKeys.Delete(d.Id()) - if err != nil { - return friendlyError(err) - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/packet/resource_packet_ssh_key_test.go b/builtin/providers/packet/resource_packet_ssh_key_test.go deleted file mode 100644 index 5f019d428..000000000 --- a/builtin/providers/packet/resource_packet_ssh_key_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package packet - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/packethost/packngo" -) - -func TestAccPacketSSHKey_Basic(t *testing.T) { - var key packngo.SSHKey - rInt := acctest.RandInt() - publicKeyMaterial, _, err := acctest.RandSSHKeyPair("") - if err != nil { - t.Fatalf("Cannot generate test SSH key pair: %s", err) - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPacketSSHKeyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPacketSSHKeyConfig_basic(rInt, publicKeyMaterial), - Check: resource.ComposeTestCheckFunc( - testAccCheckPacketSSHKeyExists("packet_ssh_key.foobar", &key), - resource.TestCheckResourceAttr( - "packet_ssh_key.foobar", "name", fmt.Sprintf("foobar-%d", rInt)), - resource.TestCheckResourceAttr( - "packet_ssh_key.foobar", "public_key", publicKeyMaterial), - ), - }, - }, - }) -} - -func testAccCheckPacketSSHKeyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*packngo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "packet_ssh_key" { - continue - } - if _, _, err := client.SSHKeys.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("SSH 
key still exists") - } - } - - return nil -} - -func testAccCheckPacketSSHKeyExists(n string, key *packngo.SSHKey) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*packngo.Client) - - foundKey, _, err := client.SSHKeys.Get(rs.Primary.ID) - if err != nil { - return err - } - if foundKey.ID != rs.Primary.ID { - return fmt.Errorf("SSh Key not found: %v - %v", rs.Primary.ID, foundKey) - } - - *key = *foundKey - - fmt.Printf("key: %v", key) - return nil - } -} - -func testAccCheckPacketSSHKeyConfig_basic(rInt int, publicSshKey string) string { - return fmt.Sprintf(` -resource "packet_ssh_key" "foobar" { - name = "foobar-%d" - public_key = "%s" -}`, rInt, publicSshKey) -} diff --git a/builtin/providers/packet/resource_packet_volume.go b/builtin/providers/packet/resource_packet_volume.go deleted file mode 100644 index c5dc0a887..000000000 --- a/builtin/providers/packet/resource_packet_volume.go +++ /dev/null @@ -1,281 +0,0 @@ -package packet - -import ( - "errors" - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketVolume() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketVolumeCreate, - Read: resourcePacketVolumeRead, - Update: resourcePacketVolumeUpdate, - Delete: resourcePacketVolumeDelete, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Required: false, - Optional: true, - }, - - "size": 
&schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "facility": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "plan": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "billing_cycle": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "state": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "locked": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "snapshot_policies": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "snapshot_frequency": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "snapshot_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - }, - }, - - "attachments": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "href": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.VolumeCreateRequest{ - PlanID: d.Get("plan").(string), - FacilityID: d.Get("facility").(string), - ProjectID: d.Get("project_id").(string), - Size: d.Get("size").(int), - } - - if attr, ok := d.GetOk("billing_cycle"); ok { - createRequest.BillingCycle = attr.(string) - } else { - createRequest.BillingCycle = "hourly" - } - - if attr, ok := d.GetOk("description"); ok { - createRequest.Description = attr.(string) - } - - snapshot_count := d.Get("snapshot_policies.#").(int) - if snapshot_count > 0 { - createRequest.SnapshotPolicies = make([]*packngo.SnapshotPolicy, 
0, snapshot_count) - for i := 0; i < snapshot_count; i++ { - policy := new(packngo.SnapshotPolicy) - policy.SnapshotFrequency = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_frequency", i)).(string) - policy.SnapshotCount = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_count", i)).(int) - createRequest.SnapshotPolicies = append(createRequest.SnapshotPolicies, policy) - } - } - - newVolume, _, err := client.Volumes.Create(createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(newVolume.ID) - - _, err = waitForVolumeAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) - if err != nil { - if isForbidden(err) { - // If the volume doesn't get to the active state, we can't recover it from here. - d.SetId("") - - return errors.New("provisioning time limit exceeded; the Packet team will investigate") - } - return err - } - - return resourcePacketVolumeRead(d, meta) -} - -func waitForVolumeAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{target}, - Refresh: newVolumeStateRefreshFunc(d, attribute, meta), - Timeout: 60 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - return stateConf.WaitForState() -} - -func newVolumeStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*packngo.Client) - - return func() (interface{}, string, error) { - if err := resourcePacketVolumeRead(d, meta); err != nil { - return nil, "", err - } - - if attr, ok := d.GetOk(attribute); ok { - volume, _, err := client.Volumes.Get(d.Id()) - if err != nil { - return nil, "", friendlyError(err) - } - return &volume, attr.(string), nil - } - - return nil, "", nil - } -} - -func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - volume, _, err := 
client.Volumes.Get(d.Id()) - if err != nil { - err = friendlyError(err) - - // If the volume somehow already destroyed, mark as succesfully gone. - if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - d.Set("name", volume.Name) - d.Set("description", volume.Description) - d.Set("size", volume.Size) - d.Set("plan", volume.Plan.Slug) - d.Set("facility", volume.Facility.Code) - d.Set("state", volume.State) - d.Set("billing_cycle", volume.BillingCycle) - d.Set("locked", volume.Locked) - d.Set("created", volume.Created) - d.Set("updated", volume.Updated) - - snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) - for _, snapshot_policy := range volume.SnapshotPolicies { - policy := map[string]interface{}{ - "snapshot_frequency": snapshot_policy.SnapshotFrequency, - "snapshot_count": snapshot_policy.SnapshotCount, - } - snapshot_policies = append(snapshot_policies, policy) - } - d.Set("snapshot_policies", snapshot_policies) - - attachments := make([]*packngo.Attachment, 0, len(volume.Attachments)) - for _, attachment := range volume.Attachments { - attachments = append(attachments, attachment) - } - d.Set("attachments", attachments) - - return nil -} - -func resourcePacketVolumeUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - updateRequest := &packngo.VolumeUpdateRequest{ - ID: d.Get("id").(string), - } - - if attr, ok := d.GetOk("description"); ok { - updateRequest.Description = attr.(string) - } - - if attr, ok := d.GetOk("plan"); ok { - updateRequest.Plan = attr.(string) - } - - _, _, err := client.Volumes.Update(updateRequest) - if err != nil { - return friendlyError(err) - } - - return resourcePacketVolumeRead(d, meta) -} - -func resourcePacketVolumeDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - if _, err := client.Volumes.Delete(d.Id()); err != nil { - return friendlyError(err) - } - - return nil -} diff --git 
a/builtin/providers/packet/resource_packet_volume_test.go b/builtin/providers/packet/resource_packet_volume_test.go deleted file mode 100644 index cc487d897..000000000 --- a/builtin/providers/packet/resource_packet_volume_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package packet - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/packethost/packngo" -) - -func TestAccPacketVolume_Basic(t *testing.T) { - var volume packngo.Volume - - rs := acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPacketVolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckPacketVolumeConfig_basic, rs), - Check: resource.ComposeTestCheckFunc( - testAccCheckPacketVolumeExists("packet_volume.foobar", &volume), - resource.TestCheckResourceAttr( - "packet_volume.foobar", "plan", "storage_1"), - resource.TestCheckResourceAttr( - "packet_volume.foobar", "billing_cycle", "hourly"), - resource.TestCheckResourceAttr( - "packet_volume.foobar", "size", "100"), - ), - }, - }, - }) -} - -func testAccCheckPacketVolumeDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*packngo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "packet_volume" { - continue - } - if _, _, err := client.Volumes.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("Volume still exists") - } - } - - return nil -} - -func testAccCheckPacketVolumeExists(n string, volume *packngo.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*packngo.Client) - - foundVolume, _, 
err := client.Volumes.Get(rs.Primary.ID) - if err != nil { - return err - } - if foundVolume.ID != rs.Primary.ID { - return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundVolume) - } - - *volume = *foundVolume - - return nil - } -} - -const testAccCheckPacketVolumeConfig_basic = ` -resource "packet_project" "foobar" { - name = "%s" -} - -resource "packet_volume" "foobar" { - plan = "storage_1" - billing_cycle = "hourly" - size = 100 - project_id = "${packet_project.foobar.id}" - facility = "ewr1" - snapshot_policies = { snapshot_frequency = "1day", snapshot_count = 7 } -}` diff --git a/builtin/providers/pagerduty/config.go b/builtin/providers/pagerduty/config.go deleted file mode 100644 index d8b083bfc..000000000 --- a/builtin/providers/pagerduty/config.go +++ /dev/null @@ -1,49 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - - "github.com/PagerDuty/go-pagerduty" -) - -// Config defines the configuration options for the PagerDuty client -type Config struct { - // The PagerDuty API V2 token - Token string - - // Skip validation of the token against the PagerDuty API - SkipCredsValidation bool -} - -const invalidCreds = ` - -No valid credentials found for PagerDuty provider. -Please see https://www.terraform.io/docs/providers/pagerduty/index.html -for more information on providing credentials for this provider. 
-` - -// Client returns a new PagerDuty client -func (c *Config) Client() (*pagerduty.Client, error) { - // Validate that the PagerDuty token is set - if c.Token == "" { - return nil, fmt.Errorf(invalidCreds) - } - - client := pagerduty.NewClient(c.Token) - - if !c.SkipCredsValidation { - // Validate the credentials by calling the abilities endpoint, - // if we get a 401 response back we return an error to the user - if _, err := client.ListAbilities(); err != nil { - if isUnauthorized(err) { - return nil, fmt.Errorf(fmt.Sprintf("%s\n%s", err, invalidCreds)) - } - return nil, err - } - } - - log.Printf("[INFO] PagerDuty client configured") - - return client, nil -} diff --git a/builtin/providers/pagerduty/config_test.go b/builtin/providers/pagerduty/config_test.go deleted file mode 100644 index 4f43d736c..000000000 --- a/builtin/providers/pagerduty/config_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package pagerduty - -import ( - "testing" -) - -// Test config with an empty token -func TestConfigEmptyToken(t *testing.T) { - config := Config{ - Token: "", - } - - if _, err := config.Client(); err == nil { - t.Fatalf("expected error, but got nil") - } -} - -// Test config with invalid token but with SkipCredsValidation -func TestConfigSkipCredsValidation(t *testing.T) { - config := Config{ - Token: "foo", - SkipCredsValidation: true, - } - - if _, err := config.Client(); err != nil { - t.Fatalf("error: expected the client to not fail: %v", err) - } -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy.go b/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy.go deleted file mode 100644 index a9643c84d..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy.go +++ /dev/null @@ -1,57 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePagerDutyEscalationPolicy() *schema.Resource 
{ - return &schema.Resource{ - Read: dataSourcePagerDutyEscalationPolicyRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func dataSourcePagerDutyEscalationPolicyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty escalation policy") - - searchName := d.Get("name").(string) - - o := &pagerduty.ListEscalationPoliciesOptions{ - Query: searchName, - } - - resp, err := client.ListEscalationPolicies(*o) - if err != nil { - return err - } - - var found *pagerduty.EscalationPolicy - - for _, policy := range resp.EscalationPolicies { - if policy.Name == searchName { - found = &policy - break - } - } - - if found == nil { - return fmt.Errorf("Unable to locate any escalation policy with the name: %s", searchName) - } - - d.SetId(found.ID) - d.Set("name", found.Name) - - return nil -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy_test.go b/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy_test.go deleted file mode 100644 index cd93e5c71..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_escalation_policy_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourcePagerDutyEscalationPolicy_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourcePagerDutyEscalationPolicyConfig(username, email, escalationPolicy), - Check: resource.ComposeTestCheckFunc( 
- testAccDataSourcePagerDutyEscalationPolicy("pagerduty_escalation_policy.test", "data.pagerduty_escalation_policy.by_name"), - ), - }, - }, - }) -} - -func testAccDataSourcePagerDutyEscalationPolicy(src, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - srcR := s.RootModule().Resources[src] - srcA := srcR.Primary.Attributes - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return fmt.Errorf("Expected to get a escalation policy ID from PagerDuty") - } - - testAtts := []string{"id", "name"} - - for _, att := range testAtts { - if a[att] != srcA[att] { - return fmt.Errorf("Expected the escalation policy %s to be: %s, but got: %s", att, srcA[att], a[att]) - } - } - - return nil - } -} - -func testAccDataSourcePagerDutyEscalationPolicyConfig(username, email, escalationPolicy string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "test" { - name = "%s" - email = "%s" -} - -resource "pagerduty_escalation_policy" "test" { - name = "%s" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.test.id}" - } - } -} - -data "pagerduty_escalation_policy" "by_name" { - name = "${pagerduty_escalation_policy.test.name}" -} -`, username, email, escalationPolicy) -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_schedule.go b/builtin/providers/pagerduty/data_source_pagerduty_schedule.go deleted file mode 100644 index 3d80aadb2..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_schedule.go +++ /dev/null @@ -1,57 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePagerDutySchedule() *schema.Resource { - return &schema.Resource{ - Read: dataSourcePagerDutyScheduleRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - 
-func dataSourcePagerDutyScheduleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty schedule") - - searchName := d.Get("name").(string) - - o := &pagerduty.ListSchedulesOptions{ - Query: searchName, - } - - resp, err := client.ListSchedules(*o) - if err != nil { - return err - } - - var found *pagerduty.Schedule - - for _, schedule := range resp.Schedules { - if schedule.Name == searchName { - found = &schedule - break - } - } - - if found == nil { - return fmt.Errorf("Unable to locate any schedule with the name: %s", searchName) - } - - d.SetId(found.ID) - d.Set("name", found.Name) - - return nil -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_schedule_test.go b/builtin/providers/pagerduty/data_source_pagerduty_schedule_test.go deleted file mode 100644 index 93338595a..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_schedule_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourcePagerDutySchedule_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - schedule := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourcePagerDutyScheduleConfig(username, email, schedule), - Check: resource.ComposeTestCheckFunc( - testAccDataSourcePagerDutySchedule("pagerduty_schedule.test", "data.pagerduty_schedule.by_name"), - ), - }, - }, - }) -} - -func testAccDataSourcePagerDutySchedule(src, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - srcR := s.RootModule().Resources[src] - srcA := 
srcR.Primary.Attributes - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return fmt.Errorf("Expected to get a schedule ID from PagerDuty") - } - - testAtts := []string{"id", "name"} - - for _, att := range testAtts { - if a[att] != srcA[att] { - return fmt.Errorf("Expected the schedule %s to be: %s, but got: %s", att, srcA[att], a[att]) - } - } - - return nil - } -} - -func testAccDataSourcePagerDutyScheduleConfig(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "test" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "test" { - name = "%s" - - time_zone = "America/New_York" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.test.id}"] - - restriction { - type = "weekly_restriction" - start_time_of_day = "08:00:00" - start_day_of_week = 5 - duration_seconds = 32101 - } - } -} - -data "pagerduty_schedule" "by_name" { - name = "${pagerduty_schedule.test.name}" -} -`, username, email, schedule) -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_user.go b/builtin/providers/pagerduty/data_source_pagerduty_user.go deleted file mode 100644 index 392a8a184..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_user.go +++ /dev/null @@ -1,62 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePagerDutyUser() *schema.Resource { - return &schema.Resource{ - Read: dataSourcePagerDutyUserRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - }, - "email": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func dataSourcePagerDutyUserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - 
log.Printf("[INFO] Reading PagerDuty user") - - searchEmail := d.Get("email").(string) - - o := &pagerduty.ListUsersOptions{ - Query: searchEmail, - } - - resp, err := client.ListUsers(*o) - if err != nil { - return err - } - - var found *pagerduty.User - - for _, user := range resp.Users { - if user.Email == searchEmail { - found = &user - break - } - } - - if found == nil { - return fmt.Errorf("Unable to locate any user with the email: %s", searchEmail) - } - - d.SetId(found.ID) - d.Set("name", found.Name) - d.Set("email", found.Email) - - return nil -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_user_test.go b/builtin/providers/pagerduty/data_source_pagerduty_user_test.go deleted file mode 100644 index 2c09d9231..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_user_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourcePagerDutyUser_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourcePagerDutyUserConfig(username, email), - Check: resource.ComposeTestCheckFunc( - testAccDataSourcePagerDutyUser("pagerduty_user.test", "data.pagerduty_user.by_email"), - ), - }, - }, - }) -} - -func testAccDataSourcePagerDutyUser(src, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - srcR := s.RootModule().Resources[src] - srcA := srcR.Primary.Attributes - - r := s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return fmt.Errorf("Expected to get a user ID from PagerDuty") - } - - testAtts := []string{"id", "name", "email"} - - for _, 
att := range testAtts { - if a[att] != srcA[att] { - return fmt.Errorf("Expected the user %s to be: %s, but got: %s", att, srcA[att], a[att]) - } - } - - return nil - } -} - -func testAccDataSourcePagerDutyUserConfig(username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "test" { - name = "%s" - email = "%s" -} - -data "pagerduty_user" "by_email" { - email = "${pagerduty_user.test.email}" -} -`, username, email) -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_vendor.go b/builtin/providers/pagerduty/data_source_pagerduty_vendor.go deleted file mode 100644 index b3e3659ac..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_vendor.go +++ /dev/null @@ -1,70 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - "regexp" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourcePagerDutyVendor() *schema.Resource { - return &schema.Resource{ - Read: dataSourcePagerDutyVendorRead, - - Schema: map[string]*schema.Schema{ - "name_regex": { - Type: schema.TypeString, - Optional: true, - Removed: "Use `name` instead. 
This attribute will be removed in a future version", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourcePagerDutyVendorRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty vendor") - - searchName := d.Get("name").(string) - - o := &pagerduty.ListVendorOptions{ - Query: searchName, - } - - resp, err := client.ListVendors(*o) - if err != nil { - return err - } - - var found *pagerduty.Vendor - - r := regexp.MustCompile("(?i)" + searchName) - - for _, vendor := range resp.Vendors { - if r.MatchString(vendor.Name) { - found = &vendor - break - } - } - - if found == nil { - return fmt.Errorf("Unable to locate any vendor with the name: %s", searchName) - } - - d.SetId(found.ID) - d.Set("name", found.Name) - d.Set("type", found.GenericServiceType) - - return nil -} diff --git a/builtin/providers/pagerduty/data_source_pagerduty_vendor_test.go b/builtin/providers/pagerduty/data_source_pagerduty_vendor_test.go deleted file mode 100644 index 59b639aa2..000000000 --- a/builtin/providers/pagerduty/data_source_pagerduty_vendor_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccDataSourcePagerDutyVendor_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyScheduleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataSourcePagerDutyVendorConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourcePagerDutyVendor("data.pagerduty_vendor.foo"), - ), - }, - }, - }) -} - -func testAccDataSourcePagerDutyVendor(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - - r := 
s.RootModule().Resources[n] - a := r.Primary.Attributes - - if a["id"] == "" { - return fmt.Errorf("Expected to get a vendor ID from PagerDuty") - } - - if a["id"] != "PZQ6AUS" { - return fmt.Errorf("Expected the Datadog Vendor ID to be: PZQ6AUS, but got: %s", a["id"]) - } - - if a["name"] != "Amazon Cloudwatch" { - return fmt.Errorf("Expected the Datadog Vendor Name to be: Datadog, but got: %s", a["name"]) - } - - if a["type"] != "api" { - return fmt.Errorf("Expected the Datadog Vendor Type to be: api, but got: %s", a["type"]) - } - - return nil - } -} - -const testAccDataSourcePagerDutyVendorConfig = ` -data "pagerduty_vendor" "foo" { - name = "cloudwatch" -} -` diff --git a/builtin/providers/pagerduty/errors.go b/builtin/providers/pagerduty/errors.go deleted file mode 100644 index fc9d579d6..000000000 --- a/builtin/providers/pagerduty/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package pagerduty - -import "strings" - -func isNotFound(err error) bool { - if strings.Contains(err.Error(), "Failed call API endpoint. 
HTTP response code: 404") { - return true - } - - return false -} - -func isUnauthorized(err error) bool { - return strings.Contains(err.Error(), "HTTP response code: 401") -} diff --git a/builtin/providers/pagerduty/import_pagerduty_escalation_policy_test.go b/builtin/providers/pagerduty/import_pagerduty_escalation_policy_test.go deleted file mode 100644 index 5feb94fc9..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_escalation_policy_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPagerDutyEscalationPolicy_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyEscalationPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyEscalationPolicyConfig(username, email, escalationPolicy), - }, - - { - ResourceName: "pagerduty_escalation_policy.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/import_pagerduty_schedule_test.go b/builtin/providers/pagerduty/import_pagerduty_schedule_test.go deleted file mode 100644 index 3cc0db866..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_schedule_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPagerDutySchedule_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - schedule := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyUserDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyScheduleConfig(username, email, schedule), - }, - - { - ResourceName: "pagerduty_schedule.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/import_pagerduty_service_integration_test.go b/builtin/providers/pagerduty/import_pagerduty_service_integration_test.go deleted file mode 100644 index 9b0ccfdcf..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_service_integration_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPagerDutyServiceIntegration_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceIntegration := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceIntegrationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceIntegrationConfig(username, email, escalationPolicy, service, serviceIntegration), - }, - - { - ResourceName: "pagerduty_service_integration.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/import_pagerduty_service_test.go b/builtin/providers/pagerduty/import_pagerduty_service_test.go deleted file mode 100644 index 516411a7f..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_service_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package pagerduty - -import ( - 
"fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPagerDutyService_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceConfig(username, email, escalationPolicy, service), - }, - - { - ResourceName: "pagerduty_service.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccPagerDutyServiceWithIncidentUrgency_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfig(username, email, escalationPolicy, service), - }, - - { - ResourceName: "pagerduty_service.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/import_pagerduty_team_test.go b/builtin/providers/pagerduty/import_pagerduty_team_test.go deleted file mode 100644 index 51a22f91c..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_team_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestAccPagerDutyTeam_import(t *testing.T) { - team := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyTeamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyTeamConfig(team), - }, - - { - ResourceName: "pagerduty_team.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/import_pagerduty_user_test.go b/builtin/providers/pagerduty/import_pagerduty_user_test.go deleted file mode 100644 index 6f130401a..000000000 --- a/builtin/providers/pagerduty/import_pagerduty_user_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPagerDutyUser_import(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyUserDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyUserConfig(username, email), - }, - - { - ResourceName: "pagerduty_user.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/pagerduty/provider.go b/builtin/providers/pagerduty/provider.go deleted file mode 100644 index 96a2f338e..000000000 --- a/builtin/providers/pagerduty/provider.go +++ /dev/null @@ -1,56 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider represents a resource provider in Terraform -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "token": { - Type: schema.TypeString, - 
Required: true, - DefaultFunc: schema.EnvDefaultFunc("PAGERDUTY_TOKEN", nil), - }, - - "skip_credentials_validation": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - "pagerduty_user": dataSourcePagerDutyUser(), - "pagerduty_schedule": dataSourcePagerDutySchedule(), - "pagerduty_escalation_policy": dataSourcePagerDutyEscalationPolicy(), - "pagerduty_vendor": dataSourcePagerDutyVendor(), - }, - - ResourcesMap: map[string]*schema.Resource{ - "pagerduty_addon": resourcePagerDutyAddon(), - "pagerduty_user": resourcePagerDutyUser(), - "pagerduty_team": resourcePagerDutyTeam(), - "pagerduty_service": resourcePagerDutyService(), - "pagerduty_service_integration": resourcePagerDutyServiceIntegration(), - "pagerduty_schedule": resourcePagerDutySchedule(), - "pagerduty_escalation_policy": resourcePagerDutyEscalationPolicy(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(data *schema.ResourceData) (interface{}, error) { - config := Config{ - Token: data.Get("token").(string), - SkipCredsValidation: data.Get("skip_credentials_validation").(bool), - } - - log.Println("[INFO] Initializing PagerDuty client") - return config.Client() -} diff --git a/builtin/providers/pagerduty/provider_test.go b/builtin/providers/pagerduty/provider_test.go deleted file mode 100644 index 906c4e5c8..000000000 --- a/builtin/providers/pagerduty/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package pagerduty - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "pagerduty": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); 
err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProviderImpl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("PAGERDUTY_PARALLEL"); v != "" { - t.Parallel() - } - - if v := os.Getenv("PAGERDUTY_TOKEN"); v == "" { - t.Fatal("PAGERDUTY_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_addon.go b/builtin/providers/pagerduty/resource_pagerduty_addon.go deleted file mode 100644 index 04190ecf8..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_addon.go +++ /dev/null @@ -1,107 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyAddon() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyAddonCreate, - Read: resourcePagerDutyAddonRead, - Update: resourcePagerDutyAddonUpdate, - Delete: resourcePagerDutyAddonDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "src": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func buildAddonStruct(d *schema.ResourceData) *pagerduty.Addon { - addon := pagerduty.Addon{ - Name: d.Get("name").(string), - Src: d.Get("src").(string), - APIObject: pagerduty.APIObject{ - Type: "full_page_addon", - }, - } - - return &addon -} - -func resourcePagerDutyAddonCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - addon := buildAddonStruct(d) - - log.Printf("[INFO] Creating PagerDuty add-on %s", addon.Name) - - addon, err := client.InstallAddon(*addon) - if err != nil { - return err - } - - d.SetId(addon.ID) - - return resourcePagerDutyAddonRead(d, meta) -} - -func resourcePagerDutyAddonRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty add-on %s", d.Id()) - - addon, err := client.GetAddon(d.Id()) - if err != nil { - if isNotFound(err) { - d.SetId("") - return nil - } - return err - } - - d.Set("name", addon.Name) - d.Set("src", addon.Src) - - return nil -} - -func resourcePagerDutyAddonUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - addon := buildAddonStruct(d) - - log.Printf("[INFO] Updating PagerDuty add-on %s", d.Id()) - - if _, err := client.UpdateAddon(d.Id(), *addon); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyAddonDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty add-on %s", d.Id()) - - if err := client.DeleteAddon(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_addon_test.go b/builtin/providers/pagerduty/resource_pagerduty_addon_test.go deleted file mode 100644 index 639153b1e..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_addon_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyAddon_Basic(t *testing.T) { - addon := fmt.Sprintf("tf-%s", acctest.RandString(5)) - addonUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyAddonDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyAddonConfig(addon), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyAddonExists("pagerduty_addon.foo"), - resource.TestCheckResourceAttr( - 
"pagerduty_addon.foo", "name", addon), - resource.TestCheckResourceAttr( - "pagerduty_addon.foo", "src", "https://intranet.foo.com/status"), - ), - }, - { - Config: testAccCheckPagerDutyAddonConfigUpdated(addonUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyAddonExists("pagerduty_addon.foo"), - resource.TestCheckResourceAttr( - "pagerduty_addon.foo", "name", addonUpdated), - resource.TestCheckResourceAttr( - "pagerduty_addon.foo", "src", "https://intranet.bar.com/status"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyAddonDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_addon" { - continue - } - - if _, err := client.GetAddon(r.Primary.ID); err == nil { - return fmt.Errorf("Add-on still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyAddonExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No add-on ID is set") - } - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := client.GetAddon(rs.Primary.ID) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Add-on not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyAddonConfig(addon string) string { - return fmt.Sprintf(` -resource "pagerduty_addon" "foo" { - name = "%s" - src = "https://intranet.foo.com/status" -} -`, addon) -} - -func testAccCheckPagerDutyAddonConfigUpdated(addon string) string { - return fmt.Sprintf(` -resource "pagerduty_addon" "foo" { - name = "%s" - src = "https://intranet.bar.com/status" -} -`, addon) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_escalation_policy.go b/builtin/providers/pagerduty/resource_pagerduty_escalation_policy.go deleted 
file mode 100644 index f8c4e37bb..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_escalation_policy.go +++ /dev/null @@ -1,169 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyEscalationPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyEscalationPolicyCreate, - Read: resourcePagerDutyEscalationPolicyRead, - Update: resourcePagerDutyEscalationPolicyUpdate, - Delete: resourcePagerDutyEscalationPolicyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - "num_loops": { - Type: schema.TypeInt, - Optional: true, - }, - "teams": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "rule": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "escalation_delay_in_minutes": { - Type: schema.TypeInt, - Required: true, - }, - "target": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "user_reference", - }, - "id": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func buildEscalationPolicyStruct(d *schema.ResourceData) *pagerduty.EscalationPolicy { - escalationRules := d.Get("rule").([]interface{}) - - escalationPolicy := pagerduty.EscalationPolicy{ - Name: d.Get("name").(string), - EscalationRules: expandEscalationRules(escalationRules), - } - - if attr, ok := d.GetOk("description"); ok { - escalationPolicy.Description = attr.(string) - } - 
- if attr, ok := d.GetOk("num_loops"); ok { - escalationPolicy.NumLoops = uint(attr.(int)) - } - - if attr, ok := d.GetOk("teams"); ok { - escalationPolicy.Teams = expandTeams(attr.([]interface{})) - } - - return &escalationPolicy -} - -func resourcePagerDutyEscalationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - escalationPolicy := buildEscalationPolicyStruct(d) - - log.Printf("[INFO] Creating PagerDuty escalation policy: %s", escalationPolicy.Name) - - escalationPolicy, err := client.CreateEscalationPolicy(*escalationPolicy) - - if err != nil { - return err - } - - d.SetId(escalationPolicy.ID) - - return resourcePagerDutyEscalationPolicyRead(d, meta) -} - -func resourcePagerDutyEscalationPolicyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty escalation policy: %s", d.Id()) - - o := &pagerduty.GetEscalationPolicyOptions{} - - escalationPolicy, err := client.GetEscalationPolicy(d.Id(), o) - - if err != nil { - return err - } - - d.Set("name", escalationPolicy.Name) - d.Set("teams", escalationPolicy.Teams) - d.Set("description", escalationPolicy.Description) - d.Set("num_loops", escalationPolicy.NumLoops) - - if err := d.Set("rule", flattenEscalationRules(escalationPolicy.EscalationRules)); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyEscalationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - escalationPolicy := buildEscalationPolicyStruct(d) - - log.Printf("[INFO] Updating PagerDuty escalation policy: %s", d.Id()) - - if _, err := client.UpdateEscalationPolicy(d.Id(), escalationPolicy); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyEscalationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty escalation policy: %s", d.Id()) - - 
if err := client.DeleteEscalationPolicy(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_escalation_policy_test.go b/builtin/providers/pagerduty/resource_pagerduty_escalation_policy_test.go deleted file mode 100644 index c70d95809..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_escalation_policy_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyEscalationPolicy_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - escalationPolicyUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyEscalationPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyEscalationPolicyConfig(username, email, escalationPolicy), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyEscalationPolicyExists("pagerduty_escalation_policy.foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "name", escalationPolicy), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "num_loops", "1"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.0.escalation_delay_in_minutes", "10"), - ), - }, - - { - Config: testAccCheckPagerDutyEscalationPolicyConfigUpdated(username, email, 
escalationPolicyUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyEscalationPolicyExists("pagerduty_escalation_policy.foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "name", escalationPolicyUpdated), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "description", "bar"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "num_loops", "2"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.0.escalation_delay_in_minutes", "10"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.1.escalation_delay_in_minutes", "20"), - ), - }, - }, - }) -} - -func TestAccPagerDutyEscalationPolicyWithTeams_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - team := fmt.Sprintf("tf-%s", acctest.RandString(5)) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - escalationPolicyUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyEscalationPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyEscalationPolicyWithTeamsConfig(username, email, team, escalationPolicy), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyEscalationPolicyExists("pagerduty_escalation_policy.foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "name", escalationPolicy), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "num_loops", "1"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.#", "1"), - resource.TestCheckResourceAttr( - 
"pagerduty_escalation_policy.foo", "rule.0.escalation_delay_in_minutes", "10"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "teams.#", "1"), - ), - }, - { - Config: testAccCheckPagerDutyEscalationPolicyWithTeamsConfigUpdated(username, email, team, escalationPolicyUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyEscalationPolicyExists("pagerduty_escalation_policy.foo"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "name", escalationPolicyUpdated), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "description", "bar"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "num_loops", "2"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.#", "2"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.0.escalation_delay_in_minutes", "10"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "rule.1.escalation_delay_in_minutes", "20"), - resource.TestCheckResourceAttr( - "pagerduty_escalation_policy.foo", "teams.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyEscalationPolicyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_escalation_policy" { - continue - } - - _, err := client.GetEscalationPolicy(r.Primary.ID, &pagerduty.GetEscalationPolicyOptions{}) - - if err == nil { - return fmt.Errorf("Escalation Policy still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyEscalationPolicyExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Escalation Policy ID is set") - } - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := 
client.GetEscalationPolicy(rs.Primary.ID, &pagerduty.GetEscalationPolicyOptions{}) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Escalation policy not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyEscalationPolicyConfig(name, email, escalationPolicy string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "foo" - num_loops = 1 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} -`, name, email, escalationPolicy) -} - -func testAccCheckPagerDutyEscalationPolicyConfigUpdated(name, email, escalationPolicy string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } - - rule { - escalation_delay_in_minutes = 20 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} -`, name, email, escalationPolicy) -} - -func testAccCheckPagerDutyEscalationPolicyWithTeamsConfig(name, email, team, escalationPolicy string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_team" "foo" { - name = "%s" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "foo" - num_loops = 1 - teams = ["${pagerduty_team.foo.id}"] - - rule { - 
escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} -`, name, email, team, escalationPolicy) -} - -func testAccCheckPagerDutyEscalationPolicyWithTeamsConfigUpdated(name, email, team, escalationPolicy string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_team" "foo" { - name = "%s" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } - - rule { - escalation_delay_in_minutes = 20 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} -`, name, email, team, escalationPolicy) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_schedule.go b/builtin/providers/pagerduty/resource_pagerduty_schedule.go deleted file mode 100644 index 38b1031b0..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_schedule.go +++ /dev/null @@ -1,198 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutySchedule() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyScheduleCreate, - Read: resourcePagerDutyScheduleRead, - Update: resourcePagerDutyScheduleUpdate, - Delete: resourcePagerDutyScheduleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "time_zone": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - "layer": { - Type: schema.TypeList, - Required: true, 
- ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "start": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old == "" { - return false - } - return true - }, - }, - "end": { - Type: schema.TypeString, - Optional: true, - }, - "rotation_virtual_start": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old == "" { - return false - } - return true - }, - }, - "rotation_turn_length_seconds": { - Type: schema.TypeInt, - Required: true, - }, - "users": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "restriction": { - Optional: true, - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "start_time_of_day": { - Type: schema.TypeString, - Required: true, - }, - "start_day_of_week": { - Type: schema.TypeInt, - Optional: true, - }, - "duration_seconds": { - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func buildScheduleStruct(d *schema.ResourceData) *pagerduty.Schedule { - scheduleLayers := d.Get("layer").([]interface{}) - - schedule := pagerduty.Schedule{ - Name: d.Get("name").(string), - TimeZone: d.Get("time_zone").(string), - ScheduleLayers: expandScheduleLayers(scheduleLayers), - } - - if attr, ok := d.GetOk("description"); ok { - schedule.Description = attr.(string) - } - - return &schedule -} - -func resourcePagerDutyScheduleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - schedule := buildScheduleStruct(d) - - log.Printf("[INFO] Creating PagerDuty schedule: 
%s", schedule.Name) - - schedule, err := client.CreateSchedule(*schedule) - - if err != nil { - return err - } - - d.SetId(schedule.ID) - - return resourcePagerDutyScheduleRead(d, meta) -} - -func resourcePagerDutyScheduleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty schedule: %s", d.Id()) - - schedule, err := client.GetSchedule(d.Id(), pagerduty.GetScheduleOptions{}) - - if err != nil { - return err - } - - d.Set("name", schedule.Name) - d.Set("time_zone", schedule.TimeZone) - d.Set("description", schedule.Description) - - if err := d.Set("layer", flattenScheduleLayers(schedule.ScheduleLayers)); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyScheduleUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - schedule := buildScheduleStruct(d) - - log.Printf("[INFO] Updating PagerDuty schedule: %s", d.Id()) - - if _, err := client.UpdateSchedule(d.Id(), *schedule); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyScheduleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty schedule: %s", d.Id()) - - if err := client.DeleteSchedule(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_schedule_test.go b/builtin/providers/pagerduty/resource_pagerduty_schedule_test.go deleted file mode 100644 index 09eb9f604..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_schedule_test.go +++ /dev/null @@ -1,411 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutySchedule_Basic(t *testing.T) { - username := 
fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - schedule := fmt.Sprintf("tf-%s", acctest.RandString(5)) - scheduleUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyScheduleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyScheduleConfig(username, email, schedule), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyScheduleExists("pagerduty_schedule.foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "name", schedule), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "time_zone", "Europe/Berlin"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.name", "foo"), - ), - }, - { - Config: testAccCheckPagerDutyScheduleConfigUpdated(username, email, scheduleUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyScheduleExists("pagerduty_schedule.foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "name", scheduleUpdated), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "time_zone", "America/New_York"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.name", "foo"), - ), - }, - }, - }) -} - -func TestAccPagerDutySchedule_BasicWeek(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - schedule := fmt.Sprintf("tf-%s", acctest.RandString(5)) - scheduleUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyScheduleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyScheduleConfigWeek(username, email, schedule), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyScheduleExists("pagerduty_schedule.foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "name", schedule), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "time_zone", "Europe/Berlin"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.name", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.restriction.0.start_day_of_week", "1"), - ), - }, - { - Config: testAccCheckPagerDutyScheduleConfigWeekUpdated(username, email, scheduleUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyScheduleExists("pagerduty_schedule.foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "name", scheduleUpdated), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "description", "Managed by Terraform"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "time_zone", "America/New_York"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.name", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.restriction.0.start_day_of_week", "5"), - ), - }, - }, - }) -} - -func TestAccPagerDutySchedule_Multi(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - schedule := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyScheduleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyScheduleConfigMulti(username, email, schedule), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyScheduleExists("pagerduty_schedule.foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "name", schedule), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "time_zone", "America/New_York"), - - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.#", "3"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.name", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.restriction.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.restriction.0.duration_seconds", "32101"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.restriction.0.start_time_of_day", "08:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.rotation_turn_length_seconds", "86400"), - // NOTE: Temporarily disabled due to API inconsistencies - // resource.TestCheckResourceAttr( - // "pagerduty_schedule.foo", "layer.0.rotation_virtual_start", "2015-11-06T20:00:00-05:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.0.users.#", "1"), - - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.name", "bar"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.restriction.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.restriction.0.duration_seconds", "32101"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.restriction.0.start_time_of_day", "08:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.restriction.0.start_day_of_week", "5"), - 
resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.rotation_turn_length_seconds", "86400"), - // NOTE: Temporarily disabled due to API inconsistencies - // resource.TestCheckResourceAttr( - // "pagerduty_schedule.foo", "layer.1.rotation_virtual_start", "2015-11-06T20:00:00-05:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.1.users.#", "1"), - - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.name", "foobar"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.restriction.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.restriction.0.duration_seconds", "32101"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.restriction.0.start_time_of_day", "08:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.restriction.0.start_day_of_week", "1"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.rotation_turn_length_seconds", "86400"), - // NOTE: Temporarily disabled due to API inconsistencies - // resource.TestCheckResourceAttr( - // "pagerduty_schedule.foo", "layer.2.rotation_virtual_start", "2015-11-06T20:00:00-05:00"), - resource.TestCheckResourceAttr( - "pagerduty_schedule.foo", "layer.2.users.#", "1"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyScheduleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_schedule" { - continue - } - - _, err := client.GetSchedule(r.Primary.ID, pagerduty.GetScheduleOptions{}) - - if err == nil { - return fmt.Errorf("Schedule still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyScheduleExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No 
Schedule ID is set") - } - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := client.GetSchedule(rs.Primary.ID, pagerduty.GetScheduleOptions{}) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Schedule not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyScheduleConfig(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "foo" { - name = "%s" - - time_zone = "Europe/Berlin" - description = "foo" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "daily_restriction" - start_time_of_day = "08:00:00" - duration_seconds = 32101 - } - } -} -`, username, email, schedule) -} - -func testAccCheckPagerDutyScheduleConfigUpdated(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "foo" { - name = "%s" - - time_zone = "America/New_York" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "daily_restriction" - start_time_of_day = "08:00:00" - duration_seconds = 32101 - } - } -} -`, username, email, schedule) -} - -func testAccCheckPagerDutyScheduleConfigWeek(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "foo" { - name = "%s" - - time_zone = "Europe/Berlin" - description = "foo" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = 
"2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "weekly_restriction" - start_time_of_day = "08:00:00" - start_day_of_week = 1 - duration_seconds = 32101 - } - } -} -`, username, email, schedule) -} - -func testAccCheckPagerDutyScheduleConfigWeekUpdated(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "foo" { - name = "%s" - - time_zone = "America/New_York" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "weekly_restriction" - start_time_of_day = "08:00:00" - start_day_of_week = 5 - duration_seconds = 32101 - } - } -} -`, username, email, schedule) -} - -func testAccCheckPagerDutyScheduleConfigMulti(username, email, schedule string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_schedule" "foo" { - name = "%s" - - time_zone = "America/New_York" - description = "foo" - - layer { - name = "foo" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "daily_restriction" - start_time_of_day = "08:00:00" - duration_seconds = 32101 - } - } - - layer { - name = "bar" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = "2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "weekly_restriction" - start_time_of_day = "08:00:00" - start_day_of_week = 5 - duration_seconds = 32101 - } - } - - layer { - name = "foobar" - start = "2015-11-06T20:00:00-05:00" - rotation_virtual_start = 
"2015-11-06T20:00:00-05:00" - rotation_turn_length_seconds = 86400 - users = ["${pagerduty_user.foo.id}"] - - restriction { - type = "weekly_restriction" - start_time_of_day = "08:00:00" - start_day_of_week = 1 - duration_seconds = 32101 - } - } -} -`, username, email, schedule) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_service.go b/builtin/providers/pagerduty/resource_pagerduty_service.go deleted file mode 100644 index a4c3cb5eb..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_service.go +++ /dev/null @@ -1,305 +0,0 @@ -package pagerduty - -import ( - "log" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyService() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyServiceCreate, - Read: resourcePagerDutyServiceRead, - Update: resourcePagerDutyServiceUpdate, - Delete: resourcePagerDutyServiceDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - "auto_resolve_timeout": { - Type: schema.TypeInt, - Optional: true, - }, - "last_incident_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "created_at": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "acknowledgement_timeout": { - Type: schema.TypeInt, - Optional: true, - }, - "escalation_policy": { - Type: schema.TypeString, - Required: true, - }, - "incident_urgency_rule": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "urgency": { - Type: schema.TypeString, - Optional: true, - }, - "during_support_hours": { - Type: 
schema.TypeList, - MaxItems: 1, - MinItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - }, - "urgency": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "outside_support_hours": { - Type: schema.TypeList, - MaxItems: 1, - MinItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - }, - "urgency": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "support_hours": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - MinItems: 1, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - }, - "time_zone": { - Type: schema.TypeString, - Optional: true, - }, - "start_time": { - Type: schema.TypeString, - Optional: true, - }, - "end_time": { - Type: schema.TypeString, - Optional: true, - }, - "days_of_week": { - Type: schema.TypeList, - Optional: true, - MaxItems: 7, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - }, - }, - }, - "scheduled_actions": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - }, - "to_urgency": { - Type: schema.TypeString, - Optional: true, - }, - "at": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func buildServiceStruct(d *schema.ResourceData) *pagerduty.Service { - service := pagerduty.Service{ - Name: d.Get("name").(string), - Status: d.Get("status").(string), - APIObject: pagerduty.APIObject{ - ID: d.Id(), - 
}, - } - - if attr, ok := d.GetOk("description"); ok { - service.Description = attr.(string) - } - - if attr, ok := d.GetOk("auto_resolve_timeout"); ok { - autoResolveTimeout := uint(attr.(int)) - service.AutoResolveTimeout = &autoResolveTimeout - } - - if attr, ok := d.GetOk("acknowledgement_timeout"); ok { - acknowledgementTimeout := uint(attr.(int)) - service.AcknowledgementTimeout = &acknowledgementTimeout - } - - escalationPolicy := &pagerduty.EscalationPolicy{ - APIObject: pagerduty.APIObject{ - ID: d.Get("escalation_policy").(string), - Type: "escalation_policy_reference", - }, - } - - service.EscalationPolicy = *escalationPolicy - - if attr, ok := d.GetOk("incident_urgency_rule"); ok { - if iur, ok := expandIncidentUrgencyRule(attr); ok { - service.IncidentUrgencyRule = iur - } - } - if attr, ok := d.GetOk("support_hours"); ok { - service.SupportHours = expandSupportHours(attr) - } - if attr, ok := d.GetOk("scheduled_actions"); ok { - service.ScheduledActions = expandScheduledActions(attr) - } - - return &service -} - -func resourcePagerDutyServiceCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - service := buildServiceStruct(d) - - log.Printf("[INFO] Creating PagerDuty service %s", service.Name) - - service, err := client.CreateService(*service) - - if err != nil { - return err - } - - d.SetId(service.ID) - - return nil -} - -func resourcePagerDutyServiceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty service %s", d.Id()) - - o := &pagerduty.GetServiceOptions{} - - service, err := client.GetService(d.Id(), o) - - if err != nil { - if isNotFound(err) { - d.SetId("") - return nil - } - return err - } - - d.Set("name", service.Name) - d.Set("status", service.Status) - d.Set("created_at", service.CreateAt) - d.Set("escalation_policy", service.EscalationPolicy.ID) - d.Set("description", service.Description) - 
d.Set("auto_resolve_timeout", service.AutoResolveTimeout) - d.Set("last_incident_timestamp", service.LastIncidentTimestamp) - d.Set("acknowledgement_timeout", service.AcknowledgementTimeout) - - if incidentUrgencyRule, ok := flattenIncidentUrgencyRule(service); ok { - d.Set("incident_urgency_rule", incidentUrgencyRule) - } - - supportHours := flattenSupportHours(service) - d.Set("support_hours", supportHours) - - scheduledActions := flattenScheduledActions(service) - d.Set("scheduled_actions", scheduledActions) - - return nil -} - -func resourcePagerDutyServiceUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - service := buildServiceStruct(d) - - log.Printf("[INFO] Updating PagerDuty service %s", d.Id()) - - if _, err := client.UpdateService(*service); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty service %s", d.Id()) - - if err := client.DeleteService(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_service_integration.go b/builtin/providers/pagerduty/resource_pagerduty_service_integration.go deleted file mode 100644 index 918e78000..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_service_integration.go +++ /dev/null @@ -1,213 +0,0 @@ -package pagerduty - -import ( - "fmt" - "log" - - pagerduty "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyServiceIntegration() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyServiceIntegrationCreate, - Read: resourcePagerDutyServiceIntegrationRead, - Update: resourcePagerDutyServiceIntegrationUpdate, - Delete: resourcePagerDutyServiceIntegrationDelete, - Importer: &schema.ResourceImporter{ - State: 
resourcePagerDutyServiceIntegrationImport, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "service": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ConflictsWith: []string{"vendor"}, - ValidateFunc: validateValueFunc([]string{ - "aws_cloudwatch_inbound_integration", - "cloudkick_inbound_integration", - "event_transformer_api_inbound_integration", - "generic_email_inbound_integration", - "generic_events_api_inbound_integration", - "keynote_inbound_integration", - "nagios_inbound_integration", - "pingdom_inbound_integration", - "sql_monitor_inbound_integration", - }), - }, - "vendor": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - ConflictsWith: []string{"type"}, - Computed: true, - }, - "integration_key": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "integration_email": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func buildServiceIntegrationStruct(d *schema.ResourceData) *pagerduty.Integration { - serviceIntegration := pagerduty.Integration{ - Name: d.Get("name").(string), - Service: &pagerduty.APIObject{ - Type: "service", - ID: d.Get("service").(string), - }, - APIObject: pagerduty.APIObject{ - ID: d.Id(), - Type: "service_integration", - }, - } - - if attr, ok := d.GetOk("integration_key"); ok { - serviceIntegration.IntegrationKey = attr.(string) - } - - if attr, ok := d.GetOk("integration_email"); ok { - serviceIntegration.IntegrationEmail = attr.(string) - } - - if attr, ok := d.GetOk("type"); ok { - serviceIntegration.Type = attr.(string) - } - - if attr, ok := d.GetOk("vendor"); ok { - serviceIntegration.Vendor = &pagerduty.APIObject{ - ID: attr.(string), - Type: "vendor", - } - } - - return &serviceIntegration -} - -func resourcePagerDutyServiceIntegrationCreate(d *schema.ResourceData, meta interface{}) error { - 
client := meta.(*pagerduty.Client) - - serviceIntegration := buildServiceIntegrationStruct(d) - - log.Printf("[INFO] Creating PagerDuty service integration %s", serviceIntegration.Name) - - service := d.Get("service").(string) - - serviceIntegration, err := client.CreateIntegration(service, *serviceIntegration) - - if err != nil { - return err - } - - d.SetId(serviceIntegration.ID) - - return resourcePagerDutyServiceIntegrationRead(d, meta) -} - -func resourcePagerDutyServiceIntegrationRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty service integration %s", d.Id()) - - service := d.Get("service").(string) - - o := &pagerduty.GetIntegrationOptions{} - - serviceIntegration, err := client.GetIntegration(service, d.Id(), *o) - - if err != nil { - if isNotFound(err) { - d.SetId("") - return nil - } - return err - } - - d.Set("name", serviceIntegration.Name) - d.Set("type", serviceIntegration.Type) - d.Set("service", serviceIntegration.Service.ID) - d.Set("vendor", serviceIntegration.Vendor.ID) - d.Set("integration_key", serviceIntegration.IntegrationKey) - d.Set("integration_email", serviceIntegration.IntegrationEmail) - - return nil -} - -func resourcePagerDutyServiceIntegrationUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - serviceIntegration := buildServiceIntegrationStruct(d) - - service := d.Get("service").(string) - - log.Printf("[INFO] Updating PagerDuty service integration %s", d.Id()) - - if _, err := client.UpdateIntegration(service, *serviceIntegration); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyServiceIntegrationDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - service := d.Get("service").(string) - - log.Printf("[INFO] Removing PagerDuty service integration %s", d.Id()) - - if err := client.DeleteIntegration(service, d.Id()); err != nil { - if 
isNotFound(err) { - d.SetId("") - return nil - } - return err - } - - d.SetId("") - - return nil -} - -func resourcePagerDutyServiceIntegrationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - client := meta.(*pagerduty.Client) - - resp, err := client.ListServices(pagerduty.ListServiceOptions{}) - if err != nil { - return []*schema.ResourceData{}, err - } - - var serviceID string - - for _, service := range resp.Services { - for _, integration := range service.Integrations { - if integration.ID == d.Id() { - serviceID = service.ID - } - } - } - - if serviceID == "" { - return []*schema.ResourceData{}, fmt.Errorf("Error importing pagerduty_service_integration. Could not locate a service ID for the integration") - } - - d.Set("service", serviceID) - - return []*schema.ResourceData{d}, nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_service_integration_test.go b/builtin/providers/pagerduty/resource_pagerduty_service_integration_test.go deleted file mode 100644 index 0aa451fc4..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_service_integration_test.go +++ /dev/null @@ -1,324 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyServiceIntegration_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceIntegration := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceIntegrationUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckPagerDutyServiceIntegrationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceIntegrationConfig(username, email, escalationPolicy, service, serviceIntegration), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceIntegrationExists("pagerduty_service_integration.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "name", serviceIntegration), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "type", "generic_events_api_inbound_integration"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "vendor", "PAM4FGS"), - ), - }, - { - Config: testAccCheckPagerDutyServiceIntegrationConfigUpdated(username, email, escalationPolicy, service, serviceIntegrationUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceIntegrationExists("pagerduty_service_integration.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "name", serviceIntegrationUpdated), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "type", "generic_events_api_inbound_integration"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "vendor", "PAM4FGS"), - ), - }, - }, - }) -} - -func TestAccPagerDutyServiceIntegrationGeneric_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceIntegration := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceIntegrationUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceIntegrationDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccCheckPagerDutyServiceIntegrationGenericConfig(username, email, escalationPolicy, service, serviceIntegration), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceIntegrationExists("pagerduty_service_integration.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "name", serviceIntegration), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "type", "generic_events_api_inbound_integration"), - ), - }, - { - Config: testAccCheckPagerDutyServiceIntegrationGenericConfigUpdated(username, email, escalationPolicy, service, serviceIntegrationUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceIntegrationExists("pagerduty_service_integration.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "name", serviceIntegrationUpdated), - resource.TestCheckResourceAttr( - "pagerduty_service_integration.foo", "type", "generic_events_api_inbound_integration"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyServiceIntegrationDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_service_integration" { - continue - } - - service, _ := s.RootModule().Resources["pagerduty_service.foo"] - - _, err := client.GetIntegration(service.Primary.ID, r.Primary.ID, pagerduty.GetIntegrationOptions{}) - - if err == nil { - return fmt.Errorf("Service Integration still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyServiceIntegrationExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Service Integration ID is set") - } - - service, _ := s.RootModule().Resources["pagerduty_service.foo"] - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := 
client.GetIntegration(service.Primary.ID, rs.Primary.ID, pagerduty.GetIntegrationOptions{}) - if err != nil { - return fmt.Errorf("Service integration not found: %v", rs.Primary.ID) - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Service Integration not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyServiceIntegrationConfig(username, email, escalationPolicy, service, serviceIntegration string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "foo" - num_loops = 1 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "foo" - auto_resolve_timeout = 1800 - acknowledgement_timeout = 1800 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} - -data "pagerduty_vendor" "datadog" { - name = "datadog" -} - -resource "pagerduty_service_integration" "foo" { - name = "%s" - service = "${pagerduty_service.foo.id}" - vendor = "${data.pagerduty_vendor.datadog.id}" -} -`, username, email, escalationPolicy, service, serviceIntegration) -} - -func testAccCheckPagerDutyServiceIntegrationConfigUpdated(username, email, escalationPolicy, service, serviceIntegration string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "bar" - auto_resolve_timeout = 3600 
- acknowledgement_timeout = 3600 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} - -data "pagerduty_vendor" "datadog" { - name = "datadog" -} - -resource "pagerduty_service_integration" "foo" { - name = "%s" - service = "${pagerduty_service.foo.id}" - vendor = "${data.pagerduty_vendor.datadog.id}" -} -`, username, email, escalationPolicy, service, serviceIntegration) -} - -func testAccCheckPagerDutyServiceIntegrationGenericConfig(username, email, escalationPolicy, service, serviceIntegration string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "foo" - num_loops = 1 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "foo" - auto_resolve_timeout = 1800 - acknowledgement_timeout = 1800 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} - -resource "pagerduty_service_integration" "foo" { - name = "%s" - service = "${pagerduty_service.foo.id}" - type = "generic_events_api_inbound_integration" -} -`, username, email, escalationPolicy, service, serviceIntegration) -} - -func testAccCheckPagerDutyServiceIntegrationGenericConfigUpdated(username, email, escalationPolicy, service, serviceIntegration string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource 
"pagerduty_service" "foo" { - name = "%s" - description = "bar" - auto_resolve_timeout = 3600 - acknowledgement_timeout = 3600 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} - -resource "pagerduty_service_integration" "foo" { - name = "%s" - service = "${pagerduty_service.foo.id}" - type = "generic_events_api_inbound_integration" -} -`, username, email, escalationPolicy, service, serviceIntegration) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_service_test.go b/builtin/providers/pagerduty/resource_pagerduty_service_test.go deleted file mode 100644 index b169b1b0f..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_service_test.go +++ /dev/null @@ -1,555 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyService_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceConfig(username, email, escalationPolicy, service), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", service), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", 
"auto_resolve_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "constant"), - ), - }, - { - Config: testAccCheckPagerDutyServiceConfigUpdated(username, email, escalationPolicy, serviceUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", serviceUpdated), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "bar"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "auto_resolve_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "constant"), - ), - }, - }, - }) -} - -func TestAccPagerDutyService_BasicWithIncidentUrgencyRules(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfig(username, 
email, escalationPolicy, service), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", service), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "auto_resolve_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.urgency", "low"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "use_support_hours"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.0.name", "support_hours_start"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.to_urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.type", "urgency_change"), - resource.TestCheckResourceAttr( 
- "pagerduty_service.foo", "support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.#", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.0", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.1", "2"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.2", "3"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.3", "4"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.4", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.end_time", "17:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.start_time", "09:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.time_zone", "America/Lima"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.type", "fixed_time_per_day"), - ), - }, - { - Config: testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfigUpdated(username, email, escalationPolicy, serviceUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", serviceUpdated), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "bar bar bar"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "auto_resolve_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", 
"incident_urgency_rule.0.during_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.urgency", "low"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "use_support_hours"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.0.name", "support_hours_start"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.to_urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.type", "urgency_change"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.#", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.0", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.1", "2"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.2", "3"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.3", "4"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.4", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.end_time", "17:00:00"), - 
resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.start_time", "09:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.time_zone", "America/Lima"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.type", "fixed_time_per_day"), - ), - }, - }, - }) -} - -func TestAccPagerDutyService_FromBasicToCustomIncidentUrgencyRules(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - escalationPolicy := fmt.Sprintf("tf-%s", acctest.RandString(5)) - service := fmt.Sprintf("tf-%s", acctest.RandString(5)) - serviceUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyServiceConfig(username, email, escalationPolicy, service), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", service), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "auto_resolve_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "1800"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "constant"), - ), - }, - { - Config: testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfigUpdated(username, email, escalationPolicy, serviceUpdated), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckPagerDutyServiceExists("pagerduty_service.foo"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "name", serviceUpdated), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "description", "bar bar bar"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "auto_resolve_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "acknowledgement_timeout", "3600"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.during_support_hours.0.urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.type", "constant"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.outside_support_hours.0.urgency", "low"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "incident_urgency_rule.0.type", "use_support_hours"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.#", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.at.0.name", "support_hours_start"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.to_urgency", "high"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "scheduled_actions.0.type", "urgency_change"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.#", "1"), - 
resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.#", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.0", "1"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.1", "2"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.2", "3"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.3", "4"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.days_of_week.4", "5"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.end_time", "17:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.start_time", "09:00:00"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.time_zone", "America/Lima"), - resource.TestCheckResourceAttr( - "pagerduty_service.foo", "support_hours.0.type", "fixed_time_per_day"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyServiceDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_service" { - continue - } - - _, err := client.GetService(r.Primary.ID, &pagerduty.GetServiceOptions{}) - - if err == nil { - return fmt.Errorf("Service still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyServiceExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Service ID is set") - } - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := client.GetService(rs.Primary.ID, &pagerduty.GetServiceOptions{}) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("Service not found: %v - %v", 
rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyServiceConfig(username, email, escalationPolicy, service string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - rule { - escalation_delay_in_minutes = 10 - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "foo" - auto_resolve_timeout = 1800 - acknowledgement_timeout = 1800 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} -`, username, email, escalationPolicy, service) -} - -func testAccCheckPagerDutyServiceConfigUpdated(username, email, escalationPolicy, service string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "bar" - auto_resolve_timeout = 3600 - acknowledgement_timeout = 3600 - - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - incident_urgency_rule { - type = "constant" - urgency = "high" - } -} -`, username, email, escalationPolicy, service) -} - -func testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfig(username, email, escalationPolicy, service string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - 
-resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "foo" - auto_resolve_timeout = 1800 - acknowledgement_timeout = 1800 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "use_support_hours" - - during_support_hours { - type = "constant" - urgency = "high" - } - outside_support_hours { - type = "constant" - urgency = "low" - } - } - - support_hours = [{ - type = "fixed_time_per_day" - time_zone = "America/Lima" - start_time = "09:00:00" - end_time = "17:00:00" - days_of_week = [ 1, 2, 3, 4, 5 ] - }] - - scheduled_actions { - type = "urgency_change" - to_urgency = "high" - at { - type = "named_time", - name = "support_hours_start" - } - } -} -`, username, email, escalationPolicy, service) -} - -func testAccCheckPagerDutyServiceWithIncidentUrgencyRulesConfigUpdated(username, email, escalationPolicy, service string) string { - return fmt.Sprintf(` - resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -} - -resource "pagerduty_escalation_policy" "foo" { - name = "%s" - description = "bar" - num_loops = 2 - - rule { - escalation_delay_in_minutes = 10 - target { - type = "user_reference" - id = "${pagerduty_user.foo.id}" - } - } -} - -resource "pagerduty_service" "foo" { - name = "%s" - description = "bar bar bar" - auto_resolve_timeout = 3600 - acknowledgement_timeout = 3600 - escalation_policy = "${pagerduty_escalation_policy.foo.id}" - - incident_urgency_rule { - type = "use_support_hours" - during_support_hours { - type = "constant" - urgency = "high" - } - outside_support_hours { - type = "constant" - urgency = "low" - } - } - - support_hours = [{ - type = "fixed_time_per_day" - time_zone = 
"America/Lima" - start_time = "09:00:00" - end_time = "17:00:00" - days_of_week = [ 1, 2, 3, 4, 5 ] - }] - - scheduled_actions { - type = "urgency_change" - to_urgency = "high" - at { - type = "named_time", - name = "support_hours_start" - } - } -} -`, username, email, escalationPolicy, service) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_team.go b/builtin/providers/pagerduty/resource_pagerduty_team.go deleted file mode 100644 index 6bbe1ca46..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_team.go +++ /dev/null @@ -1,107 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyTeam() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyTeamCreate, - Read: resourcePagerDutyTeamRead, - Update: resourcePagerDutyTeamUpdate, - Delete: resourcePagerDutyTeamDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - }, - } -} - -func buildTeamStruct(d *schema.ResourceData) *pagerduty.Team { - team := pagerduty.Team{ - Name: d.Get("name").(string), - } - - if attr, ok := d.GetOk("description"); ok { - team.Description = attr.(string) - } - - return &team -} - -func resourcePagerDutyTeamCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - team := buildTeamStruct(d) - - log.Printf("[INFO] Creating PagerDuty team %s", team.Name) - - team, err := client.CreateTeam(team) - - if err != nil { - return err - } - - d.SetId(team.ID) - - return nil - -} - -func resourcePagerDutyTeamRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty team %s", d.Id()) - - team, err := 
client.GetTeam(d.Id()) - - if err != nil { - return err - } - - d.Set("name", team.Name) - d.Set("description", team.Description) - - return nil -} - -func resourcePagerDutyTeamUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - team := buildTeamStruct(d) - - log.Printf("[INFO] Updating PagerDuty team %s", d.Id()) - - if _, err := client.UpdateTeam(d.Id(), team); err != nil { - return err - } - - return nil -} - -func resourcePagerDutyTeamDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty team %s", d.Id()) - - if err := client.DeleteTeam(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_team_test.go b/builtin/providers/pagerduty/resource_pagerduty_team_test.go deleted file mode 100644 index 1695a158b..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_team_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyTeam_Basic(t *testing.T) { - team := fmt.Sprintf("tf-%s", acctest.RandString(5)) - teamUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyTeamDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyTeamConfig(team), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyTeamExists("pagerduty_team.foo"), - resource.TestCheckResourceAttr( - "pagerduty_team.foo", "name", team), - resource.TestCheckResourceAttr( - "pagerduty_team.foo", "description", "foo"), - ), - }, - { - Config: 
testAccCheckPagerDutyTeamConfigUpdated(teamUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyTeamExists("pagerduty_team.foo"), - resource.TestCheckResourceAttr( - "pagerduty_team.foo", "name", teamUpdated), - resource.TestCheckResourceAttr( - "pagerduty_team.foo", "description", "bar"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyTeamDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_team" { - continue - } - - _, err := client.GetTeam(r.Primary.ID) - - if err == nil { - return fmt.Errorf("Team still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyTeamExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if _, err := client.GetTeam(r.Primary.ID); err != nil { - return fmt.Errorf("Received an error retrieving team %s ID: %s", err, r.Primary.ID) - } - } - return nil - } -} - -func testAccCheckPagerDutyTeamConfig(team string) string { - return fmt.Sprintf(` -resource "pagerduty_team" "foo" { - name = "%s" - description = "foo" -}`, team) -} - -func testAccCheckPagerDutyTeamConfigUpdated(team string) string { - return fmt.Sprintf(` -resource "pagerduty_team" "foo" { - name = "%s" - description = "bar" -}`, team) -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_user.go b/builtin/providers/pagerduty/resource_pagerduty_user.go deleted file mode 100644 index dc1bd71d2..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_user.go +++ /dev/null @@ -1,229 +0,0 @@ -package pagerduty - -import ( - "log" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePagerDutyUser() *schema.Resource { - return &schema.Resource{ - Create: resourcePagerDutyUserCreate, - Read: resourcePagerDutyUserRead, - Update: 
resourcePagerDutyUserUpdate, - Delete: resourcePagerDutyUserDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "email": { - Type: schema.TypeString, - Required: true, - }, - "color": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "role": { - Type: schema.TypeString, - Optional: true, - Default: "user", - ValidateFunc: validateValueFunc([]string{ - "admin", - "limited_user", - "owner", - "read_only_user", - "team_responder", - "user", - }), - }, - "job_title": { - Type: schema.TypeString, - Optional: true, - }, - "avatar_url": { - Type: schema.TypeString, - Computed: true, - }, - "teams": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "time_zone": { - Type: schema.TypeString, - Computed: true, - }, - "html_url": { - Type: schema.TypeString, - Computed: true, - }, - "invitation_sent": { - Type: schema.TypeBool, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "Managed by Terraform", - }, - }, - } -} - -func buildUserStruct(d *schema.ResourceData) *pagerduty.User { - user := pagerduty.User{ - Name: d.Get("name").(string), - Email: d.Get("email").(string), - APIObject: pagerduty.APIObject{ - ID: d.Id(), - }, - } - - if attr, ok := d.GetOk("color"); ok { - user.Color = attr.(string) - } - - if attr, ok := d.GetOk("role"); ok { - role := attr.(string) - // Skip setting the role if the user is the owner of the account. - // Can't change this through the API. 
- if role != "owner" { - user.Role = role - } - } - - if attr, ok := d.GetOk("job_title"); ok { - user.JobTitle = attr.(string) - } - - if attr, ok := d.GetOk("description"); ok { - user.Description = attr.(string) - } - - return &user -} - -func resourcePagerDutyUserCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - user := buildUserStruct(d) - - log.Printf("[INFO] Creating PagerDuty user %s", user.Name) - - user, err := client.CreateUser(*user) - - if err != nil { - return err - } - - d.SetId(user.ID) - - return resourcePagerDutyUserUpdate(d, meta) -} - -func resourcePagerDutyUserRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Reading PagerDuty user %s", d.Id()) - - o := &pagerduty.GetUserOptions{} - - user, err := client.GetUser(d.Id(), *o) - - if err != nil { - return err - } - - d.Set("name", user.Name) - d.Set("email", user.Email) - d.Set("time_zone", user.Timezone) - d.Set("color", user.Color) - d.Set("role", user.Role) - d.Set("avatar_url", user.AvatarURL) - d.Set("description", user.Description) - d.Set("job_title", user.JobTitle) - d.Set("teams", user.Teams) - - return nil -} - -func resourcePagerDutyUserUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - user := buildUserStruct(d) - - log.Printf("[INFO] Updating PagerDuty user %s", d.Id()) - - if _, err := client.UpdateUser(*user); err != nil { - return err - } - - if d.HasChange("teams") { - o, n := d.GetChange("teams") - - if o == nil { - o = new(schema.Set) - } - - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) - - for _, t := range remove { - _, tErr := client.GetTeam(t) - - if tErr != nil { - log.Printf("[INFO] PagerDuty team: %s not found, removing dangling team reference for user %s", t, 
d.Id()) - continue - } - - log.Printf("[INFO] Removing PagerDuty user %s from team: %s", d.Id(), t) - - rErr := client.RemoveUserFromTeam(t, d.Id()) - if rErr != nil { - return rErr - } - } - - for _, t := range add { - log.Printf("[INFO] Adding PagerDuty user %s to team: %s", d.Id(), t) - - aErr := client.AddUserToTeam(t, d.Id()) - if aErr != nil { - return aErr - } - } - } - - return nil -} - -func resourcePagerDutyUserDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*pagerduty.Client) - - log.Printf("[INFO] Deleting PagerDuty user %s", d.Id()) - - if err := client.DeleteUser(d.Id()); err != nil { - return err - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/pagerduty/resource_pagerduty_user_test.go b/builtin/providers/pagerduty/resource_pagerduty_user_test.go deleted file mode 100644 index bdb1d22e4..000000000 --- a/builtin/providers/pagerduty/resource_pagerduty_user_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package pagerduty - -import ( - "fmt" - "testing" - - "github.com/PagerDuty/go-pagerduty" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPagerDutyUser_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - usernameUpdated := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - emailUpdated := fmt.Sprintf("%s@foo.com", usernameUpdated) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyUserDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyUserConfig(username, email), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyUserExists("pagerduty_user.foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "name", username), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "email", email), - 
resource.TestCheckResourceAttr( - "pagerduty_user.foo", "color", "green"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "role", "user"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "job_title", "foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "description", "foo"), - ), - }, - { - Config: testAccCheckPagerDutyUserConfigUpdated(usernameUpdated, emailUpdated), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyUserExists("pagerduty_user.foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "name", usernameUpdated), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "email", emailUpdated), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "color", "red"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "role", "team_responder"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "job_title", "bar"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "description", "bar"), - ), - }, - }, - }) -} - -func TestAccPagerDutyUserWithTeams_Basic(t *testing.T) { - username := fmt.Sprintf("tf-%s", acctest.RandString(5)) - email := fmt.Sprintf("%s@foo.com", username) - team1 := fmt.Sprintf("tf-%s", acctest.RandString(5)) - team2 := fmt.Sprintf("tf-%s", acctest.RandString(5)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPagerDutyUserDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckPagerDutyUserWithTeamsConfig(team1, username, email), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyUserExists("pagerduty_user.foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "name", username), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "email", email), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "teams.#", "1"), - ), - }, - { - Config: testAccCheckPagerDutyUserWithTeamsConfigUpdated(team1, team2, username, email), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckPagerDutyUserExists("pagerduty_user.foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "name", username), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "email", email), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "teams.#", "2"), - ), - }, - { - Config: testAccCheckPagerDutyUserWithNoTeamsConfig(team1, team2, username, email), - Check: resource.ComposeTestCheckFunc( - testAccCheckPagerDutyUserExists("pagerduty_user.foo"), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "name", username), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "email", email), - resource.TestCheckResourceAttr( - "pagerduty_user.foo", "teams.#", "0"), - ), - }, - }, - }) -} - -func testAccCheckPagerDutyUserDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*pagerduty.Client) - for _, r := range s.RootModule().Resources { - if r.Type != "pagerduty_user" { - continue - } - - opts := pagerduty.GetUserOptions{} - - _, err := client.GetUser(r.Primary.ID, opts) - - if err == nil { - return fmt.Errorf("User still exists") - } - - } - return nil -} - -func testAccCheckPagerDutyUserExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No user ID is set") - } - - client := testAccProvider.Meta().(*pagerduty.Client) - - found, err := client.GetUser(rs.Primary.ID, pagerduty.GetUserOptions{}) - if err != nil { - return err - } - - if found.ID != rs.Primary.ID { - return fmt.Errorf("User not found: %v - %v", rs.Primary.ID, found) - } - - return nil - } -} - -func testAccCheckPagerDutyUserConfig(username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "green" - role = "user" - job_title = "foo" - description = "foo" -}`, username, email) -} 
- -func testAccCheckPagerDutyUserConfigUpdated(username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - color = "red" - role = "team_responder" - job_title = "bar" - description = "bar" -}`, username, email) -} - -func testAccCheckPagerDutyUserWithTeamsConfig(team, username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_team" "foo" { - name = "%s" -} - -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - teams = ["${pagerduty_team.foo.id}"] -} -`, team, username, email) -} - -func testAccCheckPagerDutyUserWithTeamsConfigUpdated(team1, team2, username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_team" "foo" { - name = "%s" -} - -resource "pagerduty_team" "bar" { - name = "%s" -} - -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" - teams = ["${pagerduty_team.foo.id}", "${pagerduty_team.bar.id}"] -} -`, team1, team2, username, email) -} - -func testAccCheckPagerDutyUserWithNoTeamsConfig(team1, team2, username, email string) string { - return fmt.Sprintf(` -resource "pagerduty_team" "foo" { - name = "%s" -} - -resource "pagerduty_team" "bar" { - name = "%s" -} - -resource "pagerduty_user" "foo" { - name = "%s" - email = "%s" -} -`, team1, team2, username, email) -} diff --git a/builtin/providers/pagerduty/structure.go b/builtin/providers/pagerduty/structure.go deleted file mode 100644 index 4d4f52df8..000000000 --- a/builtin/providers/pagerduty/structure.go +++ /dev/null @@ -1,390 +0,0 @@ -package pagerduty - -import pagerduty "github.com/PagerDuty/go-pagerduty" - -// Expands an array of escalation rules into []pagerduty.EscalationRules -func expandEscalationRules(list []interface{}) []pagerduty.EscalationRule { - result := make([]pagerduty.EscalationRule, 0, len(list)) - - for _, r := range list { - rule := r.(map[string]interface{}) - - escalationRule := &pagerduty.EscalationRule{ - Delay: 
uint(rule["escalation_delay_in_minutes"].(int)), - } - - for _, t := range rule["target"].([]interface{}) { - target := t.(map[string]interface{}) - escalationRule.Targets = append( - escalationRule.Targets, - pagerduty.APIObject{ - ID: target["id"].(string), - Type: target["type"].(string), - }, - ) - } - - result = append(result, *escalationRule) - - } - - return result -} - -// Flattens an array of []pagerduty.EscalationRule into a map[string]interface{} -func flattenEscalationRules(list []pagerduty.EscalationRule) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - - for _, i := range list { - r := make(map[string]interface{}) - r["id"] = i.ID - r["escalation_delay_in_minutes"] = i.Delay - - if len(i.Targets) > 0 { - targets := make([]map[string]interface{}, 0, len(i.Targets)) - for _, t := range i.Targets { - targets = append(targets, map[string]interface{}{ - "id": t.ID, - "type": t.Type, - }) - } - r["target"] = targets - } - - result = append(result, r) - } - - return result -} - -// Expands an array of schedules into []pagerduty.Schedule -func expandScheduleLayers(list []interface{}) []pagerduty.ScheduleLayer { - result := make([]pagerduty.ScheduleLayer, 0, len(list)) - - for _, l := range list { - layer := l.(map[string]interface{}) - - scheduleLayer := &pagerduty.ScheduleLayer{ - Name: layer["name"].(string), - Start: layer["start"].(string), - End: layer["end"].(string), - RotationVirtualStart: layer["rotation_virtual_start"].(string), - RotationTurnLengthSeconds: uint(layer["rotation_turn_length_seconds"].(int)), - } - - if layer["id"] != "" { - scheduleLayer.ID = layer["id"].(string) - } - - for _, u := range layer["users"].([]interface{}) { - scheduleLayer.Users = append( - scheduleLayer.Users, - pagerduty.UserReference{ - User: pagerduty.APIObject{ - ID: u.(string), - Type: "user_reference", - }, - }, - ) - } - - for _, r := range layer["restriction"].([]interface{}) { - restriction := r.(map[string]interface{}) - 
scheduleLayer.Restrictions = append( - scheduleLayer.Restrictions, - pagerduty.Restriction{ - Type: restriction["type"].(string), - StartTimeOfDay: restriction["start_time_of_day"].(string), - StartDayOfWeek: uint(restriction["start_day_of_week"].(int)), - DurationSeconds: uint(restriction["duration_seconds"].(int)), - }, - ) - } - - result = append(result, *scheduleLayer) - } - - return result -} - -// Expands an array of teams into []pagerduty.APIReference -func expandTeams(list []interface{}) []pagerduty.APIReference { - result := make([]pagerduty.APIReference, 0, len(list)) - - for _, l := range list { - team := &pagerduty.APIReference{ - ID: l.(string), - Type: "team_reference", - } - - result = append(result, *team) - } - - return result -} - -// Flattens an array of []pagerduty.ScheduleLayer into a map[string]interface{} -func flattenScheduleLayers(list []pagerduty.ScheduleLayer) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - - for _, i := range list { - r := make(map[string]interface{}) - r["id"] = i.ID - r["name"] = i.Name - r["end"] = i.End - r["start"] = i.Start - r["rotation_virtual_start"] = i.RotationVirtualStart - r["rotation_turn_length_seconds"] = i.RotationTurnLengthSeconds - - if len(i.Users) > 0 { - users := make([]string, 0, len(i.Users)) - for _, u := range i.Users { - users = append(users, u.User.ID) - } - r["users"] = users - } - - if len(i.Restrictions) > 0 { - restrictions := make([]map[string]interface{}, 0, len(i.Restrictions)) - for _, r := range i.Restrictions { - restriction := map[string]interface{}{ - "duration_seconds": r.DurationSeconds, - "start_time_of_day": r.StartTimeOfDay, - "type": r.Type, - } - - if r.StartDayOfWeek > 0 { - restriction["start_day_of_week"] = r.StartDayOfWeek - } - - restrictions = append(restrictions, restriction) - } - r["restriction"] = restrictions - } - - result = append(result, r) - } - - // Reverse the final result and return it - resultReversed := 
make([]map[string]interface{}, 0, len(result)) - - for i := len(result) - 1; i >= 0; i-- { - resultReversed = append(resultReversed, result[i]) - } - - return resultReversed -} - -// Takes the result of flatmap.Expand for an array of strings -// and returns a []string -func expandStringList(configured []interface{}) []string { - vs := make([]string, 0, len(configured)) - for _, v := range configured { - vs = append(vs, string(v.(string))) - } - return vs -} - -// Expands attribute slice to incident urgency rule, returns it and true if successful -func expandIncidentUrgencyRule(incidentUrgencyList interface{}) (*pagerduty.IncidentUrgencyRule, bool) { - i := incidentUrgencyList.([]interface{}) - - i, ok := incidentUrgencyList.([]interface{}) - if !ok { - return nil, false - } - - m, ok := i[0].(map[string]interface{}) - if !ok || len(m) == 0 { - return nil, false - } - - iur := pagerduty.IncidentUrgencyRule{} - if val, ok := m["type"]; ok { - iur.Type = val.(string) - } - if val, ok := m["urgency"]; ok { - iur.Urgency = val.(string) - } - if val, ok := m["during_support_hours"]; ok { - iur.DuringSupportHours = expandIncidentUrgencyType(val) - } - if val, ok := m["outside_support_hours"]; ok { - iur.OutsideSupportHours = expandIncidentUrgencyType(val) - } - - return &iur, true -} - -// Expands attribute to inline model -func expandActionInlineModel(inlineModelVal interface{}) *pagerduty.InlineModel { - inlineModel := pagerduty.InlineModel{} - - if slice, ok := inlineModelVal.([]interface{}); ok && len(slice) == 1 { - m := slice[0].(map[string]interface{}) - - if val, ok := m["type"]; ok { - inlineModel.Type = val.(string) - } - if val, ok := m["name"]; ok { - inlineModel.Name = val.(string) - } - } - - return &inlineModel -} - -// Expands attribute into incident urgency type -func expandIncidentUrgencyType(attribute interface{}) *pagerduty.IncidentUrgencyType { - ict := pagerduty.IncidentUrgencyType{} - - slice := attribute.([]interface{}) - if len(slice) != 1 { - 
return &ict - } - - m := slice[0].(map[string]interface{}) - - if val, ok := m["type"]; ok { - ict.Type = val.(string) - } - if val, ok := m["urgency"]; ok { - ict.Urgency = val.(string) - } - - return &ict -} - -// Returns service's incident urgency rule as slice of length one and bool indicating success -func flattenIncidentUrgencyRule(service *pagerduty.Service) ([]interface{}, bool) { - if service.IncidentUrgencyRule.Type == "" && service.IncidentUrgencyRule.Urgency == "" { - return nil, false - } - - m := map[string]interface{}{ - "type": service.IncidentUrgencyRule.Type, - "urgency": service.IncidentUrgencyRule.Urgency, - } - - if dsh := service.IncidentUrgencyRule.DuringSupportHours; dsh != nil { - m["during_support_hours"] = flattenIncidentUrgencyType(dsh) - } - if osh := service.IncidentUrgencyRule.OutsideSupportHours; osh != nil { - m["outside_support_hours"] = flattenIncidentUrgencyType(osh) - } - - return []interface{}{m}, true -} - -func flattenIncidentUrgencyType(iut *pagerduty.IncidentUrgencyType) []interface{} { - incidentUrgencyType := map[string]interface{}{ - "type": iut.Type, - "urgency": iut.Urgency, - } - return []interface{}{incidentUrgencyType} -} - -// Expands attribute to support hours -func expandSupportHours(attribute interface{}) (sh *pagerduty.SupportHours) { - if slice, ok := attribute.([]interface{}); ok && len(slice) >= 1 { - m := slice[0].(map[string]interface{}) - sh = &pagerduty.SupportHours{} - - if val, ok := m["type"]; ok { - sh.Type = val.(string) - } - if val, ok := m["time_zone"]; ok { - sh.Timezone = val.(string) - } - if val, ok := m["start_time"]; ok { - sh.StartTime = val.(string) - } - if val, ok := m["end_time"]; ok { - sh.EndTime = val.(string) - } - if val, ok := m["days_of_week"]; ok { - daysOfWeekInt := val.([]interface{}) - var daysOfWeek []uint - - for _, i := range daysOfWeekInt { - daysOfWeek = append(daysOfWeek, uint(i.(int))) - } - - sh.DaysOfWeek = daysOfWeek - } - } - - return -} - -// Returns service's 
support hours as slice of length one -func flattenSupportHours(service *pagerduty.Service) []interface{} { - if service.SupportHours == nil { - return nil - } - - m := map[string]interface{}{} - - if s := service.SupportHours; s != nil { - m["type"] = s.Type - m["time_zone"] = s.Timezone - m["start_time"] = s.StartTime - m["end_time"] = s.EndTime - m["days_of_week"] = s.DaysOfWeek - } - - return []interface{}{m} -} - -// Expands attribute to scheduled action -func expandScheduledActions(input interface{}) (scheduledActions []pagerduty.ScheduledAction) { - inputs := input.([]interface{}) - - for _, i := range inputs { - m := i.(map[string]interface{}) - sa := pagerduty.ScheduledAction{} - - if val, ok := m["type"]; ok { - sa.Type = val.(string) - } - if val, ok := m["to_urgency"]; ok { - sa.ToUrgency = val.(string) - } - if val, ok := m["at"]; ok { - sa.At = *expandActionInlineModel(val) - } - - scheduledActions = append(scheduledActions, sa) - } - - return scheduledActions -} - -// Returns service's scheduled actions -func flattenScheduledActions(service *pagerduty.Service) []interface{} { - scheduledActions := []interface{}{} - - if sas := service.ScheduledActions; sas != nil { - for _, sa := range sas { - m := map[string]interface{}{} - m["to_urgency"] = sa.ToUrgency - m["type"] = sa.Type - if at, ok := scheduledActionsAt(sa.At); ok { - m["at"] = at - } - scheduledActions = append(scheduledActions, m) - } - } - - return scheduledActions -} - -// Returns service's scheduled action's at attribute as slice of length one -func scheduledActionsAt(inlineModel pagerduty.InlineModel) ([]interface{}, bool) { - if inlineModel.Type == "" || inlineModel.Name == "" { - return nil, false - } - - m := map[string]interface{}{"type": inlineModel.Type, "name": inlineModel.Name} - return []interface{}{m}, true -} diff --git a/builtin/providers/pagerduty/util.go b/builtin/providers/pagerduty/util.go deleted file mode 100644 index 68181f9a7..000000000 --- 
a/builtin/providers/pagerduty/util.go +++ /dev/null @@ -1,26 +0,0 @@ -package pagerduty - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -// Validate a value against a set of possible values -func validateValueFunc(values []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (we []string, errors []error) { - value := v.(string) - valid := false - for _, val := range values { - if value == val { - valid = true - break - } - } - - if !valid { - errors = append(errors, fmt.Errorf("%#v is an invalid value for argument %s. Must be one of %#v", value, k, values)) - } - return - } -} diff --git a/builtin/providers/postgresql/.gitignore b/builtin/providers/postgresql/.gitignore deleted file mode 100644 index 8af725ec6..000000000 --- a/builtin/providers/postgresql/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -data/ -pwfile diff --git a/builtin/providers/postgresql/GNUmakefile b/builtin/providers/postgresql/GNUmakefile deleted file mode 100644 index f8f2da5c4..000000000 --- a/builtin/providers/postgresql/GNUmakefile +++ /dev/null @@ -1,38 +0,0 @@ -# env TESTARGS='-run TestAccPostgresqlSchema_AddPolicy' TF_LOG=warn make test -# -# NOTE: As of PostgreSQL 9.6.1 the -test.parallel=1 is required when -# performing `DROP ROLE`-related actions. This behavior and requirement -# may change in the future and is likely not required when doing -# non-delete related operations. But for now it is. 
- -POSTGRES?=$(wildcard /usr/local/bin/postgres /opt/local/lib/postgresql96/bin/postgres) -PSQL?=$(wildcard /usr/local/bin/psql /opt/local/lib/postgresql96/bin/psql) -INITDB?=$(wildcard /usr/local/bin/initdb /opt/local/lib/postgresql96/bin/initdb) - -PGDATA?=$(GOPATH)/src/github.com/hashicorp/terraform/builtin/providers/postgresql/data - -initdb:: - echo "" > pwfile - $(INITDB) --no-locale -U postgres -A md5 --pwfile=pwfile -D $(PGDATA) - -startdb:: - 2>&1 \ - $(POSTGRES) \ - -D $(PGDATA) \ - -c log_connections=on \ - -c log_disconnections=on \ - -c log_duration=on \ - -c log_statement=all \ - | tee postgresql.log - -cleandb:: - rm -rf $(PGDATA) - rm -f pwfile - -freshdb:: cleandb initdb startdb - -test:: - 2>&1 PGSSLMODE=disable PGHOST=/tmp PGUSER=postgres make -C ../../.. testacc TEST=./builtin/providers/postgresql | tee test.log - -psql:: - $(PSQL) -E postgres postgres diff --git a/builtin/providers/postgresql/config.go b/builtin/providers/postgresql/config.go deleted file mode 100644 index 2b45904fc..000000000 --- a/builtin/providers/postgresql/config.go +++ /dev/null @@ -1,97 +0,0 @@ -package postgresql - -import ( - "bytes" - "database/sql" - "fmt" - "log" - "sync" - "unicode" - - "github.com/hashicorp/errwrap" - _ "github.com/lib/pq" //PostgreSQL db -) - -// Config - provider config -type Config struct { - Host string - Port int - Database string - Username string - Password string - SSLMode string - ApplicationName string - Timeout int - ConnectTimeoutSec int -} - -// Client struct holding connection string -type Client struct { - username string - connStr string - - // PostgreSQL lock on pg_catalog. Many of the operations that Terraform - // performs are not permitted to be concurrent. Unlike traditional - // PostgreSQL tables that use MVCC, many of the PostgreSQL system - // catalogs look like tables, but are not in-fact able to be - // concurrently updated. 
- catalogLock sync.RWMutex -} - -// NewClient returns new client config -func (c *Config) NewClient() (*Client, error) { - // NOTE: dbname must come before user otherwise dbname will be set to - // user. - const dsnFmt = "host=%s port=%d dbname=%s user=%s password=%s sslmode=%s fallback_application_name=%s connect_timeout=%d" - - // Quote empty strings or strings that contain whitespace - q := func(s string) string { - b := bytes.NewBufferString(`'`) - b.Grow(len(s) + 2) - var haveWhitespace bool - for _, r := range s { - if unicode.IsSpace(r) { - haveWhitespace = true - } - - switch r { - case '\'': - b.WriteString(`\'`) - case '\\': - b.WriteString(`\\`) - default: - b.WriteRune(r) - } - } - - b.WriteString(`'`) - - str := b.String() - if haveWhitespace || len(str) == 2 { - return str - } - return str[1 : len(str)-1] - } - - logDSN := fmt.Sprintf(dsnFmt, q(c.Host), c.Port, q(c.Database), q(c.Username), q(""), q(c.SSLMode), q(c.ApplicationName), c.ConnectTimeoutSec) - log.Printf("[INFO] PostgreSQL DSN: `%s`", logDSN) - - connStr := fmt.Sprintf(dsnFmt, q(c.Host), c.Port, q(c.Database), q(c.Username), q(c.Password), q(c.SSLMode), q(c.ApplicationName), c.ConnectTimeoutSec) - client := Client{ - connStr: connStr, - username: c.Username, - } - - return &client, nil -} - -// Connect will manually connect/disconnect to prevent a large -// number or db connections being made -func (c *Client) Connect() (*sql.DB, error) { - db, err := sql.Open("postgres", c.connStr) - if err != nil { - return nil, errwrap.Wrapf("Error connecting to PostgreSQL server: {{err}}", err) - } - - return db, nil -} diff --git a/builtin/providers/postgresql/helpers.go b/builtin/providers/postgresql/helpers.go deleted file mode 100644 index 2be57bf39..000000000 --- a/builtin/providers/postgresql/helpers.go +++ /dev/null @@ -1,24 +0,0 @@ -package postgresql - -import ( - "fmt" - "strings" -) - -// pqQuoteLiteral returns a string literal safe for inclusion in a PostgreSQL -// query as a parameter. 
The resulting string still needs to be wrapped in -// single quotes in SQL (i.e. fmt.Sprintf(`'%s'`, pqQuoteLiteral("str"))). See -// quote_literal_internal() in postgresql/backend/utils/adt/quote.c:77. -func pqQuoteLiteral(in string) string { - in = strings.Replace(in, `\`, `\\`, -1) - in = strings.Replace(in, `'`, `''`, -1) - return in -} - -func validateConnLimit(v interface{}, key string) (warnings []string, errors []error) { - value := v.(int) - if value < -1 { - errors = append(errors, fmt.Errorf("%s can not be less than -1", key)) - } - return -} diff --git a/builtin/providers/postgresql/provider.go b/builtin/providers/postgresql/provider.go deleted file mode 100644 index 93bffd5ca..000000000 --- a/builtin/providers/postgresql/provider.go +++ /dev/null @@ -1,112 +0,0 @@ -package postgresql - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "host": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGHOST", nil), - Description: "Name of PostgreSQL server address to connect to", - }, - "port": { - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGPORT", 5432), - Description: "The PostgreSQL port number to connect to at the server host, or socket file name extension for Unix-domain connections", - }, - "database": { - Type: schema.TypeString, - Optional: true, - Description: "The name of the database to connect to in order to conenct to (defaults to `postgres`).", - DefaultFunc: schema.EnvDefaultFunc("PGDATABASE", "postgres"), - }, - "username": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGUSER", "postgres"), - Description: "PostgreSQL user name to connect as", - }, - "password": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGPASSWORD", nil), - Description: "Password to be used if the PostgreSQL server demands password authentication", - }, - "sslmode": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGSSLMODE", nil), - Description: "This option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the PostgreSQL server", - }, - "ssl_mode": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Rename PostgreSQL provider `ssl_mode` attribute to `sslmode`", - }, - "connect_timeout": { - Type: schema.TypeInt, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PGCONNECT_TIMEOUT", 180), - Description: "Maximum wait for connection, in seconds. 
Zero or not specified means wait indefinitely.", - ValidateFunc: validateConnTimeout, - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "postgresql_database": resourcePostgreSQLDatabase(), - "postgresql_extension": resourcePostgreSQLExtension(), - "postgresql_schema": resourcePostgreSQLSchema(), - "postgresql_role": resourcePostgreSQLRole(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func validateConnTimeout(v interface{}, key string) (warnings []string, errors []error) { - value := v.(int) - if value < 0 { - errors = append(errors, fmt.Errorf("%s can not be less than 0", key)) - } - return -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var sslMode string - if sslModeRaw, ok := d.GetOk("sslmode"); ok { - sslMode = sslModeRaw.(string) - } else { - sslMode = d.Get("ssl_mode").(string) - } - config := Config{ - Host: d.Get("host").(string), - Port: d.Get("port").(int), - Database: d.Get("database").(string), - Username: d.Get("username").(string), - Password: d.Get("password").(string), - SSLMode: sslMode, - ApplicationName: tfAppName(), - ConnectTimeoutSec: d.Get("connect_timeout").(int), - } - - client, err := config.NewClient() - if err != nil { - return nil, errwrap.Wrapf("Error initializing PostgreSQL client: {{err}}", err) - } - - return client, nil -} - -func tfAppName() string { - return fmt.Sprintf("Terraform v%s", terraform.VersionString()) -} diff --git a/builtin/providers/postgresql/provider_test.go b/builtin/providers/postgresql/provider_test.go deleted file mode 100644 index 697c83329..000000000 --- a/builtin/providers/postgresql/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package postgresql - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - 
testAccProviders = map[string]terraform.ResourceProvider{ - "postgresql": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - var host string - if host = os.Getenv("PGHOST"); host == "" { - t.Fatal("PGHOST must be set for acceptance tests") - } - if v := os.Getenv("PGUSER"); v == "" { - t.Fatal("PGUSER must be set for acceptance tests") - } -} diff --git a/builtin/providers/postgresql/resource_postgresql_database.go b/builtin/providers/postgresql/resource_postgresql_database.go deleted file mode 100644 index 81744d4ee..000000000 --- a/builtin/providers/postgresql/resource_postgresql_database.go +++ /dev/null @@ -1,521 +0,0 @@ -package postgresql - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "log" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/lib/pq" -) - -const ( - dbAllowConnsAttr = "allow_connections" - dbCTypeAttr = "lc_ctype" - dbCollationAttr = "lc_collate" - dbConnLimitAttr = "connection_limit" - dbEncodingAttr = "encoding" - dbIsTemplateAttr = "is_template" - dbNameAttr = "name" - dbOwnerAttr = "owner" - dbTablespaceAttr = "tablespace_name" - dbTemplateAttr = "template" -) - -func resourcePostgreSQLDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourcePostgreSQLDatabaseCreate, - Read: resourcePostgreSQLDatabaseRead, - Update: resourcePostgreSQLDatabaseUpdate, - Delete: resourcePostgreSQLDatabaseDelete, - Exists: resourcePostgreSQLDatabaseExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - dbNameAttr: { - Type: schema.TypeString, - Required: true, - Description: "The PostgreSQL database name to connect to", - }, - dbOwnerAttr: { - 
Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The ROLE which owns the database", - }, - dbTemplateAttr: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: "The name of the template from which to create the new database", - }, - dbEncodingAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Character set encoding to use in the new database", - }, - dbCollationAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Collation order (LC_COLLATE) to use in the new database", - }, - dbCTypeAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Character classification (LC_CTYPE) to use in the new database", - }, - dbTablespaceAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The name of the tablespace that will be associated with the new database", - }, - dbConnLimitAttr: { - Type: schema.TypeInt, - Optional: true, - Default: -1, - Description: "How many concurrent connections can be made to this database", - ValidateFunc: validateConnLimit, - }, - dbAllowConnsAttr: { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "If false then no one can connect to this database", - }, - dbIsTemplateAttr: { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "If true, then this database can be cloned by any user with CREATEDB privileges", - }, - }, - } -} - -func resourcePostgreSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err) - } - defer conn.Close() - - dbName := d.Get(dbNameAttr).(string) - b := bytes.NewBufferString("CREATE DATABASE ") - fmt.Fprint(b, 
pq.QuoteIdentifier(dbName)) - - //needed in order to set the owner of the db if the connection user is not a superuser - err = grantRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error adding connection user (%q) to ROLE %q: {{err}}", c.username, d.Get(dbOwnerAttr).(string)), err) - } - defer func() { - //undo the grant if the connection user is not a superuser - err = revokeRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username) - if err != nil { - err = errwrap.Wrapf(fmt.Sprintf("Error removing connection user (%q) from ROLE %q: {{err}}", c.username, d.Get(dbOwnerAttr).(string)), err) - } - }() - - // Handle each option individually and stream results into the query - // buffer. - - switch v, ok := d.GetOk(dbOwnerAttr); { - case ok: - fmt.Fprint(b, " OWNER ", pq.QuoteIdentifier(v.(string))) - default: - // No owner specified in the config, default to using - // the connecting username. - fmt.Fprint(b, " OWNER ", pq.QuoteIdentifier(c.username)) - } - - switch v, ok := d.GetOk(dbTemplateAttr); { - case ok: - fmt.Fprint(b, " TEMPLATE ", pq.QuoteIdentifier(v.(string))) - case v.(string) == "", strings.ToUpper(v.(string)) != "DEFAULT": - fmt.Fprint(b, " TEMPLATE template0") - } - - switch v, ok := d.GetOk(dbEncodingAttr); { - case ok: - fmt.Fprint(b, " ENCODING ", pq.QuoteIdentifier(v.(string))) - case v.(string) == "", strings.ToUpper(v.(string)) != "DEFAULT": - fmt.Fprint(b, ` ENCODING "UTF8"`) - } - - switch v, ok := d.GetOk(dbCollationAttr); { - case ok: - fmt.Fprint(b, " LC_COLLATE ", pq.QuoteIdentifier(v.(string))) - case v.(string) == "", strings.ToUpper(v.(string)) != "DEFAULT": - fmt.Fprint(b, ` LC_COLLATE "C"`) - } - - switch v, ok := d.GetOk(dbCTypeAttr); { - case ok: - fmt.Fprint(b, " LC_CTYPE ", pq.QuoteIdentifier(v.(string))) - case v.(string) == "", strings.ToUpper(v.(string)) != "DEFAULT": - fmt.Fprint(b, ` LC_CTYPE "C"`) - } - - if v, ok := d.GetOk(dbTablespaceAttr); ok { - 
fmt.Fprint(b, " TABLESPACE ", pq.QuoteIdentifier(v.(string))) - } - - { - val := d.Get(dbAllowConnsAttr).(bool) - fmt.Fprint(b, " ALLOW_CONNECTIONS ", val) - } - - { - val := d.Get(dbConnLimitAttr).(int) - fmt.Fprint(b, " CONNECTION LIMIT ", val) - } - - { - val := d.Get(dbIsTemplateAttr).(bool) - fmt.Fprint(b, " IS_TEMPLATE ", val) - } - - query := b.String() - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating database %q: {{err}}", dbName), err) - } - - d.SetId(dbName) - - // Set err outside of the return so that the deferred revoke can override err - // if necessary. - err = resourcePostgreSQLDatabaseReadImpl(d, meta) - return err -} - -func resourcePostgreSQLDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err) - } - defer conn.Close() - - dbName := d.Get(dbNameAttr).(string) - - if isTemplate := d.Get(dbIsTemplateAttr).(bool); isTemplate { - // Template databases must have this attribute cleared before - // they can be dropped. 
- if err := doSetDBIsTemplate(conn, dbName, false); err != nil { - return errwrap.Wrapf("Error updating database IS_TEMPLATE during DROP DATABASE: {{err}}", err) - } - } - - if err := setDBIsTemplate(conn, d); err != nil { - return err - } - - query := fmt.Sprintf("DROP DATABASE %s", pq.QuoteIdentifier(dbName)) - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf("Error dropping database: {{err}}", err) - } - - d.SetId("") - - return nil -} - -func resourcePostgreSQLDatabaseExists(d *schema.ResourceData, meta interface{}) (bool, error) { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - conn, err := c.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var dbName string - err = conn.QueryRow("SELECT d.datname from pg_database d WHERE datname=$1", d.Id()).Scan(&dbName) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, err - } - - return true, nil -} - -func resourcePostgreSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - return resourcePostgreSQLDatabaseReadImpl(d, meta) -} - -func resourcePostgreSQLDatabaseReadImpl(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - dbId := d.Id() - var dbName, ownerName string - err = conn.QueryRow("SELECT d.datname, pg_catalog.pg_get_userbyid(d.datdba) from pg_database d WHERE datname=$1", dbId).Scan(&dbName, &ownerName) - switch { - case err == sql.ErrNoRows: - log.Printf("[WARN] PostgreSQL database (%q) not found", dbId) - d.SetId("") - return nil - case err != nil: - return errwrap.Wrapf("Error reading database: {{err}}", err) - } - - var dbEncoding, dbCollation, dbCType, dbTablespaceName string - var dbConnLimit int - var dbAllowConns, dbIsTemplate bool - err = conn.QueryRow(`SELECT 
pg_catalog.pg_encoding_to_char(d.encoding), d.datcollate, d.datctype, ts.spcname, d.datconnlimit, d.datallowconn, d.datistemplate FROM pg_catalog.pg_database AS d, pg_catalog.pg_tablespace AS ts WHERE d.datname = $1 AND d.dattablespace = ts.oid`, dbId). - Scan( - &dbEncoding, &dbCollation, &dbCType, &dbTablespaceName, - &dbConnLimit, &dbAllowConns, &dbIsTemplate, - ) - switch { - case err == sql.ErrNoRows: - log.Printf("[WARN] PostgreSQL database (%q) not found", dbId) - d.SetId("") - return nil - case err != nil: - return errwrap.Wrapf("Error reading database: {{err}}", err) - default: - d.Set(dbNameAttr, dbName) - d.Set(dbOwnerAttr, ownerName) - d.Set(dbEncodingAttr, dbEncoding) - d.Set(dbCollationAttr, dbCollation) - d.Set(dbCTypeAttr, dbCType) - d.Set(dbTablespaceAttr, dbTablespaceName) - d.Set(dbConnLimitAttr, dbConnLimit) - d.Set(dbAllowConnsAttr, dbAllowConns) - d.Set(dbIsTemplateAttr, dbIsTemplate) - dbTemplate := d.Get(dbTemplateAttr).(string) - if dbTemplate == "" { - dbTemplate = "template0" - } - d.Set(dbTemplateAttr, dbTemplate) - return nil - } -} - -func resourcePostgreSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - if err := setDBName(conn, d); err != nil { - return err - } - - if err := setDBOwner(c, conn, d); err != nil { - return err - } - - if err := setDBTablespace(conn, d); err != nil { - return err - } - - if err := setDBConnLimit(conn, d); err != nil { - return err - } - - if err := setDBAllowConns(conn, d); err != nil { - return err - } - - if err := setDBIsTemplate(conn, d); err != nil { - return err - } - - // Empty values: ALTER DATABASE name RESET configuration_parameter; - - return resourcePostgreSQLDatabaseReadImpl(d, meta) -} - -func setDBName(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbNameAttr) { - return nil - } - - oraw, nraw 
:= d.GetChange(dbNameAttr) - o := oraw.(string) - n := nraw.(string) - if n == "" { - return errors.New("Error setting database name to an empty string") - } - - query := fmt.Sprintf("ALTER DATABASE %s RENAME TO %s", pq.QuoteIdentifier(o), pq.QuoteIdentifier(n)) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database name: {{err}}", err) - } - d.SetId(n) - - return nil -} - -func setDBOwner(c *Client, conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbOwnerAttr) { - return nil - } - - owner := d.Get(dbOwnerAttr).(string) - if owner == "" { - return nil - } - - //needed in order to set the owner of the db if the connection user is not a superuser - err := grantRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error adding connection user (%q) to ROLE %q: {{err}}", c.username, d.Get(dbOwnerAttr).(string)), err) - } - defer func() { - // undo the grant if the connection user is not a superuser - err = revokeRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username) - if err != nil { - err = errwrap.Wrapf(fmt.Sprintf("Error removing connection user (%q) from ROLE %q: {{err}}", c.username, d.Get(dbOwnerAttr).(string)), err) - } - }() - - dbName := d.Get(dbNameAttr).(string) - query := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner)) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database OWNER: {{err}}", err) - } - - return err -} - -func setDBTablespace(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbTablespaceAttr) { - return nil - } - - tbspName := d.Get(dbTablespaceAttr).(string) - dbName := d.Get(dbNameAttr).(string) - var query string - if tbspName == "" || strings.ToUpper(tbspName) == "DEFAULT" { - query = fmt.Sprintf("ALTER DATABASE %s RESET TABLESPACE", pq.QuoteIdentifier(dbName)) - } else { - query = fmt.Sprintf("ALTER DATABASE %s 
SET TABLESPACE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(tbspName)) - } - - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database TABLESPACE: {{err}}", err) - } - - return nil -} - -func setDBConnLimit(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbConnLimitAttr) { - return nil - } - - connLimit := d.Get(dbConnLimitAttr).(int) - dbName := d.Get(dbNameAttr).(string) - query := fmt.Sprintf("ALTER DATABASE %s CONNECTION LIMIT = %d", pq.QuoteIdentifier(dbName), connLimit) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database CONNECTION LIMIT: {{err}}", err) - } - - return nil -} - -func setDBAllowConns(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbAllowConnsAttr) { - return nil - } - - allowConns := d.Get(dbAllowConnsAttr).(bool) - dbName := d.Get(dbNameAttr).(string) - query := fmt.Sprintf("ALTER DATABASE %s ALLOW_CONNECTIONS %t", pq.QuoteIdentifier(dbName), allowConns) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database ALLOW_CONNECTIONS: {{err}}", err) - } - - return nil -} - -func setDBIsTemplate(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(dbIsTemplateAttr) { - return nil - } - - if err := doSetDBIsTemplate(conn, d.Get(dbNameAttr).(string), d.Get(dbIsTemplateAttr).(bool)); err != nil { - return errwrap.Wrapf("Error updating database IS_TEMPLATE: {{err}}", err) - } - - return nil -} - -func doSetDBIsTemplate(conn *sql.DB, dbName string, isTemplate bool) error { - query := fmt.Sprintf("ALTER DATABASE %s IS_TEMPLATE %t", pq.QuoteIdentifier(dbName), isTemplate) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating database IS_TEMPLATE: {{err}}", err) - } - - return nil -} - -func grantRoleMembership(conn *sql.DB, dbOwner string, connUsername string) error { - if dbOwner != "" && dbOwner != connUsername { - query := fmt.Sprintf("GRANT %s TO %s", 
pq.QuoteIdentifier(dbOwner), pq.QuoteIdentifier(connUsername)) - _, err := conn.Query(query) - if err != nil { - // is already member or role - if strings.Contains(err.Error(), "duplicate key value violates unique constraint") { - return nil - } - return errwrap.Wrapf("Error granting membership: {{err}}", err) - } - } - return nil -} - -func revokeRoleMembership(conn *sql.DB, dbOwner string, connUsername string) error { - if dbOwner != "" && dbOwner != connUsername { - query := fmt.Sprintf("REVOKE %s FROM %s", pq.QuoteIdentifier(dbOwner), pq.QuoteIdentifier(connUsername)) - _, err := conn.Query(query) - if err != nil { - return errwrap.Wrapf("Error revoking membership: {{err}}", err) - } - } - return nil -} diff --git a/builtin/providers/postgresql/resource_postgresql_database_test.go b/builtin/providers/postgresql/resource_postgresql_database_test.go deleted file mode 100644 index 581c18e60..000000000 --- a/builtin/providers/postgresql/resource_postgresql_database_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package postgresql - -import ( - "database/sql" - "errors" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPostgresqlDatabase_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgreSQLDatabaseConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlDatabaseExists("postgresql_database.mydb"), - resource.TestCheckResourceAttr( - "postgresql_database.mydb", "name", "mydb"), - resource.TestCheckResourceAttr( - "postgresql_database.mydb", "owner", "myrole"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "owner", "myrole"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "name", "default_opts_name"), - 
resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "template", "template0"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "encoding", "UTF8"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "lc_collate", "C"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "lc_ctype", "C"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "tablespace_name", "pg_default"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "connection_limit", "-1"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "allow_connections", "true"), - resource.TestCheckResourceAttr( - "postgresql_database.default_opts", "is_template", "false"), - - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "owner", "myrole"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "name", "custom_template_db"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "template", "template0"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "encoding", "UTF8"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "lc_collate", "en_US.UTF-8"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "lc_ctype", "en_US.UTF-8"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "tablespace_name", "pg_default"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "connection_limit", "10"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "allow_connections", "false"), - resource.TestCheckResourceAttr( - "postgresql_database.modified_opts", "is_template", "true"), - - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "owner", "myrole"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "name", "bad_template_db"), - 
resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "template", "template0"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "encoding", "LATIN1"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "lc_collate", "C"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "lc_ctype", "C"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "tablespace_name", "pg_default"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "connection_limit", "0"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "allow_connections", "true"), - resource.TestCheckResourceAttr( - "postgresql_database.pathological_opts", "is_template", "true"), - ), - }, - }, - }) -} - -func TestAccPostgresqlDatabase_DefaultOwner(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlDatabaseDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgreSQLDatabaseConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlDatabaseExists("postgresql_database.mydb_default_owner"), - resource.TestCheckResourceAttr( - "postgresql_database.mydb_default_owner", "name", "mydb_default_owner"), - resource.TestCheckResourceAttrSet( - "postgresql_database.mydb_default_owner", "owner"), - ), - }, - }, - }) -} - -func testAccCheckPostgresqlDatabaseDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "postgresql_database" { - continue - } - - exists, err := checkDatabaseExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking db %s", err) - } - - if exists { - return errors.New("Db still exists after destroy") - } - } - - return nil -} - -func testAccCheckPostgresqlDatabaseExists(n string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return errors.New("No ID is set") - } - - client := testAccProvider.Meta().(*Client) - exists, err := checkDatabaseExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking db %s", err) - } - - if !exists { - return errors.New("Db not found") - } - - return nil - } -} - -func checkDatabaseExists(client *Client, dbName string) (bool, error) { - conn, err := client.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var _rez int - err = conn.QueryRow("SELECT 1 from pg_database d WHERE datname=$1", dbName).Scan(&_rez) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, fmt.Errorf("Error reading info about database: %s", err) - default: - return true, nil - } -} - -var testAccPostgreSQLDatabaseConfig = ` -resource "postgresql_role" "myrole" { - name = "myrole" - login = true -} - -resource "postgresql_database" "mydb" { - name = "mydb" - owner = "${postgresql_role.myrole.name}" -} - -resource "postgresql_database" "mydb2" { - name = "mydb2" - owner = "${postgresql_role.myrole.name}" -} - -resource "postgresql_database" "default_opts" { - name = "default_opts_name" - owner = "${postgresql_role.myrole.name}" - template = "template0" - encoding = "UTF8" - lc_collate = "C" - lc_ctype = "C" - tablespace_name = "pg_default" - connection_limit = -1 - allow_connections = true - is_template = false -} - -resource "postgresql_database" "modified_opts" { - name = "custom_template_db" - owner = "${postgresql_role.myrole.name}" - template = "template0" - encoding = "UTF8" - lc_collate = "en_US.UTF-8" - lc_ctype = "en_US.UTF-8" - tablespace_name = "pg_default" - connection_limit = 10 - allow_connections = false - is_template = true -} - -resource "postgresql_database" "pathological_opts" { 
- name = "bad_template_db" - owner = "${postgresql_role.myrole.name}" - template = "template0" - encoding = "LATIN1" - lc_collate = "C" - lc_ctype = "C" - tablespace_name = "pg_default" - connection_limit = 0 - allow_connections = true - is_template = true -} - -resource "postgresql_database" "mydb_default_owner" { - name = "mydb_default_owner" -} - -` diff --git a/builtin/providers/postgresql/resource_postgresql_extension.go b/builtin/providers/postgresql/resource_postgresql_extension.go deleted file mode 100644 index d70ebd94e..000000000 --- a/builtin/providers/postgresql/resource_postgresql_extension.go +++ /dev/null @@ -1,238 +0,0 @@ -package postgresql - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "log" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/lib/pq" -) - -const ( - extNameAttr = "name" - extSchemaAttr = "schema" - extVersionAttr = "version" -) - -func resourcePostgreSQLExtension() *schema.Resource { - return &schema.Resource{ - Create: resourcePostgreSQLExtensionCreate, - Read: resourcePostgreSQLExtensionRead, - Update: resourcePostgreSQLExtensionUpdate, - Delete: resourcePostgreSQLExtensionDelete, - Exists: resourcePostgreSQLExtensionExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - extNameAttr: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - extSchemaAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Sets the schema of an extension", - }, - extVersionAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Sets the version number of the extension", - }, - }, - } -} - -func resourcePostgreSQLExtensionCreate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - 
extName := d.Get(extNameAttr).(string) - - b := bytes.NewBufferString("CREATE EXTENSION ") - fmt.Fprint(b, pq.QuoteIdentifier(extName)) - - if v, ok := d.GetOk(extSchemaAttr); ok { - fmt.Fprint(b, " SCHEMA ", pq.QuoteIdentifier(v.(string))) - } - - if v, ok := d.GetOk(extVersionAttr); ok { - fmt.Fprint(b, " VERSION ", pq.QuoteIdentifier(v.(string))) - } - - query := b.String() - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf("Error creating extension: {{err}}", err) - } - - d.SetId(extName) - - return resourcePostgreSQLExtensionReadImpl(d, meta) -} - -func resourcePostgreSQLExtensionExists(d *schema.ResourceData, meta interface{}) (bool, error) { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var extName string - err = conn.QueryRow("SELECT extname FROM pg_catalog.pg_extension WHERE extname = $1", d.Id()).Scan(&extName) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, err - } - - return true, nil -} - -func resourcePostgreSQLExtensionRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - return resourcePostgreSQLExtensionReadImpl(d, meta) -} - -func resourcePostgreSQLExtensionReadImpl(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - extID := d.Id() - var extName, extSchema, extVersion string - err = conn.QueryRow("SELECT e.extname, n.nspname, e.extversion FROM pg_catalog.pg_extension e, pg_catalog.pg_namespace n WHERE n.oid = e.extnamespace AND e.extname = $1", extID).Scan(&extName, &extSchema, &extVersion) - switch { - case err == sql.ErrNoRows: - log.Printf("[WARN] PostgreSQL extension (%s) not found", d.Id()) - d.SetId("") - return nil - case err != nil: - return errwrap.Wrapf("Error 
reading extension: {{err}}", err) - default: - d.Set(extNameAttr, extName) - d.Set(extSchemaAttr, extSchema) - d.Set(extVersionAttr, extVersion) - d.SetId(extName) - return nil - } -} - -func resourcePostgreSQLExtensionDelete(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - extID := d.Id() - - query := fmt.Sprintf("DROP EXTENSION %s", pq.QuoteIdentifier(extID)) - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf("Error deleting extension: {{err}}", err) - } - - d.SetId("") - - return nil -} - -func resourcePostgreSQLExtensionUpdate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - // Can't rename a schema - - if err := setExtSchema(conn, d); err != nil { - return err - } - - if err := setExtVersion(conn, d); err != nil { - return err - } - - return resourcePostgreSQLExtensionReadImpl(d, meta) -} - -func setExtSchema(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(extSchemaAttr) { - return nil - } - - extID := d.Id() - _, nraw := d.GetChange(extSchemaAttr) - n := nraw.(string) - if n == "" { - return errors.New("Error setting extension name to an empty string") - } - - query := fmt.Sprintf("ALTER EXTENSION %s SET SCHEMA %s", pq.QuoteIdentifier(extID), pq.QuoteIdentifier(n)) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating extension SCHEMA: {{err}}", err) - } - - return nil -} - -func setExtVersion(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(extVersionAttr) { - return nil - } - - extID := d.Id() - - b := bytes.NewBufferString("ALTER EXTENSION ") - fmt.Fprintf(b, "%s UPDATE", pq.QuoteIdentifier(extID)) - - _, nraw := d.GetChange(extVersionAttr) - n := 
nraw.(string) - if n != "" { - fmt.Fprintf(b, " TO %s", pq.QuoteIdentifier(n)) - } - - query := b.String() - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating extension version: {{err}}", err) - } - - return nil -} diff --git a/builtin/providers/postgresql/resource_postgresql_extension_test.go b/builtin/providers/postgresql/resource_postgresql_extension_test.go deleted file mode 100644 index 7deb71a39..000000000 --- a/builtin/providers/postgresql/resource_postgresql_extension_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package postgresql - -import ( - "database/sql" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPostgresqlExtension_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlExtensionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlExtensionConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlExtensionExists("postgresql_extension.myextension"), - resource.TestCheckResourceAttr( - "postgresql_extension.myextension", "name", "pg_trgm"), - resource.TestCheckResourceAttr( - "postgresql_extension.myextension", "schema", "public"), - - // NOTE(sean): Version 1.3 is what's - // shipped with PostgreSQL 9.6.1. This - // version number may drift in the - // future. 
- resource.TestCheckResourceAttr( - "postgresql_extension.myextension", "version", "1.3"), - ), - }, - }, - }) -} - -func testAccCheckPostgresqlExtensionDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "postgresql_extension" { - continue - } - - exists, err := checkExtensionExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking extension %s", err) - } - - if exists { - return fmt.Errorf("Extension still exists after destroy") - } - } - - return nil -} - -func testAccCheckPostgresqlExtensionExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*Client) - exists, err := checkExtensionExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking extension %s", err) - } - - if !exists { - return fmt.Errorf("Extension not found") - } - - return nil - } -} - -func TestAccPostgresqlExtension_SchemaRename(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlExtensionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlExtensionSchemaChange1, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlExtensionExists("postgresql_extension.ext1trgm"), - resource.TestCheckResourceAttr( - "postgresql_schema.ext1foo", "name", "foo"), - resource.TestCheckResourceAttr( - "postgresql_extension.ext1trgm", "name", "pg_trgm"), - resource.TestCheckResourceAttr( - "postgresql_extension.ext1trgm", "name", "pg_trgm"), - resource.TestCheckResourceAttr( - "postgresql_extension.ext1trgm", "schema", "foo"), - ), - }, - { - Config: testAccPostgresqlExtensionSchemaChange2, - 
Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlExtensionExists("postgresql_extension.ext1trgm"), - resource.TestCheckResourceAttr( - "postgresql_schema.ext1foo", "name", "bar"), - resource.TestCheckResourceAttr( - "postgresql_extension.ext1trgm", "name", "pg_trgm"), - resource.TestCheckResourceAttr( - "postgresql_extension.ext1trgm", "schema", "bar"), - ), - }, - }, - }) -} - -func checkExtensionExists(client *Client, extensionName string) (bool, error) { - conn, err := client.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var _rez bool - err = conn.QueryRow("SELECT TRUE from pg_catalog.pg_extension d WHERE extname=$1", extensionName).Scan(&_rez) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, fmt.Errorf("Error reading info about extension: %s", err) - default: - return true, nil - } -} - -var testAccPostgresqlExtensionConfig = ` -resource "postgresql_extension" "myextension" { - name = "pg_trgm" -} -` - -var testAccPostgresqlExtensionSchemaChange1 = ` -resource "postgresql_schema" "ext1foo" { - name = "foo" -} - -resource "postgresql_extension" "ext1trgm" { - name = "pg_trgm" - schema = "${postgresql_schema.ext1foo.name}" -} -` - -var testAccPostgresqlExtensionSchemaChange2 = ` -resource "postgresql_schema" "ext1foo" { - name = "bar" -} - -resource "postgresql_extension" "ext1trgm" { - name = "pg_trgm" - schema = "${postgresql_schema.ext1foo.name}" -} -` diff --git a/builtin/providers/postgresql/resource_postgresql_role.go b/builtin/providers/postgresql/resource_postgresql_role.go deleted file mode 100644 index 385abd196..000000000 --- a/builtin/providers/postgresql/resource_postgresql_role.go +++ /dev/null @@ -1,631 +0,0 @@ -package postgresql - -import ( - "database/sql" - "errors" - "fmt" - "log" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/lib/pq" -) - -const ( - roleBypassRLSAttr = 
"bypass_row_level_security" - roleConnLimitAttr = "connection_limit" - roleCreateDBAttr = "create_database" - roleCreateRoleAttr = "create_role" - roleEncryptedPassAttr = "encrypted_password" - roleInheritAttr = "inherit" - roleLoginAttr = "login" - roleNameAttr = "name" - rolePasswordAttr = "password" - roleReplicationAttr = "replication" - roleSkipDropRoleAttr = "skip_drop_role" - roleSkipReassignOwnedAttr = "skip_reassign_owned" - roleSuperuserAttr = "superuser" - roleValidUntilAttr = "valid_until" - - // Deprecated options - roleDepEncryptedAttr = "encrypted" -) - -func resourcePostgreSQLRole() *schema.Resource { - return &schema.Resource{ - Create: resourcePostgreSQLRoleCreate, - Read: resourcePostgreSQLRoleRead, - Update: resourcePostgreSQLRoleUpdate, - Delete: resourcePostgreSQLRoleDelete, - Exists: resourcePostgreSQLRoleExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - roleNameAttr: { - Type: schema.TypeString, - Required: true, - Description: "The name of the role", - }, - rolePasswordAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - Sensitive: true, - DefaultFunc: schema.EnvDefaultFunc("PGPASSWORD", nil), - Description: "Sets the role's password", - }, - roleDepEncryptedAttr: { - Type: schema.TypeString, - Optional: true, - Deprecated: fmt.Sprintf("Rename PostgreSQL role resource attribute %q to %q", roleDepEncryptedAttr, roleEncryptedPassAttr), - }, - roleEncryptedPassAttr: { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Control whether the password is stored encrypted in the system catalogs", - }, - roleValidUntilAttr: { - Type: schema.TypeString, - Optional: true, - Default: "infinity", - Description: "Sets a date and time after which the role's password is no longer valid", - }, - roleConnLimitAttr: { - Type: schema.TypeInt, - Optional: true, - Default: -1, - Description: "How many concurrent connections can be made 
with this role", - ValidateFunc: validateConnLimit, - }, - roleSuperuserAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Determine whether the new role is a "superuser"`, - }, - roleCreateDBAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Define a role's ability to create databases", - }, - roleCreateRoleAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Determine whether this role will be permitted to create new roles", - }, - roleInheritAttr: { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Determine whether a role "inherits" the privileges of roles it is a member of`, - }, - roleLoginAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Determine whether a role is allowed to log in", - }, - roleReplicationAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Determine whether a role is allowed to initiate streaming replication or put the system in and out of backup mode", - }, - roleBypassRLSAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Determine whether a role bypasses every row-level security (RLS) policy", - }, - roleSkipDropRoleAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Skip actually running the DROP ROLE command when removing a ROLE from PostgreSQL", - }, - roleSkipReassignOwnedAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Skip actually running the REASSIGN OWNED command when removing a role from PostgreSQL", - }, - }, - } -} - -func resourcePostgreSQLRoleCreate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err) - } - defer conn.Close() - - stringOpts := 
[]struct { - hclKey string - sqlKey string - }{ - {rolePasswordAttr, "PASSWORD"}, - {roleValidUntilAttr, "VALID UNTIL"}, - } - intOpts := []struct { - hclKey string - sqlKey string - }{ - {roleConnLimitAttr, "CONNECTION LIMIT"}, - } - boolOpts := []struct { - hclKey string - sqlKeyEnable string - sqlKeyDisable string - }{ - {roleSuperuserAttr, "CREATEDB", "NOCREATEDB"}, - {roleCreateRoleAttr, "CREATEROLE", "NOCREATEROLE"}, - {roleInheritAttr, "INHERIT", "NOINHERIT"}, - {roleLoginAttr, "LOGIN", "NOLOGIN"}, - {roleReplicationAttr, "REPLICATION", "NOREPLICATION"}, - {roleBypassRLSAttr, "BYPASSRLS", "NOBYPASSRLS"}, - - // roleEncryptedPassAttr is used only when rolePasswordAttr is set. - // {roleEncryptedPassAttr, "ENCRYPTED", "UNENCRYPTED"}, - } - - createOpts := make([]string, 0, len(stringOpts)+len(intOpts)+len(boolOpts)) - - for _, opt := range stringOpts { - v, ok := d.GetOk(opt.hclKey) - if !ok { - continue - } - - val := v.(string) - if val != "" { - switch { - case opt.hclKey == rolePasswordAttr: - if strings.ToUpper(v.(string)) == "NULL" { - createOpts = append(createOpts, "PASSWORD NULL") - } else { - if d.Get(roleEncryptedPassAttr).(bool) { - createOpts = append(createOpts, "ENCRYPTED") - } else { - createOpts = append(createOpts, "UNENCRYPTED") - } - createOpts = append(createOpts, fmt.Sprintf("%s '%s'", opt.sqlKey, pqQuoteLiteral(val))) - } - case opt.hclKey == roleValidUntilAttr: - switch { - case v.(string) == "", strings.ToLower(v.(string)) == "infinity": - createOpts = append(createOpts, fmt.Sprintf("%s '%s'", opt.sqlKey, "infinity")) - default: - createOpts = append(createOpts, fmt.Sprintf("%s %s", opt.sqlKey, pq.QuoteIdentifier(val))) - } - default: - createOpts = append(createOpts, fmt.Sprintf("%s %s", opt.sqlKey, pq.QuoteIdentifier(val))) - } - } - } - - for _, opt := range intOpts { - val := d.Get(opt.hclKey).(int) - createOpts = append(createOpts, fmt.Sprintf("%s %d", opt.sqlKey, val)) - } - - for _, opt := range boolOpts { - if opt.hclKey == 
roleEncryptedPassAttr { - // This attribute is handled above in the stringOpts - // loop. - continue - } - val := d.Get(opt.hclKey).(bool) - valStr := opt.sqlKeyDisable - if val { - valStr = opt.sqlKeyEnable - } - createOpts = append(createOpts, valStr) - } - - roleName := d.Get(roleNameAttr).(string) - createStr := strings.Join(createOpts, " ") - if len(createOpts) > 0 { - // FIXME(seanc@): Work around ParAccel/AWS RedShift's ancient fork of PostgreSQL - // createStr = " WITH " + createStr - createStr = " " + createStr - } - - query := fmt.Sprintf("CREATE ROLE %s%s", pq.QuoteIdentifier(roleName), createStr) - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating role %s: {{err}}", roleName), err) - } - - d.SetId(roleName) - - return resourcePostgreSQLRoleReadImpl(d, meta) -} - -func resourcePostgreSQLRoleDelete(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - txn, err := conn.Begin() - if err != nil { - return err - } - defer txn.Rollback() - - roleName := d.Get(roleNameAttr).(string) - - queries := make([]string, 0, 3) - if !d.Get(roleSkipReassignOwnedAttr).(bool) { - queries = append(queries, fmt.Sprintf("REASSIGN OWNED BY %s TO CURRENT_USER", pq.QuoteIdentifier(roleName))) - queries = append(queries, fmt.Sprintf("DROP OWNED BY %s", pq.QuoteIdentifier(roleName))) - } - - if !d.Get(roleSkipDropRoleAttr).(bool) { - queries = append(queries, fmt.Sprintf("DROP ROLE %s", pq.QuoteIdentifier(roleName))) - } - - if len(queries) > 0 { - for _, query := range queries { - _, err = conn.Query(query) - if err != nil { - return errwrap.Wrapf("Error deleting role: {{err}}", err) - } - } - - if err := txn.Commit(); err != nil { - return errwrap.Wrapf("Error committing schema: {{err}}", err) - } - } - - d.SetId("") - - return nil -} - -func resourcePostgreSQLRoleExists(d 
*schema.ResourceData, meta interface{}) (bool, error) { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - conn, err := c.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var roleName string - err = conn.QueryRow("SELECT rolname FROM pg_catalog.pg_roles WHERE rolname=$1", d.Id()).Scan(&roleName) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, err - } - - return true, nil -} - -func resourcePostgreSQLRoleRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - return resourcePostgreSQLRoleReadImpl(d, meta) -} - -func resourcePostgreSQLRoleReadImpl(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - roleId := d.Id() - var roleSuperuser, roleInherit, roleCreateRole, roleCreateDB, roleCanLogin, roleReplication, roleBypassRLS bool - var roleConnLimit int - var roleName, roleValidUntil string - err = conn.QueryRow("SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolconnlimit, COALESCE(rolvaliduntil::TEXT, 'infinity'), rolbypassrls FROM pg_catalog.pg_roles WHERE rolname=$1", roleId).Scan(&roleName, &roleSuperuser, &roleInherit, &roleCreateRole, &roleCreateDB, &roleCanLogin, &roleReplication, &roleConnLimit, &roleValidUntil, &roleBypassRLS) - switch { - case err == sql.ErrNoRows: - log.Printf("[WARN] PostgreSQL role (%s) not found", roleId) - d.SetId("") - return nil - case err != nil: - return errwrap.Wrapf("Error reading role: {{err}}", err) - default: - d.Set(roleNameAttr, roleName) - d.Set(roleBypassRLSAttr, roleBypassRLS) - d.Set(roleConnLimitAttr, roleConnLimit) - d.Set(roleCreateDBAttr, roleCreateDB) - d.Set(roleCreateRoleAttr, roleCreateRole) - d.Set(roleEncryptedPassAttr, true) - d.Set(roleInheritAttr, roleInherit) - 
d.Set(roleLoginAttr, roleCanLogin) - d.Set(roleReplicationAttr, roleReplication) - d.Set(roleSkipDropRoleAttr, d.Get(roleSkipDropRoleAttr).(bool)) - d.Set(roleSkipReassignOwnedAttr, d.Get(roleSkipReassignOwnedAttr).(bool)) - d.Set(roleSuperuserAttr, roleSuperuser) - d.Set(roleValidUntilAttr, roleValidUntil) - d.SetId(roleName) - } - - if !roleSuperuser { - // Return early if not superuser user - return nil - } - - var rolePassword string - err = conn.QueryRow("SELECT COALESCE(passwd, '') FROM pg_catalog.pg_shadow AS s WHERE s.usename = $1", roleId).Scan(&rolePassword) - switch { - case err == sql.ErrNoRows: - return errwrap.Wrapf(fmt.Sprintf("PostgreSQL role (%s) not found in shadow database: {{err}}", roleId), err) - case err != nil: - return errwrap.Wrapf("Error reading role: {{err}}", err) - default: - d.Set(rolePasswordAttr, rolePassword) - return nil - } -} - -func resourcePostgreSQLRoleUpdate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - if err := setRoleName(conn, d); err != nil { - return err - } - - if err := setRoleBypassRLS(conn, d); err != nil { - return err - } - - if err := setRoleConnLimit(conn, d); err != nil { - return err - } - - if err := setRoleCreateDB(conn, d); err != nil { - return err - } - - if err := setRoleCreateRole(conn, d); err != nil { - return err - } - - if err := setRoleInherit(conn, d); err != nil { - return err - } - - if err := setRoleLogin(conn, d); err != nil { - return err - } - - if err := setRoleReplication(conn, d); err != nil { - return err - } - - if err := setRoleSuperuser(conn, d); err != nil { - return err - } - - if err := setRoleValidUntil(conn, d); err != nil { - return err - } - - return resourcePostgreSQLRoleReadImpl(d, meta) -} - -func setRoleName(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleNameAttr) { - return nil - } - - 
oraw, nraw := d.GetChange(roleNameAttr) - o := oraw.(string) - n := nraw.(string) - if n == "" { - return errors.New("Error setting role name to an empty string") - } - - query := fmt.Sprintf("ALTER ROLE %s RENAME TO %s", pq.QuoteIdentifier(o), pq.QuoteIdentifier(n)) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role NAME: {{err}}", err) - } - d.SetId(n) - - return nil -} - -func setRoleBypassRLS(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleBypassRLSAttr) { - return nil - } - - bypassRLS := d.Get(roleBypassRLSAttr).(bool) - tok := "NOBYPASSRLS" - if bypassRLS { - tok = "BYPASSRLS" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role BYPASSRLS: {{err}}", err) - } - - return nil -} - -func setRoleConnLimit(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleConnLimitAttr) { - return nil - } - - connLimit := d.Get(roleConnLimitAttr).(int) - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s CONNECTION LIMIT %d", pq.QuoteIdentifier(roleName), connLimit) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role CONNECTION LIMIT: {{err}}", err) - } - - return nil -} - -func setRoleCreateDB(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleCreateDBAttr) { - return nil - } - - createDB := d.Get(roleCreateDBAttr).(bool) - tok := "NOCREATEDB" - if createDB { - tok = "CREATEDB" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role CREATEDB: {{err}}", err) - } - - return nil -} - -func setRoleCreateRole(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleCreateRoleAttr) { - 
return nil - } - - createRole := d.Get(roleCreateRoleAttr).(bool) - tok := "NOCREATEROLE" - if createRole { - tok = "CREATEROLE" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role CREATEROLE: {{err}}", err) - } - - return nil -} - -func setRoleInherit(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleInheritAttr) { - return nil - } - - inherit := d.Get(roleInheritAttr).(bool) - tok := "NOINHERIT" - if inherit { - tok = "INHERIT" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role INHERIT: {{err}}", err) - } - - return nil -} - -func setRoleLogin(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleLoginAttr) { - return nil - } - - login := d.Get(roleLoginAttr).(bool) - tok := "NOLOGIN" - if login { - tok = "LOGIN" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role LOGIN: {{err}}", err) - } - - return nil -} - -func setRoleReplication(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleReplicationAttr) { - return nil - } - - replication := d.Get(roleReplicationAttr).(bool) - tok := "NOREPLICATION" - if replication { - tok = "REPLICATION" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role REPLICATION: {{err}}", err) - } - - return nil -} - -func setRoleSuperuser(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleSuperuserAttr) { - 
return nil - } - - superuser := d.Get(roleSuperuserAttr).(bool) - tok := "NOSUPERUSER" - if superuser { - tok = "SUPERUSER" - } - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s WITH %s", pq.QuoteIdentifier(roleName), tok) - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role SUPERUSER: {{err}}", err) - } - - return nil -} - -func setRoleValidUntil(conn *sql.DB, d *schema.ResourceData) error { - if !d.HasChange(roleValidUntilAttr) { - return nil - } - - validUntil := d.Get(roleValidUntilAttr).(string) - if validUntil == "" { - return nil - } else if strings.ToLower(validUntil) == "infinity" { - validUntil = "infinity" - } - - roleName := d.Get(roleNameAttr).(string) - query := fmt.Sprintf("ALTER ROLE %s VALID UNTIL '%s'", pq.QuoteIdentifier(roleName), pqQuoteLiteral(validUntil)) - - if _, err := conn.Query(query); err != nil { - return errwrap.Wrapf("Error updating role VALID UNTIL: {{err}}", err) - } - - return nil -} diff --git a/builtin/providers/postgresql/resource_postgresql_role_test.go b/builtin/providers/postgresql/resource_postgresql_role_test.go deleted file mode 100644 index 3f51d9523..000000000 --- a/builtin/providers/postgresql/resource_postgresql_role_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package postgresql - -import ( - "database/sql" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPostgresqlRole_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlRoleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlRoleConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlRoleExists("postgresql_role.myrole2", "true"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "name", "testing_role_with_defaults"), - 
resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "superuser", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "create_database", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "create_role", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "inherit", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "replication", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "bypass_row_level_security", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "connection_limit", "-1"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "encrypted_password", "true"), - resource.TestCheckNoResourceAttr("postgresql_role.role_with_defaults", "password"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "valid_until", "infinity"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "skip_drop_role", "false"), - resource.TestCheckResourceAttr("postgresql_role.role_with_defaults", "skip_reassign_owned", "false"), - ), - }, - }, - }) -} - -func TestAccPostgresqlRole_Update(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlRoleDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlRoleUpdate1Config, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlRoleExists("postgresql_role.update_role", "true"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "name", "update_role"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "login", "true"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "connection_limit", "-1"), - ), - }, - { - Config: testAccPostgresqlRoleUpdate2Config, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckPostgresqlRoleExists("postgresql_role.update_role", "true"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "name", "update_role2"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "login", "true"), - resource.TestCheckResourceAttr("postgresql_role.update_role", "connection_limit", "5"), - ), - }, - }, - }) -} - -func testAccCheckPostgresqlRoleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "postgresql_role" { - continue - } - - exists, err := checkRoleExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking role %s", err) - } - - if exists { - return fmt.Errorf("Role still exists after destroy") - } - } - - return nil -} - -func testAccCheckPostgresqlRoleExists(n string, canLogin string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - actualCanLogin := rs.Primary.Attributes["login"] - if actualCanLogin != canLogin { - return fmt.Errorf("Wrong value for login expected %s got %s", canLogin, actualCanLogin) - } - - client := testAccProvider.Meta().(*Client) - exists, err := checkRoleExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking role %s", err) - } - - if !exists { - return fmt.Errorf("Role not found") - } - - return nil - } -} - -func checkRoleExists(client *Client, roleName string) (bool, error) { - conn, err := client.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var _rez int - err = conn.QueryRow("SELECT 1 from pg_roles d WHERE rolname=$1", roleName).Scan(&_rez) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, fmt.Errorf("Error reading info about role: %s", err) - default: - return true, 
nil - } -} - -var testAccPostgresqlRoleConfig = ` -resource "postgresql_role" "myrole2" { - name = "myrole2" - login = true -} - -resource "postgresql_role" "role_with_pwd" { - name = "role_with_pwd" - login = true - password = "mypass" -} - -resource "postgresql_role" "role_with_pwd_encr" { - name = "role_with_pwd_encr" - login = true - password = "mypass" - encrypted = true -} - -resource "postgresql_role" "role_with_pwd_no_login" { - name = "role_with_pwd_no_login" - password = "mypass" -} - -resource "postgresql_role" "role_simple" { - name = "role_simple" -} - -resource "postgresql_role" "role_with_defaults" { - name = "testing_role_with_defaults" - superuser = false - create_database = false - create_role = false - inherit = false - login = false - replication = false - bypass_row_level_security = false - connection_limit = -1 - encrypted_password = true - password = "" - skip_drop_role = false - skip_reassign_owned = false - valid_until = "infinity" -} -` - -var testAccPostgresqlRoleUpdate1Config = ` -resource "postgresql_role" "update_role" { - name = "update_role" - login = true -} -` - -var testAccPostgresqlRoleUpdate2Config = ` -resource "postgresql_role" "update_role" { - name = "update_role2" - login = true - connection_limit = 5 -} -` diff --git a/builtin/providers/postgresql/resource_postgresql_schema.go b/builtin/providers/postgresql/resource_postgresql_schema.go deleted file mode 100644 index 12f650971..000000000 --- a/builtin/providers/postgresql/resource_postgresql_schema.go +++ /dev/null @@ -1,541 +0,0 @@ -package postgresql - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "log" - "reflect" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" - "github.com/lib/pq" - "github.com/sean-/postgresql-acl" -) - -const ( - schemaNameAttr = "name" - schemaOwnerAttr = "owner" - schemaPolicyAttr = "policy" - schemaIfNotExists = "if_not_exists" - - schemaPolicyCreateAttr = "create" - 
schemaPolicyCreateWithGrantAttr = "create_with_grant" - schemaPolicyRoleAttr = "role" - schemaPolicyUsageAttr = "usage" - schemaPolicyUsageWithGrantAttr = "usage_with_grant" -) - -func resourcePostgreSQLSchema() *schema.Resource { - return &schema.Resource{ - Create: resourcePostgreSQLSchemaCreate, - Read: resourcePostgreSQLSchemaRead, - Update: resourcePostgreSQLSchemaUpdate, - Delete: resourcePostgreSQLSchemaDelete, - Exists: resourcePostgreSQLSchemaExists, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - schemaNameAttr: { - Type: schema.TypeString, - Required: true, - Description: "The name of the schema", - }, - schemaOwnerAttr: { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The ROLE name who owns the schema", - }, - schemaIfNotExists: { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "When true, use the existing schema if it exsts", - }, - schemaPolicyAttr: &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - schemaPolicyCreateAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If true, allow the specified ROLEs to CREATE new objects within the schema(s)", - ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyCreateWithGrantAttr}, - }, - schemaPolicyCreateWithGrantAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If true, allow the specified ROLEs to CREATE new objects within the schema(s) and GRANT the same CREATE privilege to different ROLEs", - ConflictsWith: []string{schemaPolicyAttr + "." 
+ schemaPolicyCreateAttr}, - }, - schemaPolicyRoleAttr: { - Type: schema.TypeString, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Default: "", - Description: "ROLE who will receive this policy (default: PUBLIC)", - }, - schemaPolicyUsageAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If true, allow the specified ROLEs to use objects within the schema(s)", - ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyUsageWithGrantAttr}, - }, - schemaPolicyUsageWithGrantAttr: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If true, allow the specified ROLEs to use objects within the schema(s) and GRANT the same USAGE privilege to different ROLEs", - ConflictsWith: []string{schemaPolicyAttr + "." + schemaPolicyUsageAttr}, - }, - }, - }, - }, - }, - } -} - -func resourcePostgreSQLSchemaCreate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - - queries := []string{} - - schemaName := d.Get(schemaNameAttr).(string) - { - b := bytes.NewBufferString("CREATE SCHEMA ") - if v := d.Get(schemaIfNotExists); v.(bool) { - fmt.Fprint(b, "IF NOT EXISTS ") - } - fmt.Fprint(b, pq.QuoteIdentifier(schemaName)) - - switch v, ok := d.GetOk(schemaOwnerAttr); { - case ok: - fmt.Fprint(b, " AUTHORIZATION ", pq.QuoteIdentifier(v.(string))) - } - queries = append(queries, b.String()) - } - - // ACL objects that can generate the necessary SQL - type RoleKey string - var schemaPolicies map[RoleKey]acl.Schema - - if policiesRaw, ok := d.GetOk(schemaPolicyAttr); ok { - policiesList := policiesRaw.(*schema.Set).List() - - // NOTE: len(policiesList) doesn't take into account multiple - // roles per policy. 
- schemaPolicies = make(map[RoleKey]acl.Schema, len(policiesList)) - - for _, policyRaw := range policiesList { - policyMap := policyRaw.(map[string]interface{}) - rolePolicy := schemaPolicyToACL(policyMap) - - roleKey := RoleKey(strings.ToLower(rolePolicy.Role)) - if existingRolePolicy, ok := schemaPolicies[roleKey]; ok { - schemaPolicies[roleKey] = existingRolePolicy.Merge(rolePolicy) - } else { - schemaPolicies[roleKey] = rolePolicy - } - } - } - - for _, policy := range schemaPolicies { - queries = append(queries, policy.Grants(schemaName)...) - } - - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return errwrap.Wrapf("Error connecting to PostgreSQL: {{err}}", err) - } - defer conn.Close() - - txn, err := conn.Begin() - if err != nil { - return err - } - defer txn.Rollback() - - for _, query := range queries { - _, err = txn.Query(query) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error creating schema %s: {{err}}", schemaName), err) - } - } - - if err := txn.Commit(); err != nil { - return errwrap.Wrapf("Error committing schema: {{err}}", err) - } - - d.SetId(schemaName) - - return resourcePostgreSQLSchemaReadImpl(d, meta) -} - -func resourcePostgreSQLSchemaDelete(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - txn, err := conn.Begin() - if err != nil { - return err - } - defer txn.Rollback() - - schemaName := d.Get(schemaNameAttr).(string) - - // NOTE(sean@): Deliberately not performing a cascading drop. 
- query := fmt.Sprintf("DROP SCHEMA %s", pq.QuoteIdentifier(schemaName)) - _, err = txn.Query(query) - if err != nil { - return errwrap.Wrapf("Error deleting schema: {{err}}", err) - } - - if err := txn.Commit(); err != nil { - return errwrap.Wrapf("Error committing schema: {{err}}", err) - } - - d.SetId("") - - return nil -} - -func resourcePostgreSQLSchemaExists(d *schema.ResourceData, meta interface{}) (bool, error) { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - conn, err := c.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var schemaName string - err = conn.QueryRow("SELECT n.nspname FROM pg_catalog.pg_namespace n WHERE n.nspname=$1", d.Id()).Scan(&schemaName) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, errwrap.Wrapf("Error reading schema: {{err}}", err) - } - - return true, nil -} - -func resourcePostgreSQLSchemaRead(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.RLock() - defer c.catalogLock.RUnlock() - - return resourcePostgreSQLSchemaReadImpl(d, meta) -} - -func resourcePostgreSQLSchemaReadImpl(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - schemaId := d.Id() - var schemaName, schemaOwner string - var schemaACLs []string - err = conn.QueryRow("SELECT n.nspname, pg_catalog.pg_get_userbyid(n.nspowner), COALESCE(n.nspacl, '{}'::aclitem[])::TEXT[] FROM pg_catalog.pg_namespace n WHERE n.nspname=$1", schemaId).Scan(&schemaName, &schemaOwner, pq.Array(&schemaACLs)) - switch { - case err == sql.ErrNoRows: - log.Printf("[WARN] PostgreSQL schema (%s) not found", schemaId) - d.SetId("") - return nil - case err != nil: - return errwrap.Wrapf("Error reading schema: {{err}}", err) - default: - type RoleKey string - schemaPolicies := make(map[RoleKey]acl.Schema, len(schemaACLs)) - for _, aclStr := range 
schemaACLs { - aclItem, err := acl.Parse(aclStr) - if err != nil { - return errwrap.Wrapf("Error parsing aclitem: {{err}}", err) - } - - schemaACL, err := acl.NewSchema(aclItem) - if err != nil { - return errwrap.Wrapf("invalid perms for schema: {{err}}", err) - } - - roleKey := RoleKey(strings.ToLower(schemaACL.Role)) - var mergedPolicy acl.Schema - if existingRolePolicy, ok := schemaPolicies[roleKey]; ok { - mergedPolicy = existingRolePolicy.Merge(schemaACL) - } else { - mergedPolicy = schemaACL - } - schemaPolicies[roleKey] = mergedPolicy - } - - d.Set(schemaNameAttr, schemaName) - d.Set(schemaOwnerAttr, schemaOwner) - d.SetId(schemaName) - return nil - } -} - -func resourcePostgreSQLSchemaUpdate(d *schema.ResourceData, meta interface{}) error { - c := meta.(*Client) - c.catalogLock.Lock() - defer c.catalogLock.Unlock() - - conn, err := c.Connect() - if err != nil { - return err - } - defer conn.Close() - - txn, err := conn.Begin() - if err != nil { - return err - } - defer txn.Rollback() - - if err := setSchemaName(txn, d); err != nil { - return err - } - - if err := setSchemaOwner(txn, d); err != nil { - return err - } - - if err := setSchemaPolicy(txn, d); err != nil { - return err - } - - if err := txn.Commit(); err != nil { - return errwrap.Wrapf("Error committing schema: {{err}}", err) - } - - return resourcePostgreSQLSchemaReadImpl(d, meta) -} - -func setSchemaName(txn *sql.Tx, d *schema.ResourceData) error { - if !d.HasChange(schemaNameAttr) { - return nil - } - - oraw, nraw := d.GetChange(schemaNameAttr) - o := oraw.(string) - n := nraw.(string) - if n == "" { - return errors.New("Error setting schema name to an empty string") - } - - query := fmt.Sprintf("ALTER SCHEMA %s RENAME TO %s", pq.QuoteIdentifier(o), pq.QuoteIdentifier(n)) - if _, err := txn.Query(query); err != nil { - return errwrap.Wrapf("Error updating schema NAME: {{err}}", err) - } - d.SetId(n) - - return nil -} - -func setSchemaOwner(txn *sql.Tx, d *schema.ResourceData) error { - if 
!d.HasChange(schemaOwnerAttr) { - return nil - } - - oraw, nraw := d.GetChange(schemaOwnerAttr) - o := oraw.(string) - n := nraw.(string) - if n == "" { - return errors.New("Error setting schema owner to an empty string") - } - - query := fmt.Sprintf("ALTER SCHEMA %s OWNER TO %s", pq.QuoteIdentifier(o), pq.QuoteIdentifier(n)) - if _, err := txn.Query(query); err != nil { - return errwrap.Wrapf("Error updating schema OWNER: {{err}}", err) - } - - return nil -} - -func setSchemaPolicy(txn *sql.Tx, d *schema.ResourceData) error { - if !d.HasChange(schemaPolicyAttr) { - return nil - } - - schemaName := d.Get(schemaNameAttr).(string) - - oraw, nraw := d.GetChange(schemaPolicyAttr) - oldList := oraw.(*schema.Set).List() - newList := nraw.(*schema.Set).List() - queries := make([]string, 0, len(oldList)+len(newList)) - dropped, added, updated, _ := schemaChangedPolicies(oldList, newList) - - for _, p := range dropped { - pMap := p.(map[string]interface{}) - rolePolicy := schemaPolicyToACL(pMap) - - // The PUBLIC role can not be DROP'ed, therefore we do not need - // to prevent revoking against it not existing. - if rolePolicy.Role != "" { - var foundUser bool - err := txn.QueryRow(`SELECT TRUE FROM pg_catalog.pg_user WHERE usename = $1`, rolePolicy.Role).Scan(&foundUser) - switch { - case err == sql.ErrNoRows: - // Don't execute this role's REVOKEs because the role - // was dropped first and therefore doesn't exist. - case err != nil: - return errwrap.Wrapf("Error reading schema: {{err}}", err) - default: - queries = append(queries, rolePolicy.Revokes(schemaName)...) - } - } - } - - for _, p := range added { - pMap := p.(map[string]interface{}) - rolePolicy := schemaPolicyToACL(pMap) - queries = append(queries, rolePolicy.Grants(schemaName)...) 
- } - - for _, p := range updated { - policies := p.([]interface{}) - if len(policies) != 2 { - panic("expected 2 policies, old and new") - } - - { - oldPolicies := policies[0].(map[string]interface{}) - rolePolicy := schemaPolicyToACL(oldPolicies) - queries = append(queries, rolePolicy.Revokes(schemaName)...) - } - - { - newPolicies := policies[1].(map[string]interface{}) - rolePolicy := schemaPolicyToACL(newPolicies) - queries = append(queries, rolePolicy.Grants(schemaName)...) - } - } - - for _, query := range queries { - if _, err := txn.Query(query); err != nil { - return errwrap.Wrapf("Error updating schema DCL: {{err}}", err) - } - } - - return nil -} - -// schemaChangedPolicies walks old and new to create a set of queries that can -// be executed to enact each type of state change (roles that have been dropped -// from the policy, added to a policy, have updated privilges, or are -// unchanged). -func schemaChangedPolicies(old, new []interface{}) (dropped, added, update, unchanged map[string]interface{}) { - type RoleKey string - oldLookupMap := make(map[RoleKey]interface{}, len(old)) - for idx, _ := range old { - v := old[idx] - schemaPolicy := v.(map[string]interface{}) - if roleRaw, ok := schemaPolicy[schemaPolicyRoleAttr]; ok { - role := roleRaw.(string) - roleKey := strings.ToLower(role) - oldLookupMap[RoleKey(roleKey)] = schemaPolicy - } - } - - newLookupMap := make(map[RoleKey]interface{}, len(new)) - for idx, _ := range new { - v := new[idx] - schemaPolicy := v.(map[string]interface{}) - if roleRaw, ok := schemaPolicy[schemaPolicyRoleAttr]; ok { - role := roleRaw.(string) - roleKey := strings.ToLower(role) - newLookupMap[RoleKey(roleKey)] = schemaPolicy - } - } - - droppedRoles := make(map[string]interface{}, len(old)) - for kOld, vOld := range oldLookupMap { - if _, ok := newLookupMap[kOld]; !ok { - droppedRoles[string(kOld)] = vOld - } - } - - addedRoles := make(map[string]interface{}, len(new)) - for kNew, vNew := range newLookupMap { - if _, ok 
:= oldLookupMap[kNew]; !ok { - addedRoles[string(kNew)] = vNew - } - } - - updatedRoles := make(map[string]interface{}, len(new)) - unchangedRoles := make(map[string]interface{}, len(new)) - for kOld, vOld := range oldLookupMap { - if vNew, ok := newLookupMap[kOld]; ok { - if reflect.DeepEqual(vOld, vNew) { - unchangedRoles[string(kOld)] = vOld - } else { - updatedRoles[string(kOld)] = []interface{}{vOld, vNew} - } - } - } - - return droppedRoles, addedRoles, updatedRoles, unchangedRoles -} - -func schemaPolicyToHCL(s *acl.Schema) map[string]interface{} { - return map[string]interface{}{ - schemaPolicyRoleAttr: s.Role, - schemaPolicyCreateAttr: s.GetPrivilege(acl.Create), - schemaPolicyCreateWithGrantAttr: s.GetGrantOption(acl.Create), - schemaPolicyUsageAttr: s.GetPrivilege(acl.Usage), - schemaPolicyUsageWithGrantAttr: s.GetGrantOption(acl.Usage), - } -} - -func schemaPolicyToACL(policyMap map[string]interface{}) acl.Schema { - var rolePolicy acl.Schema - - if policyMap[schemaPolicyCreateAttr].(bool) { - rolePolicy.Privileges |= acl.Create - } - - if policyMap[schemaPolicyCreateWithGrantAttr].(bool) { - rolePolicy.Privileges |= acl.Create - rolePolicy.GrantOptions |= acl.Create - } - - if policyMap[schemaPolicyUsageAttr].(bool) { - rolePolicy.Privileges |= acl.Usage - } - - if policyMap[schemaPolicyUsageWithGrantAttr].(bool) { - rolePolicy.Privileges |= acl.Usage - rolePolicy.GrantOptions |= acl.Usage - } - - if roleRaw, ok := policyMap[schemaPolicyRoleAttr]; ok { - rolePolicy.Role = roleRaw.(string) - } - - return rolePolicy -} diff --git a/builtin/providers/postgresql/resource_postgresql_schema_test.go b/builtin/providers/postgresql/resource_postgresql_schema_test.go deleted file mode 100644 index abf5c8130..000000000 --- a/builtin/providers/postgresql/resource_postgresql_schema_test.go +++ /dev/null @@ -1,418 +0,0 @@ -package postgresql - -import ( - "database/sql" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccPostgresqlSchema_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlSchemaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlSchemaConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlSchemaExists("postgresql_schema.test1", "foo"), - resource.TestCheckResourceAttr("postgresql_role.role_all_without_grant", "name", "role_all_without_grant"), - resource.TestCheckResourceAttr("postgresql_role.role_all_without_grant", "login", "true"), - - resource.TestCheckResourceAttr("postgresql_role.role_all_with_grant", "name", "role_all_with_grant"), - - resource.TestCheckResourceAttr("postgresql_schema.test1", "name", "foo"), - - resource.TestCheckResourceAttr("postgresql_schema.test2", "name", "bar"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "owner", "role_all_without_grant"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "if_not_exists", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.#", "1"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test2", "policy.1948480595.role", "role_all_without_grant"), - - resource.TestCheckResourceAttr("postgresql_schema.test3", "name", "baz"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "owner", "role_all_without_grant"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "if_not_exists", "true"), - 
resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.#", "2"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1013320538.role", "role_all_with_grant"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test3", "policy.1948480595.role", "role_all_without_grant"), - ), - }, - }, - }) -} - -func TestAccPostgresqlSchema_AddPolicy(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPostgresqlSchemaDestroy, - Steps: []resource.TestStep{ - { - Config: testAccPostgresqlSchemaGrant1, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlSchemaExists("postgresql_schema.test4", "test4"), - - resource.TestCheckResourceAttr("postgresql_role.all_without_grant_stay", "name", "all_without_grant_stay"), - resource.TestCheckResourceAttr("postgresql_role.all_without_grant_drop", "name", "all_without_grant_drop"), - resource.TestCheckResourceAttr("postgresql_role.policy_compose", "name", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_role.policy_move", "name", "policy_move"), - - resource.TestCheckResourceAttr("postgresql_role.all_with_grantstay", "name", "all_with_grantstay"), - resource.TestCheckResourceAttr("postgresql_role.all_with_grantdrop", "name", "all_with_grantdrop"), - - resource.TestCheckResourceAttr("postgresql_schema.test4", "name", "test4"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "owner", "all_without_grant_stay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.#", 
"7"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.role", "all_with_grantstay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.role", "policy_move"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1417738359.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.role", "all_without_grant_drop"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.1762357194.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.role", "all_without_grant_stay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage", "true"), - 
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.role", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.role", "all_with_grantdrop"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.4178211897.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.role", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage_with_grant", "false"), - ), - }, - { - Config: testAccPostgresqlSchemaGrant2, - Check: resource.ComposeTestCheckFunc( - testAccCheckPostgresqlSchemaExists("postgresql_schema.test4", "test4"), - resource.TestCheckResourceAttr("postgresql_role.all_without_grant_stay", "name", "all_without_grant_stay"), - 
resource.TestCheckResourceAttr("postgresql_role.all_without_grant_drop", "name", "all_without_grant_drop"), - resource.TestCheckResourceAttr("postgresql_role.policy_compose", "name", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_role.policy_move", "name", "policy_move"), - - resource.TestCheckResourceAttr("postgresql_role.all_with_grantstay", "name", "all_with_grantstay"), - - resource.TestCheckResourceAttr("postgresql_schema.test4", "name", "test4"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "owner", "all_without_grant_stay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.#", "6"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.role", "all_with_grantstay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.108605972.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.role", "all_without_grant_stay"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.2524457447.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.role", "policy_move"), - 
resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3831594020.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.create_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.role", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.3959936977.usage_with_grant", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.role", "policy_new"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.468685299.usage_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.create_with_grant", "false"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.role", "policy_compose"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage", "true"), - resource.TestCheckResourceAttr("postgresql_schema.test4", "policy.815478369.usage_with_grant", "false"), - ), - }, - }, - }) -} - -func testAccCheckPostgresqlSchemaDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "postgresql_schema" { - continue - } - - exists, err := 
checkSchemaExists(client, rs.Primary.ID) - if err != nil { - return fmt.Errorf("Error checking schema %s", err) - } - - if exists { - return fmt.Errorf("Schema still exists after destroy") - } - } - - return nil -} - -func testAccCheckPostgresqlSchemaExists(n string, schemaName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - actualSchemaName := rs.Primary.Attributes["name"] - if actualSchemaName != schemaName { - return fmt.Errorf("Wrong value for schema name expected %s got %s", schemaName, actualSchemaName) - } - - client := testAccProvider.Meta().(*Client) - exists, err := checkSchemaExists(client, rs.Primary.ID) - - if err != nil { - return fmt.Errorf("Error checking schema %s", err) - } - - if !exists { - return fmt.Errorf("Schema not found") - } - - return nil - } -} - -func checkSchemaExists(client *Client, schemaName string) (bool, error) { - conn, err := client.Connect() - if err != nil { - return false, err - } - defer conn.Close() - - var _rez string - err = conn.QueryRow("SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname=$1", schemaName).Scan(&_rez) - switch { - case err == sql.ErrNoRows: - return false, nil - case err != nil: - return false, fmt.Errorf("Error reading info about schema: %s", err) - default: - return true, nil - } -} - -const testAccPostgresqlSchemaConfig = ` -resource "postgresql_role" "role_all_without_grant" { - name = "role_all_without_grant" - login = true -} - -resource "postgresql_role" "role_all_with_grant" { - name = "role_all_with_grant" -} - -resource "postgresql_schema" "test1" { - name = "foo" -} - -resource "postgresql_schema" "test2" { - name = "bar" - owner = "${postgresql_role.role_all_without_grant.name}" - if_not_exists = false - - policy { - create = true - usage = true - role = 
"${postgresql_role.role_all_without_grant.name}" - } -} - -resource "postgresql_schema" "test3" { - name = "baz" - owner = "${postgresql_role.role_all_without_grant.name}" - if_not_exists = true - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.role_all_with_grant.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.role_all_without_grant.name}" - } -} -` - -const testAccPostgresqlSchemaGrant1 = ` -resource "postgresql_role" "all_without_grant_stay" { - name = "all_without_grant_stay" -} - -resource "postgresql_role" "all_without_grant_drop" { - name = "all_without_grant_drop" -} - -resource "postgresql_role" "policy_compose" { - name = "policy_compose" -} - -resource "postgresql_role" "policy_move" { - name = "policy_move" -} - -resource "postgresql_role" "all_with_grantstay" { - name = "all_with_grantstay" -} - -resource "postgresql_role" "all_with_grantdrop" { - name = "all_with_grantdrop" -} - -resource "postgresql_schema" "test4" { - name = "test4" - owner = "${postgresql_role.all_without_grant_stay.name}" - - policy { - create = true - usage = true - role = "${postgresql_role.all_without_grant_stay.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.all_without_grant_drop.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.policy_compose.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.policy_move.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.all_with_grantstay.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.all_with_grantdrop.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.policy_compose.name}" - } -} -` - -const testAccPostgresqlSchemaGrant2 = ` -resource "postgresql_role" "all_without_grant_stay" { - name = 
"all_without_grant_stay" -} - -resource "postgresql_role" "all_without_grant_drop" { - name = "all_without_grant_drop" -} - -resource "postgresql_role" "policy_compose" { - name = "policy_compose" -} - -resource "postgresql_role" "policy_move" { - name = "policy_move" -} - -resource "postgresql_role" "all_with_grantstay" { - name = "all_with_grantstay" -} - -resource "postgresql_role" "policy_new" { - name = "policy_new" -} - -resource "postgresql_schema" "test4" { - name = "test4" - owner = "${postgresql_role.all_without_grant_stay.name}" - - policy { - create = true - usage = true - role = "${postgresql_role.all_without_grant_stay.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.policy_compose.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.all_with_grantstay.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.policy_compose.name}" - } - - policy { - create_with_grant = true - usage_with_grant = true - role = "${postgresql_role.policy_move.name}" - } - - policy { - create = true - usage = true - role = "${postgresql_role.policy_new.name}" - } -} -` diff --git a/builtin/providers/powerdns/client.go b/builtin/providers/powerdns/client.go deleted file mode 100644 index 9b53011fe..000000000 --- a/builtin/providers/powerdns/client.go +++ /dev/null @@ -1,367 +0,0 @@ -package powerdns - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/hashicorp/go-cleanhttp" -) - -type Client struct { - ServerUrl string // Location of PowerDNS server to use - ApiKey string // REST API Static authentication key - ApiVersion int // API version to use - Http *http.Client -} - -// NewClient returns a new PowerDNS client -func NewClient(serverUrl string, apiKey string) (*Client, error) { - client := Client{ - ServerUrl: serverUrl, - ApiKey: apiKey, - Http: cleanhttp.DefaultClient(), 
- } - var err error - client.ApiVersion, err = client.detectApiVersion() - if err != nil { - return nil, err - } - return &client, nil -} - -// Creates a new request with necessary headers -func (c *Client) newRequest(method string, endpoint string, body []byte) (*http.Request, error) { - - var urlStr string - if c.ApiVersion > 0 { - urlStr = c.ServerUrl + "/api/v" + strconv.Itoa(c.ApiVersion) + endpoint - } else { - urlStr = c.ServerUrl + endpoint - } - url, err := url.Parse(urlStr) - if err != nil { - return nil, fmt.Errorf("Error during parsing request URL: %s", err) - } - - var bodyReader io.Reader - if body != nil { - bodyReader = bytes.NewReader(body) - } - - req, err := http.NewRequest(method, url.String(), bodyReader) - if err != nil { - return nil, fmt.Errorf("Error during creation of request: %s", err) - } - - req.Header.Add("X-API-Key", c.ApiKey) - req.Header.Add("Accept", "application/json") - - if method != "GET" { - req.Header.Add("Content-Type", "application/json") - } - - return req, nil -} - -type ZoneInfo struct { - Id string `json:"id"` - Name string `json:"name"` - URL string `json:"url"` - Kind string `json:"kind"` - DnsSec bool `json:"dnsssec"` - Serial int64 `json:"serial"` - Records []Record `json:"records,omitempty"` - ResourceRecordSets []ResourceRecordSet `json:"rrsets,omitempty"` -} - -type Record struct { - Name string `json:"name"` - Type string `json:"type"` - Content string `json:"content"` - TTL int `json:"ttl"` // For API v0 - Disabled bool `json:"disabled"` -} - -type ResourceRecordSet struct { - Name string `json:"name"` - Type string `json:"type"` - ChangeType string `json:"changetype"` - TTL int `json:"ttl"` // For API v1 - Records []Record `json:"records,omitempty"` -} - -type zonePatchRequest struct { - RecordSets []ResourceRecordSet `json:"rrsets"` -} - -type errorResponse struct { - ErrorMsg string `json:"error"` -} - -const idSeparator string = ":::" - -func (record *Record) Id() string { - return record.Name + idSeparator 
+ record.Type -} - -func (rrSet *ResourceRecordSet) Id() string { - return rrSet.Name + idSeparator + rrSet.Type -} - -// Returns name and type of record or record set based on it's ID -func parseId(recId string) (string, string, error) { - s := strings.Split(recId, idSeparator) - if len(s) == 2 { - return s[0], s[1], nil - } else { - return "", "", fmt.Errorf("Unknown record ID format") - } -} - -// Detects the API version in use on the server -// Uses int to represent the API version: 0 is the legacy AKA version 3.4 API -// Any other integer correlates with the same API version -func (client *Client) detectApiVersion() (int, error) { - req, err := client.newRequest("GET", "/api/v1/servers", nil) - if err != nil { - return -1, err - } - resp, err := client.Http.Do(req) - if err != nil { - return -1, err - } - defer resp.Body.Close() - if resp.StatusCode == 200 { - return 1, nil - } else { - return 0, nil - } -} - -// Returns all Zones of server, without records -func (client *Client) ListZones() ([]ZoneInfo, error) { - - req, err := client.newRequest("GET", "/servers/localhost/zones", nil) - if err != nil { - return nil, err - } - - resp, err := client.Http.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var zoneInfos []ZoneInfo - - err = json.NewDecoder(resp.Body).Decode(&zoneInfos) - if err != nil { - return nil, err - } - - return zoneInfos, nil -} - -// Returns all records in Zone -func (client *Client) ListRecords(zone string) ([]Record, error) { - req, err := client.newRequest("GET", fmt.Sprintf("/servers/localhost/zones/%s", zone), nil) - if err != nil { - return nil, err - } - - resp, err := client.Http.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - zoneInfo := new(ZoneInfo) - err = json.NewDecoder(resp.Body).Decode(zoneInfo) - if err != nil { - return nil, err - } - - records := zoneInfo.Records - // Convert the API v1 response to v0 record structure - for _, rrs := range 
zoneInfo.ResourceRecordSets { - for _, record := range rrs.Records { - records = append(records, Record{ - Name: rrs.Name, - Type: rrs.Type, - Content: record.Content, - TTL: rrs.TTL, - }) - } - } - - return records, nil -} - -// Returns only records of specified name and type -func (client *Client) ListRecordsInRRSet(zone string, name string, tpe string) ([]Record, error) { - allRecords, err := client.ListRecords(zone) - if err != nil { - return nil, err - } - - records := make([]Record, 0, 10) - for _, r := range allRecords { - if r.Name == name && r.Type == tpe { - records = append(records, r) - } - } - - return records, nil -} - -func (client *Client) ListRecordsByID(zone string, recId string) ([]Record, error) { - name, tpe, err := parseId(recId) - if err != nil { - return nil, err - } else { - return client.ListRecordsInRRSet(zone, name, tpe) - } -} - -// Checks if requested record exists in Zone -func (client *Client) RecordExists(zone string, name string, tpe string) (bool, error) { - allRecords, err := client.ListRecords(zone) - if err != nil { - return false, err - } - - for _, record := range allRecords { - if record.Name == name && record.Type == tpe { - return true, nil - } - } - return false, nil -} - -// Checks if requested record exists in Zone by it's ID -func (client *Client) RecordExistsByID(zone string, recId string) (bool, error) { - name, tpe, err := parseId(recId) - if err != nil { - return false, err - } else { - return client.RecordExists(zone, name, tpe) - } -} - -// Creates new record with single content entry -func (client *Client) CreateRecord(zone string, record Record) (string, error) { - reqBody, _ := json.Marshal(zonePatchRequest{ - RecordSets: []ResourceRecordSet{ - { - Name: record.Name, - Type: record.Type, - ChangeType: "REPLACE", - Records: []Record{record}, - }, - }, - }) - - req, err := client.newRequest("PATCH", fmt.Sprintf("/servers/localhost/zones/%s", zone), reqBody) - if err != nil { - return "", err - } - - resp, err := 
client.Http.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 && resp.StatusCode != 204 { - errorResp := new(errorResponse) - if err = json.NewDecoder(resp.Body).Decode(errorResp); err != nil { - return "", fmt.Errorf("Error creating record: %s", record.Id()) - } else { - return "", fmt.Errorf("Error creating record: %s, reason: %q", record.Id(), errorResp.ErrorMsg) - } - } else { - return record.Id(), nil - } -} - -// Creates new record set in Zone -func (client *Client) ReplaceRecordSet(zone string, rrSet ResourceRecordSet) (string, error) { - rrSet.ChangeType = "REPLACE" - - reqBody, _ := json.Marshal(zonePatchRequest{ - RecordSets: []ResourceRecordSet{rrSet}, - }) - - req, err := client.newRequest("PATCH", fmt.Sprintf("/servers/localhost/zones/%s", zone), reqBody) - if err != nil { - return "", err - } - - resp, err := client.Http.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 && resp.StatusCode != 204 { - errorResp := new(errorResponse) - if err = json.NewDecoder(resp.Body).Decode(errorResp); err != nil { - return "", fmt.Errorf("Error creating record set: %s", rrSet.Id()) - } else { - return "", fmt.Errorf("Error creating record set: %s, reason: %q", rrSet.Id(), errorResp.ErrorMsg) - } - } else { - return rrSet.Id(), nil - } -} - -// Deletes record set from Zone -func (client *Client) DeleteRecordSet(zone string, name string, tpe string) error { - reqBody, _ := json.Marshal(zonePatchRequest{ - RecordSets: []ResourceRecordSet{ - { - Name: name, - Type: tpe, - ChangeType: "DELETE", - }, - }, - }) - - req, err := client.newRequest("PATCH", fmt.Sprintf("/servers/localhost/zones/%s", zone), reqBody) - if err != nil { - return err - } - - resp, err := client.Http.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 && resp.StatusCode != 204 { - errorResp := new(errorResponse) - if err = 
json.NewDecoder(resp.Body).Decode(errorResp); err != nil { - return fmt.Errorf("Error deleting record: %s %s", name, tpe) - } else { - return fmt.Errorf("Error deleting record: %s %s, reason: %q", name, tpe, errorResp.ErrorMsg) - } - } else { - return nil - } -} - -// Deletes record from Zone by it's ID -func (client *Client) DeleteRecordSetByID(zone string, recId string) error { - name, tpe, err := parseId(recId) - if err != nil { - return err - } else { - return client.DeleteRecordSet(zone, name, tpe) - } -} diff --git a/builtin/providers/powerdns/config.go b/builtin/providers/powerdns/config.go deleted file mode 100644 index b6ed55449..000000000 --- a/builtin/providers/powerdns/config.go +++ /dev/null @@ -1,24 +0,0 @@ -package powerdns - -import ( - "fmt" - "log" -) - -type Config struct { - ServerUrl string - ApiKey string -} - -// Client returns a new client for accessing PowerDNS -func (c *Config) Client() (*Client, error) { - client, err := NewClient(c.ServerUrl, c.ApiKey) - - if err != nil { - return nil, fmt.Errorf("Error setting up PowerDNS client: %s", err) - } - - log.Printf("[INFO] PowerDNS Client configured for server %s", c.ServerUrl) - - return client, nil -} diff --git a/builtin/providers/powerdns/provider.go b/builtin/providers/powerdns/provider.go deleted file mode 100644 index b1d1e339b..000000000 --- a/builtin/providers/powerdns/provider.go +++ /dev/null @@ -1,40 +0,0 @@ -package powerdns - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_key": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("PDNS_API_KEY", nil), - Description: "REST API authentication key", - }, - "server_url": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("PDNS_SERVER_URL", nil), - Description: "Location of PowerDNS server", - }, 
- }, - - ResourcesMap: map[string]*schema.Resource{ - "powerdns_record": resourcePDNSRecord(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(data *schema.ResourceData) (interface{}, error) { - config := Config{ - ApiKey: data.Get("api_key").(string), - ServerUrl: data.Get("server_url").(string), - } - - return config.Client() -} diff --git a/builtin/providers/powerdns/provider_test.go b/builtin/providers/powerdns/provider_test.go deleted file mode 100644 index 50dca1202..000000000 --- a/builtin/providers/powerdns/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package powerdns - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "powerdns": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProviderImpl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("PDNS_API_KEY"); v == "" { - t.Fatal("PDNS_API_KEY must be set for acceptance tests") - } - - if v := os.Getenv("PDNS_SERVER_URL"); v == "" { - t.Fatal("PDNS_SERVER_URL must be set for acceptance tests") - } -} diff --git a/builtin/providers/powerdns/resource_powerdns_record.go b/builtin/providers/powerdns/resource_powerdns_record.go deleted file mode 100644 index b5f9e0687..000000000 --- a/builtin/providers/powerdns/resource_powerdns_record.go +++ /dev/null @@ -1,147 +0,0 @@ -package powerdns - -import ( - "log" - - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePDNSRecord() *schema.Resource { - return &schema.Resource{ - Create: 
resourcePDNSRecordCreate, - Read: resourcePDNSRecordRead, - Delete: resourcePDNSRecordDelete, - Exists: resourcePDNSRecordExists, - - Schema: map[string]*schema.Schema{ - "zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ttl": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "records": { - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - ForceNew: true, - Set: schema.HashString, - }, - }, - } -} - -func resourcePDNSRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client) - - rrSet := ResourceRecordSet{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - TTL: d.Get("ttl").(int), - } - - zone := d.Get("zone").(string) - ttl := d.Get("ttl").(int) - recs := d.Get("records").(*schema.Set).List() - - if len(recs) > 0 { - records := make([]Record, 0, len(recs)) - for _, recContent := range recs { - records = append(records, Record{Name: rrSet.Name, Type: rrSet.Type, TTL: ttl, Content: recContent.(string)}) - } - rrSet.Records = records - - log.Printf("[DEBUG] Creating PowerDNS Record: %#v", rrSet) - - recId, err := client.ReplaceRecordSet(zone, rrSet) - if err != nil { - return fmt.Errorf("Failed to create PowerDNS Record: %s", err) - } - - d.SetId(recId) - log.Printf("[INFO] Created PowerDNS Record with ID: %s", d.Id()) - - } else { - log.Printf("[DEBUG] Deleting empty PowerDNS Record: %#v", rrSet) - err := client.DeleteRecordSet(zone, rrSet.Name, rrSet.Type) - if err != nil { - return fmt.Errorf("Failed to delete PowerDNS Record: %s", err) - } - - d.SetId(rrSet.Id()) - } - - return resourcePDNSRecordRead(d, meta) -} - -func resourcePDNSRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client) - - log.Printf("[DEBUG] Reading PowerDNS 
Record: %s", d.Id()) - records, err := client.ListRecordsByID(d.Get("zone").(string), d.Id()) - if err != nil { - return fmt.Errorf("Couldn't fetch PowerDNS Record: %s", err) - } - - recs := make([]string, 0, len(records)) - for _, r := range records { - recs = append(recs, r.Content) - } - d.Set("records", recs) - - if len(records) > 0 { - d.Set("ttl", records[0].TTL) - } - - return nil -} - -func resourcePDNSRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client) - - log.Printf("[INFO] Deleting PowerDNS Record: %s", d.Id()) - err := client.DeleteRecordSetByID(d.Get("zone").(string), d.Id()) - - if err != nil { - return fmt.Errorf("Error deleting PowerDNS Record: %s", err) - } - - return nil -} - -func resourcePDNSRecordExists(d *schema.ResourceData, meta interface{}) (bool, error) { - zone := d.Get("zone").(string) - name := d.Get("name").(string) - tpe := d.Get("type").(string) - - log.Printf("[INFO] Checking existence of PowerDNS Record: %s, %s", name, tpe) - - client := meta.(*Client) - exists, err := client.RecordExists(zone, name, tpe) - - if err != nil { - return false, fmt.Errorf("Error checking PowerDNS Record: %s", err) - } else { - return exists, nil - } -} diff --git a/builtin/providers/powerdns/resource_powerdns_record_test.go b/builtin/providers/powerdns/resource_powerdns_record_test.go deleted file mode 100644 index 0953a4c51..000000000 --- a/builtin/providers/powerdns/resource_powerdns_record_test.go +++ /dev/null @@ -1,398 +0,0 @@ -package powerdns - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPDNSRecord_A(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigA, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckPDNSRecordExists("powerdns_record.test-a"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_WithCount(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigHyphenedWithCount, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-counted.0"), - testAccCheckPDNSRecordExists("powerdns_record.test-counted.1"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_AAAA(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigAAAA, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-aaaa"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_CNAME(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigCNAME, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-cname"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_HINFO(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigHINFO, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-hinfo"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_LOC(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testPDNSRecordConfigLOC, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-loc"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_MX(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigMX, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-mx"), - ), - }, - { - Config: testPDNSRecordConfigMXMulti, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-mx-multi"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_NAPTR(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigNAPTR, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-naptr"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_NS(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigNS, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-ns"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_SPF(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigSPF, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-spf"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_SSHFP(t *testing.T) { - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigSSHFP, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-sshfp"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_SRV(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigSRV, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-srv"), - ), - }, - }, - }) -} - -func TestAccPDNSRecord_TXT(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPDNSRecordDestroy, - Steps: []resource.TestStep{ - { - Config: testPDNSRecordConfigTXT, - Check: resource.ComposeTestCheckFunc( - testAccCheckPDNSRecordExists("powerdns_record.test-txt"), - ), - }, - }, - }) -} - -func testAccCheckPDNSRecordDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "powerdns_record" { - continue - } - - client := testAccProvider.Meta().(*Client) - exists, err := client.RecordExistsByID(rs.Primary.Attributes["zone"], rs.Primary.ID) - if err != nil { - return fmt.Errorf("Error checking if record still exists: %#v", rs.Primary.ID) - } - if exists { - return fmt.Errorf("Record still exists: %#v", rs.Primary.ID) - } - - } - return nil -} - -func testAccCheckPDNSRecordExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*Client) - foundRecords, err := 
client.ListRecordsByID(rs.Primary.Attributes["zone"], rs.Primary.ID) - if err != nil { - return err - } - if len(foundRecords) == 0 { - return fmt.Errorf("Record does not exist") - } - for _, rec := range foundRecords { - if rec.Id() == rs.Primary.ID { - return nil - } - } - return fmt.Errorf("Record does not exist: %#v", rs.Primary.ID) - } -} - -const testPDNSRecordConfigA = ` -resource "powerdns_record" "test-a" { - zone = "sysa.xyz" - name = "redis.sysa.xyz" - type = "A" - ttl = 60 - records = [ "1.1.1.1", "2.2.2.2" ] -}` - -const testPDNSRecordConfigHyphenedWithCount = ` -resource "powerdns_record" "test-counted" { - count = "2" - zone = "sysa.xyz" - name = "redis-${count.index}.sysa.xyz" - type = "A" - ttl = 60 - records = [ "1.1.1.${count.index}" ] -}` - -const testPDNSRecordConfigAAAA = ` -resource "powerdns_record" "test-aaaa" { - zone = "sysa.xyz" - name = "redis.sysa.xyz" - type = "AAAA" - ttl = 60 - records = [ "2001:DB8:2000:bf0::1", "2001:DB8:2000:bf1::1" ] -}` - -const testPDNSRecordConfigCNAME = ` -resource "powerdns_record" "test-cname" { - zone = "sysa.xyz" - name = "redis.sysa.xyz" - type = "CNAME" - ttl = 60 - records = [ "redis.example.com" ] -}` - -const testPDNSRecordConfigHINFO = ` -resource "powerdns_record" "test-hinfo" { - zone = "sysa.xyz" - name = "redis.sysa.xyz" - type = "HINFO" - ttl = 60 - records = [ "\"PC-Intel-2.4ghz\" \"Linux\"" ] -}` - -const testPDNSRecordConfigLOC = ` -resource "powerdns_record" "test-loc" { - zone = "sysa.xyz" - name = "redis.sysa.xyz" - type = "LOC" - ttl = 60 - records = [ "51 56 0.123 N 5 54 0.000 E 4.00m 1.00m 10000.00m 10.00m" ] -}` - -const testPDNSRecordConfigMX = ` -resource "powerdns_record" "test-mx" { - zone = "sysa.xyz" - name = "sysa.xyz" - type = "MX" - ttl = 60 - records = [ "10 mail.example.com" ] -}` - -const testPDNSRecordConfigMXMulti = ` -resource "powerdns_record" "test-mx-multi" { - zone = "sysa.xyz" - name = "sysa.xyz" - type = "MX" - ttl = 60 - records = [ "10 mail1.example.com", "20 
mail2.example.com" ] -}` - -const testPDNSRecordConfigNAPTR = ` -resource "powerdns_record" "test-naptr" { - zone = "sysa.xyz" - name = "sysa.xyz" - type = "NAPTR" - ttl = 60 - records = [ "100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu'." ] -}` - -const testPDNSRecordConfigNS = ` -resource "powerdns_record" "test-ns" { - zone = "sysa.xyz" - name = "lab.sysa.xyz" - type = "NS" - ttl = 60 - records = [ "ns1.sysa.xyz", "ns2.sysa.xyz" ] -}` - -const testPDNSRecordConfigSPF = ` -resource "powerdns_record" "test-spf" { - zone = "sysa.xyz" - name = "sysa.xyz" - type = "SPF" - ttl = 60 - records = [ "\"v=spf1 +all\"" ] -}` - -const testPDNSRecordConfigSSHFP = ` -resource "powerdns_record" "test-sshfp" { - zone = "sysa.xyz" - name = "ssh.sysa.xyz" - type = "SSHFP" - ttl = 60 - records = [ "1 1 123456789abcdef67890123456789abcdef67890" ] -}` - -const testPDNSRecordConfigSRV = ` -resource "powerdns_record" "test-srv" { - zone = "sysa.xyz" - name = "_redis._tcp.sysa.xyz" - type = "SRV" - ttl = 60 - records = [ "0 10 6379 redis1.sysa.xyz", "0 10 6379 redis2.sysa.xyz", "10 10 6379 redis-replica.sysa.xyz" ] -}` - -const testPDNSRecordConfigTXT = ` -resource "powerdns_record" "test-txt" { - zone = "sysa.xyz" - name = "text.sysa.xyz" - type = "TXT" - ttl = 60 - records = [ "\"text record payload\"" ] -}` diff --git a/builtin/providers/profitbricks/config.go b/builtin/providers/profitbricks/config.go deleted file mode 100644 index 259616d5d..000000000 --- a/builtin/providers/profitbricks/config.go +++ /dev/null @@ -1,22 +0,0 @@ -package profitbricks - -import ( - "github.com/profitbricks/profitbricks-sdk-go" -) - -type Config struct { - Username string - Password string - Endpoint string - Retries int -} - -// Client() returns a new client for accessing ProfitBricks. 
-func (c *Config) Client() (*Config, error) { - profitbricks.SetAuth(c.Username, c.Password) - profitbricks.SetDepth("5") - if len(c.Endpoint) > 0 { - profitbricks.SetEndpoint(c.Endpoint) - } - return c, nil -} diff --git a/builtin/providers/profitbricks/data_source_datacenter.go b/builtin/providers/profitbricks/data_source_datacenter.go deleted file mode 100644 index 4e3cf00d4..000000000 --- a/builtin/providers/profitbricks/data_source_datacenter.go +++ /dev/null @@ -1,69 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "strings" -) - -func dataSourceDataCenter() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDataCenterRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "location": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceDataCenterRead(d *schema.ResourceData, meta interface{}) error { - datacenters := profitbricks.ListDatacenters() - - if datacenters.StatusCode > 299 { - return fmt.Errorf("An error occured while fetching datacenters %s", datacenters.Response) - } - - name := d.Get("name").(string) - location, locationOk := d.GetOk("location") - - results := []profitbricks.Datacenter{} - - for _, dc := range datacenters.Items { - if dc.Properties.Name == name || strings.Contains(dc.Properties.Name, name) { - results = append(results, dc) - } - } - - if locationOk { - log.Printf("[INFO] searching dcs by location***********") - locationResults := []profitbricks.Datacenter{} - for _, dc := range results { - if dc.Properties.Location == location.(string) { - locationResults = append(locationResults, dc) - } - } - results = locationResults - } - log.Printf("[INFO] Results length %d *************", len(results)) - - if len(results) > 1 { - log.Printf("[INFO] Results length greater than 1") - return fmt.Errorf("There is more than one datacenters that 
match the search criteria") - } - - if len(results) == 0 { - return fmt.Errorf("There are no datacenters that match the search criteria") - } - - d.SetId(results[0].Id) - - return nil -} diff --git a/builtin/providers/profitbricks/data_source_datacenter_test.go b/builtin/providers/profitbricks/data_source_datacenter_test.go deleted file mode 100644 index 7295313cc..000000000 --- a/builtin/providers/profitbricks/data_source_datacenter_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package profitbricks - -import ( - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccDataSourceDatacenter_matching(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - - Config: testAccDataSourceProfitBricksDataCenter_matching, - }, - { - - Config: testAccDataSourceProfitBricksDataCenter_matchingWithDataSource, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.profitbricks_datacenter.foobar", "name", "test_name"), - resource.TestCheckResourceAttr("data.profitbricks_datacenter.foobar", "location", "us/las"), - ), - }, - }, - }) - -} - -const testAccDataSourceProfitBricksDataCenter_matching = ` -resource "profitbricks_datacenter" "foobar" { - name = "test_name" - location = "us/las" -} -` - -const testAccDataSourceProfitBricksDataCenter_matchingWithDataSource = ` -resource "profitbricks_datacenter" "foobar" { - name = "test_name" - location = "us/las" -} - -data "profitbricks_datacenter" "foobar" { - name = "${profitbricks_datacenter.foobar.name}" - location = "us/las" -}` diff --git a/builtin/providers/profitbricks/data_source_image.go b/builtin/providers/profitbricks/data_source_image.go deleted file mode 100644 index 61d542b5b..000000000 --- a/builtin/providers/profitbricks/data_source_image.go +++ /dev/null @@ -1,102 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - 
"github.com/profitbricks/profitbricks-sdk-go" - "strings" -) - -func dataSourceImage() *schema.Resource { - return &schema.Resource{ - Read: dataSourceImageRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - }, - "location": { - Type: schema.TypeString, - Optional: true, - }, - "version": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceImageRead(d *schema.ResourceData, meta interface{}) error { - profitbricks.SetDepth("5") - - images := profitbricks.ListImages() - - if images.StatusCode > 299 { - return fmt.Errorf("An error occured while fetching ProfitBricks locations %s", images.Response) - } - - name := d.Get("name").(string) - imageType, imageTypeOk := d.GetOk("type") - location, locationOk := d.GetOk("location") - version, versionOk := d.GetOk("version") - - results := []profitbricks.Image{} - - // if version value is present then concatenate name - version - // otherwise search by name or part of the name - if versionOk { - name_ver := fmt.Sprintf("%s-%s", name, version.(string)) - for _, img := range images.Items { - if strings.Contains(strings.ToLower(img.Properties.Name), strings.ToLower(name_ver)) { - results = append(results, img) - } - } - } else { - for _, img := range images.Items { - if strings.Contains(strings.ToLower(img.Properties.Name), strings.ToLower(name)) { - results = append(results, img) - } - } - } - - if imageTypeOk { - imageTypeResults := []profitbricks.Image{} - for _, img := range results { - if img.Properties.ImageType == imageType.(string) { - imageTypeResults = append(imageTypeResults, img) - } - - } - results = imageTypeResults - } - - if locationOk { - locationResults := []profitbricks.Image{} - for _, img := range results { - if img.Properties.Location == location.(string) { - locationResults = append(locationResults, img) - } - - } - results = locationResults - } - - if len(results) > 
1 { - return fmt.Errorf("There is more than one image that match the search criteria") - } - - if len(results) == 0 { - return fmt.Errorf("There are no images that match the search criteria") - } - - d.Set("name", results[0].Properties.Name) - - d.SetId(results[0].Id) - - return nil -} diff --git a/builtin/providers/profitbricks/data_source_image_test.go b/builtin/providers/profitbricks/data_source_image_test.go deleted file mode 100644 index 3f8f151a4..000000000 --- a/builtin/providers/profitbricks/data_source_image_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package profitbricks - -import ( - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccDataSourceImage_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - - Config: testAccDataSourceProfitBricksImage_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.profitbricks_image.img", "location", "us/las"), - resource.TestCheckResourceAttr("data.profitbricks_image.img", "name", "Ubuntu-16.04-LTS-server-2017-05-01"), - resource.TestCheckResourceAttr("data.profitbricks_image.img", "type", "HDD"), - ), - }, - }, - }) - -} - -const testAccDataSourceProfitBricksImage_basic = ` - data "profitbricks_image" "img" { - name = "Ubuntu" - type = "HDD" - version = "16" - location = "us/las" - } - ` diff --git a/builtin/providers/profitbricks/data_source_location.go b/builtin/providers/profitbricks/data_source_location.go deleted file mode 100644 index f55d60872..000000000 --- a/builtin/providers/profitbricks/data_source_location.go +++ /dev/null @@ -1,73 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "strings" -) - -func dataSourceLocation() *schema.Resource { - return &schema.Resource{ - Read: dataSourceLocationRead, - Schema: 
map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "feature": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceLocationRead(d *schema.ResourceData, meta interface{}) error { - locations := profitbricks.ListLocations() - - if locations.StatusCode > 299 { - return fmt.Errorf("An error occured while fetching ProfitBricks locations %s", locations.Response) - } - - name, nameOk := d.GetOk("name") - feature, featureOk := d.GetOk("features") - - if !nameOk && !featureOk { - return fmt.Errorf("Either 'name' or 'feature' must be provided.") - } - results := []profitbricks.Location{} - - for _, loc := range locations.Items { - if loc.Properties.Name == name.(string) || strings.Contains(loc.Properties.Name, name.(string)) { - results = append(results, loc) - } - } - - if featureOk { - locationResults := []profitbricks.Location{} - for _, loc := range results { - for _, f := range loc.Properties.Features { - if f == feature.(string) { - locationResults = append(locationResults, loc) - } - } - } - results = locationResults - } - log.Printf("[INFO] Results length %d *************", len(results)) - - if len(results) > 1 { - log.Printf("[INFO] Results length greater than 1") - return fmt.Errorf("There is more than one location that match the search criteria") - } - - if len(results) == 0 { - return fmt.Errorf("There are no locations that match the search criteria") - } - - d.SetId(results[0].Id) - - return nil -} diff --git a/builtin/providers/profitbricks/data_source_location_test.go b/builtin/providers/profitbricks/data_source_location_test.go deleted file mode 100644 index f1411f354..000000000 --- a/builtin/providers/profitbricks/data_source_location_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package profitbricks - -import ( - "github.com/hashicorp/terraform/helper/resource" - "testing" -) - -func TestAccDataSourceLocation_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { - 
testAccPreCheck(t) - }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - - Config: testAccDataSourceProfitBricksLocation_basic, - Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("data.profitbricks_location.loc", "id", "de/fkb"), - resource.TestCheckResourceAttr("data.profitbricks_location.loc", "name", "karlsruhe"), - ), - }, - }, - }) - -} - -const testAccDataSourceProfitBricksLocation_basic = ` - data "profitbricks_location" "loc" { - name = "karlsruhe" - feature = "SSD" - } - ` diff --git a/builtin/providers/profitbricks/provider.go b/builtin/providers/profitbricks/provider.go deleted file mode 100644 index 5db06b91e..000000000 --- a/builtin/providers/profitbricks/provider.go +++ /dev/null @@ -1,76 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -// Provider returns a schema.Provider for ProfitBricks. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("PROFITBRICKS_USERNAME", nil), - Description: "ProfitBricks username for API operations.", - }, - "password": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("PROFITBRICKS_PASSWORD", nil), - Description: "ProfitBricks password for API operations.", - }, - "endpoint": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("PROFITBRICKS_API_URL", profitbricks.Endpoint), - Description: "ProfitBricks REST API URL.", - }, - "retries": { - Type: schema.TypeInt, - Optional: true, - Default: 50, - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "profitbricks_datacenter": resourceProfitBricksDatacenter(), - "profitbricks_ipblock": resourceProfitBricksIPBlock(), - "profitbricks_firewall": resourceProfitBricksFirewall(), - "profitbricks_lan": resourceProfitBricksLan(), - "profitbricks_loadbalancer": resourceProfitBricksLoadbalancer(), - "profitbricks_nic": resourceProfitBricksNic(), - "profitbricks_server": resourceProfitBricksServer(), - "profitbricks_volume": resourceProfitBricksVolume(), - }, - DataSourcesMap: map[string]*schema.Resource{ - "profitbricks_datacenter": dataSourceDataCenter(), - "profitbricks_location": dataSourceLocation(), - "profitbricks_image": dataSourceImage(), - }, - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - - if _, ok := d.GetOk("username"); !ok { - return nil, fmt.Errorf("ProfitBricks username has not been provided.") - } - - if _, ok := d.GetOk("password"); !ok { - return nil, fmt.Errorf("ProfitBricks password has not been provided.") - } - - config := Config{ - Username: d.Get("username").(string), - Password: d.Get("password").(string), - Endpoint: d.Get("endpoint").(string), - Retries: 
d.Get("retries").(int), - } - - return config.Client() -} diff --git a/builtin/providers/profitbricks/provider_test.go b/builtin/providers/profitbricks/provider_test.go deleted file mode 100644 index 93b59387c..000000000 --- a/builtin/providers/profitbricks/provider_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package profitbricks - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "profitbricks": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("PROFITBRICKS_USERNAME"); v == "" { - t.Fatal("PROFITBRICKS_USERNAME must be set for acceptance tests") - } - - if v := os.Getenv("PROFITBRICKS_PASSWORD"); v == "" { - t.Fatal("PROFITBRICKS_PASSWORD must be set for acceptance tests") - } -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_datacenter.go b/builtin/providers/profitbricks/resource_profitbricks_datacenter.go deleted file mode 100644 index fee3d03cf..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_datacenter.go +++ /dev/null @@ -1,184 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "regexp" - "runtime" - "strings" - "time" -) - -func resourceProfitBricksDatacenter() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksDatacenterCreate, - Read: resourceProfitBricksDatacenterRead, - Update: resourceProfitBricksDatacenterUpdate, - 
Delete: resourceProfitBricksDatacenterDelete, - Schema: map[string]*schema.Schema{ - - //Datacenter parameters - "name": { - Type: schema.TypeString, - Required: true, - }, - - "location": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func resourceProfitBricksDatacenterCreate(d *schema.ResourceData, meta interface{}) error { - datacenter := profitbricks.Datacenter{ - Properties: profitbricks.DatacenterProperties{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - }, - } - - if attr, ok := d.GetOk("description"); ok { - datacenter.Properties.Description = attr.(string) - } - dc := profitbricks.CreateDatacenter(datacenter) - - if dc.StatusCode > 299 { - return fmt.Errorf( - "Error creating data center (%s) (%s)", d.Id(), dc.Response) - } - d.SetId(dc.Id) - - log.Printf("[INFO] DataCenter Id: %s", d.Id()) - - err := waitTillProvisioned(meta, dc.Headers.Get("Location")) - if err != nil { - return err - } - return resourceProfitBricksDatacenterRead(d, meta) -} - -func resourceProfitBricksDatacenterRead(d *schema.ResourceData, meta interface{}) error { - datacenter := profitbricks.GetDatacenter(d.Id()) - if datacenter.StatusCode > 299 { - if datacenter.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("Error while fetching a data center ID %s %s", d.Id(), datacenter.Response) - } - - d.Set("name", datacenter.Properties.Name) - d.Set("location", datacenter.Properties.Location) - d.Set("description", datacenter.Properties.Description) - return nil -} - -func resourceProfitBricksDatacenterUpdate(d *schema.ResourceData, meta interface{}) error { - obj := profitbricks.DatacenterProperties{} - - if d.HasChange("name") { - _, newName := d.GetChange("name") - - obj.Name = newName.(string) - } - - if d.HasChange("description") { - _, newDescription := d.GetChange("description") - obj.Description = newDescription.(string) - } - - 
resp := profitbricks.PatchDatacenter(d.Id(), obj) - waitTillProvisioned(meta, resp.Headers.Get("Location")) - return resourceProfitBricksDatacenterRead(d, meta) -} - -func resourceProfitBricksDatacenterDelete(d *schema.ResourceData, meta interface{}) error { - dcid := d.Id() - resp := profitbricks.DeleteDatacenter(dcid) - - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while deleting the data center ID %s %s", d.Id(), string(resp.Body)) - } - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - return nil -} - -func waitTillProvisioned(meta interface{}, path string) error { - config := meta.(*Config) - waitCount := 50 - - if config.Retries != 0 { - waitCount = config.Retries - } - for i := 0; i < waitCount; i++ { - request := profitbricks.GetRequestStatus(path) - pc, _, _, ok := runtime.Caller(1) - details := runtime.FuncForPC(pc) - if ok && details != nil { - log.Printf("[DEBUG] Called from %s", details.Name()) - } - log.Printf("[DEBUG] Request status: %s", request.Metadata.Status) - log.Printf("[DEBUG] Request status path: %s", path) - - if request.Metadata.Status == "DONE" { - return nil - } - if request.Metadata.Status == "FAILED" { - - return fmt.Errorf("Request failed with following error: %s", request.Metadata.Message) - } - time.Sleep(10 * time.Second) - i++ - } - return fmt.Errorf("Timeout has expired") -} - -func getImageId(dcId string, imageName string, imageType string) string { - if imageName == "" { - return "" - } - dc := profitbricks.GetDatacenter(dcId) - if dc.StatusCode > 299 { - log.Print(fmt.Errorf("Error while fetching a data center ID %s %s", dcId, dc.Response)) - } - - images := profitbricks.ListImages() - if images.StatusCode > 299 { - log.Print(fmt.Errorf("Error while fetching the list of images %s", images.Response)) - } - - if len(images.Items) > 0 { - for _, i := range images.Items { - imgName := "" - if i.Properties.Name != "" { - imgName = i.Properties.Name 
- } - - if imageType == "SSD" { - imageType = "HDD" - } - if imgName != "" && strings.Contains(strings.ToLower(imgName), strings.ToLower(imageName)) && i.Properties.ImageType == imageType && i.Properties.Location == dc.Properties.Location && i.Properties.Public == true { - return i.Id - } - } - } - return "" -} - -func IsValidUUID(uuid string) bool { - r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") - return r.MatchString(uuid) -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_datacenter_test.go b/builtin/providers/profitbricks/resource_profitbricks_datacenter_test.go deleted file mode 100644 index 9d351b1ab..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_datacenter_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksDataCenter_Basic(t *testing.T) { - var datacenter profitbricks.Datacenter - dc_name := "datacenter-test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksDatacenterDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitBricksDatacenterConfig_basic, dc_name), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksDatacenterExists("profitbricks_datacenter.foobar", &datacenter), - testAccCheckProfitBricksDatacenterAttributes("profitbricks_datacenter.foobar", dc_name), - resource.TestCheckResourceAttr("profitbricks_datacenter.foobar", "name", dc_name), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitBricksDatacenterConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksDatacenterExists("profitbricks_datacenter.foobar", &datacenter), - 
testAccCheckProfitBricksDatacenterAttributes("profitbricks_datacenter.foobar", "updated"), - resource.TestCheckResourceAttr("profitbricks_datacenter.foobar", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksDatacenterDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_datacenter" { - continue - } - - resp := profitbricks.GetDatacenter(rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("DataCenter still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksDatacenterAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: expected %s : found %s ", name, rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckProfitBricksDatacenterExists(n string, datacenter *profitbricks.Datacenter) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundDC := profitbricks.GetDatacenter(rs.Primary.ID) - - if foundDC.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching DC: %s", rs.Primary.ID) - } - if foundDC.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - datacenter = &foundDC - - return nil - } -} - -const testAccCheckProfitBricksDatacenterConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "%s" - location = "us/las" -}` - -const testAccCheckProfitBricksDatacenterConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "updated" - location = "us/las" -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_firewall.go 
b/builtin/providers/profitbricks/resource_profitbricks_firewall.go deleted file mode 100644 index 12fb68c0c..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_firewall.go +++ /dev/null @@ -1,226 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func resourceProfitBricksFirewall() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksFirewallCreate, - Read: resourceProfitBricksFirewallRead, - Update: resourceProfitBricksFirewallUpdate, - Delete: resourceProfitBricksFirewallDelete, - Schema: map[string]*schema.Schema{ - - "name": { - Type: schema.TypeString, - Optional: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "source_mac": { - Type: schema.TypeString, - Optional: true, - }, - "source_ip": { - Type: schema.TypeString, - Optional: true, - }, - "target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "port_range_start": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if v.(int) < 1 && v.(int) > 65534 { - errors = append(errors, fmt.Errorf("Port start range must be between 1 and 65534")) - } - return - }, - }, - - "port_range_end": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if v.(int) < 1 && v.(int) > 65534 { - errors = append(errors, fmt.Errorf("Port end range must be between 1 and 65534")) - } - return - }, - }, - "icmp_type": { - Type: schema.TypeString, - Optional: true, - }, - "icmp_code": { - Type: schema.TypeString, - Optional: true, - }, - "datacenter_id": { - Type: schema.TypeString, - Required: true, - }, - "server_id": { - Type: schema.TypeString, - Required: true, - }, - "nic_id": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceProfitBricksFirewallCreate(d *schema.ResourceData, meta 
interface{}) error { - fw := profitbricks.FirewallRule{ - Properties: profitbricks.FirewallruleProperties{ - Protocol: d.Get("protocol").(string), - }, - } - - if _, ok := d.GetOk("name"); ok { - fw.Properties.Name = d.Get("name").(string) - } - if _, ok := d.GetOk("source_mac"); ok { - fw.Properties.SourceMac = d.Get("source_mac").(string) - } - if _, ok := d.GetOk("source_ip"); ok { - fw.Properties.SourceIp = d.Get("source_ip").(string) - } - if _, ok := d.GetOk("target_ip"); ok { - fw.Properties.TargetIp = d.Get("target_ip").(string) - } - if _, ok := d.GetOk("port_range_start"); ok { - fw.Properties.PortRangeStart = d.Get("port_range_start").(int) - } - if _, ok := d.GetOk("port_range_end"); ok { - fw.Properties.PortRangeEnd = d.Get("port_range_end").(int) - } - if _, ok := d.GetOk("icmp_type"); ok { - fw.Properties.IcmpType = d.Get("icmp_type").(string) - } - if _, ok := d.GetOk("icmp_code"); ok { - fw.Properties.IcmpCode = d.Get("icmp_code").(string) - } - - fw = profitbricks.CreateFirewallRule(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Get("nic_id").(string), fw) - - if fw.StatusCode > 299 { - return fmt.Errorf("An error occured while creating a firewall rule: %s", fw.Response) - } - - err := waitTillProvisioned(meta, fw.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(fw.Id) - - return resourceProfitBricksFirewallRead(d, meta) -} - -func resourceProfitBricksFirewallRead(d *schema.ResourceData, meta interface{}) error { - fw := profitbricks.GetFirewallRule(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Get("nic_id").(string), d.Id()) - - if fw.StatusCode > 299 { - if fw.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("An error occured while fetching a firewall rule dcId: %s server_id: %s nic_id: %s ID: %s %s", d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Get("nic_id").(string), d.Id(), fw.Response) - } - - d.Set("protocol", fw.Properties.Protocol) - 
d.Set("name", fw.Properties.Name) - d.Set("source_mac", fw.Properties.SourceMac) - d.Set("source_ip", fw.Properties.SourceIp) - d.Set("target_ip", fw.Properties.TargetIp) - d.Set("port_range_start", fw.Properties.PortRangeStart) - d.Set("port_range_end", fw.Properties.PortRangeEnd) - d.Set("icmp_type", fw.Properties.IcmpType) - d.Set("icmp_code", fw.Properties.IcmpCode) - d.Set("nic_id", d.Get("nic_id").(string)) - - return nil -} - -func resourceProfitBricksFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - properties := profitbricks.FirewallruleProperties{} - - if d.HasChange("name") { - _, new := d.GetChange("name") - - properties.Name = new.(string) - } - if d.HasChange("source_mac") { - _, new := d.GetChange("source_mac") - - properties.SourceMac = new.(string) - } - if d.HasChange("source_ip") { - _, new := d.GetChange("source_ip") - - properties.SourceIp = new.(string) - } - if d.HasChange("target_ip") { - _, new := d.GetChange("target_ip") - - properties.TargetIp = new.(string) - } - if d.HasChange("port_range_start") { - _, new := d.GetChange("port_range_start") - - properties.PortRangeStart = new.(int) - } - if d.HasChange("port_range_end") { - _, new := d.GetChange("port_range_end") - - properties.PortRangeEnd = new.(int) - } - if d.HasChange("icmp_type") { - _, new := d.GetChange("icmp_type") - - properties.IcmpType = new.(int) - } - if d.HasChange("icmp_code") { - _, new := d.GetChange("icmp_code") - - properties.IcmpCode = new.(int) - } - - resp := profitbricks.PatchFirewallRule(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Get("nic_id").(string), d.Id(), properties) - - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while deleting a firewall rule ID %s %s", d.Id(), resp.Response) - } - - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - return resourceProfitBricksFirewallRead(d, meta) -} - -func resourceProfitBricksFirewallDelete(d 
*schema.ResourceData, meta interface{}) error { - resp := profitbricks.DeleteFirewallRule(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Get("nic_id").(string), d.Id()) - - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while deleting a firewall rule ID %s %s", d.Id(), string(resp.Body)) - } - - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_firewall_test.go b/builtin/providers/profitbricks/resource_profitbricks_firewall_test.go deleted file mode 100644 index 757680bab..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_firewall_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksFirewall_Basic(t *testing.T) { - var firewall profitbricks.FirewallRule - firewallName := "firewall" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksFirewallDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksFirewallConfig_basic, firewallName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksFirewallExists("profitbricks_firewall.webserver_http", &firewall), - testAccCheckProfitBricksFirewallAttributes("profitbricks_firewall.webserver_http", firewallName), - resource.TestCheckResourceAttr("profitbricks_firewall.webserver_http", "name", firewallName), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitbricksFirewallConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksFirewallAttributes("profitbricks_firewall.webserver_http", "updated"), - 
resource.TestCheckResourceAttr("profitbricks_firewall.webserver_http", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksFirewallDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_firewall" { - continue - } - - resp := profitbricks.GetFirewallRule(rs.Primary.Attributes["datacenter_id"], rs.Primary.Attributes["server_id"], rs.Primary.Attributes["nic_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("Firewall still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksFirewallAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksFirewallAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckProfitBricksFirewallExists(n string, firewall *profitbricks.FirewallRule) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksFirewallExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundServer := profitbricks.GetFirewallRule(rs.Primary.Attributes["datacenter_id"], rs.Primary.Attributes["server_id"], rs.Primary.Attributes["nic_id"], rs.Primary.ID) - - if foundServer.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Firewall rule: %s", rs.Primary.ID) - } - if foundServer.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - firewall = &foundServer - - return nil - } -} - -const testAccCheckProfitbricksFirewallConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "firewall-test" - location = "us/las" -} - -resource 
"profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = 2 - dhcp = true - firewall_active = true - name = "updated" -} - -resource "profitbricks_firewall" "webserver_http" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - nic_id = "${profitbricks_nic.database_nic.id}" - protocol = "TCP" - name = "%s" - port_range_start = 80 - port_range_end = 80 -}` - -const testAccCheckProfitbricksFirewallConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "firewall-test" - location = "us/las" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "test1234" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = 2 - dhcp = true - firewall_active = true - name = "updated" -} - -resource "profitbricks_firewall" "webserver_http" { - datacenter_id = 
"${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - nic_id = "${profitbricks_nic.database_nic.id}" - protocol = "TCP" - name = "updated" - port_range_start = 80 - port_range_end = 80 -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_ipblock.go b/builtin/providers/profitbricks/resource_profitbricks_ipblock.go deleted file mode 100644 index ff6658acc..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_ipblock.go +++ /dev/null @@ -1,91 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "strings" -) - -func resourceProfitBricksIPBlock() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksIPBlockCreate, - Read: resourceProfitBricksIPBlockRead, - //Update: resourceProfitBricksIPBlockUpdate, - Delete: resourceProfitBricksIPBlockDelete, - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "size": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - }, - } -} - -func resourceProfitBricksIPBlockCreate(d *schema.ResourceData, meta interface{}) error { - ipblock := profitbricks.IpBlock{ - Properties: profitbricks.IpBlockProperties{ - Size: d.Get("size").(int), - Location: d.Get("location").(string), - }, - } - - ipblock = profitbricks.ReserveIpBlock(ipblock) - - if ipblock.StatusCode > 299 { - return fmt.Errorf("An error occured while reserving an ip block: %s", ipblock.Response) - } - err := waitTillProvisioned(meta, ipblock.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(ipblock.Id) - - return resourceProfitBricksIPBlockRead(d, meta) -} - -func resourceProfitBricksIPBlockRead(d *schema.ResourceData, meta interface{}) error { - ipblock := 
profitbricks.GetIpBlock(d.Id()) - - if ipblock.StatusCode > 299 { - if ipblock.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("An error occured while fetching an ip block ID %s %s", d.Id(), ipblock.Response) - } - - log.Printf("[INFO] IPS: %s", strings.Join(ipblock.Properties.Ips, ",")) - - d.Set("ips", ipblock.Properties.Ips) - d.Set("location", ipblock.Properties.Location) - d.Set("size", ipblock.Properties.Size) - - return nil -} - -func resourceProfitBricksIPBlockDelete(d *schema.ResourceData, meta interface{}) error { - resp := profitbricks.ReleaseIpBlock(d.Id()) - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while releasing an ipblock ID: %s %s", d.Id(), string(resp.Body)) - } - - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_ipblock_test.go b/builtin/providers/profitbricks/resource_profitbricks_ipblock_test.go deleted file mode 100644 index a08dea904..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_ipblock_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksIPBlock_Basic(t *testing.T) { - var ipblock profitbricks.IpBlock - location := "us/las" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksIPBlockDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksIPBlockConfig_basic, location), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksIPBlockExists("profitbricks_ipblock.webserver_ip", &ipblock), - 
testAccCheckProfitBricksIPBlockAttributes("profitbricks_ipblock.webserver_ip", location), - resource.TestCheckResourceAttr("profitbricks_ipblock.webserver_ip", "location", location), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksIPBlockDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_ipblock" { - continue - } - - resp := profitbricks.GetIpBlock(rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("IPBlock still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksIPBlockAttributes(n string, location string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksLanAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["location"] != location { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["location"]) - } - - return nil - } -} - -func testAccCheckProfitBricksIPBlockExists(n string, ipblock *profitbricks.IpBlock) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksIPBlockExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundIP := profitbricks.GetIpBlock(rs.Primary.ID) - - if foundIP.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching IP Block: %s", rs.Primary.ID) - } - if foundIP.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - ipblock = &foundIP - - return nil - } -} - -const testAccCheckProfitbricksIPBlockConfig_basic = ` -resource "profitbricks_ipblock" "webserver_ip" { - location = "%s" - size = 1 -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_lan.go b/builtin/providers/profitbricks/resource_profitbricks_lan.go deleted file mode 100644 index 725c25c08..000000000 
--- a/builtin/providers/profitbricks/resource_profitbricks_lan.go +++ /dev/null @@ -1,124 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "time" -) - -func resourceProfitBricksLan() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksLanCreate, - Read: resourceProfitBricksLanRead, - Update: resourceProfitBricksLanUpdate, - Delete: resourceProfitBricksLanDelete, - Schema: map[string]*schema.Schema{ - - "public": { - Type: schema.TypeBool, - Required: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "datacenter_id": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceProfitBricksLanCreate(d *schema.ResourceData, meta interface{}) error { - request := profitbricks.Lan{ - Properties: profitbricks.LanProperties{ - Public: d.Get("public").(bool), - }, - } - - log.Printf("[DEBUG] NAME %s", d.Get("name")) - if d.Get("name") != nil { - request.Properties.Name = d.Get("name").(string) - } - - lan := profitbricks.CreateLan(d.Get("datacenter_id").(string), request) - - log.Printf("[DEBUG] LAN ID: %s", lan.Id) - log.Printf("[DEBUG] LAN RESPONSE: %s", lan.Response) - - if lan.StatusCode > 299 { - return fmt.Errorf("An error occured while creating a lan: %s", lan.Response) - } - - err := waitTillProvisioned(meta, lan.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(lan.Id) - return resourceProfitBricksLanRead(d, meta) -} - -func resourceProfitBricksLanRead(d *schema.ResourceData, meta interface{}) error { - lan := profitbricks.GetLan(d.Get("datacenter_id").(string), d.Id()) - - if lan.StatusCode > 299 { - if lan.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("An error occured while fetching a lan ID %s %s", d.Id(), lan.Response) - } - - d.Set("public", lan.Properties.Public) - d.Set("name", lan.Properties.Name) - d.Set("datacenter_id", 
d.Get("datacenter_id").(string)) - return nil -} - -func resourceProfitBricksLanUpdate(d *schema.ResourceData, meta interface{}) error { - properties := &profitbricks.LanProperties{} - if d.HasChange("public") { - _, newValue := d.GetChange("public") - properties.Public = newValue.(bool) - } - if d.HasChange("name") { - _, newValue := d.GetChange("name") - properties.Name = newValue.(string) - } - log.Printf("[DEBUG] LAN UPDATE: %s : %s", properties, d.Get("name")) - if properties != nil { - lan := profitbricks.PatchLan(d.Get("datacenter_id").(string), d.Id(), *properties) - if lan.StatusCode > 299 { - return fmt.Errorf("An error occured while patching a lan ID %s %s", d.Id(), lan.Response) - } - err := waitTillProvisioned(meta, lan.Headers.Get("Location")) - if err != nil { - return err - } - } - return resourceProfitBricksLanRead(d, meta) -} - -func resourceProfitBricksLanDelete(d *schema.ResourceData, meta interface{}) error { - resp := profitbricks.DeleteLan(d.Get("datacenter_id").(string), d.Id()) - if resp.StatusCode > 299 { - //try again in 20 seconds - time.Sleep(60 * time.Second) - resp = profitbricks.DeleteLan(d.Get("datacenter_id").(string), d.Id()) - if resp.StatusCode > 299 && resp.StatusCode != 404 { - return fmt.Errorf("An error occured while deleting a lan dcId %s ID %s %s", d.Get("datacenter_id").(string), d.Id(), string(resp.Body)) - } - } - - if resp.Headers.Get("Location") != "" { - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - } - d.SetId("") - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_lan_test.go b/builtin/providers/profitbricks/resource_profitbricks_lan_test.go deleted file mode 100644 index ffaedf917..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_lan_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksLan_Basic(t *testing.T) { - var lan profitbricks.Lan - lanName := "lanName" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksLanDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksLanConfig_basic, lanName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksLanExists("profitbricks_lan.webserver_lan", &lan), - testAccCheckProfitBricksLanAttributes("profitbricks_lan.webserver_lan", lanName), - resource.TestCheckResourceAttr("profitbricks_lan.webserver_lan", "name", lanName), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitbricksLanConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksLanAttributes("profitbricks_lan.webserver_lan", "updated"), - resource.TestCheckResourceAttr("profitbricks_lan.webserver_lan", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksLanDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_datacenter" { - continue - } - - resp := profitbricks.GetLan(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("LAN still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksLanAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksLanAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckProfitBricksLanExists(n string, lan *profitbricks.Lan) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksLanExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundLan := profitbricks.GetLan(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if foundLan.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Server: %s", rs.Primary.ID) - } - if foundLan.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - lan = &foundLan - - return nil - } -} - -const testAccCheckProfitbricksLanConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "lan-test" - location = "us/las" -} - -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - public = true - name = "%s" -}` - -const testAccCheckProfitbricksLanConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "lan-test" - location = "us/las" -} -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - public = true - name = "updated" -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_loadbalancer.go b/builtin/providers/profitbricks/resource_profitbricks_loadbalancer.go deleted file mode 100644 index a7ffd98f3..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_loadbalancer.go +++ /dev/null @@ -1,147 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func resourceProfitBricksLoadbalancer() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksLoadbalancerCreate, - Read: resourceProfitBricksLoadbalancerRead, - Update: resourceProfitBricksLoadbalancerUpdate, - Delete: resourceProfitBricksLoadbalancerDelete, - Schema: map[string]*schema.Schema{ - - "name": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - }, - - "ip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "dhcp": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "datacenter_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "nic_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceProfitBricksLoadbalancerCreate(d *schema.ResourceData, meta interface{}) error { - lb := profitbricks.Loadbalancer{ - Properties: profitbricks.LoadbalancerProperties{ - Name: d.Get("name").(string), - }, - } - - lb = profitbricks.CreateLoadbalancer(d.Get("datacenter_id").(string), lb) - - if lb.StatusCode > 299 { - return fmt.Errorf("Error occured while creating a loadbalancer %s", lb.Response) - } - err := waitTillProvisioned(meta, lb.Headers.Get("Location")) - - if err != nil { - return err - } - - d.SetId(lb.Id) - - nic := profitbricks.AssociateNic(d.Get("datacenter_id").(string), d.Id(), d.Get("nic_id").(string)) - - if nic.StatusCode > 299 { - return fmt.Errorf("Error occured while deleting a balanced nic: %s", nic.Response) - } - err = waitTillProvisioned(meta, nic.Headers.Get("Location")) - if err != nil { - return err - } - - return resourceProfitBricksLoadbalancerRead(d, meta) -} - -func resourceProfitBricksLoadbalancerRead(d *schema.ResourceData, meta interface{}) error { - lb := profitbricks.GetLoadbalancer(d.Get("datacenter_id").(string), d.Id()) - - if lb.StatusCode > 299 { - if lb.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("An error occured while fetching a lan ID %s %s", d.Id(), lb.Response) - } - - d.Set("name", lb.Properties.Name) - d.Set("ip", lb.Properties.Ip) - d.Set("dhcp", lb.Properties.Dhcp) - - return nil -} - -func resourceProfitBricksLoadbalancerUpdate(d *schema.ResourceData, meta interface{}) error { - properties := profitbricks.LoadbalancerProperties{} - if d.HasChange("name") { - _, new := d.GetChange("name") - properties.Name = 
new.(string) - } - if d.HasChange("ip") { - _, new := d.GetChange("ip") - properties.Ip = new.(string) - } - if d.HasChange("dhcp") { - _, new := d.GetChange("dhcp") - properties.Dhcp = new.(bool) - } - - if d.HasChange("nic_id") { - old, new := d.GetChange("dhcp") - - resp := profitbricks.DeleteBalancedNic(d.Get("datacenter_id").(string), d.Id(), old.(string)) - if resp.StatusCode > 299 { - return fmt.Errorf("Error occured while deleting a balanced nic: %s", string(resp.Body)) - } - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - - nic := profitbricks.AssociateNic(d.Get("datacenter_id").(string), d.Id(), new.(string)) - if nic.StatusCode > 299 { - return fmt.Errorf("Error occured while deleting a balanced nic: %s", nic.Response) - } - err = waitTillProvisioned(meta, nic.Headers.Get("Location")) - if err != nil { - return err - } - } - - return resourceProfitBricksLoadbalancerRead(d, meta) -} - -func resourceProfitBricksLoadbalancerDelete(d *schema.ResourceData, meta interface{}) error { - resp := profitbricks.DeleteLoadbalancer(d.Get("datacenter_id").(string), d.Id()) - - if resp.StatusCode > 299 { - return fmt.Errorf("Error occured while deleting a loadbalancer: %s", string(resp.Body)) - } - - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_loadbalancer_test.go b/builtin/providers/profitbricks/resource_profitbricks_loadbalancer_test.go deleted file mode 100644 index 9af5f5b91..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_loadbalancer_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksLoadbalancer_Basic(t *testing.T) { - var 
loadbalancer profitbricks.Loadbalancer - lbName := "loadbalancer" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksLoadbalancerDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksLoadbalancerConfig_basic, lbName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksLoadbalancerExists("profitbricks_loadbalancer.example", &loadbalancer), - testAccCheckProfitBricksLoadbalancerAttributes("profitbricks_loadbalancer.example", lbName), - resource.TestCheckResourceAttr("profitbricks_loadbalancer.example", "name", lbName), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitbricksLoadbalancerConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksLoadbalancerAttributes("profitbricks_loadbalancer.example", "updated"), - resource.TestCheckResourceAttr("profitbricks_loadbalancer.example", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksLoadbalancerDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_loadbalancer" { - continue - } - - resp := profitbricks.GetLoadbalancer(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - resp := profitbricks.DeleteDatacenter(rs.Primary.Attributes["datacenter_id"]) - - if resp.StatusCode > 299 { - return fmt.Errorf("Firewall still exists %s %s", rs.Primary.ID, string(resp.Body)) - } - } - } - - return nil -} - -func testAccCheckProfitBricksLoadbalancerAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksLoadbalancerAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - 
- return nil - } -} - -func testAccCheckProfitBricksLoadbalancerExists(n string, loadbalancer *profitbricks.Loadbalancer) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksLoadbalancerExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundLB := profitbricks.GetLoadbalancer(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if foundLB.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Loadbalancer: %s", rs.Primary.ID) - } - if foundLB.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - loadbalancer = &foundLB - - return nil - } -} - -const testAccCheckProfitbricksLoadbalancerConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "loadbalancer-test" - location = "us/las" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = "2" - dhcp = true - firewall_active = true - name = "updated" -} - -resource "profitbricks_loadbalancer" "example" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - nic_id = "${profitbricks_nic.database_nic.id}" - name = "%s" - dhcp = true -}` - -const testAccCheckProfitbricksLoadbalancerConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "loadbalancer-test" - location = 
"us/las" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = "2" - dhcp = true - firewall_active = true - name = "updated" -} - -resource "profitbricks_loadbalancer" "example" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - nic_id = "${profitbricks_nic.database_nic.id}" - name = "updated" - dhcp = true -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_nic.go b/builtin/providers/profitbricks/resource_profitbricks_nic.go deleted file mode 100644 index 084f02a9a..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_nic.go +++ /dev/null @@ -1,174 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" - "strings" -) - -func resourceProfitBricksNic() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksNicCreate, - Read: resourceProfitBricksNicRead, - Update: resourceProfitBricksNicUpdate, - Delete: resourceProfitBricksNicDelete, - Schema: map[string]*schema.Schema{ - - "lan": { - Type: schema.TypeInt, - Required: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "dhcp": { - Type: schema.TypeBool, - Optional: true, - }, - "ip": { - Type: schema.TypeString, - Optional: true, - }, - "ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - 
Computed: true, - }, - "firewall_active": { - Type: schema.TypeBool, - Optional: true, - }, - "nat": { - Type: schema.TypeBool, - Optional: true, - }, - "server_id": { - Type: schema.TypeString, - Required: true, - }, - "datacenter_id": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceProfitBricksNicCreate(d *schema.ResourceData, meta interface{}) error { - nic := profitbricks.Nic{ - Properties: profitbricks.NicProperties{ - Lan: d.Get("lan").(int), - }, - } - if _, ok := d.GetOk("name"); ok { - nic.Properties.Name = d.Get("name").(string) - } - if _, ok := d.GetOk("dhcp"); ok { - nic.Properties.Dhcp = d.Get("dhcp").(bool) - } - - if _, ok := d.GetOk("ip"); ok { - raw := d.Get("ip").(string) - ips := strings.Split(raw, ",") - nic.Properties.Ips = ips - } - if _, ok := d.GetOk("firewall_active"); ok { - raw := d.Get("firewall_active").(bool) - nic.Properties.FirewallActive = raw - } - if _, ok := d.GetOk("nat"); ok { - raw := d.Get("nat").(bool) - nic.Properties.Nat = raw - } - - nic = profitbricks.CreateNic(d.Get("datacenter_id").(string), d.Get("server_id").(string), nic) - if nic.StatusCode > 299 { - return fmt.Errorf("Error occured while creating a nic: %s", nic.Response) - } - - err := waitTillProvisioned(meta, nic.Headers.Get("Location")) - if err != nil { - return err - } - resp := profitbricks.RebootServer(d.Get("datacenter_id").(string), d.Get("server_id").(string)) - if resp.StatusCode > 299 { - return fmt.Errorf("Error occured while creating a nic: %s", string(resp.Body)) - - } - err = waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(nic.Id) - return resourceProfitBricksNicRead(d, meta) -} - -func resourceProfitBricksNicRead(d *schema.ResourceData, meta interface{}) error { - nic := profitbricks.GetNic(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Id()) - if nic.StatusCode > 299 { - if nic.StatusCode == 404 { - d.SetId("") - return nil - } - return 
fmt.Errorf("Error occured while fetching a nic ID %s %s", d.Id(), nic.Response) - } - log.Printf("[INFO] LAN ON NIC: %d", nic.Properties.Lan) - d.Set("dhcp", nic.Properties.Dhcp) - d.Set("lan", nic.Properties.Lan) - d.Set("name", nic.Properties.Name) - d.Set("ips", nic.Properties.Ips) - - return nil -} - -func resourceProfitBricksNicUpdate(d *schema.ResourceData, meta interface{}) error { - properties := profitbricks.NicProperties{} - - if d.HasChange("name") { - _, n := d.GetChange("name") - - properties.Name = n.(string) - } - if d.HasChange("lan") { - _, n := d.GetChange("lan") - properties.Lan = n.(int) - } - if d.HasChange("dhcp") { - _, n := d.GetChange("dhcp") - properties.Dhcp = n.(bool) - } - if d.HasChange("ip") { - _, raw := d.GetChange("ip") - ips := strings.Split(raw.(string), ",") - properties.Ips = ips - } - if d.HasChange("nat") { - _, raw := d.GetChange("nat") - nat := raw.(bool) - properties.Nat = nat - } - - nic := profitbricks.PatchNic(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Id(), properties) - - if nic.StatusCode > 299 { - return fmt.Errorf("Error occured while updating a nic: %s", nic.Response) - } - err := waitTillProvisioned(meta, nic.Headers.Get("Location")) - if err != nil { - return err - } - return resourceProfitBricksNicRead(d, meta) -} - -func resourceProfitBricksNicDelete(d *schema.ResourceData, meta interface{}) error { - resp := profitbricks.DeleteNic(d.Get("datacenter_id").(string), d.Get("server_id").(string), d.Id()) - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_nic_test.go b/builtin/providers/profitbricks/resource_profitbricks_nic_test.go deleted file mode 100644 index a3fdae9ac..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_nic_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksNic_Basic(t *testing.T) { - var nic profitbricks.Nic - volumeName := "volume" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksNicDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksNicConfig_basic, volumeName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksNICExists("profitbricks_nic.database_nic", &nic), - testAccCheckProfitBricksNicAttributes("profitbricks_nic.database_nic", volumeName), - resource.TestCheckResourceAttr("profitbricks_nic.database_nic", "name", volumeName), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitbricksNicConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksNicAttributes("profitbricks_nic.database_nic", "updated"), - resource.TestCheckResourceAttr("profitbricks_nic.database_nic", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksNicDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_nic" { - continue - } - - resp := profitbricks.GetNic(rs.Primary.Attributes["datacenter_id"], rs.Primary.Attributes["nic_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("NIC still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksNicAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksNicAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - - return nil - } -} - 
-func testAccCheckProfitBricksNICExists(n string, nic *profitbricks.Nic) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksVolumeExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundNic := profitbricks.GetNic(rs.Primary.Attributes["datacenter_id"], rs.Primary.Attributes["server_id"], rs.Primary.ID) - - if foundNic.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Volume: %s", rs.Primary.ID) - } - if foundNic.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - nic = &foundNic - - return nil - } -} - -const testAccCheckProfitbricksNicConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "nic-test" - location = "us/las" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = 2 - dhcp = true - firewall_active = true - name = "%s" -}` - -const testAccCheckProfitbricksNicConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "nic-test" - location = "us/las" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - 
disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "1" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_nic" "database_nic" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - lan = 2 - dhcp = true - firewall_active = true - name = "updated" -} -` diff --git a/builtin/providers/profitbricks/resource_profitbricks_server.go b/builtin/providers/profitbricks/resource_profitbricks_server.go deleted file mode 100644 index c617f691d..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_server.go +++ /dev/null @@ -1,661 +0,0 @@ -package profitbricks - -import ( - "encoding/json" - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "golang.org/x/crypto/ssh" - "io/ioutil" - "log" - "strings" -) - -func resourceProfitBricksServer() *schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksServerCreate, - Read: resourceProfitBricksServerRead, - Update: resourceProfitBricksServerUpdate, - Delete: resourceProfitBricksServerDelete, - Schema: map[string]*schema.Schema{ - - //Server parameters - "name": { - Type: schema.TypeString, - Required: true, - }, - "cores": { - Type: schema.TypeInt, - Required: true, - }, - "ram": { - Type: schema.TypeInt, - Required: true, - }, - "availability_zone": { - Type: schema.TypeString, - Optional: true, - }, - "licence_type": { - Type: schema.TypeString, - Optional: true, - }, - - "boot_volume": { - Type: schema.TypeString, - Computed: true, - }, - - "boot_cdrom": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_family": { - Type: schema.TypeString, - Optional: true, - }, - "boot_image": { - Type: schema.TypeString, - Computed: true, - }, - "primary_nic": { - Type: schema.TypeString, - Computed: true, - }, - 
"primary_ip": { - Type: schema.TypeString, - Computed: true, - }, - "datacenter_id": { - Type: schema.TypeString, - Required: true, - }, - "volume": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "image_name": { - Type: schema.TypeString, - Required: true, - }, - "size": { - Type: schema.TypeInt, - Required: true, - }, - - "disk_type": { - Type: schema.TypeString, - Required: true, - }, - "image_password": { - Type: schema.TypeString, - Optional: true, - }, - "licence_type": { - Type: schema.TypeString, - Optional: true, - }, - "ssh_key_path": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "bus": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "availability_zone": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "nic": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "lan": { - Type: schema.TypeInt, - Required: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "dhcp": { - Type: schema.TypeBool, - Optional: true, - }, - - "ip": { - Type: schema.TypeString, - Optional: true, - }, - "ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - "nat": { - Type: schema.TypeBool, - Optional: true, - }, - "firewall_active": { - Type: schema.TypeBool, - Optional: true, - }, - "firewall": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - }, - "source_mac": { - Type: schema.TypeString, - Optional: true, - }, - "source_ip": { - Type: schema.TypeString, - Optional: true, - }, - "target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "ip": { - Type: 
schema.TypeString, - Optional: true, - }, - "ips": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "port_range_start": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if v.(int) < 1 && v.(int) > 65534 { - errors = append(errors, fmt.Errorf("Port start range must be between 1 and 65534")) - } - return - }, - }, - - "port_range_end": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if v.(int) < 1 && v.(int) > 65534 { - errors = append(errors, fmt.Errorf("Port end range must be between 1 and 65534")) - } - return - }, - }, - "icmp_type": { - Type: schema.TypeString, - Optional: true, - }, - "icmp_code": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func resourceProfitBricksServerCreate(d *schema.ResourceData, meta interface{}) error { - request := profitbricks.Server{ - Properties: profitbricks.ServerProperties{ - Name: d.Get("name").(string), - Cores: d.Get("cores").(int), - Ram: d.Get("ram").(int), - }, - } - - if v, ok := d.GetOk("availability_zone"); ok { - request.Properties.AvailabilityZone = v.(string) - } - - if v, ok := d.GetOk("cpu_family"); ok { - if v.(string) != "" { - request.Properties.CpuFamily = v.(string) - } - } - if vRaw, ok := d.GetOk("volume"); ok { - - volumeRaw := vRaw.(*schema.Set).List() - - for _, raw := range volumeRaw { - rawMap := raw.(map[string]interface{}) - var imagePassword string - //Can be one file or a list of files - var sshkey_path []interface{} - var image, licenceType, availabilityZone string - - if rawMap["image_password"] != nil { - imagePassword = rawMap["image_password"].(string) - } - if rawMap["ssh_key_path"] != nil { - sshkey_path = rawMap["ssh_key_path"].([]interface{}) - } - - image_name := rawMap["image_name"].(string) - if !IsValidUUID(image_name) { - if 
imagePassword == "" && len(sshkey_path) == 0 { - return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.") - } - image = getImageId(d.Get("datacenter_id").(string), image_name, rawMap["disk_type"].(string)) - } else { - img := profitbricks.GetImage(image_name) - if img.StatusCode > 299 { - return fmt.Errorf("Error fetching image: %s", img.Response) - } - if img.Properties.Public == true { - if imagePassword == "" && len(sshkey_path) == 0 { - return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.") - } - image = image_name - } else { - image = image_name - } - } - - if rawMap["licence_type"] != nil { - licenceType = rawMap["licence_type"].(string) - } - - var publicKeys []string - if len(sshkey_path) != 0 { - for _, path := range sshkey_path { - log.Printf("[DEBUG] Reading file %s", path) - publicKey, err := readPublicKey(path.(string)) - if err != nil { - return fmt.Errorf("Error fetching sshkey from file (%s) %s", path, err.Error()) - } - publicKeys = append(publicKeys, publicKey) - } - } - if rawMap["availability_zone"] != nil { - availabilityZone = rawMap["availability_zone"].(string) - } - if image == "" && licenceType == "" { - return fmt.Errorf("Either 'image', or 'licenceType' must be set.") - } - - request.Entities = &profitbricks.ServerEntities{ - Volumes: &profitbricks.Volumes{ - Items: []profitbricks.Volume{ - { - Properties: profitbricks.VolumeProperties{ - Name: rawMap["name"].(string), - Size: rawMap["size"].(int), - Type: rawMap["disk_type"].(string), - ImagePassword: imagePassword, - Image: image, - Bus: rawMap["bus"].(string), - LicenceType: licenceType, - AvailabilityZone: availabilityZone, - }, - }, - }, - }, - } - - if len(publicKeys) == 0 { - request.Entities.Volumes.Items[0].Properties.SshKeys = nil - } else { - request.Entities.Volumes.Items[0].Properties.SshKeys = publicKeys - } - } - - } - - if nRaw, ok := d.GetOk("nic"); ok { - nicRaw := nRaw.(*schema.Set).List() - - for _, raw := range nicRaw { - rawMap 
:= raw.(map[string]interface{}) - nic := profitbricks.Nic{Properties: profitbricks.NicProperties{}} - if rawMap["lan"] != nil { - nic.Properties.Lan = rawMap["lan"].(int) - } - if rawMap["name"] != nil { - nic.Properties.Name = rawMap["name"].(string) - } - if rawMap["dhcp"] != nil { - nic.Properties.Dhcp = rawMap["dhcp"].(bool) - } - if rawMap["firewall_active"] != nil { - nic.Properties.FirewallActive = rawMap["firewall_active"].(bool) - } - if rawMap["ip"] != nil { - rawIps := rawMap["ip"].(string) - ips := strings.Split(rawIps, ",") - if rawIps != "" { - nic.Properties.Ips = ips - } - } - if rawMap["nat"] != nil { - nic.Properties.Nat = rawMap["nat"].(bool) - } - request.Entities.Nics = &profitbricks.Nics{ - Items: []profitbricks.Nic{ - nic, - }, - } - - if rawMap["firewall"] != nil { - rawFw := rawMap["firewall"].(*schema.Set).List() - for _, rraw := range rawFw { - fwRaw := rraw.(map[string]interface{}) - log.Println("[DEBUG] fwRaw", fwRaw["protocol"]) - - firewall := profitbricks.FirewallRule{ - Properties: profitbricks.FirewallruleProperties{ - Protocol: fwRaw["protocol"].(string), - }, - } - - if fwRaw["name"] != nil { - firewall.Properties.Name = fwRaw["name"].(string) - } - if fwRaw["source_mac"] != nil { - firewall.Properties.SourceMac = fwRaw["source_mac"].(string) - } - if fwRaw["source_ip"] != nil { - firewall.Properties.SourceIp = fwRaw["source_ip"].(string) - } - if fwRaw["target_ip"] != nil { - firewall.Properties.TargetIp = fwRaw["target_ip"].(string) - } - if fwRaw["port_range_start"] != nil { - firewall.Properties.PortRangeStart = fwRaw["port_range_start"].(int) - } - if fwRaw["port_range_end"] != nil { - firewall.Properties.PortRangeEnd = fwRaw["port_range_end"].(int) - } - if fwRaw["icmp_type"] != nil { - firewall.Properties.IcmpType = fwRaw["icmp_type"].(string) - } - if fwRaw["icmp_code"] != nil { - firewall.Properties.IcmpCode = fwRaw["icmp_code"].(string) - } - - request.Entities.Nics.Items[0].Entities = &profitbricks.NicEntities{ - 
Firewallrules: &profitbricks.FirewallRules{ - Items: []profitbricks.FirewallRule{ - firewall, - }, - }, - } - } - - } - } - } - - if len(request.Entities.Nics.Items[0].Properties.Ips) == 0 { - request.Entities.Nics.Items[0].Properties.Ips = nil - } - server := profitbricks.CreateServer(d.Get("datacenter_id").(string), request) - - jsn, _ := json.Marshal(request) - log.Println("[DEBUG] Server request", string(jsn)) - log.Println("[DEBUG] Server response", server.Response) - - if server.StatusCode > 299 { - return fmt.Errorf( - "Error creating server: (%s)", server.Response) - } - - err := waitTillProvisioned(meta, server.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(server.Id) - server = profitbricks.GetServer(d.Get("datacenter_id").(string), server.Id) - - d.Set("primary_nic", server.Entities.Nics.Items[0].Id) - if len(server.Entities.Nics.Items[0].Properties.Ips) > 0 { - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": server.Entities.Nics.Items[0].Properties.Ips[0], - "password": request.Entities.Volumes.Items[0].Properties.ImagePassword, - }) - } - return resourceProfitBricksServerRead(d, meta) -} - -func resourceProfitBricksServerRead(d *schema.ResourceData, meta interface{}) error { - dcId := d.Get("datacenter_id").(string) - serverId := d.Id() - - server := profitbricks.GetServer(dcId, serverId) - if server.StatusCode > 299 { - if server.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("Error occured while fetching a server ID %s %s", d.Id(), server.Response) - } - d.Set("name", server.Properties.Name) - d.Set("cores", server.Properties.Cores) - d.Set("ram", server.Properties.Ram) - d.Set("availability_zone", server.Properties.AvailabilityZone) - - if primarynic, ok := d.GetOk("primary_nic"); ok { - d.Set("primary_nic", primarynic.(string)) - - nic := profitbricks.GetNic(dcId, serverId, primarynic.(string)) - - if len(nic.Properties.Ips) > 0 { - d.Set("primary_ip", nic.Properties.Ips[0]) - } - - if 
nRaw, ok := d.GetOk("nic"); ok { - log.Printf("[DEBUG] parsing nic") - - nicRaw := nRaw.(*schema.Set).List() - - for _, raw := range nicRaw { - - rawMap := raw.(map[string]interface{}) - - rawMap["lan"] = nic.Properties.Lan - rawMap["name"] = nic.Properties.Name - rawMap["dhcp"] = nic.Properties.Dhcp - rawMap["nat"] = nic.Properties.Nat - rawMap["firewall_active"] = nic.Properties.FirewallActive - rawMap["ips"] = nic.Properties.Ips - } - d.Set("nic", nicRaw) - } - } - - if server.Properties.BootVolume != nil { - d.Set("boot_volume", server.Properties.BootVolume.Id) - } - if server.Properties.BootCdrom != nil { - d.Set("boot_cdrom", server.Properties.BootCdrom.Id) - } - return nil -} - -func resourceProfitBricksServerUpdate(d *schema.ResourceData, meta interface{}) error { - dcId := d.Get("datacenter_id").(string) - - request := profitbricks.ServerProperties{} - - if d.HasChange("name") { - _, n := d.GetChange("name") - request.Name = n.(string) - } - if d.HasChange("cores") { - _, n := d.GetChange("cores") - request.Cores = n.(int) - } - if d.HasChange("ram") { - _, n := d.GetChange("ram") - request.Ram = n.(int) - } - if d.HasChange("availability_zone") { - _, n := d.GetChange("availability_zone") - request.AvailabilityZone = n.(string) - } - if d.HasChange("cpu_family") { - _, n := d.GetChange("cpu_family") - request.CpuFamily = n.(string) - } - server := profitbricks.PatchServer(dcId, d.Id(), request) - - //Volume stuff - if d.HasChange("volume") { - volume := server.Entities.Volumes.Items[0] - _, new := d.GetChange("volume") - - newVolume := new.(*schema.Set).List() - properties := profitbricks.VolumeProperties{} - - for _, raw := range newVolume { - rawMap := raw.(map[string]interface{}) - if rawMap["name"] != nil { - properties.Name = rawMap["name"].(string) - } - if rawMap["size"] != nil { - properties.Size = rawMap["size"].(int) - } - if rawMap["bus"] != nil { - properties.Bus = rawMap["bus"].(string) - } - } - - volume = 
profitbricks.PatchVolume(d.Get("datacenter_id").(string), server.Entities.Volumes.Items[0].Id, properties) - - if volume.StatusCode > 299 { - return fmt.Errorf("Error patching volume (%s) (%s)", d.Id(), volume.Response) - } - - err := waitTillProvisioned(meta, volume.Headers.Get("Location")) - if err != nil { - return err - } - } - - //Nic stuff - if d.HasChange("nic") { - nic := profitbricks.Nic{} - for _, n := range server.Entities.Nics.Items { - if n.Id == d.Get("primary_nic").(string) { - nic = n - break - } - } - _, new := d.GetChange("nic") - - newNic := new.(*schema.Set).List() - properties := profitbricks.NicProperties{} - - for _, raw := range newNic { - rawMap := raw.(map[string]interface{}) - if rawMap["name"] != nil { - properties.Name = rawMap["name"].(string) - } - if rawMap["ip"] != nil { - rawIps := rawMap["ip"].(string) - ips := strings.Split(rawIps, ",") - - if rawIps != "" { - nic.Properties.Ips = ips - } - } - if rawMap["lan"] != nil { - properties.Lan = rawMap["lan"].(int) - } - if rawMap["dhcp"] != nil { - properties.Dhcp = rawMap["dhcp"].(bool) - } - if rawMap["nat"] != nil { - properties.Nat = rawMap["nat"].(bool) - } - } - - nic = profitbricks.PatchNic(d.Get("datacenter_id").(string), server.Id, server.Entities.Nics.Items[0].Id, properties) - - if nic.StatusCode > 299 { - return fmt.Errorf( - "Error patching nic (%s)", nic.Response) - } - - err := waitTillProvisioned(meta, nic.Headers.Get("Location")) - if err != nil { - return err - } - } - - if server.StatusCode > 299 { - return fmt.Errorf( - "Error patching server (%s) (%s)", d.Id(), server.Response) - } - return resourceProfitBricksServerRead(d, meta) -} - -func resourceProfitBricksServerDelete(d *schema.ResourceData, meta interface{}) error { - dcId := d.Get("datacenter_id").(string) - - server := profitbricks.GetServer(dcId, d.Id()) - - if server.Properties.BootVolume != nil { - resp := profitbricks.DeleteVolume(dcId, server.Properties.BootVolume.Id) - err := waitTillProvisioned(meta, 
resp.Headers.Get("Location")) - if err != nil { - return err - } - } - - resp := profitbricks.DeleteServer(dcId, d.Id()) - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while deleting a server ID %s %s", d.Id(), string(resp.Body)) - - } - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - return nil -} - -//Reads public key from file and returns key string iff valid -func readPublicKey(path string) (key string, err error) { - bytes, err := ioutil.ReadFile(path) - if err != nil { - return "", err - } - pubKey, _, _, _, err := ssh.ParseAuthorizedKey(bytes) - if err != nil { - return "", err - } - return string(ssh.MarshalAuthorizedKey(pubKey)[:]), nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_server_test.go b/builtin/providers/profitbricks/resource_profitbricks_server_test.go deleted file mode 100644 index c5bbf2d4d..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_server_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksServer_Basic(t *testing.T) { - var server profitbricks.Server - serverName := "webserver" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksServerDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksServerConfig_basic, serverName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksServerExists("profitbricks_server.webserver", &server), - testAccCheckProfitBricksServerAttributes("profitbricks_server.webserver", serverName), - resource.TestCheckResourceAttr("profitbricks_server.webserver", "name", serverName), - ), - }, - 
resource.TestStep{ - Config: testAccCheckProfitbricksServerConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksServerAttributes("profitbricks_server.webserver", "updated"), - resource.TestCheckResourceAttr("profitbricks_server.webserver", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksServerDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_datacenter" { - continue - } - - resp := profitbricks.GetServer(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("Server still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksServerAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksServerAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckProfitBricksServerExists(n string, server *profitbricks.Server) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksServerExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundServer := profitbricks.GetServer(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if foundServer.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Server: %s", rs.Primary.ID) - } - if foundServer.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - server = &foundServer - - return nil - } -} - -const testAccCheckProfitbricksServerConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "server-test" - location = "us/las" -} 
- -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - public = true - name = "public" -} - -resource "profitbricks_server" "webserver" { - name = "%s" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "${profitbricks_lan.webserver_lan.id}" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -}` - -const testAccCheckProfitbricksServerConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "server-test" - location = "us/las" -} - -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - public = true - name = "public" -} - -resource "profitbricks_server" "webserver" { - name = "updated" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "${profitbricks_lan.webserver_lan.id}" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -}` diff --git a/builtin/providers/profitbricks/resource_profitbricks_volume.go b/builtin/providers/profitbricks/resource_profitbricks_volume.go deleted file mode 100644 index 46d8ff47d..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_volume.go +++ /dev/null @@ -1,261 +0,0 @@ -package profitbricks - -import ( - "fmt" - "github.com/hashicorp/terraform/helper/schema" - "github.com/profitbricks/profitbricks-sdk-go" - "log" -) - -func resourceProfitBricksVolume() 
*schema.Resource { - return &schema.Resource{ - Create: resourceProfitBricksVolumeCreate, - Read: resourceProfitBricksVolumeRead, - Update: resourceProfitBricksVolumeUpdate, - Delete: resourceProfitBricksVolumeDelete, - Schema: map[string]*schema.Schema{ - "image_name": { - Type: schema.TypeString, - Optional: true, - }, - "size": { - Type: schema.TypeInt, - Required: true, - }, - - "disk_type": { - Type: schema.TypeString, - Required: true, - }, - "image_password": { - Type: schema.TypeString, - Optional: true, - }, - "licence_type": { - Type: schema.TypeString, - Optional: true, - }, - "ssh_key_path": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "sshkey": { - Type: schema.TypeString, - Computed: true, - }, - "bus": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "availability_zone": { - Type: schema.TypeString, - Optional: true, - }, - "server_id": { - Type: schema.TypeString, - Required: true, - }, - "datacenter_id": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceProfitBricksVolumeCreate(d *schema.ResourceData, meta interface{}) error { - var err error - var ssh_keypath []interface{} - dcId := d.Get("datacenter_id").(string) - serverId := d.Get("server_id").(string) - imagePassword := d.Get("image_password").(string) - ssh_keypath = d.Get("ssh_key_path").([]interface{}) - image_name := d.Get("image_name").(string) - - licenceType := d.Get("licence_type").(string) - - if image_name == "" && licenceType == "" { - return fmt.Errorf("Either 'image_name', or 'licenceType' must be set.") - } - - var publicKeys []string - if len(ssh_keypath) != 0 { - for _, path := range ssh_keypath { - log.Printf("[DEBUG] Reading file %s", path) - publicKey, err := readPublicKey(path.(string)) - if err != nil { - return fmt.Errorf("Error fetching sshkey from file (%s) (%s)", path, err.Error()) - } - publicKeys = append(publicKeys, 
publicKey) - } - } - - var image string - if image_name != "" { - if !IsValidUUID(image_name) { - if imagePassword == "" && len(ssh_keypath) == 0 { - return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.") - } - image = getImageId(d.Get("datacenter_id").(string), image_name, d.Get("disk_type").(string)) - } else { - img := profitbricks.GetImage(image_name) - if img.StatusCode > 299 { - return fmt.Errorf("Error fetching image: %s", img.Response) - } - if img.Properties.Public == true { - if imagePassword == "" && len(ssh_keypath) == 0 { - return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.") - } - image = image_name - } else { - image = image_name - } - } - } - - volume := profitbricks.Volume{ - Properties: profitbricks.VolumeProperties{ - Name: d.Get("name").(string), - Size: d.Get("size").(int), - Type: d.Get("disk_type").(string), - ImagePassword: imagePassword, - Image: image, - Bus: d.Get("bus").(string), - LicenceType: licenceType, - }, - } - - if len(publicKeys) != 0 { - volume.Properties.SshKeys = publicKeys - - } else { - volume.Properties.SshKeys = nil - } - - if _, ok := d.GetOk("availability_zone"); ok { - raw := d.Get("availability_zone").(string) - volume.Properties.AvailabilityZone = raw - } - - volume = profitbricks.CreateVolume(dcId, volume) - - if volume.StatusCode > 299 { - return fmt.Errorf("An error occured while creating a volume: %s", volume.Response) - } - - err = waitTillProvisioned(meta, volume.Headers.Get("Location")) - if err != nil { - return err - } - volume = profitbricks.AttachVolume(dcId, serverId, volume.Id) - if volume.StatusCode > 299 { - return fmt.Errorf("An error occured while attaching a volume dcId: %s server_id: %s ID: %s Response: %s", dcId, serverId, volume.Id, volume.Response) - } - - err = waitTillProvisioned(meta, volume.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId(volume.Id) - - return resourceProfitBricksVolumeRead(d, meta) -} - -func 
resourceProfitBricksVolumeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - profitbricks.SetAuth(config.Username, config.Password) - - dcId := d.Get("datacenter_id").(string) - - volume := profitbricks.GetVolume(dcId, d.Id()) - - if volume.StatusCode > 299 { - if volume.StatusCode == 404 { - d.SetId("") - return nil - } - return fmt.Errorf("Error occured while fetching a volume ID %s %s", d.Id(), volume.Response) - } - - if volume.StatusCode > 299 { - return fmt.Errorf("An error occured while fetching a volume ID %s %s", d.Id(), volume.Response) - - } - - d.Set("name", volume.Properties.Name) - d.Set("disk_type", volume.Properties.Type) - d.Set("size", volume.Properties.Size) - d.Set("bus", volume.Properties.Bus) - d.Set("image_name", volume.Properties.Image) - - return nil -} - -func resourceProfitBricksVolumeUpdate(d *schema.ResourceData, meta interface{}) error { - properties := profitbricks.VolumeProperties{} - dcId := d.Get("datacenter_id").(string) - - if d.HasChange("name") { - _, newValue := d.GetChange("name") - properties.Name = newValue.(string) - } - if d.HasChange("disk_type") { - _, newValue := d.GetChange("disk_type") - properties.Type = newValue.(string) - } - if d.HasChange("size") { - _, newValue := d.GetChange("size") - properties.Size = newValue.(int) - } - if d.HasChange("bus") { - _, newValue := d.GetChange("bus") - properties.Bus = newValue.(string) - } - if d.HasChange("availability_zone") { - _, newValue := d.GetChange("availability_zone") - properties.AvailabilityZone = newValue.(string) - } - - volume := profitbricks.PatchVolume(dcId, d.Id(), properties) - err := waitTillProvisioned(meta, volume.Headers.Get("Location")) - if err != nil { - return err - } - if volume.StatusCode > 299 { - return fmt.Errorf("An error occured while updating a volume ID %s %s", d.Id(), volume.Response) - - } - err = resourceProfitBricksVolumeRead(d, meta) - if err != nil { - return err - } - 
d.SetId(d.Get("server_id").(string)) - err = resourceProfitBricksServerRead(d, meta) - if err != nil { - return err - } - - d.SetId(volume.Id) - return nil -} - -func resourceProfitBricksVolumeDelete(d *schema.ResourceData, meta interface{}) error { - dcId := d.Get("datacenter_id").(string) - - resp := profitbricks.DeleteVolume(dcId, d.Id()) - if resp.StatusCode > 299 { - return fmt.Errorf("An error occured while deleting a volume ID %s %s", d.Id(), string(resp.Body)) - - } - err := waitTillProvisioned(meta, resp.Headers.Get("Location")) - if err != nil { - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/profitbricks/resource_profitbricks_volume_test.go b/builtin/providers/profitbricks/resource_profitbricks_volume_test.go deleted file mode 100644 index db53ac0e1..000000000 --- a/builtin/providers/profitbricks/resource_profitbricks_volume_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package profitbricks - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/profitbricks/profitbricks-sdk-go" -) - -func TestAccProfitBricksVolume_Basic(t *testing.T) { - var volume profitbricks.Volume - volumeName := "volume" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDProfitBricksVolumeDestroyCheck, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testAccCheckProfitbricksVolumeConfig_basic, volumeName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProfitBricksVolumeExists("profitbricks_volume.database_volume", &volume), - testAccCheckProfitBricksVolumeAttributes("profitbricks_volume.database_volume", volumeName), - resource.TestCheckResourceAttr("profitbricks_volume.database_volume", "name", volumeName), - ), - }, - resource.TestStep{ - Config: testAccCheckProfitbricksVolumeConfig_update, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckProfitBricksVolumeAttributes("profitbricks_volume.database_volume", "updated"), - resource.TestCheckResourceAttr("profitbricks_volume.database_volume", "name", "updated"), - ), - }, - }, - }) -} - -func testAccCheckDProfitBricksVolumeDestroyCheck(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "profitbricks_datacenter" { - continue - } - - resp := profitbricks.GetVolume(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if resp.StatusCode < 299 { - return fmt.Errorf("Volume still exists %s %s", rs.Primary.ID, resp.Response) - } - } - - return nil -} - -func testAccCheckProfitBricksVolumeAttributes(n string, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("testAccCheckProfitBricksVolumeAttributes: Not found: %s", n) - } - if rs.Primary.Attributes["name"] != name { - return fmt.Errorf("Bad name: %s", rs.Primary.Attributes["name"]) - } - - return nil - } -} - -func testAccCheckProfitBricksVolumeExists(n string, volume *profitbricks.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("testAccCheckProfitBricksVolumeExists: Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - foundServer := profitbricks.GetVolume(rs.Primary.Attributes["datacenter_id"], rs.Primary.ID) - - if foundServer.StatusCode != 200 { - return fmt.Errorf("Error occured while fetching Volume: %s", rs.Primary.ID) - } - if foundServer.Id != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - volume = &foundServer - - return nil - } -} - -const testAccCheckProfitbricksVolumeConfig_basic = ` -resource "profitbricks_datacenter" "foobar" { - name = "volume-test" - location = "us/las" -} - -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = 
"${profitbricks_datacenter.foobar.id}" - public = true - name = "public" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "${profitbricks_lan.webserver_lan.id}" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_volume" "database_volume" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - licence_type = "OTHER" - name = "%s" - size = 5 - disk_type = "SSD" - bus = "VIRTIO" -}` - -const testAccCheckProfitbricksVolumeConfig_update = ` -resource "profitbricks_datacenter" "foobar" { - name = "volume-test" - location = "us/las" -} - -resource "profitbricks_lan" "webserver_lan" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - public = true - name = "public" -} - -resource "profitbricks_server" "webserver" { - name = "webserver" - datacenter_id = "${profitbricks_datacenter.foobar.id}" - cores = 1 - ram = 1024 - availability_zone = "ZONE_1" - cpu_family = "AMD_OPTERON" - volume { - name = "system" - size = 5 - disk_type = "SSD" - image_name ="ubuntu-16.04" - image_password = "K3tTj8G14a3EgKyNeeiY" -} - nic { - lan = "${profitbricks_lan.webserver_lan.id}" - dhcp = true - firewall_active = true - firewall { - protocol = "TCP" - name = "SSH" - port_range_start = 22 - port_range_end = 22 - } - } -} - -resource "profitbricks_volume" "database_volume" { - datacenter_id = "${profitbricks_datacenter.foobar.id}" - server_id = "${profitbricks_server.webserver.id}" - licence_type = "OTHER" - name = "updated" - size = 5 - disk_type = "SSD" - bus = "VIRTIO" -}` diff --git 
a/builtin/providers/rabbitmq/acceptance_env/deploy.sh b/builtin/providers/rabbitmq/acceptance_env/deploy.sh deleted file mode 100644 index 9e8c01d0b..000000000 --- a/builtin/providers/rabbitmq/acceptance_env/deploy.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -cd -echo 'deb http://www.rabbitmq.com/debian/ testing main' | sudo tee /etc/apt/sources.list.d/rabbitmq.list -wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add - -sudo apt-get update -sudo apt-get install -y git make mercurial -sudo apt-get install -y rabbitmq-server -sudo rabbitmq-plugins enable rabbitmq_management - -sudo wget -O /usr/local/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme -sudo chmod +x /usr/local/bin/gimme -gimme 1.8 >> .bashrc - -mkdir ~/go -eval "$(/usr/local/bin/gimme 1.8)" -echo 'export GOPATH=$HOME/go' >> .bashrc -export GOPATH=$HOME/go - -export PATH=$PATH:$HOME/terraform:$HOME/go/bin -echo 'export PATH=$PATH:$HOME/terraform:$HOME/go/bin' >> .bashrc -source .bashrc - -go get -u github.com/kardianos/govendor -go get github.com/hashicorp/terraform - -cat < ~/rabbitmqrc -export RABBITMQ_ENDPOINT="http://127.0.0.1:15672" -export RABBITMQ_USERNAME="guest" -export RABBITMQ_PASSWORD="guest" -EOF diff --git a/builtin/providers/rabbitmq/import_binding_test.go b/builtin/providers/rabbitmq/import_binding_test.go deleted file mode 100644 index db845c710..000000000 --- a/builtin/providers/rabbitmq/import_binding_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccBinding_importBasic(t *testing.T) { - resourceName := "rabbitmq_binding.test" - var bindingInfo rabbithole.BindingInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccBindingCheckDestroy(bindingInfo), - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: testAccBindingConfig_basic, - Check: testAccBindingCheck( - resourceName, &bindingInfo, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_exchange_test.go b/builtin/providers/rabbitmq/import_exchange_test.go deleted file mode 100644 index 8fb8ac53b..000000000 --- a/builtin/providers/rabbitmq/import_exchange_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccExchange_importBasic(t *testing.T) { - resourceName := "rabbitmq_exchange.test" - var exchangeInfo rabbithole.ExchangeInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccExchangeCheckDestroy(&exchangeInfo), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccExchangeConfig_basic, - Check: testAccExchangeCheck( - resourceName, &exchangeInfo, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_permissions_test.go b/builtin/providers/rabbitmq/import_permissions_test.go deleted file mode 100644 index f8d45b063..000000000 --- a/builtin/providers/rabbitmq/import_permissions_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPermissions_importBasic(t *testing.T) { - resourceName := "rabbitmq_permissions.test" - var permissionInfo rabbithole.PermissionInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPermissionsCheckDestroy(&permissionInfo), - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccPermissionsConfig_basic, - Check: testAccPermissionsCheck( - resourceName, &permissionInfo, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_policy_test.go b/builtin/providers/rabbitmq/import_policy_test.go deleted file mode 100644 index 7fa59ba88..000000000 --- a/builtin/providers/rabbitmq/import_policy_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccPolicy_importBasic(t *testing.T) { - resourceName := "rabbitmq_policy.test" - var policy rabbithole.Policy - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPolicyCheckDestroy(&policy), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPolicyConfig_basic, - Check: testAccPolicyCheck( - resourceName, &policy, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_queue_test.go b/builtin/providers/rabbitmq/import_queue_test.go deleted file mode 100644 index ceb3eec80..000000000 --- a/builtin/providers/rabbitmq/import_queue_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccQueue_importBasic(t *testing.T) { - resourceName := "rabbitmq_queue.test" - var queue rabbithole.QueueInfo - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccQueueCheckDestroy(&queue), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccQueueConfig_basic, - 
Check: testAccQueueCheck( - resourceName, &queue, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_user_test.go b/builtin/providers/rabbitmq/import_user_test.go deleted file mode 100644 index f33eb7e8e..000000000 --- a/builtin/providers/rabbitmq/import_user_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccUser_importBasic(t *testing.T) { - resourceName := "rabbitmq_user.test" - var user string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccUserCheckDestroy(user), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_basic, - Check: testAccUserCheck( - resourceName, &user, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"password"}, - }, - }, - }) -} diff --git a/builtin/providers/rabbitmq/import_vhost_test.go b/builtin/providers/rabbitmq/import_vhost_test.go deleted file mode 100644 index 599a02436..000000000 --- a/builtin/providers/rabbitmq/import_vhost_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package rabbitmq - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccVhost_importBasic(t *testing.T) { - resourceName := "rabbitmq_vhost.test" - var vhost string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccVhostCheckDestroy(vhost), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVhostConfig_basic, - Check: testAccVhostCheck( - resourceName, &vhost, - ), - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/rabbitmq/provider.go b/builtin/providers/rabbitmq/provider.go deleted file mode 100644 index f7db53a4b..000000000 --- a/builtin/providers/rabbitmq/provider.go +++ /dev/null @@ -1,123 +0,0 @@ -package rabbitmq - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("RABBITMQ_ENDPOINT", nil), - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "" { - errors = append(errors, fmt.Errorf("Endpoint must not be an empty string")) - } - - return - }, - }, - - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("RABBITMQ_USERNAME", nil), - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "" { - errors = append(errors, fmt.Errorf("Username must not be an empty string")) - } - - return - }, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("RABBITMQ_PASSWORD", nil), - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value == "" { - errors = append(errors, fmt.Errorf("Password must not be an empty string")) - } - - return - }, - }, - - "insecure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RABBITMQ_INSECURE", nil), - }, - - "cacert_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RABBITMQ_CACERT", ""), - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - 
"rabbitmq_binding": resourceBinding(), - "rabbitmq_exchange": resourceExchange(), - "rabbitmq_permissions": resourcePermissions(), - "rabbitmq_policy": resourcePolicy(), - "rabbitmq_queue": resourceQueue(), - "rabbitmq_user": resourceUser(), - "rabbitmq_vhost": resourceVhost(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - - var username = d.Get("username").(string) - var password = d.Get("password").(string) - var endpoint = d.Get("endpoint").(string) - var insecure = d.Get("insecure").(bool) - var cacertFile = d.Get("cacert_file").(string) - - // Configure TLS/SSL: - // Ignore self-signed cert warnings - // Specify a custom CA / intermediary cert - // Specify a certificate and key - tlsConfig := &tls.Config{} - if cacertFile != "" { - caCert, err := ioutil.ReadFile(cacertFile) - if err != nil { - return nil, err - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caCertPool - } - if insecure { - tlsConfig.InsecureSkipVerify = true - } - - // Connect to RabbitMQ management interface - transport := &http.Transport{TLSClientConfig: tlsConfig} - rmqc, err := rabbithole.NewTLSClient(endpoint, username, password, transport) - if err != nil { - return nil, err - } - - return rmqc, nil -} diff --git a/builtin/providers/rabbitmq/provider_test.go b/builtin/providers/rabbitmq/provider_test.go deleted file mode 100644 index 411459ee8..000000000 --- a/builtin/providers/rabbitmq/provider_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package rabbitmq - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need access to a RabbitMQ server -// with the management plugin enabled. -// -// Set the RABBITMQ_ENDPOINT, RABBITMQ_USERNAME, and RABBITMQ_PASSWORD -// environment variables before running the tests. 
-// -// You can run the tests like this: -// make testacc TEST=./builtin/providers/rabbitmq - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "rabbitmq": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - for _, name := range []string{"RABBITMQ_ENDPOINT", "RABBITMQ_USERNAME", "RABBITMQ_PASSWORD"} { - if v := os.Getenv(name); v == "" { - t.Fatal("RABBITMQ_ENDPOINT, RABBITMQ_USERNAME and RABBITMQ_PASSWORD must be set for acceptance tests") - } - } -} diff --git a/builtin/providers/rabbitmq/resource_binding.go b/builtin/providers/rabbitmq/resource_binding.go deleted file mode 100644 index dff6bf009..000000000 --- a/builtin/providers/rabbitmq/resource_binding.go +++ /dev/null @@ -1,195 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceBinding() *schema.Resource { - return &schema.Resource{ - Create: CreateBinding, - Read: ReadBinding, - Delete: DeleteBinding, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vhost": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "properties_key": &schema.Schema{ - 
Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "routing_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "arguments": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func CreateBinding(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - vhost := d.Get("vhost").(string) - bindingInfo := rabbithole.BindingInfo{ - Source: d.Get("source").(string), - Destination: d.Get("destination").(string), - DestinationType: d.Get("destination_type").(string), - RoutingKey: d.Get("routing_key").(string), - PropertiesKey: d.Get("properties_key").(string), - Arguments: d.Get("arguments").(map[string]interface{}), - } - - if err := declareBinding(rmqc, vhost, bindingInfo); err != nil { - return err - } - - name := fmt.Sprintf("%s/%s/%s/%s/%s", vhost, bindingInfo.Source, bindingInfo.Destination, bindingInfo.DestinationType, bindingInfo.PropertiesKey) - d.SetId(name) - - return ReadBinding(d, meta) -} - -func ReadBinding(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - bindingId := strings.Split(d.Id(), "/") - if len(bindingId) < 5 { - return fmt.Errorf("Unable to determine binding ID") - } - - vhost := bindingId[0] - source := bindingId[1] - destination := bindingId[2] - destinationType := bindingId[3] - propertiesKey := bindingId[4] - - bindings, err := rmqc.ListBindingsIn(vhost) - if err != nil { - return err - } - - log.Printf("[DEBUG] RabbitMQ: Bindings retrieved: %#v", bindings) - bindingFound := false - for _, binding := range bindings { - if binding.Source == source && binding.Destination == destination && binding.DestinationType == destinationType && binding.PropertiesKey == propertiesKey { - log.Printf("[DEBUG] RabbitMQ: Found Binding: %#v", binding) - bindingFound = true - - d.Set("vhost", binding.Vhost) - d.Set("source", binding.Source) - d.Set("destination", binding.Destination) - 
d.Set("destination_type", binding.DestinationType) - d.Set("routing_key", binding.RoutingKey) - d.Set("properties_key", binding.PropertiesKey) - d.Set("arguments", binding.Arguments) - } - } - - // The binding could not be found, - // so consider it deleted and remove from state - if !bindingFound { - d.SetId("") - } - - return nil -} - -func DeleteBinding(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - bindingId := strings.Split(d.Id(), "/") - if len(bindingId) < 5 { - return fmt.Errorf("Unable to determine binding ID") - } - - vhost := bindingId[0] - source := bindingId[1] - destination := bindingId[2] - destinationType := bindingId[3] - propertiesKey := bindingId[4] - - bindingInfo := rabbithole.BindingInfo{ - Vhost: vhost, - Source: source, - Destination: destination, - DestinationType: destinationType, - PropertiesKey: propertiesKey, - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete binding for %s/%s/%s/%s/%s", - vhost, source, destination, destinationType, propertiesKey) - - resp, err := rmqc.DeleteBinding(vhost, bindingInfo) - if err != nil { - return err - } - - log.Printf("[DEBUG] RabbitMQ: Binding delete response: %#v", resp) - - if resp.StatusCode == 404 { - // The binding was already deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ binding: %s", resp.Status) - } - - return nil -} - -func declareBinding(rmqc *rabbithole.Client, vhost string, bindingInfo rabbithole.BindingInfo) error { - log.Printf("[DEBUG] RabbitMQ: Attempting to declare binding for %s/%s/%s/%s/%s", - vhost, bindingInfo.Source, bindingInfo.Destination, bindingInfo.DestinationType, bindingInfo.PropertiesKey) - - resp, err := rmqc.DeclareBinding(vhost, bindingInfo) - log.Printf("[DEBUG] RabbitMQ: Binding declare response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error declaring RabbitMQ binding: %s", resp.Status) - } - - return 
nil -} diff --git a/builtin/providers/rabbitmq/resource_binding_test.go b/builtin/providers/rabbitmq/resource_binding_test.go deleted file mode 100644 index ccd9d646c..000000000 --- a/builtin/providers/rabbitmq/resource_binding_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccBinding_basic(t *testing.T) { - var bindingInfo rabbithole.BindingInfo - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccBindingCheckDestroy(bindingInfo), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBindingConfig_basic, - Check: testAccBindingCheck( - "rabbitmq_binding.test", &bindingInfo, - ), - }, - }, - }) -} - -func TestAccBinding_propertiesKey(t *testing.T) { - var bindingInfo rabbithole.BindingInfo - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccBindingCheckDestroy(bindingInfo), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccBindingConfig_propertiesKey, - Check: testAccBindingCheck( - "rabbitmq_binding.test", &bindingInfo, - ), - }, - }, - }) -} - -func testAccBindingCheck(rn string, bindingInfo *rabbithole.BindingInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("binding id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - bindingParts := strings.Split(rs.Primary.ID, "/") - - bindings, err := rmqc.ListBindingsIn(bindingParts[0]) - if err != nil { - return fmt.Errorf("Error retrieving exchange: %s", err) - } - - for _, binding := range bindings { - if binding.Source == 
bindingParts[1] && binding.Destination == bindingParts[2] && binding.DestinationType == bindingParts[3] && binding.PropertiesKey == bindingParts[4] { - bindingInfo = &binding - return nil - } - } - - return fmt.Errorf("Unable to find binding %s", rn) - } -} - -func testAccBindingCheckDestroy(bindingInfo rabbithole.BindingInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - - bindings, err := rmqc.ListBindingsIn(bindingInfo.Vhost) - if err != nil { - return fmt.Errorf("Error retrieving exchange: %s", err) - } - - for _, binding := range bindings { - if binding.Source == bindingInfo.Source && binding.Destination == bindingInfo.Destination && binding.DestinationType == bindingInfo.DestinationType && binding.PropertiesKey == bindingInfo.PropertiesKey { - return fmt.Errorf("Binding still exists") - } - } - - return nil - } -} - -const testAccBindingConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_exchange" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - type = "fanout" - durable = false - auto_delete = true - } -} - -resource "rabbitmq_queue" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - durable = true - auto_delete = false - } -} - -resource "rabbitmq_binding" "test" { - source = "${rabbitmq_exchange.test.name}" - vhost = "${rabbitmq_vhost.test.name}" - destination = "${rabbitmq_queue.test.name}" - destination_type = "queue" - routing_key = "#" - properties_key = "%23" -}` - -const testAccBindingConfig_propertiesKey = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - 
configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_exchange" "test" { - name = "Test" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - type = "topic" - durable = true - auto_delete = false - } -} - -resource "rabbitmq_queue" "test" { - name = "Test.Queue" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - durable = true - auto_delete = false - } -} - -resource "rabbitmq_binding" "test" { - source = "${rabbitmq_exchange.test.name}" - vhost = "${rabbitmq_vhost.test.name}" - destination = "${rabbitmq_queue.test.name}" - destination_type = "queue" - routing_key = "ANYTHING.#" - properties_key = "ANYTHING.%23" -} -` diff --git a/builtin/providers/rabbitmq/resource_exchange.go b/builtin/providers/rabbitmq/resource_exchange.go deleted file mode 100644 index 238a59e74..000000000 --- a/builtin/providers/rabbitmq/resource_exchange.go +++ /dev/null @@ -1,189 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceExchange() *schema.Resource { - return &schema.Resource{ - Create: CreateExchange, - Read: ReadExchange, - Delete: DeleteExchange, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vhost": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - - "settings": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "durable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "auto_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - 
"arguments": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func CreateExchange(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Get("name").(string) - vhost := d.Get("vhost").(string) - settingsList := d.Get("settings").([]interface{}) - - settingsMap, ok := settingsList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse settings") - } - - if err := declareExchange(rmqc, vhost, name, settingsMap); err != nil { - return err - } - - id := fmt.Sprintf("%s@%s", name, vhost) - d.SetId(id) - - return ReadExchange(d, meta) -} - -func ReadExchange(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - exchangeId := strings.Split(d.Id(), "@") - if len(exchangeId) < 2 { - return fmt.Errorf("Unable to determine exchange ID") - } - - name := exchangeId[0] - vhost := exchangeId[1] - - exchangeSettings, err := rmqc.GetExchange(vhost, name) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] RabbitMQ: Exchange retrieved %s: %#v", d.Id(), exchangeSettings) - - d.Set("name", exchangeSettings.Name) - d.Set("vhost", exchangeSettings.Vhost) - - exchange := make([]map[string]interface{}, 1) - e := make(map[string]interface{}) - e["type"] = exchangeSettings.Type - e["durable"] = exchangeSettings.Durable - e["auto_delete"] = exchangeSettings.AutoDelete - e["arguments"] = exchangeSettings.Arguments - exchange[0] = e - d.Set("settings", exchange) - - return nil -} - -func DeleteExchange(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - exchangeId := strings.Split(d.Id(), "@") - if len(exchangeId) < 2 { - return fmt.Errorf("Unable to determine exchange ID") - } - - name := exchangeId[0] - vhost := exchangeId[1] - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete exchange %s", d.Id()) - - resp, err := rmqc.DeleteExchange(vhost, name) - log.Printf("[DEBUG] RabbitMQ: 
Exchange delete response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - // The exchange was automatically deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ exchange: %s", resp.Status) - } - - return nil -} - -func declareExchange(rmqc *rabbithole.Client, vhost string, name string, settingsMap map[string]interface{}) error { - exchangeSettings := rabbithole.ExchangeSettings{} - - if v, ok := settingsMap["type"].(string); ok { - exchangeSettings.Type = v - } - - if v, ok := settingsMap["durable"].(bool); ok { - exchangeSettings.Durable = v - } - - if v, ok := settingsMap["auto_delete"].(bool); ok { - exchangeSettings.AutoDelete = v - } - - if v, ok := settingsMap["arguments"].(map[string]interface{}); ok { - exchangeSettings.Arguments = v - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to declare exchange %s@%s: %#v", name, vhost, exchangeSettings) - - resp, err := rmqc.DeclareExchange(vhost, name, exchangeSettings) - log.Printf("[DEBUG] RabbitMQ: Exchange declare response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error declaring RabbitMQ exchange: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_exchange_test.go b/builtin/providers/rabbitmq/resource_exchange_test.go deleted file mode 100644 index 7747e489d..000000000 --- a/builtin/providers/rabbitmq/resource_exchange_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccExchange(t *testing.T) { - var exchangeInfo rabbithole.ExchangeInfo - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccExchangeCheckDestroy(&exchangeInfo), - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: testAccExchangeConfig_basic, - Check: testAccExchangeCheck( - "rabbitmq_exchange.test", &exchangeInfo, - ), - }, - }, - }) -} - -func testAccExchangeCheck(rn string, exchangeInfo *rabbithole.ExchangeInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("exchange id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - exchParts := strings.Split(rs.Primary.ID, "@") - - exchanges, err := rmqc.ListExchangesIn(exchParts[1]) - if err != nil { - return fmt.Errorf("Error retrieving exchange: %s", err) - } - - for _, exchange := range exchanges { - if exchange.Name == exchParts[0] && exchange.Vhost == exchParts[1] { - exchangeInfo = &exchange - return nil - } - } - - return fmt.Errorf("Unable to find exchange %s", rn) - } -} - -func testAccExchangeCheckDestroy(exchangeInfo *rabbithole.ExchangeInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - - exchanges, err := rmqc.ListExchangesIn(exchangeInfo.Vhost) - if err != nil { - return fmt.Errorf("Error retrieving exchange: %s", err) - } - - for _, exchange := range exchanges { - if exchange.Name == exchangeInfo.Name && exchange.Vhost == exchangeInfo.Vhost { - return fmt.Errorf("Exchange %s@%s still exist", exchangeInfo.Name, exchangeInfo.Vhost) - } - } - - return nil - } -} - -const testAccExchangeConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_exchange" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - type = "fanout" - durable = false - auto_delete = true - } 
-}` diff --git a/builtin/providers/rabbitmq/resource_permissions.go b/builtin/providers/rabbitmq/resource_permissions.go deleted file mode 100644 index e9ec70818..000000000 --- a/builtin/providers/rabbitmq/resource_permissions.go +++ /dev/null @@ -1,205 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePermissions() *schema.Resource { - return &schema.Resource{ - Create: CreatePermissions, - Update: UpdatePermissions, - Read: ReadPermissions, - Delete: DeletePermissions, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vhost": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - - "permissions": &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configure": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "write": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "read": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func CreatePermissions(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - user := d.Get("user").(string) - vhost := d.Get("vhost").(string) - permsList := d.Get("permissions").([]interface{}) - - permsMap, ok := permsList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse permissions") - } - - if err := setPermissionsIn(rmqc, vhost, user, permsMap); err != nil { - return err - } - - id := fmt.Sprintf("%s@%s", user, vhost) - d.SetId(id) - - return ReadPermissions(d, meta) -} - -func ReadPermissions(d *schema.ResourceData, meta interface{}) error { - rmqc := 
meta.(*rabbithole.Client) - - permissionId := strings.Split(d.Id(), "@") - if len(permissionId) < 2 { - return fmt.Errorf("Unable to determine Permission ID") - } - - user := permissionId[0] - vhost := permissionId[1] - - userPerms, err := rmqc.GetPermissionsIn(vhost, user) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] RabbitMQ: Permission retrieved for %s: %#v", d.Id(), userPerms) - - d.Set("user", userPerms.User) - d.Set("vhost", userPerms.Vhost) - - perms := make([]map[string]interface{}, 1) - p := make(map[string]interface{}) - p["configure"] = userPerms.Configure - p["write"] = userPerms.Write - p["read"] = userPerms.Read - perms[0] = p - d.Set("permissions", perms) - - return nil -} - -func UpdatePermissions(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - permissionId := strings.Split(d.Id(), "@") - if len(permissionId) < 2 { - return fmt.Errorf("Unable to determine Permission ID") - } - - user := permissionId[0] - vhost := permissionId[1] - - if d.HasChange("permissions") { - _, newPerms := d.GetChange("permissions") - - newPermsList := newPerms.([]interface{}) - permsMap, ok := newPermsList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse permissions") - } - - if err := setPermissionsIn(rmqc, vhost, user, permsMap); err != nil { - return err - } - } - - return ReadPermissions(d, meta) -} - -func DeletePermissions(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - permissionId := strings.Split(d.Id(), "@") - if len(permissionId) < 2 { - return fmt.Errorf("Unable to determine Permission ID") - } - - user := permissionId[0] - vhost := permissionId[1] - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete permission for %s", d.Id()) - - resp, err := rmqc.ClearPermissionsIn(vhost, user) - log.Printf("[DEBUG] RabbitMQ: Permission delete response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - 
// The permissions were already deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ permission: %s", resp.Status) - } - - return nil -} - -func setPermissionsIn(rmqc *rabbithole.Client, vhost string, user string, permsMap map[string]interface{}) error { - perms := rabbithole.Permissions{} - - if v, ok := permsMap["configure"].(string); ok { - perms.Configure = v - } - - if v, ok := permsMap["write"].(string); ok { - perms.Write = v - } - - if v, ok := permsMap["read"].(string); ok { - perms.Read = v - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to set permissions for %s@%s: %#v", user, vhost, perms) - - resp, err := rmqc.UpdatePermissionsIn(vhost, user, perms) - log.Printf("[DEBUG] RabbitMQ: Permission response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error setting permissions: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_permissions_test.go b/builtin/providers/rabbitmq/resource_permissions_test.go deleted file mode 100644 index 3d87adf1e..000000000 --- a/builtin/providers/rabbitmq/resource_permissions_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPermissions(t *testing.T) { - var permissionInfo rabbithole.PermissionInfo - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPermissionsCheckDestroy(&permissionInfo), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPermissionsConfig_basic, - Check: testAccPermissionsCheck( - "rabbitmq_permissions.test", &permissionInfo, - ), - }, - resource.TestStep{ - Config: testAccPermissionsConfig_update, - Check: testAccPermissionsCheck( - 
"rabbitmq_permissions.test", &permissionInfo, - ), - }, - }, - }) -} - -func testAccPermissionsCheck(rn string, permissionInfo *rabbithole.PermissionInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("permission id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - perms, err := rmqc.ListPermissions() - if err != nil { - return fmt.Errorf("Error retrieving permissions: %s", err) - } - - userParts := strings.Split(rs.Primary.ID, "@") - for _, perm := range perms { - if perm.User == userParts[0] && perm.Vhost == userParts[1] { - permissionInfo = &perm - return nil - } - } - - return fmt.Errorf("Unable to find permissions for user %s", rn) - } -} - -func testAccPermissionsCheckDestroy(permissionInfo *rabbithole.PermissionInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - perms, err := rmqc.ListPermissions() - if err != nil { - return fmt.Errorf("Error retrieving permissions: %s", err) - } - - for _, perm := range perms { - if perm.User == permissionInfo.User && perm.Vhost == permissionInfo.Vhost { - return fmt.Errorf("Permissions still exist for user %s@%s", permissionInfo.User, permissionInfo.Vhost) - } - } - - return nil - } -} - -const testAccPermissionsConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - tags = ["administrator"] -} - -resource "rabbitmq_permissions" "test" { - user = "${rabbitmq_user.test.name}" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -}` - -const testAccPermissionsConfig_update = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - 
tags = ["administrator"] -} - -resource "rabbitmq_permissions" "test" { - user = "${rabbitmq_user.test.name}" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = "" - } -}` diff --git a/builtin/providers/rabbitmq/resource_policy.go b/builtin/providers/rabbitmq/resource_policy.go deleted file mode 100644 index 31154373f..000000000 --- a/builtin/providers/rabbitmq/resource_policy.go +++ /dev/null @@ -1,239 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePolicy() *schema.Resource { - return &schema.Resource{ - Create: CreatePolicy, - Update: UpdatePolicy, - Read: ReadPolicy, - Delete: DeletePolicy, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vhost": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "policy": &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "priority": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "apply_to": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "definition": &schema.Schema{ - Type: schema.TypeMap, - Required: true, - }, - }, - }, - }, - }, - } -} - -func CreatePolicy(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Get("name").(string) - vhost := d.Get("vhost").(string) - policyList := d.Get("policy").([]interface{}) - - policyMap, ok := policyList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse policy") - } - - if err := putPolicy(rmqc, vhost, name, policyMap); err 
!= nil { - return err - } - - id := fmt.Sprintf("%s@%s", name, vhost) - d.SetId(id) - - return ReadPolicy(d, meta) -} - -func ReadPolicy(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - policyId := strings.Split(d.Id(), "@") - if len(policyId) < 2 { - return fmt.Errorf("Unable to determine policy ID") - } - - user := policyId[0] - vhost := policyId[1] - - policy, err := rmqc.GetPolicy(vhost, user) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] RabbitMQ: Policy retrieved for %s: %#v", d.Id(), policy) - - d.Set("name", policy.Name) - d.Set("vhost", policy.Vhost) - - setPolicy := make([]map[string]interface{}, 1) - p := make(map[string]interface{}) - p["pattern"] = policy.Pattern - p["priority"] = policy.Priority - p["apply_to"] = policy.ApplyTo - - policyDefinition := make(map[string]interface{}) - for key, value := range policy.Definition { - if v, ok := value.([]interface{}); ok { - var nodes []string - for _, node := range v { - if n, ok := node.(string); ok { - nodes = append(nodes, n) - } - } - value = strings.Join(nodes, ",") - } - policyDefinition[key] = value - } - p["definition"] = policyDefinition - setPolicy[0] = p - - d.Set("policy", setPolicy) - - return nil -} - -func UpdatePolicy(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - policyId := strings.Split(d.Id(), "@") - if len(policyId) < 2 { - return fmt.Errorf("Unable to determine policy ID") - } - - user := policyId[0] - vhost := policyId[1] - - if d.HasChange("policy") { - _, newPolicy := d.GetChange("policy") - - policyList := newPolicy.([]interface{}) - policyMap, ok := policyList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse policy") - } - - if err := putPolicy(rmqc, user, vhost, policyMap); err != nil { - return err - } - } - - return ReadPolicy(d, meta) -} - -func DeletePolicy(d *schema.ResourceData, meta interface{}) error { - rmqc := 
meta.(*rabbithole.Client) - - policyId := strings.Split(d.Id(), "@") - if len(policyId) < 2 { - return fmt.Errorf("Unable to determine policy ID") - } - - user := policyId[0] - vhost := policyId[1] - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete policy for %s", d.Id()) - - resp, err := rmqc.DeletePolicy(vhost, user) - log.Printf("[DEBUG] RabbitMQ: Policy delete response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - // the policy was automatically deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ policy: %s", resp.Status) - } - - return nil -} - -func putPolicy(rmqc *rabbithole.Client, vhost string, name string, policyMap map[string]interface{}) error { - policy := rabbithole.Policy{} - policy.Vhost = vhost - policy.Name = name - - if v, ok := policyMap["pattern"].(string); ok { - policy.Pattern = v - } - - if v, ok := policyMap["priority"].(int); ok { - policy.Priority = v - } - - if v, ok := policyMap["apply_to"].(string); ok { - policy.ApplyTo = v - } - - if v, ok := policyMap["definition"].(map[string]interface{}); ok { - // special case for ha-mode = nodes - if x, ok := v["ha-mode"]; ok && x == "nodes" { - var nodes rabbithole.NodeNames - nodes = strings.Split(v["ha-params"].(string), ",") - v["ha-params"] = nodes - } - policyDefinition := rabbithole.PolicyDefinition{} - policyDefinition = v - policy.Definition = policyDefinition - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to declare policy for %s@%s: %#v", name, vhost, policy) - - resp, err := rmqc.PutPolicy(vhost, name, policy) - log.Printf("[DEBUG] RabbitMQ: Policy declare response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error declaring RabbitMQ policy: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_policy_test.go b/builtin/providers/rabbitmq/resource_policy_test.go deleted file mode 100644 index 
614620b69..000000000 --- a/builtin/providers/rabbitmq/resource_policy_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPolicy(t *testing.T) { - var policy rabbithole.Policy - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPolicyCheckDestroy(&policy), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPolicyConfig_basic, - Check: testAccPolicyCheck( - "rabbitmq_policy.test", &policy, - ), - }, - resource.TestStep{ - Config: testAccPolicyConfig_update, - Check: testAccPolicyCheck( - "rabbitmq_policy.test", &policy, - ), - }, - }, - }) -} - -func testAccPolicyCheck(rn string, policy *rabbithole.Policy) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("policy id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - policyParts := strings.Split(rs.Primary.ID, "@") - - policies, err := rmqc.ListPolicies() - if err != nil { - return fmt.Errorf("Error retrieving policies: %s", err) - } - - for _, p := range policies { - if p.Name == policyParts[0] && p.Vhost == policyParts[1] { - policy = &p - return nil - } - } - - return fmt.Errorf("Unable to find policy %s", rn) - } -} - -func testAccPolicyCheckDestroy(policy *rabbithole.Policy) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - - policies, err := rmqc.ListPolicies() - if err != nil { - return fmt.Errorf("Error retrieving policies: %s", err) - } - - for _, p := range policies { - if p.Name == policy.Name && p.Vhost == policy.Vhost { - return 
fmt.Errorf("Policy %s@%s still exist", policy.Name, policy.Vhost) - } - } - - return nil - } -} - -const testAccPolicyConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_policy" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - policy { - pattern = ".*" - priority = 0 - apply_to = "all" - definition { - ha-mode = "nodes" - ha-params = "a,b,c" - } - } -}` - -const testAccPolicyConfig_update = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_policy" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - policy { - pattern = ".*" - priority = 0 - apply_to = "all" - definition { - ha-mode = "all" - } - } -}` diff --git a/builtin/providers/rabbitmq/resource_queue.go b/builtin/providers/rabbitmq/resource_queue.go deleted file mode 100644 index 097b04d62..000000000 --- a/builtin/providers/rabbitmq/resource_queue.go +++ /dev/null @@ -1,180 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceQueue() *schema.Resource { - return &schema.Resource{ - Create: CreateQueue, - Read: ReadQueue, - Delete: DeleteQueue, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "vhost": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/", - ForceNew: true, - }, - - "settings": &schema.Schema{ - Type: schema.TypeList, - 
Required: true, - ForceNew: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "durable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "auto_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "arguments": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func CreateQueue(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Get("name").(string) - vhost := d.Get("vhost").(string) - settingsList := d.Get("settings").([]interface{}) - - settingsMap, ok := settingsList[0].(map[string]interface{}) - if !ok { - return fmt.Errorf("Unable to parse settings") - } - - if err := declareQueue(rmqc, vhost, name, settingsMap); err != nil { - return err - } - - id := fmt.Sprintf("%s@%s", name, vhost) - d.SetId(id) - - return ReadQueue(d, meta) -} - -func ReadQueue(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - queueId := strings.Split(d.Id(), "@") - if len(queueId) < 2 { - return fmt.Errorf("Unable to determine Queue ID") - } - - user := queueId[0] - vhost := queueId[1] - - queueSettings, err := rmqc.GetQueue(vhost, user) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] RabbitMQ: Queue retrieved for %s: %#v", d.Id(), queueSettings) - - d.Set("name", queueSettings.Name) - d.Set("vhost", queueSettings.Vhost) - - queue := make([]map[string]interface{}, 1) - e := make(map[string]interface{}) - e["durable"] = queueSettings.Durable - e["auto_delete"] = queueSettings.AutoDelete - e["arguments"] = queueSettings.Arguments - queue[0] = e - - d.Set("settings", queue) - - return nil -} - -func DeleteQueue(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - queueId := strings.Split(d.Id(), "@") - if len(queueId) < 2 { - return fmt.Errorf("Unable to determine Queue ID") - } - - user 
:= queueId[0] - vhost := queueId[1] - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete queue for %s", d.Id()) - - resp, err := rmqc.DeleteQueue(vhost, user) - log.Printf("[DEBUG] RabbitMQ: Queue delete response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - // the queue was automatically deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ queue: %s", resp.Status) - } - - return nil -} - -func declareQueue(rmqc *rabbithole.Client, vhost string, name string, settingsMap map[string]interface{}) error { - queueSettings := rabbithole.QueueSettings{} - - if v, ok := settingsMap["durable"].(bool); ok { - queueSettings.Durable = v - } - - if v, ok := settingsMap["auto_delete"].(bool); ok { - queueSettings.AutoDelete = v - } - - if v, ok := settingsMap["arguments"].(map[string]interface{}); ok { - queueSettings.Arguments = v - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to declare queue for %s@%s: %#v", name, vhost, queueSettings) - - resp, err := rmqc.DeclareQueue(vhost, name, queueSettings) - log.Printf("[DEBUG] RabbitMQ: Queue declare response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error declaring RabbitMQ queue: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_queue_test.go b/builtin/providers/rabbitmq/resource_queue_test.go deleted file mode 100644 index 4d63104c6..000000000 --- a/builtin/providers/rabbitmq/resource_queue_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccQueue(t *testing.T) { - var queueInfo rabbithole.QueueInfo - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccQueueCheckDestroy(&queueInfo), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccQueueConfig_basic, - Check: testAccQueueCheck( - "rabbitmq_queue.test", &queueInfo, - ), - }, - }, - }) -} - -func testAccQueueCheck(rn string, queueInfo *rabbithole.QueueInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("queue id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - queueParts := strings.Split(rs.Primary.ID, "@") - - queues, err := rmqc.ListQueuesIn(queueParts[1]) - if err != nil { - return fmt.Errorf("Error retrieving queue: %s", err) - } - - for _, queue := range queues { - if queue.Name == queueParts[0] && queue.Vhost == queueParts[1] { - queueInfo = &queue - return nil - } - } - - return fmt.Errorf("Unable to find queue %s", rn) - } -} - -func testAccQueueCheckDestroy(queueInfo *rabbithole.QueueInfo) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - - queues, err := rmqc.ListQueuesIn(queueInfo.Vhost) - if err != nil { - return fmt.Errorf("Error retrieving queue: %s", err) - } - - for _, queue := range queues { - if queue.Name == queueInfo.Name && queue.Vhost == queueInfo.Vhost { - return fmt.Errorf("Queue %s@%s still exist", queueInfo.Name, queueInfo.Vhost) - } - } - - return nil - } -} - -const testAccQueueConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -} - -resource "rabbitmq_permissions" "guest" { - user = "guest" - vhost = "${rabbitmq_vhost.test.name}" - permissions { - configure = ".*" - write = ".*" - read = ".*" - } -} - -resource "rabbitmq_queue" "test" { - name = "test" - vhost = "${rabbitmq_permissions.guest.vhost}" - settings { - durable = false - auto_delete = true - } -}` diff --git a/builtin/providers/rabbitmq/resource_user.go 
b/builtin/providers/rabbitmq/resource_user.go deleted file mode 100644 index e67e60e99..000000000 --- a/builtin/providers/rabbitmq/resource_user.go +++ /dev/null @@ -1,182 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - "strings" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUser() *schema.Resource { - return &schema.Resource{ - Create: CreateUser, - Update: UpdateUser, - Read: ReadUser, - Delete: DeleteUser, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - - "tags": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func CreateUser(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Get("name").(string) - - var tagList []string - for _, v := range d.Get("tags").([]interface{}) { - if v, ok := v.(string); ok { - tagList = append(tagList, v) - } - } - - userSettings := rabbithole.UserSettings{ - Password: d.Get("password").(string), - } - - if len(tagList) > 0 { - userSettings.Tags = strings.Join(tagList, ",") - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to create user %s", name) - - resp, err := rmqc.PutUser(name, userSettings) - log.Printf("[DEBUG] RabbitMQ: user creation response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error creating RabbitMQ user: %s", resp.Status) - } - - d.SetId(name) - - return ReadUser(d, meta) -} - -func ReadUser(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - user, err := rmqc.GetUser(d.Id()) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] 
RabbitMQ: User retrieved: %#v", user) - - d.Set("name", user.Name) - - if len(user.Tags) > 0 { - tags := strings.Split(user.Tags, ",") - d.Set("tags", tags) - } - - return nil -} - -func UpdateUser(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Id() - - if d.HasChange("password") { - _, newPassword := d.GetChange("password") - - userSettings := rabbithole.UserSettings{ - Password: newPassword.(string), - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to update password for %s", name) - - resp, err := rmqc.PutUser(name, userSettings) - log.Printf("[DEBUG] RabbitMQ: Password update response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error updating RabbitMQ user: %s", resp.Status) - } - - } - - if d.HasChange("tags") { - _, newTags := d.GetChange("tags") - - var tagList []string - for _, v := range newTags.([]interface{}) { - if v, ok := v.(string); ok { - tagList = append(tagList, v) - } - } - - userSettings := rabbithole.UserSettings{} - if len(tagList) > 0 { - userSettings.Tags = strings.Join(tagList, ",") - } - - log.Printf("[DEBUG] RabbitMQ: Attempting to update tags for %s", name) - - resp, err := rmqc.PutUser(name, userSettings) - log.Printf("[DEBUG] RabbitMQ: Tags update response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error updating RabbitMQ user: %s", resp.Status) - } - - } - - return ReadUser(d, meta) -} - -func DeleteUser(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - name := d.Id() - log.Printf("[DEBUG] RabbitMQ: Attempting to delete user %s", name) - - resp, err := rmqc.DeleteUser(name) - log.Printf("[DEBUG] RabbitMQ: User delete response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - // the user was automatically deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting 
RabbitMQ user: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_user_test.go b/builtin/providers/rabbitmq/resource_user_test.go deleted file mode 100644 index 35a3ccd12..000000000 --- a/builtin/providers/rabbitmq/resource_user_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "strings" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccUser_basic(t *testing.T) { - var user string - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccUserCheckDestroy(user), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_basic, - Check: testAccUserCheck( - "rabbitmq_user.test", &user, - ), - }, - resource.TestStep{ - Config: testAccUserConfig_update, - Check: testAccUserCheck( - "rabbitmq_user.test", &user, - ), - }, - }, - }) -} - -func TestAccUser_emptyTag(t *testing.T) { - var user string - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccUserCheckDestroy(user), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_emptyTag_1, - Check: resource.ComposeTestCheckFunc( - testAccUserCheck("rabbitmq_user.test", &user), - testAccUserCheckTagCount(&user, 0), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_emptyTag_2, - Check: resource.ComposeTestCheckFunc( - testAccUserCheck("rabbitmq_user.test", &user), - testAccUserCheckTagCount(&user, 1), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_emptyTag_1, - Check: resource.ComposeTestCheckFunc( - testAccUserCheck("rabbitmq_user.test", &user), - testAccUserCheckTagCount(&user, 0), - ), - }, - }, - }) -} - -func TestAccUser_noTags(t *testing.T) { - var user string - resource.Test(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccUserCheckDestroy(user), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccUserConfig_noTags_1, - Check: resource.ComposeTestCheckFunc( - testAccUserCheck("rabbitmq_user.test", &user), - testAccUserCheckTagCount(&user, 0), - ), - }, - resource.TestStep{ - Config: testAccUserConfig_noTags_2, - Check: resource.ComposeTestCheckFunc( - testAccUserCheck("rabbitmq_user.test", &user), - testAccUserCheckTagCount(&user, 1), - ), - }, - }, - }) -} - -func testAccUserCheck(rn string, name *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("user id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - users, err := rmqc.ListUsers() - if err != nil { - return fmt.Errorf("Error retrieving users: %s", err) - } - - for _, user := range users { - if user.Name == rs.Primary.ID { - *name = rs.Primary.ID - return nil - } - } - - return fmt.Errorf("Unable to find user %s", rn) - } -} - -func testAccUserCheckTagCount(name *string, tagCount int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - user, err := rmqc.GetUser(*name) - if err != nil { - return fmt.Errorf("Error retrieving user: %s", err) - } - - var tagList []string - for _, v := range strings.Split(user.Tags, ",") { - if v != "" { - tagList = append(tagList, v) - } - } - - if len(tagList) != tagCount { - return fmt.Errorf("Expected %d tags, user has %d", tagCount, len(tagList)) - } - - return nil - } -} - -func testAccUserCheckDestroy(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - users, err := rmqc.ListUsers() - if err != nil { - return fmt.Errorf("Error retrieving users: 
%s", err) - } - - for _, user := range users { - if user.Name == name { - return fmt.Errorf("user still exists: %s", name) - } - } - - return nil - } -} - -const testAccUserConfig_basic = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - tags = ["administrator", "management"] -}` - -const testAccUserConfig_update = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobarry" - tags = ["management"] -}` - -const testAccUserConfig_emptyTag_1 = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - tags = [""] -}` - -const testAccUserConfig_emptyTag_2 = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - tags = ["administrator"] -}` - -const testAccUserConfig_noTags_1 = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" -}` - -const testAccUserConfig_noTags_2 = ` -resource "rabbitmq_user" "test" { - name = "mctest" - password = "foobar" - tags = ["administrator"] -}` diff --git a/builtin/providers/rabbitmq/resource_vhost.go b/builtin/providers/rabbitmq/resource_vhost.go deleted file mode 100644 index d93c80728..000000000 --- a/builtin/providers/rabbitmq/resource_vhost.go +++ /dev/null @@ -1,85 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "log" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceVhost() *schema.Resource { - return &schema.Resource{ - Create: CreateVhost, - Read: ReadVhost, - Delete: DeleteVhost, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func CreateVhost(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - vhost := d.Get("name").(string) - - log.Printf("[DEBUG] RabbitMQ: Attempting to create vhost %s", vhost) - - resp, err := rmqc.PutVhost(vhost, 
rabbithole.VhostSettings{}) - log.Printf("[DEBUG] RabbitMQ: vhost creation response: %#v", resp) - if err != nil { - return err - } - - d.SetId(vhost) - - return ReadVhost(d, meta) -} - -func ReadVhost(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - vhost, err := rmqc.GetVhost(d.Id()) - if err != nil { - return checkDeleted(d, err) - } - - log.Printf("[DEBUG] RabbitMQ: Vhost retrieved: %#v", vhost) - - d.Set("name", vhost.Name) - - return nil -} - -func DeleteVhost(d *schema.ResourceData, meta interface{}) error { - rmqc := meta.(*rabbithole.Client) - - log.Printf("[DEBUG] RabbitMQ: Attempting to delete vhost %s", d.Id()) - - resp, err := rmqc.DeleteVhost(d.Id()) - log.Printf("[DEBUG] RabbitMQ: vhost deletion response: %#v", resp) - if err != nil { - return err - } - - if resp.StatusCode == 404 { - // the vhost was automatically deleted - return nil - } - - if resp.StatusCode >= 400 { - return fmt.Errorf("Error deleting RabbitMQ user: %s", resp.Status) - } - - return nil -} diff --git a/builtin/providers/rabbitmq/resource_vhost_test.go b/builtin/providers/rabbitmq/resource_vhost_test.go deleted file mode 100644 index b0183863c..000000000 --- a/builtin/providers/rabbitmq/resource_vhost_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "testing" - - "github.com/michaelklishin/rabbit-hole" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccVhost(t *testing.T) { - var vhost string - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccVhostCheckDestroy(vhost), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccVhostConfig_basic, - Check: testAccVhostCheck( - "rabbitmq_vhost.test", &vhost, - ), - }, - }, - }) -} - -func testAccVhostCheck(rn string, name *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("vhost id not set") - } - - rmqc := testAccProvider.Meta().(*rabbithole.Client) - vhosts, err := rmqc.ListVhosts() - if err != nil { - return fmt.Errorf("Error retrieving vhosts: %s", err) - } - - for _, vhost := range vhosts { - if vhost.Name == rs.Primary.ID { - *name = rs.Primary.ID - return nil - } - } - - return fmt.Errorf("Unable to find vhost %s", rn) - } -} - -func testAccVhostCheckDestroy(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rmqc := testAccProvider.Meta().(*rabbithole.Client) - vhosts, err := rmqc.ListVhosts() - if err != nil { - return fmt.Errorf("Error retrieving vhosts: %s", err) - } - - for _, vhost := range vhosts { - if vhost.Name == name { - return fmt.Errorf("vhost still exists: %v", vhost) - } - } - - return nil - } -} - -const testAccVhostConfig_basic = ` -resource "rabbitmq_vhost" "test" { - name = "test" -}` diff --git a/builtin/providers/rabbitmq/util.go b/builtin/providers/rabbitmq/util.go deleted file mode 100644 index 604d47dce..000000000 --- a/builtin/providers/rabbitmq/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package rabbitmq - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func checkDeleted(d *schema.ResourceData, err error) error { - if err.Error() == "not found" { - d.SetId("") - return nil - } - - return err -} diff --git a/builtin/providers/rancher/config.go b/builtin/providers/rancher/config.go deleted file mode 100644 index ff2ede550..000000000 --- a/builtin/providers/rancher/config.go +++ /dev/null @@ -1,77 +0,0 @@ -package rancher - -import ( - "log" - - "github.com/rancher/go-rancher/catalog" - rancherClient "github.com/rancher/go-rancher/v2" -) - -// Config is the configuration parameters for a Rancher API -type Config struct { - APIURL string - AccessKey string - SecretKey string -} - -// GlobalClient creates a Rancher client 
scoped to the global API -func (c *Config) GlobalClient() (*rancherClient.RancherClient, error) { - client, err := rancherClient.NewRancherClient(&rancherClient.ClientOpts{ - Url: c.APIURL, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - }) - if err != nil { - return nil, err - } - - log.Printf("[INFO] Rancher Client configured for url: %s", c.APIURL) - - return client, nil -} - -// EnvironmentClient creates a Rancher client scoped to an Environment's API -func (c *Config) EnvironmentClient(env string) (*rancherClient.RancherClient, error) { - - globalClient, err := c.GlobalClient() - if err != nil { - return nil, err - } - - project, err := globalClient.Project.ById(env) - if err != nil { - return nil, err - } - projectURL := project.Links["self"] - - log.Printf("[INFO] Rancher Client configured for url: %s/schemas", projectURL) - - return rancherClient.NewRancherClient(&rancherClient.ClientOpts{ - Url: projectURL + "/schemas", - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - }) -} - -// RegistryClient creates a Rancher client scoped to a Registry's API -func (c *Config) RegistryClient(id string) (*rancherClient.RancherClient, error) { - client, err := c.GlobalClient() - if err != nil { - return nil, err - } - reg, err := client.Registry.ById(id) - if err != nil { - return nil, err - } - - return c.EnvironmentClient(reg.AccountId) -} - -// CatalogClient creates a Rancher client scoped to a Catalog's API -func (c *Config) CatalogClient() (*catalog.RancherClient, error) { - return catalog.NewRancherClient(&catalog.ClientOpts{ - Url: c.APIURL, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - }) -} diff --git a/builtin/providers/rancher/import_rancher_environment_test.go b/builtin/providers/rancher/import_rancher_environment_test.go deleted file mode 100644 index 0b2a52173..000000000 --- a/builtin/providers/rancher/import_rancher_environment_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package rancher - -import ( - "testing" - - 
"github.com/hashicorp/terraform/helper/resource" -) - -func TestAccRancherEnvironment_importBasic(t *testing.T) { - resourceName := "rancher_environment.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherEnvironmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherEnvironmentConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rancher/import_rancher_registration_token_test.go b/builtin/providers/rancher/import_rancher_registration_token_test.go deleted file mode 100644 index 24a0b9b8a..000000000 --- a/builtin/providers/rancher/import_rancher_registration_token_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package rancher - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccRancherRegistrationToken_importBasic(t *testing.T) { - resourceName := "rancher_registration_token.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistrationTokenDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistrationTokenConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rancher/import_rancher_registry_credential_test.go b/builtin/providers/rancher/import_rancher_registry_credential_test.go deleted file mode 100644 index 9e2928803..000000000 --- a/builtin/providers/rancher/import_rancher_registry_credential_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package rancher - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccRancherRegistryCredential_importBasic(t *testing.T) { - resourceName := "rancher_registry_credential.foo" - 
- resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryCredentialDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryCredentialConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "secret_value"}, - }, - }, - }) -} diff --git a/builtin/providers/rancher/import_rancher_registry_test.go b/builtin/providers/rancher/import_rancher_registry_test.go deleted file mode 100644 index f1c9db86c..000000000 --- a/builtin/providers/rancher/import_rancher_registry_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package rancher - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccRancherRegistry_importBasic(t *testing.T) { - resourceName := "rancher_registry.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rancher/import_rancher_stack_test.go b/builtin/providers/rancher/import_rancher_stack_test.go deleted file mode 100644 index 029b8c4dd..000000000 --- a/builtin/providers/rancher/import_rancher_stack_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package rancher - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccRancherStack_importBasic(t *testing.T) { - resourceName := "rancher_stack.foo" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherStackDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: 
testAccRancherStackConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/rancher/provider.go b/builtin/providers/rancher/provider.go deleted file mode 100644 index 29507e043..000000000 --- a/builtin/providers/rancher/provider.go +++ /dev/null @@ -1,145 +0,0 @@ -package rancher - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "os" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// CLIConfig used to store data from file. -type CLIConfig struct { - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - URL string `json:"url"` - Environment string `json:"environment"` - Path string `json:"path,omitempty"` -} - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "api_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RANCHER_URL", ""), - Description: descriptions["api_url"], - }, - "access_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RANCHER_ACCESS_KEY", ""), - Description: descriptions["access_key"], - }, - "secret_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RANCHER_SECRET_KEY", ""), - Description: descriptions["secret_key"], - }, - "config": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("RANCHER_CLIENT_CONFIG", ""), - Description: descriptions["config"], - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "rancher_certificate": resourceRancherCertificate(), - "rancher_environment": resourceRancherEnvironment(), - "rancher_host": resourceRancherHost(), - "rancher_registration_token": resourceRancherRegistrationToken(), - 
"rancher_registry": resourceRancherRegistry(), - "rancher_registry_credential": resourceRancherRegistryCredential(), - "rancher_stack": resourceRancherStack(), - }, - - ConfigureFunc: providerConfigure, - } -} - -var descriptions map[string]string - -func init() { - descriptions = map[string]string{ - "access_key": "API Key used to authenticate with the rancher server", - - "secret_key": "API secret used to authenticate with the rancher server", - - "api_url": "The URL to the rancher API, must include version uri (ie. v1 or v2-beta)", - - "config": "Path to the Rancher client cli.json config file", - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - apiURL := d.Get("api_url").(string) - accessKey := d.Get("access_key").(string) - secretKey := d.Get("secret_key").(string) - - if configFile := d.Get("config").(string); configFile != "" { - config, err := loadConfig(configFile) - if err != nil { - return config, err - } - - if apiURL == "" && config.URL != "" { - u, err := url.Parse(config.URL) - if err != nil { - return config, err - } - apiURL = u.Scheme + "://" + u.Host - } - - if accessKey == "" { - accessKey = config.AccessKey - } - - if secretKey == "" { - secretKey = config.SecretKey - } - } - - if apiURL == "" { - return &Config{}, fmt.Errorf("No api_url provided") - } - - config := &Config{ - APIURL: apiURL, - AccessKey: accessKey, - SecretKey: secretKey, - } - - client, err := config.GlobalClient() - if err != nil { - return &Config{}, err - } - // Let Rancher Client normalizes the URL making it reliable as a base. 
- config.APIURL = client.GetOpts().Url - - return config, nil -} - -func loadConfig(path string) (CLIConfig, error) { - config := CLIConfig{ - Path: path, - } - - content, err := ioutil.ReadFile(path) - if os.IsNotExist(err) { - return config, nil - } else if err != nil { - return config, err - } - - err = json.Unmarshal(content, &config) - config.Path = path - - return config, err -} diff --git a/builtin/providers/rancher/provider_test.go b/builtin/providers/rancher/provider_test.go deleted file mode 100644 index b79e340c2..000000000 --- a/builtin/providers/rancher/provider_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package rancher - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "rancher": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("RANCHER_URL"); v == "" { - t.Fatal("RANCHER_URL must be set for acceptance tests") - } -} diff --git a/builtin/providers/rancher/resource_rancher_certificate.go b/builtin/providers/rancher/resource_rancher_certificate.go deleted file mode 100644 index c91deee19..000000000 --- a/builtin/providers/rancher/resource_rancher_certificate.go +++ /dev/null @@ -1,277 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - rancher "github.com/rancher/go-rancher/v2" -) - -func resourceRancherCertificate() *schema.Resource { - return &schema.Resource{ - Create: 
resourceRancherCertificateCreate, - Read: resourceRancherCertificateRead, - Update: resourceRancherCertificateUpdate, - Delete: resourceRancherCertificateDelete, - Importer: &schema.ResourceImporter{ - State: resourceRancherCertificateImport, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cert": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cert_chain": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "cn": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "algorithm": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "cert_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "expires_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "issued_at": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "issuer": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "key_size": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "serial_number": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "subject_alternative_names": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - "version": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceRancherCertificateCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO][rancher] Creating Certificate: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - 
- name := d.Get("name").(string) - description := d.Get("description").(string) - cert := d.Get("cert").(string) - certChain := d.Get("cert_chain").(string) - key := d.Get("key").(string) - - certificate := rancher.Certificate{ - Name: name, - Description: description, - Cert: cert, - CertChain: certChain, - Key: key, - } - newCertificate, err := client.Certificate.Create(&certificate) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"active"}, - Refresh: CertificateStateRefreshFunc(client, newCertificate.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry credential (%s) to be created: %s", newCertificate.Id, waitErr) - } - - d.SetId(newCertificate.Id) - log.Printf("[INFO] Certificate ID: %s", d.Id()) - - return resourceRancherCertificateUpdate(d, meta) -} - -func resourceRancherCertificateRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing Certificate: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - certificate, err := client.Certificate.ById(d.Id()) - if err != nil { - return err - } - - log.Printf("[INFO] Certificate Name: %s", certificate.Name) - - d.Set("description", certificate.Description) - d.Set("name", certificate.Name) - - // Computed values - d.Set("cn", certificate.CN) - d.Set("algorithm", certificate.Algorithm) - d.Set("cert_fingerprint", certificate.CertFingerprint) - d.Set("expires_at", certificate.ExpiresAt) - d.Set("issued_at", certificate.IssuedAt) - d.Set("issuer", certificate.Issuer) - d.Set("key_size", certificate.KeySize) - d.Set("serial_number", certificate.SerialNumber) - d.Set("subject_alternative_names", certificate.SubjectAlternativeNames) - d.Set("version", 
certificate.Version) - - return nil -} - -func resourceRancherCertificateUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Updating Certificate: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - certificate, err := client.Certificate.ById(d.Id()) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - cert := d.Get("cert").(string) - certChain := d.Get("cert_chain").(string) - key := d.Get("key").(string) - - data := map[string]interface{}{ - "name": &name, - "description": &description, - "cert": &cert, - "cert_chain": &certChain, - "key": &key, - } - - var newCertificate rancher.Certificate - if err := client.Update("certificate", &certificate.Resource, data, &newCertificate); err != nil { - return err - } - - return resourceRancherCertificateRead(d, meta) -} - -func resourceRancherCertificateDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting Certificate: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - certificate, err := client.Certificate.ById(id) - if err != nil { - return err - } - - if err := client.Certificate.Delete(certificate); err != nil { - return fmt.Errorf("Error deleting Certificate: %s", err) - } - - log.Printf("[DEBUG] Waiting for certificate (%s) to be removed", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: CertificateStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for certificate (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func 
resourceRancherCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - envID, resourceID := splitID(d.Id()) - d.SetId(resourceID) - if envID != "" { - d.Set("environment_id", envID) - } else { - client, err := meta.(*Config).GlobalClient() - if err != nil { - return []*schema.ResourceData{}, err - } - stack, err := client.Stack.ById(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - d.Set("environment_id", stack.AccountId) - } - return []*schema.ResourceData{d}, nil -} - -// CertificateStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Certificate. -func CertificateStateRefreshFunc(client *rancher.RancherClient, certificateID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - cert, err := client.Certificate.ById(certificateID) - - if err != nil { - return nil, "", err - } - - return cert, cert.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_environment.go b/builtin/providers/rancher/resource_rancher_environment.go deleted file mode 100644 index 35878de4c..000000000 --- a/builtin/providers/rancher/resource_rancher_environment.go +++ /dev/null @@ -1,342 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - rancherClient "github.com/rancher/go-rancher/v2" -) - -var ( - defaultProjectTemplates = map[string]string{ - "mesos": "", - "kubernetes": "", - "windows": "", - "swarm": "", - "cattle": "", - } -) - -func resourceRancherEnvironment() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherEnvironmentCreate, - Read: resourceRancherEnvironmentRead, - Update: resourceRancherEnvironmentUpdate, - Delete: resourceRancherEnvironmentDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: 
map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "orchestration": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"cattle", "kubernetes", "mesos", "swarm", "windows"}, true), - Computed: true, - ConflictsWith: []string{"project_template_id"}, - }, - "project_template_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"orchestration"}, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "member": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "external_id_type": { - Type: schema.TypeString, - Required: true, - }, - "external_id": { - Type: schema.TypeString, - Required: true, - }, - "role": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceRancherEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating Environment: %s", d.Id()) - populateProjectTemplateIDs(meta.(*Config)) - - client, err := meta.(*Config).GlobalClient() - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - orchestration := d.Get("orchestration").(string) - projectTemplateID := d.Get("project_template_id").(string) - - projectTemplateID, err = getProjectTemplateID(orchestration, projectTemplateID) - if err != nil { - return err - } - - data := map[string]interface{}{ - "name": &name, - "description": &description, - "projectTemplateId": &projectTemplateID, - } - - var newEnv rancherClient.Project - if err := client.Create("project", data, &newEnv); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - 
Target: []string{"active"}, - Refresh: EnvironmentStateRefreshFunc(client, newEnv.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for environment (%s) to be created: %s", newEnv.Id, waitErr) - } - - d.SetId(newEnv.Id) - log.Printf("[INFO] Environment ID: %s", d.Id()) - - // Add members - if v, ok := d.GetOk("member"); ok { - envClient, err := meta.(*Config).EnvironmentClient(d.Id()) - if err != nil { - return err - } - members := makeProjectMembers(v.([]interface{})) - _, err = envClient.Project.ActionSetmembers(&newEnv, &rancherClient.SetProjectMembersInput{ - Members: members, - }) - if err != nil { - return err - } - } - - return resourceRancherEnvironmentRead(d, meta) -} - -func resourceRancherEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing Environment: %s", d.Id()) - client, err := meta.(*Config).GlobalClient() - if err != nil { - return err - } - - env, err := client.Project.ById(d.Id()) - if err != nil { - return err - } - - if env == nil { - log.Printf("[INFO] Environment %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(env.State) { - log.Printf("[INFO] Environment %s was removed on %v", d.Id(), env.Removed) - d.SetId("") - return nil - } - - log.Printf("[INFO] Environment Name: %s", env.Name) - - d.Set("description", env.Description) - d.Set("name", env.Name) - d.Set("orchestration", getActiveOrchestration(env)) - d.Set("project_template_id", env.ProjectTemplateId) - - envClient, err := meta.(*Config).EnvironmentClient(d.Id()) - if err != nil { - return err - } - - members, _ := envClient.ProjectMember.List(NewListOpts()) - - d.Set("member", normalizeMembers(members.Data)) - return nil -} - -func resourceRancherEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - populateProjectTemplateIDs(meta.(*Config)) - - client, err := 
meta.(*Config).GlobalClient() - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - orchestration := d.Get("orchestration").(string) - projectTemplateID := d.Get("project_template_id").(string) - - projectTemplateID, err = getProjectTemplateID(orchestration, projectTemplateID) - if err != nil { - return err - } - - data := map[string]interface{}{ - "name": &name, - "description": &description, - "project_template_id": &projectTemplateID, - } - - var newEnv rancherClient.Project - env, err := client.Project.ById(d.Id()) - if err != nil { - return err - } - - if err := client.Update("project", &env.Resource, data, &newEnv); err != nil { - return err - } - - // Update members - envClient, err := meta.(*Config).EnvironmentClient(d.Id()) - if err != nil { - return err - } - members := d.Get("member").(*schema.Set).List() - _, err = envClient.Project.ActionSetmembers(&newEnv, &rancherClient.SetProjectMembersInput{ - Members: makeProjectMembers(members), - }) - if err != nil { - return err - } - - return resourceRancherEnvironmentRead(d, meta) -} - -func resourceRancherEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting Environment: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).GlobalClient() - if err != nil { - return err - } - - env, err := client.Project.ById(id) - if err != nil { - return err - } - - if err := client.Project.Delete(env); err != nil { - return fmt.Errorf("Error deleting Environment: %s", err) - } - - log.Printf("[DEBUG] Waiting for environment (%s) to be removed", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: EnvironmentStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error 
waiting for environment (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func getProjectTemplateID(orchestration, templateID string) (string, error) { - id := templateID - if templateID == "" && orchestration == "" { - return "", fmt.Errorf("Need either 'orchestration' or 'project_template_id'") - } - - if templateID == "" && orchestration != "" { - ok := false - id, ok = defaultProjectTemplates[orchestration] - if !ok { - return "", fmt.Errorf("Invalid orchestration: %s", orchestration) - } - } - - return id, nil -} - -func normalizeMembers(in []rancherClient.ProjectMember) (out []interface{}) { - for _, m := range in { - mm := map[string]string{ - "external_id_type": m.ExternalIdType, - "external_id": m.ExternalId, - "role": m.Role, - } - out = append(out, mm) - } - return -} - -func makeProjectMembers(in []interface{}) (out []rancherClient.ProjectMember) { - for _, m := range in { - mMap := m.(map[string]interface{}) - mm := rancherClient.ProjectMember{ - ExternalIdType: mMap["external_id_type"].(string), - ExternalId: mMap["external_id"].(string), - Role: mMap["role"].(string), - } - out = append(out, mm) - } - return -} - -// EnvironmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Environment. -func EnvironmentStateRefreshFunc(client *rancherClient.RancherClient, environmentID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - env, err := client.Project.ById(environmentID) - - if err != nil { - return nil, "", err - } - - // Env not returned, or State not set... 
- if env == nil || env.State == "" { - // This makes it so user level API keys can be used instead of just admin - env = &rancherClient.Project{ - State: "removed", - } - } - - return env, env.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_environment_test.go b/builtin/providers/rancher/resource_rancher_environment_test.go deleted file mode 100644 index 84d44190f..000000000 --- a/builtin/providers/rancher/resource_rancher_environment_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package rancher - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func TestAccRancherEnvironment_basic(t *testing.T) { - var environment rancherClient.Project - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherEnvironmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherEnvironmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherEnvironmentExists("rancher_environment.foo", &environment), - resource.TestCheckResourceAttr("rancher_environment.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_environment.foo", "description", "Terraform acc test group"), - resource.TestCheckResourceAttr("rancher_environment.foo", "orchestration", "cattle"), - ), - }, - resource.TestStep{ - Config: testAccRancherEnvironmentUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherEnvironmentExists("rancher_environment.foo", &environment), - resource.TestCheckResourceAttr("rancher_environment.foo", "name", "foo2"), - resource.TestCheckResourceAttr("rancher_environment.foo", "description", "Terraform acc test group - updated"), - resource.TestCheckResourceAttr("rancher_environment.foo", "orchestration", "swarm"), - ), - }, - }, - }) -} - -func 
TestAccRancherEnvironment_disappears(t *testing.T) { - var environment rancherClient.Project - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherEnvironmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherEnvironmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherEnvironmentExists("rancher_environment.foo", &environment), - testAccRancherEnvironmentDisappears(&environment), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccRancherEnvironment_members(t *testing.T) { - var environment rancherClient.Project - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherEnvironmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherEnvironmentMembersConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherEnvironmentExists("rancher_environment.foo", &environment), - resource.TestCheckResourceAttr("rancher_environment.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_environment.foo", "description", "Terraform acc test group"), - resource.TestCheckResourceAttr("rancher_environment.foo", "orchestration", "cattle"), - resource.TestCheckResourceAttr("rancher_environment.foo", "member.#", "2"), - ), - }, - resource.TestStep{ - Config: testAccRancherEnvironmentMembersUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherEnvironmentExists("rancher_environment.foo", &environment), - resource.TestCheckResourceAttr("rancher_environment.foo", "name", "foo2"), - resource.TestCheckResourceAttr("rancher_environment.foo", "description", "Terraform acc test group - updated"), - resource.TestCheckResourceAttr("rancher_environment.foo", "orchestration", "swarm"), - resource.TestCheckResourceAttr("rancher_environment.foo", "member.#", "1"), - ), - }, - }, - }) 
-} - -func testAccRancherEnvironmentDisappears(env *rancherClient.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - if err := client.Project.Delete(env); err != nil { - return fmt.Errorf("Error deleting Environment: %s", err) - } - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: EnvironmentStateRefreshFunc(client, env.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for environment (%s) to be removed: %s", env.Id, waitErr) - } - return nil - } -} - -func testAccCheckRancherEnvironmentExists(n string, env *rancherClient.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - - foundEnv, err := client.Project.ById(rs.Primary.ID) - if err != nil { - return err - } - - if foundEnv.Resource.Id != rs.Primary.ID { - return fmt.Errorf("Environment not found") - } - - *env = *foundEnv - - return nil - } -} - -func testAccCheckRancherEnvironmentDestroy(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - - for _, rs := range s.RootModule().Resources { - if rs.Type != "rancher_environment" { - continue - } - env, err := client.Project.ById(rs.Primary.ID) - - if err == nil { - if env != nil && - env.Resource.Id == rs.Primary.ID && - env.State != "removed" { - return fmt.Errorf("Environment still exists") - } - } - - return nil - } - return nil -} - 
-const testAccRancherEnvironmentConfig = ` -resource "rancher_environment" "foo" { - name = "foo" - description = "Terraform acc test group" - orchestration = "cattle" -} -` - -const testAccRancherEnvironmentUpdateConfig = ` -resource "rancher_environment" "foo" { - name = "foo2" - description = "Terraform acc test group - updated" - orchestration = "swarm" -} -` -const testAccRancherEnvironmentMembersConfig = ` -resource "rancher_environment" "foo" { - name = "foo" - description = "Terraform acc test group" - orchestration = "cattle" - - member { - external_id = "1234" - external_id_type = "github_user" - role = "owner" - } - - member { - external_id = "8765" - external_id_type = "github_team" - role = "member" - } -} -` - -const testAccRancherEnvironmentMembersUpdateConfig = ` -resource "rancher_environment" "foo" { - name = "foo" - description = "Terraform acc test group" - orchestration = "cattle" - - member { - external_id = "1235" - external_id_type = "github_user" - role = "owner" - } -` - -const testAccRancherInvalidEnvironmentConfig = ` -resource "rancher_environment_invalid_config" "bar" { - name = "bar" - description = "Terraform acc test group - failure" - orchestration = "cattle" - project_template_id = "1pt1" -} -` diff --git a/builtin/providers/rancher/resource_rancher_host.go b/builtin/providers/rancher/resource_rancher_host.go deleted file mode 100644 index b2efe1b4f..000000000 --- a/builtin/providers/rancher/resource_rancher_host.go +++ /dev/null @@ -1,212 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - rancher "github.com/rancher/go-rancher/v2" -) - -// ro_labels are used internally by Rancher -// They are not documented and should not be set in Terraform -var roLabels = []string{ - "io.rancher.host.agent_image", - "io.rancher.host.docker_version", - "io.rancher.host.kvm", - "io.rancher.host.linux_kernel_version", -} - -func 
resourceRancherHost() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherHostCreate, - Read: resourceRancherHostRead, - Update: resourceRancherHostUpdate, - Delete: resourceRancherHostDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "hostname": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - }, - }, - } -} - -func resourceRancherHostCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO][rancher] Creating Host: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - hosts, _ := client.Host.List(NewListOpts()) - hostname := d.Get("hostname").(string) - var host rancher.Host - - for _, h := range hosts.Data { - if h.Hostname == hostname { - host = h - break - } - } - - if host.Hostname == "" { - return fmt.Errorf("Failed to find host %s", hostname) - } - - d.SetId(host.Id) - - return resourceRancherHostUpdate(d, meta) -} - -func resourceRancherHostRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing Host: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - host, err := client.Host.ById(d.Id()) - if err != nil { - return err - } - - if host == nil { - log.Printf("[INFO] Host %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(host.State) { - log.Printf("[INFO] Host %s was removed on %v", d.Id(), host.Removed) - d.SetId("") - return nil - } - - 
log.Printf("[INFO] Host Name: %s", host.Name) - - d.Set("description", host.Description) - d.Set("name", host.Name) - d.Set("hostname", host.Hostname) - - labels := host.Labels - // Remove read-only labels - for _, lbl := range roLabels { - delete(labels, lbl) - } - d.Set("labels", host.Labels) - - return nil -} - -func resourceRancherHostUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Updating Host: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - - // Process labels: merge ro_labels into new labels - labels := d.Get("labels").(map[string]interface{}) - host, err := client.Host.ById(d.Id()) - if err != nil { - return err - } - for _, lbl := range roLabels { - labels[lbl] = host.Labels[lbl] - } - - data := map[string]interface{}{ - "name": &name, - "description": &description, - "labels": &labels, - } - - var newHost rancher.Host - if err := client.Update("host", &host.Resource, data, &newHost); err != nil { - return err - } - - return resourceRancherHostRead(d, meta) -} - -func resourceRancherHostDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting Host: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - host, err := client.Host.ById(id) - if err != nil { - return err - } - - if err := client.Host.Delete(host); err != nil { - return fmt.Errorf("Error deleting Host: %s", err) - } - - log.Printf("[DEBUG] Waiting for host (%s) to be removed", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: HostStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := 
stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for host (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -// HostStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Host. -func HostStateRefreshFunc(client *rancher.RancherClient, hostID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - host, err := client.Host.ById(hostID) - - if err != nil { - return nil, "", err - } - - return host, host.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_registration_token.go b/builtin/providers/rancher/resource_rancher_registration_token.go deleted file mode 100644 index ef04784b5..000000000 --- a/builtin/providers/rancher/resource_rancher_registration_token.go +++ /dev/null @@ -1,253 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func resourceRancherRegistrationToken() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherRegistrationTokenCreate, - Read: resourceRancherRegistrationTokenRead, - Delete: resourceRancherRegistrationTokenDelete, - Update: resourceRancherRegistrationTokenUpdate, - Importer: &schema.ResourceImporter{ - State: resourceRancherRegistrationTokenImport, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "token": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "registration_url": &schema.Schema{ - Type: schema.TypeString, - Computed: 
true, - }, - "command": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "image": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "host_labels": { - Type: schema.TypeMap, - Optional: true, - }, - }, - } -} - -func resourceRancherRegistrationTokenCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating RegistrationToken: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - - data := map[string]interface{}{ - "name": &name, - "description": &description, - } - - var newRegT rancherClient.RegistrationToken - if err := client.Create("registrationToken", data, &newRegT); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"active"}, - Refresh: RegistrationTokenStateRefreshFunc(client, newRegT.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registration token (%s) to be created: %s", newRegT.Id, waitErr) - } - - d.SetId(newRegT.Id) - log.Printf("[INFO] RegistrationToken ID: %s", d.Id()) - - return resourceRancherRegistrationTokenRead(d, meta) -} - -func resourceRancherRegistrationTokenRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing RegistrationToken: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - regT, err := client.RegistrationToken.ById(d.Id()) - if err != nil { - return err - } - - if regT == nil { - log.Printf("[INFO] RegistrationToken %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(regT.State) { - log.Printf("[INFO] Registration Token %s was removed 
on %v", d.Id(), regT.Removed) - d.SetId("") - return nil - } - - regCommand := addHostLabels(regT.Command, d.Get("host_labels").(map[string]interface{})) - log.Printf("[INFO] RegistrationToken Name: %s", regT.Name) - - d.Set("description", regT.Description) - d.Set("name", regT.Name) - d.Set("token", regT.Token) - d.Set("registration_url", regT.RegistrationUrl) - d.Set("environment_id", regT.AccountId) - d.Set("command", regCommand) - d.Set("image", regT.Image) - - return nil -} - -func resourceRancherRegistrationTokenDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting RegistrationToken: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - regT, err := client.RegistrationToken.ById(id) - if err != nil { - return err - } - - // Step 1: Deactivate - if _, e := client.RegistrationToken.ActionDeactivate(regT); e != nil { - return fmt.Errorf("Error deactivating RegistrationToken: %s", err) - } - - log.Printf("[DEBUG] Waiting for registration token (%s) to be deactivated", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistrationTokenStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registration token (%s) to be deactivated: %s", id, waitErr) - } - - // Update resource to reflect its state - regT, err = client.RegistrationToken.ById(id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registration token (%s): %s", id, err) - } - - // Step 2: Remove - if _, err := client.RegistrationToken.ActionRemove(regT); err != nil { - return fmt.Errorf("Error removing RegistrationToken: %s", err) - } - - log.Printf("[DEBUG] Waiting for registration 
token (%s) to be removed", id) - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistrationTokenStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registration token (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func resourceRancherRegistrationTokenUpdate(d *schema.ResourceData, meta interface{}) error { - //if d.HasChange("host_labels") { - //newCommand := addHostLabels( - //d.Get("command").(string), - //d.Get("host_labels").(map[string]interface{})) - //d.Set("command", newCommand) - //} - return resourceRancherRegistrationTokenRead(d, meta) -} - -func resourceRancherRegistrationTokenImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - envID, resourceID := splitID(d.Id()) - d.SetId(resourceID) - if envID != "" { - d.Set("environment_id", envID) - } else { - client, err := meta.(*Config).GlobalClient() - if err != nil { - return []*schema.ResourceData{}, err - } - token, err := client.RegistrationToken.ById(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - d.Set("environment_id", token.AccountId) - } - return []*schema.ResourceData{d}, nil -} - -// RegistrationTokenStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher RegistrationToken. 
-func RegistrationTokenStateRefreshFunc(client *rancherClient.RancherClient, regTID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - regT, err := client.RegistrationToken.ById(regTID) - - if err != nil { - return nil, "", err - } - - return regT, regT.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_registration_token_test.go b/builtin/providers/rancher/resource_rancher_registration_token_test.go deleted file mode 100644 index 991ce84ee..000000000 --- a/builtin/providers/rancher/resource_rancher_registration_token_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package rancher - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func TestAccRancherRegistrationToken_basic(t *testing.T) { - var registrationToken rancherClient.RegistrationToken - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistrationTokenDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistrationTokenConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistrationTokenExists("rancher_registration_token.foo", ®istrationToken), - resource.TestCheckResourceAttr( - "rancher_registration_token.foo", "name", "foo"), - resource.TestCheckResourceAttr( - "rancher_registration_token.foo", "description", "Terraform acc test group"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "command"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "registration_url"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "token"), - ), - }, - resource.TestStep{ - Config: testAccRancherRegistrationTokenUpdateConfig, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckRancherRegistrationTokenExists("rancher_registration_token.foo", ®istrationToken), - resource.TestCheckResourceAttr( - "rancher_registration_token.foo", "name", "foo-u"), - resource.TestCheckResourceAttr( - "rancher_registration_token.foo", "description", "Terraform acc test group-u"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "command"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "registration_url"), - resource.TestCheckResourceAttrSet("rancher_registration_token.foo", "token"), - ), - }, - }, - }) -} - -func TestAccRancherRegistrationToken_disappears(t *testing.T) { - var registrationToken rancherClient.RegistrationToken - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistrationTokenDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistrationTokenConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistrationTokenExists("rancher_registration_token.foo", ®istrationToken), - testAccRancherRegistrationTokenDisappears(®istrationToken), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccRancherRegistrationTokenDisappears(token *rancherClient.RegistrationToken) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(token.AccountId) - if err != nil { - return err - } - - if _, e := client.RegistrationToken.ActionDeactivate(token); e != nil { - return fmt.Errorf("Error deactivating RegistrationToken: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistrationTokenStateRefreshFunc(client, token.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != 
nil { - return fmt.Errorf( - "Error waiting for registration token (%s) to be deactivated: %s", token.Id, waitErr) - } - - // Update resource to reflect its state - token, err = client.RegistrationToken.ById(token.Id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registration token (%s): %s", token.Id, err) - } - - // Step 2: Remove - if _, err := client.RegistrationToken.ActionRemove(token); err != nil { - return fmt.Errorf("Error removing RegistrationToken: %s", err) - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistrationTokenStateRefreshFunc(client, token.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registration token (%s) to be removed: %s", token.Id, waitErr) - } - - return nil - } -} - -func testAccCheckRancherRegistrationTokenExists(n string, regT *rancherClient.RegistrationToken) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(rs.Primary.Attributes["environment_id"]) - if err != nil { - return err - } - - foundRegT, err := client.RegistrationToken.ById(rs.Primary.ID) - if err != nil { - return err - } - - if foundRegT.Resource.Id != rs.Primary.ID { - return fmt.Errorf("RegistrationToken not found") - } - - *regT = *foundRegT - - return nil - } -} - -func testAccCheckRancherRegistrationTokenDestroy(s *terraform.State) error { - - for _, rs := range s.RootModule().Resources { - if rs.Type != "rancher_registration_token" { - continue - } - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { 
- return err - } - - regT, err := client.RegistrationToken.ById(rs.Primary.ID) - - if err == nil { - if regT != nil && - regT.Resource.Id == rs.Primary.ID && - regT.State != "removed" { - return fmt.Errorf("RegistrationToken still exists") - } - } - - return nil - } - return nil -} - -const testAccRancherRegistrationTokenConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_registration_token" "foo" { - name = "foo" - description = "Terraform acc test group" - environment_id = "${rancher_environment.foo.id}" -} -` - -const testAccRancherRegistrationTokenUpdateConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_registration_token" "foo" { - name = "foo-u" - description = "Terraform acc test group-u" - environment_id = "${rancher_environment.foo.id}" -} -` diff --git a/builtin/providers/rancher/resource_rancher_registry.go b/builtin/providers/rancher/resource_rancher_registry.go deleted file mode 100644 index 7c1b933c0..000000000 --- a/builtin/providers/rancher/resource_rancher_registry.go +++ /dev/null @@ -1,244 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func resourceRancherRegistry() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherRegistryCreate, - Read: resourceRancherRegistryRead, - Update: resourceRancherRegistryUpdate, - Delete: resourceRancherRegistryDelete, - Importer: &schema.ResourceImporter{ - State: resourceRancherRegistryImport, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "server_address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceRancherRegistryCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating Registry: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - serverAddress := d.Get("server_address").(string) - - registry := rancherClient.Registry{ - Name: name, - Description: description, - ServerAddress: serverAddress, - } - newRegistry, err := client.Registry.Create(®istry) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"active"}, - Refresh: RegistryStateRefreshFunc(client, newRegistry.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be created: %s", newRegistry.Id, waitErr) - } - - d.SetId(newRegistry.Id) - log.Printf("[INFO] Registry ID: %s", d.Id()) - - return resourceRancherRegistryRead(d, meta) -} - -func resourceRancherRegistryRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing Registry: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - registry, err := client.Registry.ById(d.Id()) - if err != nil { - return err - } - - if registry == nil { - log.Printf("[INFO] Registry %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(registry.State) { - log.Printf("[INFO] Registry %s was removed on %v", d.Id(), registry.Removed) - d.SetId("") - return nil - } - - log.Printf("[INFO] Registry Name: %s", registry.Name) - - d.Set("description", 
registry.Description) - d.Set("name", registry.Name) - d.Set("server_address", registry.ServerAddress) - d.Set("environment_id", registry.AccountId) - - return nil -} - -func resourceRancherRegistryUpdate(d *schema.ResourceData, meta interface{}) error { - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - registry, err := client.Registry.ById(d.Id()) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - - registry.Name = name - registry.Description = description - client.Registry.Update(registry, ®istry) - - return resourceRancherRegistryRead(d, meta) -} - -func resourceRancherRegistryDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting Registry: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - reg, err := client.Registry.ById(id) - if err != nil { - return err - } - - // Step 1: Deactivate - if _, e := client.Registry.ActionDeactivate(reg); e != nil { - return fmt.Errorf("Error deactivating Registry: %s", err) - } - - log.Printf("[DEBUG] Waiting for registry (%s) to be deactivated", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistryStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be deactivated: %s", id, waitErr) - } - - // Update resource to reflect its state - reg, err = client.Registry.ById(id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registry (%s): %s", id, err) - } - - // Step 2: Remove - if _, err := client.Registry.ActionRemove(reg); err != nil { - return 
fmt.Errorf("Error removing Registry: %s", err) - } - - log.Printf("[DEBUG] Waiting for registry (%s) to be removed", id) - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistryStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func resourceRancherRegistryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - envID, resourceID := splitID(d.Id()) - d.SetId(resourceID) - if envID != "" { - d.Set("environment_id", envID) - } else { - client, err := meta.(*Config).GlobalClient() - if err != nil { - return []*schema.ResourceData{}, err - } - registry, err := client.Registry.ById(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - d.Set("environment_id", registry.AccountId) - } - return []*schema.ResourceData{d}, nil -} - -// RegistryStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Environment. 
-func RegistryStateRefreshFunc(client *rancherClient.RancherClient, registryID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - env, err := client.Registry.ById(registryID) - - if err != nil { - return nil, "", err - } - - return env, env.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_registry_credential.go b/builtin/providers/rancher/resource_rancher_registry_credential.go deleted file mode 100644 index ed7f96315..000000000 --- a/builtin/providers/rancher/resource_rancher_registry_credential.go +++ /dev/null @@ -1,261 +0,0 @@ -package rancher - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func resourceRancherRegistryCredential() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherRegistryCredentialCreate, - Read: resourceRancherRegistryCredentialRead, - Update: resourceRancherRegistryCredentialUpdate, - Delete: resourceRancherRegistryCredentialDelete, - Importer: &schema.ResourceImporter{ - State: resourceRancherRegistryCredentialImport, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "registry_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "public_value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "secret_value": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceRancherRegistryCredentialCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating RegistryCredential: %s", d.Id()) - 
client, err := meta.(*Config).RegistryClient(d.Get("registry_id").(string)) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - publicValue := d.Get("public_value").(string) - secretValue := d.Get("secret_value").(string) - registryID := d.Get("registry_id").(string) - - registryCred := rancherClient.RegistryCredential{ - Name: name, - Description: description, - PublicValue: publicValue, - SecretValue: secretValue, - RegistryId: registryID, - } - newRegistryCredential, err := client.RegistryCredential.Create(®istryCred) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"active"}, - Refresh: RegistryCredentialStateRefreshFunc(client, newRegistryCredential.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry credential (%s) to be created: %s", newRegistryCredential.Id, waitErr) - } - - d.SetId(newRegistryCredential.Id) - log.Printf("[INFO] RegistryCredential ID: %s", d.Id()) - - return resourceRancherRegistryCredentialRead(d, meta) -} - -func resourceRancherRegistryCredentialRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing RegistryCredential: %s", d.Id()) - client, err := meta.(*Config).RegistryClient(d.Get("registry_id").(string)) - if err != nil { - return err - } - - registryCred, err := client.RegistryCredential.ById(d.Id()) - if err != nil { - return err - } - - if registryCred == nil { - log.Printf("[INFO] RegistryCredential %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(registryCred.State) { - log.Printf("[INFO] Registry Credential %s was removed on %v", d.Id(), registryCred.Removed) - d.SetId("") - return nil - } - - log.Printf("[INFO] RegistryCredential Name: %s", 
registryCred.Name) - - d.Set("description", registryCred.Description) - d.Set("name", registryCred.Name) - d.Set("public_value", registryCred.PublicValue) - d.Set("registry_id", registryCred.RegistryId) - - return nil -} - -func resourceRancherRegistryCredentialUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Updating RegistryCredential: %s", d.Id()) - client, err := meta.(*Config).RegistryClient(d.Get("registry_id").(string)) - if err != nil { - return err - } - - registryCred, err := client.RegistryCredential.ById(d.Id()) - if err != nil { - return err - } - - name := d.Get("name").(string) - description := d.Get("description").(string) - publicValue := d.Get("public_value").(string) - secretValue := d.Get("secret_value").(string) - - registryCred.Name = name - registryCred.Description = description - registryCred.PublicValue = publicValue - registryCred.SecretValue = secretValue - client.RegistryCredential.Update(registryCred, ®istryCred) - - return resourceRancherRegistryCredentialRead(d, meta) -} - -func resourceRancherRegistryCredentialDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting RegistryCredential: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).RegistryClient(d.Get("registry_id").(string)) - if err != nil { - return err - } - - reg, err := client.RegistryCredential.ById(id) - if err != nil { - return err - } - - // Step 1: Deactivate - if _, e := client.RegistryCredential.ActionDeactivate(reg); e != nil { - return fmt.Errorf("Error deactivating RegistryCredential: %s", err) - } - - log.Printf("[DEBUG] Waiting for registry credential (%s) to be deactivated", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistryCredentialStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := 
stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry credential (%s) to be deactivated: %s", id, waitErr) - } - - // Update resource to reflect its state - reg, err = client.RegistryCredential.ById(id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registry credential (%s): %s", id, err) - } - - // Step 2: Remove - if _, err := client.RegistryCredential.ActionRemove(reg); err != nil { - return fmt.Errorf("Error removing RegistryCredential: %s", err) - } - - log.Printf("[DEBUG] Waiting for registry (%s) to be removed", id) - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistryCredentialStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func resourceRancherRegistryCredentialImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - regID, resourceID := splitID(d.Id()) - d.SetId(resourceID) - if regID != "" { - d.Set("registry_id", regID) - } else { - client, err := meta.(*Config).GlobalClient() - if err != nil { - return []*schema.ResourceData{}, err - } - cred, err := client.RegistryCredential.ById(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - d.Set("registry_id", cred.RegistryId) - } - return []*schema.ResourceData{d}, nil -} - -// RegistryCredentialStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Environment. 
-func RegistryCredentialStateRefreshFunc(client *rancherClient.RancherClient, registryCredID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - regC, err := client.RegistryCredential.ById(registryCredID) - - if err != nil { - return nil, "", err - } - - return regC, regC.State, nil - } -} diff --git a/builtin/providers/rancher/resource_rancher_registry_credential_test.go b/builtin/providers/rancher/resource_rancher_registry_credential_test.go deleted file mode 100644 index b2aea1311..000000000 --- a/builtin/providers/rancher/resource_rancher_registry_credential_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package rancher - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func TestAccRancherRegistryCredential_basic(t *testing.T) { - var registry rancherClient.RegistryCredential - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryCredentialDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryCredentialConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryCredentialExists("rancher_registry_credential.foo", ®istry), - resource.TestCheckResourceAttr("rancher_registry_credential.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_registry_credential.foo", "description", "registry credential test"), - resource.TestCheckResourceAttr("rancher_registry_credential.foo", "public_value", "user"), - ), - }, - resource.TestStep{ - Config: testAccRancherRegistryCredentialUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryCredentialExists("rancher_registry_credential.foo", ®istry), - resource.TestCheckResourceAttr("rancher_registry_credential.foo", "name", "foo2"), - 
resource.TestCheckResourceAttr("rancher_registry_credential.foo", "description", "registry credential test - updated"), - resource.TestCheckResourceAttr("rancher_registry_credential.foo", "public_value", "user2"), - ), - }, - }, - }) -} - -func TestAccRancherRegistryCredential_disappears(t *testing.T) { - var registry rancherClient.RegistryCredential - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryCredentialDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryCredentialConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryCredentialExists("rancher_registry_credential.foo", ®istry), - testAccRancherRegistryCredentialDisappears(®istry), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccRancherRegistryCredentialDisappears(reg *rancherClient.RegistryCredential) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(reg.AccountId) - if err != nil { - return err - } - - // Step 1: Deactivate - if _, e := client.RegistryCredential.ActionDeactivate(reg); e != nil { - return fmt.Errorf("Error deactivating RegistryCredential: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistryCredentialStateRefreshFunc(client, reg.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry credential (%s) to be deactivated: %s", reg.Id, waitErr) - } - - // Update resource to reflect its state - reg, err = client.RegistryCredential.ById(reg.Id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registry credential (%s): %s", reg.Id, err) - } 
- - // Step 2: Remove - if _, err := client.RegistryCredential.ActionRemove(reg); err != nil { - return fmt.Errorf("Error removing RegistryCredential: %s", err) - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistryCredentialStateRefreshFunc(client, reg.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be removed: %s", reg.Id, waitErr) - } - - return nil - } -} - -func testAccCheckRancherRegistryCredentialExists(n string, reg *rancherClient.RegistryCredential) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client, err := testAccProvider.Meta().(*Config).RegistryClient(rs.Primary.Attributes["registry_id"]) - if err != nil { - return err - } - - foundReg, err := client.RegistryCredential.ById(rs.Primary.ID) - if err != nil { - return err - } - - if foundReg.Resource.Id != rs.Primary.ID { - return fmt.Errorf("RegistryCredential not found") - } - - *reg = *foundReg - - return nil - } -} - -func testAccCheckRancherRegistryCredentialDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "rancher_registry_credential" { - continue - } - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - - reg, err := client.RegistryCredential.ById(rs.Primary.ID) - - if err == nil { - if reg != nil && - reg.Resource.Id == rs.Primary.ID && - reg.State != "removed" { - return fmt.Errorf("RegistryCredential still exists") - } - } - - return nil - } - return nil -} - -const testAccRancherRegistryCredentialConfig = ` -resource "rancher_environment" 
"foo" { - name = "foo" -} - -resource "rancher_registry" "foo" { - name = "foo" - description = "registry test" - server_address = "http://bar.com:8080" - environment_id = "${rancher_environment.foo.id}" -} - -resource "rancher_registry_credential" "foo" { - name = "foo" - description = "registry credential test" - registry_id = "${rancher_registry.foo.id}" - email = "registry@credential.com" - public_value = "user" - secret_value = "pass" -} -` - -const testAccRancherRegistryCredentialUpdateConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_registry" "foo" { - name = "foo" - description = "registry test" - server_address = "http://bar.com:8080" - environment_id = "${rancher_environment.foo.id}" -} - -resource "rancher_registry_credential" "foo" { - name = "foo2" - description = "registry credential test - updated" - registry_id = "${rancher_registry.foo.id}" - email = "registry@credential.com" - public_value = "user2" - secret_value = "pass" -} - ` diff --git a/builtin/providers/rancher/resource_rancher_registry_test.go b/builtin/providers/rancher/resource_rancher_registry_test.go deleted file mode 100644 index c4fdc76bf..000000000 --- a/builtin/providers/rancher/resource_rancher_registry_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package rancher - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func TestAccRancherRegistry_basic(t *testing.T) { - var registry rancherClient.Registry - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryExists("rancher_registry.foo", ®istry), - 
resource.TestCheckResourceAttr("rancher_registry.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_registry.foo", "description", "registry test"), - resource.TestCheckResourceAttr("rancher_registry.foo", "server_address", "http://foo.com:8080"), - ), - }, - resource.TestStep{ - Config: testAccRancherRegistryUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryExists("rancher_registry.foo", ®istry), - resource.TestCheckResourceAttr("rancher_registry.foo", "name", "foo2"), - resource.TestCheckResourceAttr("rancher_registry.foo", "description", "registry test - updated"), - resource.TestCheckResourceAttr("rancher_registry.foo", "server_address", "http://foo.updated.com:8080"), - ), - }, - resource.TestStep{ - Config: testAccRancherRegistryRecreateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryExists("rancher_registry.foo", ®istry), - resource.TestCheckResourceAttr("rancher_registry.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_registry.foo", "description", "registry test"), - resource.TestCheckResourceAttr("rancher_registry.foo", "server_address", "http://foo.com:8080"), - ), - }, - }, - }) -} - -func TestAccRancherRegistry_disappears(t *testing.T) { - var registry rancherClient.Registry - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherRegistryDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherRegistryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherRegistryExists("rancher_registry.foo", ®istry), - testAccRancherRegistryDisappears(®istry), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccRancherRegistryDisappears(reg *rancherClient.Registry) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(reg.AccountId) - if err != nil { - 
return err - } - - // Step 1: Deactivate - if _, e := client.Registry.ActionDeactivate(reg); e != nil { - return fmt.Errorf("Error deactivating Registry: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "inactive", "deactivating"}, - Target: []string{"inactive"}, - Refresh: RegistryStateRefreshFunc(client, reg.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be deactivated: %s", reg.Id, waitErr) - } - - // Update resource to reflect its state - reg, err = client.Registry.ById(reg.Id) - if err != nil { - return fmt.Errorf("Failed to refresh state of deactivated registry (%s): %s", reg.Id, err) - } - - // Step 2: Remove - if _, err := client.Registry.ActionRemove(reg); err != nil { - return fmt.Errorf("Error removing Registry: %s", err) - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"inactive", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: RegistryStateRefreshFunc(client, reg.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for registry (%s) to be removed: %s", reg.Id, waitErr) - } - - return nil - } -} - -func testAccCheckRancherRegistryExists(n string, reg *rancherClient.Registry) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(rs.Primary.Attributes["environment_id"]) - if err != nil { - return err - } - - foundReg, err := client.Registry.ById(rs.Primary.ID) - if err != nil { - return err - } - - if foundReg.Resource.Id 
!= rs.Primary.ID { - return fmt.Errorf("Registry not found") - } - - *reg = *foundReg - - return nil - } -} - -func testAccCheckRancherRegistryDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "rancher_registry" { - continue - } - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - - reg, err := client.Registry.ById(rs.Primary.ID) - - if err == nil { - if reg != nil && - reg.Resource.Id == rs.Primary.ID && - reg.State != "removed" { - return fmt.Errorf("Registry still exists") - } - } - - return nil - } - return nil -} - -const testAccRancherRegistryConfig = ` -resource "rancher_environment" "foo_registry" { - name = "registry test" - description = "environment to test registries" -} - -resource "rancher_registry" "foo" { - name = "foo" - description = "registry test" - server_address = "http://foo.com:8080" - environment_id = "${rancher_environment.foo_registry.id}" -} -` - -const testAccRancherRegistryUpdateConfig = ` - resource "rancher_environment" "foo_registry" { - name = "registry test" - description = "environment to test registries" - } - - resource "rancher_registry" "foo" { - name = "foo2" - description = "registry test - updated" - server_address = "http://foo.updated.com:8080" - environment_id = "${rancher_environment.foo_registry.id}" - } - ` - -const testAccRancherRegistryRecreateConfig = ` - resource "rancher_environment" "foo_registry" { - name = "registry test" - description = "environment to test registries" - } - - resource "rancher_environment" "foo_registry2" { - name = "alternative registry test" - description = "other environment to test registries" - } - - resource "rancher_registry" "foo" { - name = "foo" - description = "registry test" - server_address = "http://foo.com:8080" - environment_id = "${rancher_environment.foo_registry2.id}" - } - ` diff --git a/builtin/providers/rancher/resource_rancher_stack.go 
b/builtin/providers/rancher/resource_rancher_stack.go deleted file mode 100644 index f55ed8f8f..000000000 --- a/builtin/providers/rancher/resource_rancher_stack.go +++ /dev/null @@ -1,505 +0,0 @@ -package rancher - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "reflect" - "strings" - "time" - - compose "github.com/docker/libcompose/config" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "github.com/rancher/go-rancher/catalog" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func resourceRancherStack() *schema.Resource { - return &schema.Resource{ - Create: resourceRancherStackCreate, - Read: resourceRancherStackRead, - Update: resourceRancherStackUpdate, - Delete: resourceRancherStackDelete, - Importer: &schema.ResourceImporter{ - State: resourceRancherStackImport, - }, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "docker_compose": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: suppressComposeDiff, - }, - "rancher_compose": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: suppressComposeDiff, - }, - "environment": { - Type: schema.TypeMap, - Optional: true, - }, - "catalog_id": { - Type: schema.TypeString, - Optional: true, - }, - "scope": { - Type: schema.TypeString, - Default: "user", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"user", "system"}, true), - }, - "start_on_create": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "finish_upgrade": { - Type: schema.TypeBool, - Optional: true, - }, - "rendered_docker_compose": { - Type: 
schema.TypeString, - Computed: true, - }, - "rendered_rancher_compose": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceRancherStackCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating Stack: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - data, err := makeStackData(d, meta) - if err != nil { - return err - } - - var newStack rancherClient.Stack - if err := client.Create("stack", data, &newStack); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"activating", "active", "removed", "removing"}, - Target: []string{"active"}, - Refresh: StackStateRefreshFunc(client, newStack.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be created: %s", newStack.Id, waitErr) - } - - d.SetId(newStack.Id) - log.Printf("[INFO] Stack ID: %s", d.Id()) - - return resourceRancherStackRead(d, meta) -} - -func resourceRancherStackRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Refreshing Stack: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - stack, err := client.Stack.ById(d.Id()) - if err != nil { - return err - } - - if stack == nil { - log.Printf("[INFO] Stack %s not found", d.Id()) - d.SetId("") - return nil - } - - if removed(stack.State) { - log.Printf("[INFO] Stack %s was removed on %v", d.Id(), stack.Removed) - d.SetId("") - return nil - } - - config, err := client.Stack.ActionExportconfig(stack, &rancherClient.ComposeConfigInput{}) - if err != nil { - return err - } - - log.Printf("[INFO] Stack Name: %s", stack.Name) - - d.Set("description", stack.Description) - d.Set("name", stack.Name) - dockerCompose := 
strings.Replace(config.DockerComposeConfig, "\r", "", -1) - rancherCompose := strings.Replace(config.RancherComposeConfig, "\r", "", -1) - - catalogID := d.Get("catalog_id") - if catalogID == "" { - d.Set("docker_compose", dockerCompose) - d.Set("rancher_compose", rancherCompose) - } else { - d.Set("docker_compose", "") - d.Set("rancher_compose", "") - } - d.Set("rendered_docker_compose", dockerCompose) - d.Set("rendered_rancher_compose", rancherCompose) - d.Set("environment_id", stack.AccountId) - d.Set("environment", stack.Environment) - - if stack.ExternalId == "" { - d.Set("scope", "user") - d.Set("catalog_id", "") - } else { - trimmedID := strings.TrimPrefix(stack.ExternalId, "system-") - if trimmedID == stack.ExternalId { - d.Set("scope", "user") - } else { - d.Set("scope", "system") - } - d.Set("catalog_id", strings.TrimPrefix(trimmedID, "catalog://")) - } - - d.Set("start_on_create", stack.StartOnCreate) - d.Set("finish_upgrade", d.Get("finish_upgrade").(bool)) - - return nil -} - -func resourceRancherStackUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Updating Stack: %s", d.Id()) - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - d.Partial(true) - - data, err := makeStackData(d, meta) - if err != nil { - return err - } - - stack, err := client.Stack.ById(d.Id()) - if err != nil { - return err - } - - var newStack rancherClient.Stack - if err = client.Update(stack.Type, &stack.Resource, data, &newStack); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "active-updating"}, - Target: []string{"active"}, - Refresh: StackStateRefreshFunc(client, newStack.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - s, waitErr := stateConf.WaitForState() - stack = s.(*rancherClient.Stack) - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be 
updated: %s", stack.Id, waitErr) - } - - d.SetPartial("name") - d.SetPartial("description") - d.SetPartial("scope") - - if d.HasChange("docker_compose") || - d.HasChange("rancher_compose") || - d.HasChange("environment") || - d.HasChange("catalog_id") { - - envMap := make(map[string]interface{}) - for key, value := range *data["environment"].(*map[string]string) { - envValue := value - envMap[key] = &envValue - } - stack, err = client.Stack.ActionUpgrade(stack, &rancherClient.StackUpgrade{ - DockerCompose: *data["dockerCompose"].(*string), - RancherCompose: *data["rancherCompose"].(*string), - Environment: envMap, - ExternalId: *data["externalId"].(*string), - }) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "upgrading", "upgraded"}, - Target: []string{"upgraded"}, - Refresh: StackStateRefreshFunc(client, stack.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - s, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be upgraded: %s", stack.Id, waitErr) - } - stack = s.(*rancherClient.Stack) - - if d.Get("finish_upgrade").(bool) { - stack, err = client.Stack.ActionFinishupgrade(stack) - if err != nil { - return err - } - - stateConf = &resource.StateChangeConf{ - Pending: []string{"active", "upgraded", "finishing-upgrade"}, - Target: []string{"active"}, - Refresh: StackStateRefreshFunc(client, stack.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - _, waitErr = stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be upgraded: %s", stack.Id, waitErr) - } - } - - d.SetPartial("rendered_docker_compose") - d.SetPartial("rendered_rancher_compose") - d.SetPartial("docker_compose") - d.SetPartial("rancher_compose") - d.SetPartial("environment") - d.SetPartial("catalog_id") - } - - d.Partial(false) - - return 
resourceRancherStackRead(d, meta) -} - -func resourceRancherStackDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Deleting Stack: %s", d.Id()) - id := d.Id() - client, err := meta.(*Config).EnvironmentClient(d.Get("environment_id").(string)) - if err != nil { - return err - } - - stack, err := client.Stack.ById(id) - if err != nil { - return err - } - - if err := client.Stack.Delete(stack); err != nil { - return fmt.Errorf("Error deleting Stack: %s", err) - } - - log.Printf("[DEBUG] Waiting for stack (%s) to be removed", id) - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: StackStateRefreshFunc(client, id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be removed: %s", id, waitErr) - } - - d.SetId("") - return nil -} - -func resourceRancherStackImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - envID, resourceID := splitID(d.Id()) - d.SetId(resourceID) - if envID != "" { - d.Set("environment_id", envID) - } else { - client, err := meta.(*Config).GlobalClient() - if err != nil { - return []*schema.ResourceData{}, err - } - stack, err := client.Stack.ById(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - d.Set("environment_id", stack.AccountId) - } - return []*schema.ResourceData{d}, nil -} - -// StackStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Rancher Stack. 
-func StackStateRefreshFunc(client *rancherClient.RancherClient, stackID string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - stack, err := client.Stack.ById(stackID) - - if err != nil { - return nil, "", err - } - - return stack, stack.State, nil - } -} - -func environmentFromMap(m map[string]interface{}) map[string]string { - result := make(map[string]string) - for k, v := range m { - result[k] = v.(string) - } - return result -} - -func makeStackData(d *schema.ResourceData, meta interface{}) (data map[string]interface{}, err error) { - name := d.Get("name").(string) - description := d.Get("description").(string) - - var externalID string - var dockerCompose string - var rancherCompose string - var environment map[string]string - if c, ok := d.GetOk("catalog_id"); ok { - if scope, ok := d.GetOk("scope"); ok && scope.(string) == "system" { - externalID = "system-" - } - catalogID := c.(string) - externalID += "catalog://" + catalogID - - catalogClient, err := meta.(*Config).CatalogClient() - if err != nil { - return data, err - } - - templateVersion, err := getCatalogTemplateVersion(catalogClient, catalogID) - if err != nil { - return data, err - } - - if templateVersion.Id != catalogID { - return data, fmt.Errorf("Did not find template %s", catalogID) - } - - dockerCompose = templateVersion.Files["docker-compose.yml"].(string) - rancherCompose = templateVersion.Files["rancher-compose.yml"].(string) - } - - if c, ok := d.GetOk("docker_compose"); ok { - dockerCompose = c.(string) - } - if c, ok := d.GetOk("rancher_compose"); ok { - rancherCompose = c.(string) - } - - environment = environmentFromMap(d.Get("environment").(map[string]interface{})) - - startOnCreate := d.Get("start_on_create") - system := systemScope(d.Get("scope").(string)) - - data = map[string]interface{}{ - "name": &name, - "description": &description, - "dockerCompose": &dockerCompose, - "rancherCompose": &rancherCompose, - "environment": &environment, - 
"externalId": &externalID, - "startOnCreate": &startOnCreate, - "system": &system, - } - - return data, nil -} - -func suppressComposeDiff(k, old, new string, d *schema.ResourceData) bool { - cOld, err := compose.CreateConfig([]byte(old)) - if err != nil { - // TODO: log? - return false - } - - cNew, err := compose.CreateConfig([]byte(new)) - if err != nil { - // TODO: log? - return false - } - - return reflect.DeepEqual(cOld, cNew) -} - -func getCatalogTemplateVersion(c *catalog.RancherClient, catalogID string) (*catalog.TemplateVersion, error) { - templateVersion := &catalog.TemplateVersion{} - - namesAndFolder := strings.SplitN(catalogID, ":", 3) - if len(namesAndFolder) != 3 { - return templateVersion, fmt.Errorf("catalog_id: %s not in 'catalog:name:N' format", catalogID) - } - - template, err := c.Template.ById(namesAndFolder[0] + ":" + namesAndFolder[1]) - if err != nil { - return templateVersion, fmt.Errorf("Failed to get catalog template: %s at url %s", err, c.GetOpts().Url) - } - - if template == nil { - return templateVersion, fmt.Errorf("Unknown catalog template %s", catalogID) - } - - for _, versionLink := range template.VersionLinks { - if strings.HasSuffix(versionLink.(string), catalogID) { - client := &http.Client{} - req, err := http.NewRequest("GET", fmt.Sprint(versionLink), nil) - req.SetBasicAuth(c.GetOpts().AccessKey, c.GetOpts().SecretKey) - resp, err := client.Do(req) - if err != nil { - return templateVersion, err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return templateVersion, fmt.Errorf("Bad Response %d lookup up %s", resp.StatusCode, versionLink) - } - - err = json.NewDecoder(resp.Body).Decode(templateVersion) - return templateVersion, err - } - } - - return templateVersion, nil -} - -func systemScope(scope string) bool { - return scope == "system" -} diff --git a/builtin/providers/rancher/resource_rancher_stack_test.go b/builtin/providers/rancher/resource_rancher_stack_test.go deleted file mode 100644 index 
be2291ccb..000000000 --- a/builtin/providers/rancher/resource_rancher_stack_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package rancher - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - rancherClient "github.com/rancher/go-rancher/v2" -) - -func TestAccRancherStack_basic(t *testing.T) { - var stack rancherClient.Stack - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherStackDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherStackConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.foo", &stack), - resource.TestCheckResourceAttr("rancher_stack.foo", "name", "foo"), - resource.TestCheckResourceAttr("rancher_stack.foo", "description", "Terraform acc test group"), - resource.TestCheckResourceAttr("rancher_stack.foo", "catalog_id", ""), - resource.TestCheckResourceAttr("rancher_stack.foo", "docker_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.foo", "rancher_compose", ""), - testAccCheckRancherStackAttributes(&stack, emptyEnvironment, false), - ), - }, - resource.TestStep{ - Config: testAccRancherStackUpdateConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.foo", &stack), - resource.TestCheckResourceAttr("rancher_stack.foo", "name", "foo2"), - resource.TestCheckResourceAttr("rancher_stack.foo", "description", "Terraform acc test group - updated"), - resource.TestCheckResourceAttr("rancher_stack.foo", "catalog_id", ""), - resource.TestCheckResourceAttr("rancher_stack.foo", "docker_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.foo", "rancher_compose", ""), - testAccCheckRancherStackAttributes(&stack, emptyEnvironment, false), - ), - }, - }, - }) -} - -func TestAccRancherStack_compose(t *testing.T) { - var stack rancherClient.Stack - - 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherStackDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherStackComposeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.compose", &stack), - resource.TestCheckResourceAttr("rancher_stack.compose", "name", "compose"), - resource.TestCheckResourceAttr("rancher_stack.compose", "description", "Terraform acc test group - compose"), - resource.TestCheckResourceAttr("rancher_stack.compose", "catalog_id", ""), - resource.TestCheckResourceAttr("rancher_stack.compose", "docker_compose", "web: { image: nginx }"), - resource.TestCheckResourceAttr("rancher_stack.compose", "rancher_compose", "web: { scale: 1 }"), - testAccCheckRancherStackAttributes(&stack, emptyEnvironment, false), - ), - }, - }, - }) -} - -//The following tests are run against the Default environment because -//upgrading a stack automatically starts the services which never -//completes if there is no host available -func TestAccRancherStack_catalog(t *testing.T) { - var stack rancherClient.Stack - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherStackDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherStackSystemCatalogConfigInitial, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.catalog", &stack), - resource.TestCheckResourceAttr("rancher_stack.catalog", "name", "catalogInitial"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "description", "Terraform acc test group - catalogInitial"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "catalog_id", "community:janitor:0"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "scope", "system"), - 
resource.TestCheckResourceAttr("rancher_stack.catalog", "docker_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rancher_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rendered_docker_compose", catalogDockerComposeInitial), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rendered_rancher_compose", catalogRancherComposeInitial), - testAccCheckRancherStackAttributes(&stack, catalogEnvironment, true), - ), - }, - resource.TestStep{ - Config: testAccRancherStackSystemCatalogConfigUpdate, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.catalog", &stack), - resource.TestCheckResourceAttr("rancher_stack.catalog", "name", "catalogUpdate"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "description", "Terraform acc test group - catalogUpdate"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "catalog_id", "community:janitor:1"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "scope", "user"), - resource.TestCheckResourceAttr("rancher_stack.catalog", "docker_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rancher_compose", ""), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rendered_docker_compose", catalogDockerComposeUpdate), - resource.TestCheckResourceAttr("rancher_stack.catalog", "rendered_rancher_compose", catalogRancherComposeUpdate), - testAccCheckRancherStackAttributes(&stack, catalogEnvironmentUpgrade, true), - ), - }, - }, - }) -} - -func TestAccRancherStack_disappears(t *testing.T) { - var stack rancherClient.Stack - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckRancherStackDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRancherStackConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckRancherStackExists("rancher_stack.foo", &stack), - 
testAccRancherStackDisappears(&stack), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccRancherStackDisappears(stack *rancherClient.Stack) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(stack.AccountId) - if err != nil { - return err - } - - if err := client.Stack.Delete(stack); err != nil { - return fmt.Errorf("Error deleting Stack: %s", err) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"active", "removed", "removing"}, - Target: []string{"removed"}, - Refresh: StackStateRefreshFunc(client, stack.Id), - Timeout: 10 * time.Minute, - Delay: 1 * time.Second, - MinTimeout: 3 * time.Second, - } - - _, waitErr := stateConf.WaitForState() - if waitErr != nil { - return fmt.Errorf( - "Error waiting for stack (%s) to be removed: %s", stack.Id, waitErr) - } - - return nil - } -} - -func testAccCheckRancherStackExists(n string, stack *rancherClient.Stack) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No App Name is set") - } - - client, err := testAccProvider.Meta().(*Config).EnvironmentClient(rs.Primary.Attributes["environment_id"]) - if err != nil { - return err - } - - foundStack, err := client.Stack.ById(rs.Primary.ID) - if err != nil { - return err - } - - if foundStack.Resource.Id != rs.Primary.ID { - return fmt.Errorf("Stack not found") - } - - *stack = *foundStack - - return nil - } -} - -func testAccCheckRancherStackAttributes(stack *rancherClient.Stack, environment map[string]string, startOnCreate bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if len(stack.Environment) != len(environment) { - return fmt.Errorf("Bad environment size: %v should be: %v", len(stack.Environment), environment) - } - - for k, v := range stack.Environment { - if 
environment[k] != v { - return fmt.Errorf("Bad environment value for %s: %s should be: %s", k, environment[k], v) - } - } - - if stack.StartOnCreate != startOnCreate { - return fmt.Errorf("Bad startOnCreate: %t should be: %t", stack.StartOnCreate, startOnCreate) - } - - return nil - } -} - -func testAccCheckRancherStackDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "rancher_stack" { - continue - } - client, err := testAccProvider.Meta().(*Config).GlobalClient() - if err != nil { - return err - } - - stack, err := client.Stack.ById(rs.Primary.ID) - - if err == nil { - if stack != nil && - stack.Resource.Id == rs.Primary.ID && - stack.State != "removed" { - return fmt.Errorf("Stack still exists") - } - } - - return nil - } - return nil -} - -const testAccRancherStackConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_stack" "foo" { - name = "foo" - description = "Terraform acc test group" - environment_id = "${rancher_environment.foo.id}" -} -` - -const testAccRancherStackUpdateConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_stack" "foo" { - name = "foo2" - description = "Terraform acc test group - updated" - environment_id = "${rancher_environment.foo.id}" -} -` - -const testAccRancherStackComposeConfig = ` -resource "rancher_environment" "foo" { - name = "foo" -} - -resource "rancher_stack" "compose" { - name = "compose" - description = "Terraform acc test group - compose" - environment_id = "${rancher_environment.foo.id}" - docker_compose = "web: { image: nginx }" - rancher_compose = "web: { scale: 1 }" -} -` - -const testAccRancherStackSystemCatalogConfigInitial = ` -resource "rancher_stack" "catalog" { - name = "catalogInitial" - description = "Terraform acc test group - catalogInitial" - environment_id = "1a5" - catalog_id = "community:janitor:0" - scope = "system" - start_on_create = true - environment { - EXCLUDE_LABEL = 
"cleanup=false" - FREQUENCY = "60" - KEEP = "rancher/agent:*" - } -} -` - -const testAccRancherStackSystemCatalogConfigUpdate = ` -resource "rancher_stack" "catalog" { - name = "catalogUpdate" - description = "Terraform acc test group - catalogUpdate" - environment_id = "1a5" - catalog_id = "community:janitor:1" - scope = "user" - environment { - EXCLUDE_LABEL = "cleanup=false" - FREQUENCY = "60" - KEEP = "rancher/agent:*" - KEEPC = "*:*" - } -} -` - -var catalogDockerComposeInitial = `cleanup: - environment: - CLEAN_PERIOD: '60' - DELAY_TIME: '900' - KEEP_IMAGES: rancher/agent:* - labels: - io.rancher.scheduler.global: 'true' - io.rancher.scheduler.affinity:host_label_ne: cleanup=false - tty: true - image: meltwater/docker-cleanup:1.4.0 - privileged: true - volumes: - - /var/run/docker.sock:/var/run/docker.sock - - /var/lib/docker:/var/lib/docker - stdin_open: true -` - -const catalogRancherComposeInitial = `{} -` - -const catalogDockerComposeUpdate = `cleanup: - environment: - CLEAN_PERIOD: '60' - DELAY_TIME: '900' - KEEP_CONTAINERS: '*:*' - KEEP_IMAGES: rancher/agent:* - labels: - io.rancher.scheduler.global: 'true' - io.rancher.scheduler.affinity:host_label_ne: cleanup=false - image: sshipway/docker-cleanup:1.5.2 - volumes: - - /var/run/docker.sock:/var/run/docker.sock - - /var/lib/docker:/var/lib/docker - net: none -` - -const catalogRancherComposeUpdate = `{} -` - -var emptyEnvironment = map[string]string{} - -var catalogEnvironment = map[string]string{ - "EXCLUDE_LABEL": "cleanup=false", - "FREQUENCY": "60", - "KEEP": "rancher/agent:*", -} - -var catalogEnvironmentUpgrade = map[string]string{ - "EXCLUDE_LABEL": "cleanup=false", - "FREQUENCY": "60", - "KEEP": "rancher/agent:*", - "KEEPC": "*:*", -} diff --git a/builtin/providers/rancher/util.go b/builtin/providers/rancher/util.go deleted file mode 100644 index 4197a4374..000000000 --- a/builtin/providers/rancher/util.go +++ /dev/null @@ -1,82 +0,0 @@ -package rancher - -import ( - "fmt" - "net/url" - 
"strings" - - "github.com/rancher/go-rancher/v2" -) - -const ( - stateRemoved = "removed" - statePurged = "purged" -) - -// GetActiveOrchestration get the name of the active orchestration for a environment -func getActiveOrchestration(project *client.Project) string { - return project.Orchestration -} - -func removed(state string) bool { - return state == stateRemoved || state == statePurged -} - -func splitID(id string) (envID, resourceID string) { - if strings.Contains(id, "/") { - return id[0:strings.Index(id, "/")], id[strings.Index(id, "/")+1:] - } - return "", id -} - -// NewListOpts wraps around client.NewListOpts() -func NewListOpts() *client.ListOpts { - return client.NewListOpts() -} - -func populateProjectTemplateIDs(config *Config) error { - cli, err := config.GlobalClient() - if err != nil { - return err - } - - for projectTemplate := range defaultProjectTemplates { - templates, err := cli.ProjectTemplate.List(&client.ListOpts{ - Filters: map[string]interface{}{ - "isPublic": true, - "name": projectTemplate, - "sort": "created", - }, - }) - if err != nil { - return err - } - - if len(templates.Data) > 0 { - defaultProjectTemplates[projectTemplate] = templates.Data[0].Id - } - } - return nil -} - -func addHostLabels(command string, labels map[string]interface{}) string { - result := []string{} - hostLabels := url.Values{} - - if len(labels) == 0 { - return command - } - - tokenizedCommand := strings.Split(command, " ") - if len(tokenizedCommand) > 0 { - result = append(result, tokenizedCommand[:3]...) - for k, v := range labels { - hostLabels.Add(k, v.(string)) - } - strHostLabels := hostLabels.Encode() - result = append(result, "-e", fmt.Sprintf("CATTLE_HOST_LABELS='%s'", strHostLabels)) - result = append(result, tokenizedCommand[3:]...) 
- } - - return strings.Join(result, " ") -} diff --git a/builtin/providers/rancher/util_test.go b/builtin/providers/rancher/util_test.go deleted file mode 100644 index 9f8839115..000000000 --- a/builtin/providers/rancher/util_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package rancher - -import ( - "testing" - - "github.com/rancher/go-rancher/v2" -) - -var idTests = []struct { - id string - envID string - resourceID string -}{ - {"1a05", "", "1a05"}, - {"1a05/1s234", "1a05", "1s234"}, -} - -func TestSplitId(t *testing.T) { - for _, tt := range idTests { - envID, resourceID := splitID(tt.id) - if envID != tt.envID || resourceID != tt.resourceID { - t.Errorf("splitId(%s) => [%s, %s]) want [%s, %s]", tt.id, envID, resourceID, tt.envID, tt.resourceID) - } - } -} - -var stateTests = []struct { - state string - removed bool -}{ - {"removed", true}, - {"purged", true}, - {"active", false}, -} - -func TestRemovedState(t *testing.T) { - for _, tt := range stateTests { - removed := removed(tt.state) - if removed != tt.removed { - t.Errorf("removed(%s) => %t, wants %t", tt.state, removed, tt.removed) - } - } -} - -var orchestrationTests = []struct { - project *client.Project - orchestration string -}{ - {&client.Project{Orchestration: "cattle"}, "cattle"}, - {&client.Project{Orchestration: "swarm"}, "swarm"}, - {&client.Project{Orchestration: "mesos"}, "mesos"}, - {&client.Project{Orchestration: "kubernetes"}, "kubernetes"}, -} - -func TestActiveOrchestration(t *testing.T) { - for _, tt := range orchestrationTests { - orchestration := getActiveOrchestration(tt.project) - if orchestration != tt.orchestration { - t.Errorf("getActiveOrchestration(%+v) => %s, wants %s", tt.project, orchestration, tt.orchestration) - } - } -} - -type LabelTestCase struct { - Labels map[string]interface{} - Command string - ExpectedCommand string -} - -var ( - HostLabelTestCases = []LabelTestCase{ - LabelTestCase{ - Labels: map[string]interface{}{ - "orch": "true", - "etcd": "true", - }, - Command: 
"sudo docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.2 http://192.168.122.158:8080/v1/scripts/71FF294EA7A2B6865708:1483142400000:8OVFmSEUlS2VXvVGbYCXTFaMC8w", - ExpectedCommand: "sudo docker run -e CATTLE_HOST_LABELS='etcd=true&orch=true' --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.2 http://192.168.122.158:8080/v1/scripts/71FF294EA7A2B6865708:1483142400000:8OVFmSEUlS2VXvVGbYCXTFaMC8w", - }, - LabelTestCase{ - Labels: map[string]interface{}{}, - Command: "sudo docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.2 http://192.168.122.158:8080/v1/scripts/71FF294EA7A2B6865708:1483142400000:8OVFmSEUlS2VXvVGbYCXTFaMC8w", - ExpectedCommand: "sudo docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.2 http://192.168.122.158:8080/v1/scripts/71FF294EA7A2B6865708:1483142400000:8OVFmSEUlS2VXvVGbYCXTFaMC8w", - }, - } -) - -func TestAddHostLabels(t *testing.T) { - for _, tCase := range HostLabelTestCases { - cmd := addHostLabels(tCase.Command, tCase.Labels) - if cmd != tCase.ExpectedCommand { - t.Errorf("Command:\n%s\nDoes not match\n%s", cmd, tCase.ExpectedCommand) - } - } -} diff --git a/builtin/providers/random/provider.go b/builtin/providers/random/provider.go deleted file mode 100644 index 15665f465..000000000 --- a/builtin/providers/random/provider.go +++ /dev/null @@ -1,19 +0,0 @@ -package random - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{}, - - ResourcesMap: map[string]*schema.Resource{ - "random_id": resourceId(), - "random_shuffle": resourceShuffle(), - "random_pet": resourcePet(), - }, - } -} diff --git a/builtin/providers/random/provider_test.go b/builtin/providers/random/provider_test.go deleted file mode 100644 index 92d16c509..000000000 --- a/builtin/providers/random/provider_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package random - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "random": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { -} diff --git a/builtin/providers/random/resource_id.go b/builtin/providers/random/resource_id.go deleted file mode 100644 index 7b9ec38fc..000000000 --- a/builtin/providers/random/resource_id.go +++ /dev/null @@ -1,110 +0,0 @@ -package random - -import ( - "crypto/rand" - "encoding/base64" - "encoding/hex" - "errors" - "math/big" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceId() *schema.Resource { - return &schema.Resource{ - Create: CreateID, - Read: RepopulateEncodings, - Delete: schema.RemoveFromState, - - Schema: map[string]*schema.Schema{ - "keepers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "byte_length": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "prefix": { - Type: schema.TypeString, 
- Optional: true, - ForceNew: true, - }, - - "b64": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use b64_url for old behavior, or b64_std for standard base64 encoding", - }, - - "b64_url": { - Type: schema.TypeString, - Computed: true, - }, - - "b64_std": { - Type: schema.TypeString, - Computed: true, - }, - - "hex": { - Type: schema.TypeString, - Computed: true, - }, - - "dec": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func CreateID(d *schema.ResourceData, meta interface{}) error { - byteLength := d.Get("byte_length").(int) - bytes := make([]byte, byteLength) - - n, err := rand.Reader.Read(bytes) - if n != byteLength { - return errors.New("generated insufficient random bytes") - } - if err != nil { - return errwrap.Wrapf("error generating random bytes: {{err}}", err) - } - - b64Str := base64.RawURLEncoding.EncodeToString(bytes) - d.SetId(b64Str) - - return RepopulateEncodings(d, meta) -} - -func RepopulateEncodings(d *schema.ResourceData, _ interface{}) error { - prefix := d.Get("prefix").(string) - base64Str := d.Id() - - bytes, err := base64.RawURLEncoding.DecodeString(base64Str) - if err != nil { - return errwrap.Wrapf("Error decoding ID: {{err}}", err) - } - - b64StdStr := base64.StdEncoding.EncodeToString(bytes) - hexStr := hex.EncodeToString(bytes) - - bigInt := big.Int{} - bigInt.SetBytes(bytes) - decStr := bigInt.String() - - d.Set("b64", prefix+base64Str) - d.Set("b64_url", prefix+base64Str) - d.Set("b64_std", prefix+b64StdStr) - - d.Set("hex", prefix+hexStr) - d.Set("dec", prefix+decStr) - - return nil -} diff --git a/builtin/providers/random/resource_id_test.go b/builtin/providers/random/resource_id_test.go deleted file mode 100644 index 05fc943a6..000000000 --- a/builtin/providers/random/resource_id_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package random - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -type idLens struct { - 
b64Len int - b64UrlLen int - b64StdLen int - hexLen int -} - -func TestAccResourceID(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccResourceIDConfig, - Check: resource.ComposeTestCheckFunc( - testAccResourceIDCheck("random_id.foo", &idLens{ - b64Len: 6, - b64UrlLen: 6, - b64StdLen: 8, - hexLen: 8, - }), - testAccResourceIDCheck("random_id.bar", &idLens{ - b64Len: 12, - b64UrlLen: 12, - b64StdLen: 14, - hexLen: 14, - }), - ), - }, - }, - }) -} - -func testAccResourceIDCheck(id string, want *idLens) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not found: %s", id) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - b64Str := rs.Primary.Attributes["b64"] - b64UrlStr := rs.Primary.Attributes["b64_url"] - b64StdStr := rs.Primary.Attributes["b64_std"] - hexStr := rs.Primary.Attributes["hex"] - decStr := rs.Primary.Attributes["dec"] - - if got, want := len(b64Str), want.b64Len; got != want { - return fmt.Errorf("base64 string length is %d; want %d", got, want) - } - if got, want := len(b64UrlStr), want.b64UrlLen; got != want { - return fmt.Errorf("base64 URL string length is %d; want %d", got, want) - } - if got, want := len(b64StdStr), want.b64StdLen; got != want { - return fmt.Errorf("base64 STD string length is %d; want %d", got, want) - } - if got, want := len(hexStr), want.hexLen; got != want { - return fmt.Errorf("hex string length is %d; want %d", got, want) - } - if len(decStr) < 1 { - return fmt.Errorf("decimal string is empty; want at least one digit") - } - - return nil - } -} - -const ( - testAccResourceIDConfig = ` -resource "random_id" "foo" { - byte_length = 4 -} - -resource "random_id" "bar" { - byte_length = 4 - prefix = "cloud-" -} -` -) diff --git a/builtin/providers/random/resource_pet.go 
b/builtin/providers/random/resource_pet.go deleted file mode 100644 index 4c6f3e335..000000000 --- a/builtin/providers/random/resource_pet.go +++ /dev/null @@ -1,66 +0,0 @@ -package random - -import ( - "fmt" - "strings" - - "github.com/dustinkirkland/golang-petname" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourcePet() *schema.Resource { - return &schema.Resource{ - Create: CreatePet, - Read: ReadPet, - Delete: schema.RemoveFromState, - - Schema: map[string]*schema.Schema{ - "keepers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "length": { - Type: schema.TypeInt, - Optional: true, - Default: 2, - ForceNew: true, - }, - - "prefix": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "separator": { - Type: schema.TypeString, - Optional: true, - Default: "-", - ForceNew: true, - }, - }, - } -} - -func CreatePet(d *schema.ResourceData, meta interface{}) error { - length := d.Get("length").(int) - separator := d.Get("separator").(string) - prefix := d.Get("prefix").(string) - - pet := strings.ToLower(petname.Generate(length, separator)) - - if prefix != "" { - pet = fmt.Sprintf("%s%s%s", prefix, separator, pet) - } - - d.SetId(pet) - - return nil -} - -func ReadPet(d *schema.ResourceData, meta interface{}) error { - return nil -} diff --git a/builtin/providers/random/resource_pet_test.go b/builtin/providers/random/resource_pet_test.go deleted file mode 100644 index 64bebb6f6..000000000 --- a/builtin/providers/random/resource_pet_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package random - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccResourcePet_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccResourcePet_basic, - Check: 
resource.ComposeTestCheckFunc( - testAccResourcePetLength("random_pet.pet_1", "-", 2), - ), - }, - }, - }) -} - -func TestAccResourcePet_length(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccResourcePet_length, - Check: resource.ComposeTestCheckFunc( - testAccResourcePetLength("random_pet.pet_1", "-", 4), - ), - }, - }, - }) -} - -func TestAccResourcePet_prefix(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccResourcePet_prefix, - Check: resource.ComposeTestCheckFunc( - testAccResourcePetLength("random_pet.pet_1", "-", 3), - resource.TestMatchResourceAttr( - "random_pet.pet_1", "id", regexp.MustCompile("^consul-")), - ), - }, - }, - }) -} - -func TestAccResourcePet_separator(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccResourcePet_separator, - Check: resource.ComposeTestCheckFunc( - testAccResourcePetLength("random_pet.pet_1", "_", 3), - ), - }, - }, - }) -} - -func testAccResourcePetLength(id string, separator string, length int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not found: %s", id) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - petParts := strings.Split(rs.Primary.ID, separator) - if len(petParts) != length { - return fmt.Errorf("Length does not match") - } - - return nil - } -} - -const testAccResourcePet_basic = ` -resource "random_pet" "pet_1" { -} -` - -const testAccResourcePet_length = ` -resource "random_pet" "pet_1" { - length = 4 -} -` -const testAccResourcePet_prefix = ` -resource "random_pet" "pet_1" { - prefix = "consul" -} -` - 
-const testAccResourcePet_separator = ` -resource "random_pet" "pet_1" { - length = 3 - separator = "_" -} -` diff --git a/builtin/providers/random/resource_shuffle.go b/builtin/providers/random/resource_shuffle.go deleted file mode 100644 index fcec308a7..000000000 --- a/builtin/providers/random/resource_shuffle.go +++ /dev/null @@ -1,82 +0,0 @@ -package random - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceShuffle() *schema.Resource { - return &schema.Resource{ - Create: CreateShuffle, - Read: schema.Noop, - Delete: schema.RemoveFromState, - - Schema: map[string]*schema.Schema{ - "keepers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "seed": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "input": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "result": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "result_count": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func CreateShuffle(d *schema.ResourceData, _ interface{}) error { - input := d.Get("input").([]interface{}) - seed := d.Get("seed").(string) - - resultCount := d.Get("result_count").(int) - if resultCount == 0 { - resultCount = len(input) - } - result := make([]interface{}, 0, resultCount) - - rand := NewRand(seed) - - // Keep producing permutations until we fill our result -Batches: - for { - perm := rand.Perm(len(input)) - - for _, i := range perm { - result = append(result, input[i]) - - if len(result) >= resultCount { - break Batches - } - } - } - - d.SetId("-") - d.Set("result", result) - - return nil -} diff --git a/builtin/providers/random/resource_shuffle_test.go b/builtin/providers/random/resource_shuffle_test.go deleted file mode 100644 index 5770e4105..000000000 --- a/builtin/providers/random/resource_shuffle_test.go +++ /dev/null @@ 
-1,91 +0,0 @@ -package random - -import ( - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccResourceShuffle(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccResourceShuffleConfig, - Check: resource.ComposeTestCheckFunc( - // These results are current as of Go 1.6. The Go - // "rand" package does not guarantee that the random - // number generator will generate the same results - // forever, but the maintainers endeavor not to change - // it gratuitously. - // These tests allow us to detect such changes and - // document them when they arise, but the docs for this - // resource specifically warn that results are not - // guaranteed consistent across Terraform releases. - testAccResourceShuffleCheck( - "random_shuffle.default_length", - []string{"a", "c", "b", "e", "d"}, - ), - testAccResourceShuffleCheck( - "random_shuffle.shorter_length", - []string{"a", "c", "b"}, - ), - testAccResourceShuffleCheck( - "random_shuffle.longer_length", - []string{"a", "c", "b", "e", "d", "a", "e", "d", "c", "b", "a", "b"}, - ), - ), - }, - }, - }) -} - -func testAccResourceShuffleCheck(id string, wants []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not found: %s", id) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - attrs := rs.Primary.Attributes - - gotLen := attrs["result.#"] - wantLen := strconv.Itoa(len(wants)) - if gotLen != wantLen { - return fmt.Errorf("got %s result items; want %s", gotLen, wantLen) - } - - for i, want := range wants { - key := fmt.Sprintf("result.%d", i) - if got := attrs[key]; got != want { - return fmt.Errorf("index %d is %q; want %q", i, got, want) - } - } - - return nil - } -} - -const 
testAccResourceShuffleConfig = ` -resource "random_shuffle" "default_length" { - input = ["a", "b", "c", "d", "e"] - seed = "-" -} -resource "random_shuffle" "shorter_length" { - input = ["a", "b", "c", "d", "e"] - seed = "-" - result_count = 3 -} -resource "random_shuffle" "longer_length" { - input = ["a", "b", "c", "d", "e"] - seed = "-" - result_count = 12 -} -` diff --git a/builtin/providers/random/seed.go b/builtin/providers/random/seed.go deleted file mode 100644 index 7d16322fd..000000000 --- a/builtin/providers/random/seed.go +++ /dev/null @@ -1,24 +0,0 @@ -package random - -import ( - "hash/crc64" - "math/rand" - "time" -) - -// NewRand returns a seeded random number generator, using a seed derived -// from the provided string. -// -// If the seed string is empty, the current time is used as a seed. -func NewRand(seed string) *rand.Rand { - var seedInt int64 - if seed != "" { - crcTable := crc64.MakeTable(crc64.ISO) - seedInt = int64(crc64.Checksum([]byte(seed), crcTable)) - } else { - seedInt = time.Now().Unix() - } - - randSource := rand.NewSource(seedInt) - return rand.New(randSource) -} diff --git a/builtin/providers/rundeck/provider.go b/builtin/providers/rundeck/provider.go deleted file mode 100644 index 8c4701a04..000000000 --- a/builtin/providers/rundeck/provider.go +++ /dev/null @@ -1,51 +0,0 @@ -package rundeck - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - - "github.com/apparentlymart/go-rundeck-api/rundeck" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("RUNDECK_URL", nil), - Description: "URL of the root of the target Rundeck server.", - }, - "auth_token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("RUNDECK_AUTH_TOKEN", nil), - Description: "Auth token to 
use with the Rundeck API.", - }, - "allow_unverified_ssl": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Description: "If set, the Rundeck client will permit unverifiable SSL certificates.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "rundeck_project": resourceRundeckProject(), - "rundeck_job": resourceRundeckJob(), - "rundeck_private_key": resourceRundeckPrivateKey(), - "rundeck_public_key": resourceRundeckPublicKey(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := &rundeck.ClientConfig{ - BaseURL: d.Get("url").(string), - AuthToken: d.Get("auth_token").(string), - AllowUnverifiedSSL: d.Get("allow_unverified_ssl").(bool), - } - - return rundeck.NewClient(config) -} diff --git a/builtin/providers/rundeck/provider_test.go b/builtin/providers/rundeck/provider_test.go deleted file mode 100644 index fc3d936ba..000000000 --- a/builtin/providers/rundeck/provider_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package rundeck - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// To run these acceptance tests, you will need a Rundeck server. -// An easy way to get one is to use Rundeck's "Anvils" demo, which includes a Vagrantfile -// to get it running easily: -// https://github.com/rundeck/anvils-demo -// The anvils demo ships with some example security policies that don't have enough access to -// run the tests, so you need to either modify one of the stock users to have full access or -// create a new user with such access. The following block is an example that gives the -// 'admin' user and API clients open access. -// In the anvils demo the admin password is "admin" by default. - -// Place the contents of the following comment in /etc/rundeck/terraform-test.aclpolicy -/* -description: Admin, all access. 
-context: - project: '.*' # all projects -for: - resource: - - allow: '*' # allow read/create all kinds - adhoc: - - allow: '*' # allow read/running/killing adhoc jobs - job: - - allow: '*' # allow read/write/delete/run/kill of all jobs - node: - - allow: '*' # allow read/run for all nodes -by: - group: admin ---- -description: Admin, all access. -context: - application: 'rundeck' -for: - resource: - - allow: '*' # allow create of projects - project: - - allow: '*' # allow view/admin of all projects - storage: - - allow: '*' # allow read/create/update/delete for all /keys/* storage content -by: - group: admin ---- -description: Admin API, all access. -context: - application: 'rundeck' -for: - resource: - - allow: '*' # allow create of projects - project: - - allow: '*' # allow view/admin of all projects - storage: - - allow: '*' # allow read/create/update/delete for all /keys/* storage content -by: - group: api_token_group -*/ - -// Once you've got a user set up, put that user's API auth token in the RUNDECK_AUTH_TOKEN -// environment variable, and put the URL of the Rundeck home page in the RUNDECK_URL variable. -// If you're using the Anvils demo in its default configuration, you can find or generate an API -// token at http://192.168.50.2:4440/user/profile once you've logged in, and RUNDECK_URL will -// be http://192.168.50.2:4440/ . 
- -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "rundeck": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("RUNDECK_URL"); v == "" { - t.Fatal("RUNDECK_URL must be set for acceptance tests") - } - if v := os.Getenv("RUNDECK_AUTH_TOKEN"); v == "" { - t.Fatal("RUNDECK_AUTH_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/rundeck/resource_job.go b/builtin/providers/rundeck/resource_job.go deleted file mode 100644 index 7fecbcf41..000000000 --- a/builtin/providers/rundeck/resource_job.go +++ /dev/null @@ -1,619 +0,0 @@ -package rundeck - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/apparentlymart/go-rundeck-api/rundeck" -) - -func resourceRundeckJob() *schema.Resource { - return &schema.Resource{ - Create: CreateJob, - Update: UpdateJob, - Delete: DeleteJob, - Exists: JobExists, - Read: ReadJob, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "log_level": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "INFO", - }, - - "allow_concurrent_executions": &schema.Schema{ - Type: 
schema.TypeBool, - Optional: true, - }, - - "max_thread_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - - "continue_on_error": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "rank_order": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "ascending", - }, - - "rank_attribute": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "preserve_options_order": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "command_ordering_strategy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "node-first", - }, - - "node_filter_query": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "node_filter_exclude_precedence": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "schedule": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "option": &schema.Schema{ - // This is a list because order is important when preserve_options_order is - // set. 
When it's not set the order is unimportant but preserved by Rundeck/ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "default_value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "value_choices": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "value_choices_url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "require_predefined_choice": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "validation_regex": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "required": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "allow_multiple_values": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "multi_value_delimiter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "obscure_input": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "exposed_to_scripts": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - - "command": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "shell_command": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "inline_script": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "script_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "script_file_args": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "job": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ 
- "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "run_for_each_node": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "args": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "step_plugin": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: resourceRundeckJobPluginResource(), - }, - - "node_step_plugin": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: resourceRundeckJobPluginResource(), - }, - }, - }, - }, - }, - } -} - -func resourceRundeckJobPluginResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - } -} - -func CreateJob(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - job, err := jobFromResourceData(d) - if err != nil { - return err - } - - jobSummary, err := client.CreateJob(job) - if err != nil { - return err - } - - d.SetId(jobSummary.ID) - d.Set("id", jobSummary.ID) - - return ReadJob(d, meta) -} - -func UpdateJob(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - job, err := jobFromResourceData(d) - if err != nil { - return err - } - - jobSummary, err := client.CreateOrUpdateJob(job) - if err != nil { - return err - } - - d.SetId(jobSummary.ID) - d.Set("id", jobSummary.ID) - - return ReadJob(d, meta) -} - -func DeleteJob(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - err := client.DeleteJob(d.Id()) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func JobExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*rundeck.Client) - - _, err := client.GetJob(d.Id()) - if err != nil { - if _, ok := 
err.(rundeck.NotFoundError); ok { - err = nil - } - return false, err - } - - return true, nil -} - -func ReadJob(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - job, err := client.GetJob(d.Id()) - if err != nil { - return err - } - - return jobToResourceData(job, d) -} - -func jobFromResourceData(d *schema.ResourceData) (*rundeck.JobDetail, error) { - job := &rundeck.JobDetail{ - ID: d.Id(), - Name: d.Get("name").(string), - GroupName: d.Get("group_name").(string), - ProjectName: d.Get("project_name").(string), - Description: d.Get("description").(string), - LogLevel: d.Get("log_level").(string), - AllowConcurrentExecutions: d.Get("allow_concurrent_executions").(bool), - Dispatch: &rundeck.JobDispatch{ - MaxThreadCount: d.Get("max_thread_count").(int), - ContinueOnError: d.Get("continue_on_error").(bool), - RankAttribute: d.Get("rank_attribute").(string), - RankOrder: d.Get("rank_order").(string), - }, - } - - sequence := &rundeck.JobCommandSequence{ - ContinueOnError: d.Get("continue_on_error").(bool), - OrderingStrategy: d.Get("command_ordering_strategy").(string), - Commands: []rundeck.JobCommand{}, - } - - commandConfigs := d.Get("command").([]interface{}) - for _, commandI := range commandConfigs { - commandMap := commandI.(map[string]interface{}) - command := rundeck.JobCommand{ - Description: commandMap["description"].(string), - ShellCommand: commandMap["shell_command"].(string), - Script: commandMap["inline_script"].(string), - ScriptFile: commandMap["script_file"].(string), - ScriptFileArgs: commandMap["script_file_args"].(string), - } - - jobRefsI := commandMap["job"].([]interface{}) - if len(jobRefsI) > 1 { - return nil, fmt.Errorf("rundeck command may have no more than one job") - } - if len(jobRefsI) > 0 { - jobRefMap := jobRefsI[0].(map[string]interface{}) - command.Job = &rundeck.JobCommandJobRef{ - Name: jobRefMap["name"].(string), - GroupName: jobRefMap["group_name"].(string), - RunForEachNode: 
jobRefMap["run_for_each_node"].(bool), - Arguments: rundeck.JobCommandJobRefArguments(jobRefMap["args"].(string)), - } - } - - stepPluginsI := commandMap["step_plugin"].([]interface{}) - if len(stepPluginsI) > 1 { - return nil, fmt.Errorf("rundeck command may have no more than one step plugin") - } - if len(stepPluginsI) > 0 { - stepPluginMap := stepPluginsI[0].(map[string]interface{}) - configI := stepPluginMap["config"].(map[string]interface{}) - config := map[string]string{} - for k, v := range configI { - config[k] = v.(string) - } - command.StepPlugin = &rundeck.JobPlugin{ - Type: stepPluginMap["type"].(string), - Config: config, - } - } - - stepPluginsI = commandMap["node_step_plugin"].([]interface{}) - if len(stepPluginsI) > 1 { - return nil, fmt.Errorf("rundeck command may have no more than one node step plugin") - } - if len(stepPluginsI) > 0 { - stepPluginMap := stepPluginsI[0].(map[string]interface{}) - configI := stepPluginMap["config"].(map[string]interface{}) - config := map[string]string{} - for k, v := range configI { - config[k] = v.(string) - } - command.NodeStepPlugin = &rundeck.JobPlugin{ - Type: stepPluginMap["type"].(string), - Config: config, - } - } - - sequence.Commands = append(sequence.Commands, command) - } - job.CommandSequence = sequence - - optionConfigsI := d.Get("option").([]interface{}) - if len(optionConfigsI) > 0 { - optionsConfig := &rundeck.JobOptions{ - PreserveOrder: d.Get("preserve_options_order").(bool), - Options: []rundeck.JobOption{}, - } - for _, optionI := range optionConfigsI { - optionMap := optionI.(map[string]interface{}) - option := rundeck.JobOption{ - Name: optionMap["name"].(string), - DefaultValue: optionMap["default_value"].(string), - ValueChoices: rundeck.JobValueChoices([]string{}), - ValueChoicesURL: optionMap["value_choices_url"].(string), - RequirePredefinedChoice: optionMap["require_predefined_choice"].(bool), - ValidationRegex: optionMap["validation_regex"].(string), - Description: 
optionMap["description"].(string), - IsRequired: optionMap["required"].(bool), - AllowsMultipleValues: optionMap["allow_multiple_values"].(bool), - MultiValueDelimiter: optionMap["multi_value_delimiter"].(string), - ObscureInput: optionMap["obscure_input"].(bool), - ValueIsExposedToScripts: optionMap["exposed_to_scripts"].(bool), - } - - for _, iv := range optionMap["value_choices"].([]interface{}) { - option.ValueChoices = append(option.ValueChoices, iv.(string)) - } - - optionsConfig.Options = append(optionsConfig.Options, option) - } - job.OptionsConfig = optionsConfig - } - - if d.Get("node_filter_query").(string) != "" { - job.NodeFilter = &rundeck.JobNodeFilter{ - ExcludePrecedence: d.Get("node_filter_exclude_precedence").(bool), - Query: d.Get("node_filter_query").(string), - } - } - - if d.Get("schedule").(string) != "" { - schedule := strings.Split(d.Get("schedule").(string), " ") - if len(schedule) != 7 { - return nil, fmt.Errorf("Rundeck schedule must be formated like a cron expression, as defined here: http://www.quartz-scheduler.org/documentation/quartz-2.2.x/tutorials/tutorial-lesson-06.html") - } - job.Schedule = &rundeck.JobSchedule{ - Time: rundeck.JobScheduleTime{ - Seconds: schedule[0], - Minute: schedule[1], - Hour: schedule[2], - }, - Month: rundeck.JobScheduleMonth{ - Day: schedule[3], - Month: schedule[4], - }, - WeekDay: &rundeck.JobScheduleWeekDay{ - Day: schedule[5], - }, - Year: rundeck.JobScheduleYear{ - Year: schedule[6], - }, - } - } - - return job, nil -} - -func jobToResourceData(job *rundeck.JobDetail, d *schema.ResourceData) error { - - d.SetId(job.ID) - d.Set("id", job.ID) - d.Set("name", job.Name) - d.Set("group_name", job.GroupName) - - // The project name is not consistently returned in all rundeck versions, - // so we'll only update it if it's set. Jobs can't move between projects - // anyway, so this is harmless. 
- if job.ProjectName != "" { - d.Set("project_name", job.ProjectName) - } - - d.Set("description", job.Description) - d.Set("log_level", job.LogLevel) - d.Set("allow_concurrent_executions", job.AllowConcurrentExecutions) - if job.Dispatch != nil { - d.Set("max_thread_count", job.Dispatch.MaxThreadCount) - d.Set("continue_on_error", job.Dispatch.ContinueOnError) - d.Set("rank_attribute", job.Dispatch.RankAttribute) - d.Set("rank_order", job.Dispatch.RankOrder) - } else { - d.Set("max_thread_count", nil) - d.Set("continue_on_error", nil) - d.Set("rank_attribute", nil) - d.Set("rank_order", nil) - } - - d.Set("node_filter_query", nil) - d.Set("node_filter_exclude_precedence", nil) - if job.NodeFilter != nil { - d.Set("node_filter_query", job.NodeFilter.Query) - d.Set("node_filter_exclude_precedence", job.NodeFilter.ExcludePrecedence) - } - - optionConfigsI := []interface{}{} - if job.OptionsConfig != nil { - d.Set("preserve_options_order", job.OptionsConfig.PreserveOrder) - for _, option := range job.OptionsConfig.Options { - optionConfigI := map[string]interface{}{ - "name": option.Name, - "default_value": option.DefaultValue, - "value_choices": option.ValueChoices, - "value_choices_url": option.ValueChoicesURL, - "require_predefined_choice": option.RequirePredefinedChoice, - "validation_regex": option.ValidationRegex, - "decription": option.Description, - "required": option.IsRequired, - "allow_multiple_values": option.AllowsMultipleValues, - "multi_value_delimiter": option.MultiValueDelimiter, - "obscure_input": option.ObscureInput, - "exposed_to_scripts": option.ValueIsExposedToScripts, - } - optionConfigsI = append(optionConfigsI, optionConfigI) - } - } - d.Set("option", optionConfigsI) - - commandConfigsI := []interface{}{} - if job.CommandSequence != nil { - d.Set("command_ordering_strategy", job.CommandSequence.OrderingStrategy) - for _, command := range job.CommandSequence.Commands { - commandConfigI := map[string]interface{}{ - "description": 
command.Description, - "shell_command": command.ShellCommand, - "inline_script": command.Script, - "script_file": command.ScriptFile, - "script_file_args": command.ScriptFileArgs, - } - - if command.Job != nil { - commandConfigI["job"] = []interface{}{ - map[string]interface{}{ - "name": command.Job.Name, - "group_name": command.Job.GroupName, - "run_for_each_node": command.Job.RunForEachNode, - "args": command.Job.Arguments, - }, - } - } - - if command.StepPlugin != nil { - commandConfigI["step_plugin"] = []interface{}{ - map[string]interface{}{ - "type": command.StepPlugin.Type, - "config": map[string]string(command.StepPlugin.Config), - }, - } - } - - if command.NodeStepPlugin != nil { - commandConfigI["node_step_plugin"] = []interface{}{ - map[string]interface{}{ - "type": command.NodeStepPlugin.Type, - "config": map[string]string(command.NodeStepPlugin.Config), - }, - } - } - - commandConfigsI = append(commandConfigsI, commandConfigI) - } - } - d.Set("command", commandConfigsI) - - if job.Schedule != nil { - schedule := []string{} - schedule = append(schedule, job.Schedule.Time.Seconds) - schedule = append(schedule, job.Schedule.Time.Minute) - schedule = append(schedule, job.Schedule.Time.Hour) - schedule = append(schedule, job.Schedule.Month.Day) - schedule = append(schedule, job.Schedule.Month.Month) - if job.Schedule.WeekDay != nil { - schedule = append(schedule, job.Schedule.WeekDay.Day) - } else { - schedule = append(schedule, "*") - } - schedule = append(schedule, job.Schedule.Year.Year) - - d.Set("schedule", strings.Join(schedule, " ")) - } - - return nil -} diff --git a/builtin/providers/rundeck/resource_job_test.go b/builtin/providers/rundeck/resource_job_test.go deleted file mode 100644 index 2bcf6f0e1..000000000 --- a/builtin/providers/rundeck/resource_job_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package rundeck - -import ( - "fmt" - "testing" - - "github.com/apparentlymart/go-rundeck-api/rundeck" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccJob_basic(t *testing.T) { - var job rundeck.JobDetail - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccJobCheckDestroy(&job), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccJobConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccJobCheckExists("rundeck_job.test", &job), - func(s *terraform.State) error { - if expected := "basic-job"; job.Name != expected { - return fmt.Errorf("wrong name; expected %v, got %v", expected, job.Name) - } - if expected := "Prints Hello World"; job.CommandSequence.Commands[0].Description != expected { - return fmt.Errorf("failed to set command description; expected %v, got %v", expected, job.CommandSequence.Commands[0].Description) - } - return nil - }, - ), - }, - }, - }) -} - -func testAccJobCheckDestroy(job *rundeck.JobDetail) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*rundeck.Client) - _, err := client.GetJob(job.ID) - if err == nil { - return fmt.Errorf("key still exists") - } - if _, ok := err.(*rundeck.NotFoundError); !ok { - return fmt.Errorf("got something other than NotFoundError (%v) when getting key", err) - } - - return nil - } -} - -func testAccJobCheckExists(rn string, job *rundeck.JobDetail) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("job id not set") - } - - client := testAccProvider.Meta().(*rundeck.Client) - gotJob, err := client.GetJob(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting job details: %s", err) - } - - *job = *gotJob - - return nil - } -} - -const testAccJobConfig_basic = ` -resource "rundeck_project" "test" { - name = "terraform-acc-test-job" - description = "parent project 
for job acceptance tests" - resource_model_source { - type = "file" - config = { - format = "resourcexml" - file = "/tmp/terraform-acc-tests.xml" - } - } -} -resource "rundeck_job" "test" { - project_name = "${rundeck_project.test.name}" - name = "basic-job" - description = "A basic job" - node_filter_query = "example" - allow_concurrent_executions = 1 - max_thread_count = 1 - rank_order = "ascending" - schedule = "0 0 12 * * * *" - option { - name = "foo" - default_value = "bar" - } - command { - description = "Prints Hello World" - shell_command = "echo Hello World" - } -} -` diff --git a/builtin/providers/rundeck/resource_private_key.go b/builtin/providers/rundeck/resource_private_key.go deleted file mode 100644 index a717f85f1..000000000 --- a/builtin/providers/rundeck/resource_private_key.go +++ /dev/null @@ -1,114 +0,0 @@ -package rundeck - -import ( - "crypto/sha1" - "encoding/hex" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/apparentlymart/go-rundeck-api/rundeck" -) - -func resourceRundeckPrivateKey() *schema.Resource { - return &schema.Resource{ - Create: CreateOrUpdatePrivateKey, - Update: CreateOrUpdatePrivateKey, - Delete: DeletePrivateKey, - Exists: PrivateKeyExists, - Read: ReadPrivateKey, - - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Path to the key within the key store", - ForceNew: true, - }, - - "key_material": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The private key material to store, in PEM format", - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - }, - } -} - -func CreateOrUpdatePrivateKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - path := d.Get("path").(string) - keyMaterial := d.Get("key_material").(string) - - var err 
error - - if d.Id() != "" { - err = client.ReplacePrivateKey(path, keyMaterial) - } else { - err = client.CreatePrivateKey(path, keyMaterial) - } - - if err != nil { - return err - } - - d.SetId(path) - - return ReadPrivateKey(d, meta) -} - -func DeletePrivateKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - path := d.Id() - - // The only "delete" call we have is oblivious to key type, but - // that's okay since our Exists implementation makes sure that we - // won't try to delete a key of the wrong type since we'll pretend - // that it's already been deleted. - err := client.DeleteKey(path) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func ReadPrivateKey(d *schema.ResourceData, meta interface{}) error { - // Nothing to read for a private key: existence is all we need to - // worry about, and PrivateKeyExists took care of that. - return nil -} - -func PrivateKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*rundeck.Client) - - path := d.Id() - - key, err := client.GetKeyMeta(path) - if err != nil { - if _, ok := err.(rundeck.NotFoundError); ok { - err = nil - } - return false, err - } - - if key.KeyType != "private" { - // If the key type isn't public then as far as this resource is - // concerned it doesn't exist. (We'll fail properly when we try to - // create a key where one already exists.) 
- return false, nil - } - - return true, nil -} diff --git a/builtin/providers/rundeck/resource_private_key_test.go b/builtin/providers/rundeck/resource_private_key_test.go deleted file mode 100644 index da2dad67f..000000000 --- a/builtin/providers/rundeck/resource_private_key_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package rundeck - -import ( - "fmt" - "strings" - "testing" - - "github.com/apparentlymart/go-rundeck-api/rundeck" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPrivateKey_basic(t *testing.T) { - var key rundeck.KeyMeta - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPrivateKeyCheckDestroy(&key), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPrivateKeyConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccPrivateKeyCheckExists("rundeck_private_key.test", &key), - func(s *terraform.State) error { - if expected := "keys/terraform_acceptance_tests/private_key"; key.Path != expected { - return fmt.Errorf("wrong path; expected %v, got %v", expected, key.Path) - } - if !strings.HasSuffix(key.URL, "/storage/keys/terraform_acceptance_tests/private_key") { - return fmt.Errorf("wrong URL; expected to end with the key path") - } - if expected := "file"; key.ResourceType != expected { - return fmt.Errorf("wrong resource type; expected %v, got %v", expected, key.ResourceType) - } - if expected := "private"; key.KeyType != expected { - return fmt.Errorf("wrong key type; expected %v, got %v", expected, key.KeyType) - } - // Rundeck won't let us re-retrieve a private key payload, so we can't test - // that the key material was submitted and stored correctly. 
- return nil - }, - ), - }, - }, - }) -} - -func testAccPrivateKeyCheckDestroy(key *rundeck.KeyMeta) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*rundeck.Client) - _, err := client.GetKeyMeta(key.Path) - if err == nil { - return fmt.Errorf("key still exists") - } - if _, ok := err.(*rundeck.NotFoundError); !ok { - return fmt.Errorf("got something other than NotFoundError (%v) when getting key", err) - } - - return nil - } -} - -func testAccPrivateKeyCheckExists(rn string, key *rundeck.KeyMeta) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("key id not set") - } - - client := testAccProvider.Meta().(*rundeck.Client) - gotKey, err := client.GetKeyMeta(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting key metadata: %s", err) - } - - *key = *gotKey - - return nil - } -} - -const testAccPrivateKeyConfig_basic = ` -resource "rundeck_private_key" "test" { - path = "terraform_acceptance_tests/private_key" - key_material = "this is not a real private key" -} -` diff --git a/builtin/providers/rundeck/resource_project.go b/builtin/providers/rundeck/resource_project.go deleted file mode 100644 index d355555b0..000000000 --- a/builtin/providers/rundeck/resource_project.go +++ /dev/null @@ -1,293 +0,0 @@ -package rundeck - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/apparentlymart/go-rundeck-api/rundeck" -) - -var projectConfigAttributes = map[string]string{ - "project.name": "name", - "project.description": "description", - "service.FileCopier.default.provider": "default_node_file_copier_plugin", - "service.NodeExecutor.default.provider": "default_node_executor_plugin", - "project.ssh-authentication": "ssh_authentication_type", - 
"project.ssh-key-storage-path": "ssh_key_storage_path", - "project.ssh-keypath": "ssh_key_file_path", -} - -func resourceRundeckProject() *schema.Resource { - return &schema.Resource{ - Create: CreateProject, - Update: UpdateProject, - Delete: DeleteProject, - Exists: ProjectExists, - Read: ReadProject, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Unique name for the project", - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Description of the project to be shown in the Rundeck UI", - Default: "Managed by Terraform", - }, - - "ui_url": &schema.Schema{ - Type: schema.TypeString, - Required: false, - Computed: true, - }, - - "resource_model_source": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Name of the resource model plugin to use", - }, - "config": &schema.Schema{ - Type: schema.TypeMap, - Required: true, - Description: "Configuration parameters for the selected plugin", - }, - }, - }, - }, - - "default_node_file_copier_plugin": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "jsch-scp", - }, - - "default_node_executor_plugin": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "jsch-ssh", - }, - - "ssh_authentication_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "privateKey", - }, - - "ssh_key_storage_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "ssh_key_file_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "extra_config": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Description: "Additional raw configuration parameters to include in the project configuration, with dots replaced with slashes in the key 
names due to limitations in Terraform's config language.", - }, - }, - } -} - -func CreateProject(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - // Rundeck's model is a little inconsistent in that we can create - // a project via a high-level structure but yet we must update - // the project via its raw config properties. - // For simplicity's sake we create a bare minimum project here - // and then delegate to UpdateProject to fill in the rest of the - // configuration via the raw config properties. - - project, err := client.CreateProject(&rundeck.Project{ - Name: d.Get("name").(string), - }) - - if err != nil { - return err - } - - d.SetId(project.Name) - d.Set("id", project.Name) - - return UpdateProject(d, meta) -} - -func UpdateProject(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - // In Rundeck, updates are always in terms of the low-level config - // properties map, so we need to transform our data structure - // into the equivalent raw properties. 
- - projectName := d.Id() - - updateMap := map[string]string{} - - slashReplacer := strings.NewReplacer("/", ".") - if extraConfig := d.Get("extra_config"); extraConfig != nil { - for k, v := range extraConfig.(map[string]interface{}) { - updateMap[slashReplacer.Replace(k)] = v.(string) - } - } - - for configKey, attrKey := range projectConfigAttributes { - v := d.Get(attrKey).(string) - if v != "" { - updateMap[configKey] = v - } - } - - for i, rmsi := range d.Get("resource_model_source").([]interface{}) { - rms := rmsi.(map[string]interface{}) - pluginType := rms["type"].(string) - ci := rms["config"].(map[string]interface{}) - attrKeyPrefix := fmt.Sprintf("resources.source.%v.", i+1) - typeKey := attrKeyPrefix + "type" - configKeyPrefix := fmt.Sprintf("%vconfig.", attrKeyPrefix) - updateMap[typeKey] = pluginType - for k, v := range ci { - updateMap[configKeyPrefix+k] = v.(string) - } - } - - err := client.SetProjectConfig(projectName, updateMap) - - if err != nil { - return err - } - - return ReadProject(d, meta) -} - -func ReadProject(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - name := d.Id() - project, err := client.GetProject(name) - - if err != nil { - return err - } - - for configKey, attrKey := range projectConfigAttributes { - d.Set(projectConfigAttributes[configKey], nil) - if v, ok := project.Config[configKey]; ok { - d.Set(attrKey, v) - // Remove this key so it won't get included in extra_config - // later. 
- delete(project.Config, configKey) - } - } - - resourceSourceMap := map[int]interface{}{} - configMaps := map[int]interface{}{} - for configKey, v := range project.Config { - if strings.HasPrefix(configKey, "resources.source.") { - nameParts := strings.Split(configKey, ".") - - if len(nameParts) < 4 { - continue - } - - index, err := strconv.Atoi(nameParts[2]) - if err != nil { - continue - } - - if _, ok := resourceSourceMap[index]; !ok { - configMap := map[string]interface{}{} - configMaps[index] = configMap - resourceSourceMap[index] = map[string]interface{}{ - "config": configMap, - } - } - - switch nameParts[3] { - case "type": - if len(nameParts) != 4 { - continue - } - m := resourceSourceMap[index].(map[string]interface{}) - m["type"] = v - case "config": - if len(nameParts) != 5 { - continue - } - m := configMaps[index].(map[string]interface{}) - m[nameParts[4]] = v - default: - continue - } - - // Remove this key so it won't get included in extra_config - // later. - delete(project.Config, configKey) - } - } - - resourceSources := []map[string]interface{}{} - resourceSourceIndices := []int{} - for k := range resourceSourceMap { - resourceSourceIndices = append(resourceSourceIndices, k) - } - sort.Ints(resourceSourceIndices) - - for _, index := range resourceSourceIndices { - resourceSources = append(resourceSources, resourceSourceMap[index].(map[string]interface{})) - } - d.Set("resource_model_source", resourceSources) - - extraConfig := map[string]string{} - dotReplacer := strings.NewReplacer(".", "/") - for k, v := range project.Config { - extraConfig[dotReplacer.Replace(k)] = v - } - d.Set("extra_config", extraConfig) - - d.Set("name", project.Name) - d.Set("ui_url", project.URL) - - return nil -} - -func ProjectExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*rundeck.Client) - - name := d.Id() - _, err := client.GetProject(name) - - if _, ok := err.(rundeck.NotFoundError); ok { - return false, nil - } - - if err != 
nil { - return false, err - } - - return true, nil -} - -func DeleteProject(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - name := d.Id() - return client.DeleteProject(name) -} diff --git a/builtin/providers/rundeck/resource_project_test.go b/builtin/providers/rundeck/resource_project_test.go deleted file mode 100644 index 1627a7824..000000000 --- a/builtin/providers/rundeck/resource_project_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package rundeck - -import ( - "fmt" - "testing" - - "github.com/apparentlymart/go-rundeck-api/rundeck" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccProject_basic(t *testing.T) { - var project rundeck.Project - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccProjectCheckDestroy(&project), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccProjectConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccProjectCheckExists("rundeck_project.main", &project), - func(s *terraform.State) error { - if expected := "terraform-acc-test-basic"; project.Name != expected { - return fmt.Errorf("wrong name; expected %v, got %v", expected, project.Name) - } - if expected := "baz"; project.Config["foo.bar"] != expected { - return fmt.Errorf("wrong foo.bar config; expected %v, got %v", expected, project.Config["foo.bar"]) - } - if expected := "file"; project.Config["resources.source.1.type"] != expected { - return fmt.Errorf("wrong resources.source.1.type config; expected %v, got %v", expected, project.Config["resources.source.1.type"]) - } - return nil - }, - ), - }, - }, - }) -} - -func testAccProjectCheckDestroy(project *rundeck.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*rundeck.Client) - _, err := client.GetProject(project.Name) - if err == nil { - return 
fmt.Errorf("project still exists") - } - if _, ok := err.(*rundeck.NotFoundError); !ok { - return fmt.Errorf("got something other than NotFoundError (%v) when getting project", err) - } - - return nil - } -} - -func testAccProjectCheckExists(rn string, project *rundeck.Project) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("project id not set") - } - - client := testAccProvider.Meta().(*rundeck.Client) - gotProject, err := client.GetProject(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting project: %s", err) - } - - *project = *gotProject - - return nil - } -} - -const testAccProjectConfig_basic = ` -resource "rundeck_project" "main" { - name = "terraform-acc-test-basic" - description = "Terraform Acceptance Tests Basic Project" - - resource_model_source { - type = "file" - config = { - format = "resourcexml" - file = "/tmp/terraform-acc-tests.xml" - } - } - - extra_config = { - "foo/bar" = "baz" - } -} -` diff --git a/builtin/providers/rundeck/resource_public_key.go b/builtin/providers/rundeck/resource_public_key.go deleted file mode 100644 index 11ee2a3f9..000000000 --- a/builtin/providers/rundeck/resource_public_key.go +++ /dev/null @@ -1,148 +0,0 @@ -package rundeck - -import ( - "github.com/hashicorp/terraform/helper/schema" - - "github.com/apparentlymart/go-rundeck-api/rundeck" -) - -func resourceRundeckPublicKey() *schema.Resource { - return &schema.Resource{ - Create: CreatePublicKey, - Update: UpdatePublicKey, - Delete: DeletePublicKey, - Exists: PublicKeyExists, - Read: ReadPublicKey, - - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Path to the key within the key store", - ForceNew: true, - }, - - "key_material": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: 
true, - Description: "The public key data to store, in the usual OpenSSH public key file format", - }, - - "url": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "URL at which the key content can be retrieved", - }, - - "delete": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "True if the key should be deleted when the resource is deleted. Defaults to true if key_material is provided in the configuration.", - }, - }, - } -} - -func CreatePublicKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - path := d.Get("path").(string) - keyMaterial := d.Get("key_material").(string) - - if keyMaterial != "" { - err := client.CreatePublicKey(path, keyMaterial) - if err != nil { - return err - } - d.Set("delete", true) - } - - d.SetId(path) - - return ReadPublicKey(d, meta) -} - -func UpdatePublicKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - if d.HasChange("key_material") { - path := d.Get("path").(string) - keyMaterial := d.Get("key_material").(string) - - err := client.ReplacePublicKey(path, keyMaterial) - if err != nil { - return err - } - } - - return ReadPublicKey(d, meta) -} - -func DeletePublicKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - path := d.Id() - - // Since this resource can be used both to create and to read existing - // public keys, we'll only actually delete the key if we remember that - // we created the key in the first place, or if the user explicitly - // opted in to have an existing key deleted. - if d.Get("delete").(bool) { - // The only "delete" call we have is oblivious to key type, but - // that's okay since our Exists implementation makes sure that we - // won't try to delete a key of the wrong type since we'll pretend - // that it's already been deleted. 
- err := client.DeleteKey(path) - if err != nil { - return err - } - } - - d.SetId("") - return nil -} - -func ReadPublicKey(d *schema.ResourceData, meta interface{}) error { - client := meta.(*rundeck.Client) - - path := d.Id() - - key, err := client.GetKeyMeta(path) - if err != nil { - return err - } - - keyMaterial, err := client.GetKeyContent(path) - if err != nil { - return err - } - - d.Set("key_material", keyMaterial) - d.Set("url", key.URL) - - return nil -} - -func PublicKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*rundeck.Client) - - path := d.Id() - - key, err := client.GetKeyMeta(path) - if err != nil { - if _, ok := err.(rundeck.NotFoundError); ok { - err = nil - } - return false, err - } - - if key.KeyType != "public" { - // If the key type isn't public then as far as this resource is - // concerned it doesn't exist. (We'll fail properly when we try to - // create a key where one already exists.) - return false, nil - } - - return true, nil -} diff --git a/builtin/providers/rundeck/resource_public_key_test.go b/builtin/providers/rundeck/resource_public_key_test.go deleted file mode 100644 index c8b9a1865..000000000 --- a/builtin/providers/rundeck/resource_public_key_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package rundeck - -import ( - "fmt" - "strings" - "testing" - - "github.com/apparentlymart/go-rundeck-api/rundeck" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccPublicKey_basic(t *testing.T) { - var key rundeck.KeyMeta - var keyMaterial string - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccPublicKeyCheckDestroy(&key), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccPublicKeyConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccPublicKeyCheckExists("rundeck_public_key.test", &key, &keyMaterial), - func(s *terraform.State) 
error { - if expected := "keys/terraform_acceptance_tests/public_key"; key.Path != expected { - return fmt.Errorf("wrong path; expected %v, got %v", expected, key.Path) - } - if !strings.HasSuffix(key.URL, "/storage/keys/terraform_acceptance_tests/public_key") { - return fmt.Errorf("wrong URL; expected to end with the key path") - } - if expected := "file"; key.ResourceType != expected { - return fmt.Errorf("wrong resource type; expected %v, got %v", expected, key.ResourceType) - } - if expected := "public"; key.KeyType != expected { - return fmt.Errorf("wrong key type; expected %v, got %v", expected, key.KeyType) - } - if !strings.Contains(keyMaterial, "test+public+key+for+terraform") { - return fmt.Errorf("wrong key material") - } - return nil - }, - ), - }, - }, - }) -} - -func testAccPublicKeyCheckDestroy(key *rundeck.KeyMeta) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*rundeck.Client) - _, err := client.GetKeyMeta(key.Path) - if err == nil { - return fmt.Errorf("key still exists") - } - if _, ok := err.(*rundeck.NotFoundError); !ok { - return fmt.Errorf("got something other than NotFoundError (%v) when getting key", err) - } - - return nil - } -} - -func testAccPublicKeyCheckExists(rn string, key *rundeck.KeyMeta, keyMaterial *string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("key id not set") - } - - client := testAccProvider.Meta().(*rundeck.Client) - gotKey, err := client.GetKeyMeta(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting key metadata: %s", err) - } - - *key = *gotKey - - *keyMaterial, err = client.GetKeyContent(rs.Primary.ID) - if err != nil { - return fmt.Errorf("error getting key contents: %s", err) - } - - return nil - } -} - -const testAccPublicKeyConfig_basic = ` -resource 
"rundeck_public_key" "test" { - path = "terraform_acceptance_tests/public_key" - key_material = "ssh-rsa test+public+key+for+terraform nobody@nowhere" -} -` diff --git a/builtin/providers/rundeck/util.go b/builtin/providers/rundeck/util.go deleted file mode 100644 index 97544ba0b..000000000 --- a/builtin/providers/rundeck/util.go +++ /dev/null @@ -1,25 +0,0 @@ -package rundeck - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func validateValueFunc(values []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (we []string, errors []error) { - value := v.(string) - valid := false - for _, role := range values { - if value == role { - valid = true - break - } - } - - if !valid { - errors = append(errors, fmt.Errorf("%s is an invalid value for argument %s", value, k)) - } - return - } -} diff --git a/builtin/providers/scaleway/config.go b/builtin/providers/scaleway/config.go deleted file mode 100644 index d82b36dda..000000000 --- a/builtin/providers/scaleway/config.go +++ /dev/null @@ -1,64 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - "net/http" - "os" - - "github.com/scaleway/scaleway-cli/pkg/api" - "github.com/scaleway/scaleway-cli/pkg/scwversion" -) - -// Config contains scaleway configuration values -type Config struct { - Organization string - APIKey string - Region string -} - -// Client contains scaleway api clients -type Client struct { - scaleway *api.ScalewayAPI -} - -// Client configures and returns a fully initialized Scaleway client -func (c *Config) Client() (*Client, error) { - api, err := api.NewScalewayAPI( - c.Organization, - c.APIKey, - scwversion.UserAgent(), - c.Region, - func(s *api.ScalewayAPI) { - s.Logger = newTerraformLogger() - }, - ) - if err != nil { - return nil, err - } - return &Client{api}, nil -} - -func newTerraformLogger() api.Logger { - return &terraformLogger{} -} - -type terraformLogger struct { -} - -func (l *terraformLogger) LogHTTP(r *http.Request) { - 
log.Printf("[DEBUG] %s %s\n", r.Method, r.URL.Path) -} -func (l *terraformLogger) Fatalf(format string, v ...interface{}) { - log.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) - os.Exit(1) -} -func (l *terraformLogger) Debugf(format string, v ...interface{}) { - log.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) -} -func (l *terraformLogger) Infof(format string, v ...interface{}) { - log.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) -} -func (l *terraformLogger) Warnf(format string, v ...interface{}) { - log.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) -} diff --git a/builtin/providers/scaleway/data_source_scaleway_bootscript.go b/builtin/providers/scaleway/data_source_scaleway_bootscript.go deleted file mode 100644 index cc3b6419c..000000000 --- a/builtin/providers/scaleway/data_source_scaleway_bootscript.go +++ /dev/null @@ -1,123 +0,0 @@ -package scaleway - -import ( - "fmt" - "regexp" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func dataSourceScalewayBootscript() *schema.Resource { - return &schema.Resource{ - Read: dataSourceScalewayBootscriptRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "name_filter": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "architecture": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - // Computed values. 
- "organization": { - Type: schema.TypeString, - Computed: true, - }, - "public": { - Type: schema.TypeBool, - Computed: true, - }, - "boot_cmd_args": { - Type: schema.TypeString, - Computed: true, - }, - "dtb": { - Type: schema.TypeString, - Computed: true, - }, - "initrd": { - Type: schema.TypeString, - Computed: true, - }, - "kernel": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func bootscriptDescriptionAttributes(d *schema.ResourceData, script api.ScalewayBootscript) error { - d.Set("architecture", script.Arch) - d.Set("organization", script.Organization) - d.Set("public", script.Public) - d.Set("boot_cmd_args", script.Bootcmdargs) - d.Set("dtb", script.Dtb) - d.Set("initrd", script.Initrd) - d.Set("kernel", script.Kernel) - d.SetId(script.Identifier) - - return nil -} - -func dataSourceScalewayBootscriptRead(d *schema.ResourceData, meta interface{}) error { - scaleway := meta.(*Client).scaleway - - scripts, err := scaleway.GetBootscripts() - if err != nil { - return err - } - - var isMatch func(api.ScalewayBootscript) bool - - architecture := d.Get("architecture") - if name, ok := d.GetOk("name"); ok { - isMatch = func(s api.ScalewayBootscript) bool { - architectureMatch := true - if architecture != "" { - architectureMatch = architecture == s.Arch - } - return s.Title == name.(string) && architectureMatch - } - } else if nameFilter, ok := d.GetOk("name_filter"); ok { - exp, err := regexp.Compile(nameFilter.(string)) - if err != nil { - return err - } - - isMatch = func(s api.ScalewayBootscript) bool { - nameMatch := exp.MatchString(s.Title) - architectureMatch := true - if architecture != "" { - architectureMatch = architecture == s.Arch - } - return nameMatch && architectureMatch - } - } - - var matches []api.ScalewayBootscript - for _, script := range *scripts { - if isMatch(script) { - matches = append(matches, script) - } - } - - if len(matches) > 1 { - return fmt.Errorf("The query returned more than one result. 
Please refine your query.") - } - if len(matches) == 0 { - return fmt.Errorf("The query returned no result. Please refine your query.") - } - - return bootscriptDescriptionAttributes(d, matches[0]) -} diff --git a/builtin/providers/scaleway/data_source_scaleway_bootscript_test.go b/builtin/providers/scaleway/data_source_scaleway_bootscript_test.go deleted file mode 100644 index 3c0c10696..000000000 --- a/builtin/providers/scaleway/data_source_scaleway_bootscript_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayDataSourceBootscript_Filtered(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayBootscriptFilterConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckBootscriptID("data.scaleway_bootscript.debug"), - resource.TestCheckResourceAttr("data.scaleway_bootscript.debug", "architecture", "arm"), - resource.TestCheckResourceAttr("data.scaleway_bootscript.debug", "public", "true"), - ), - }, - }, - }) -} - -func testAccCheckBootscriptID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find bootscript data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("bootscript data source ID not set") - } - - scaleway := testAccProvider.Meta().(*Client).scaleway - _, err := scaleway.GetBootscript(rs.Primary.ID) - if err != nil { - return err - } - - return nil - } -} - -const testAccCheckScalewayBootscriptFilterConfig = ` -data "scaleway_bootscript" "debug" { - architecture = "arm" - name_filter = "Rescue" -} -` diff --git a/builtin/providers/scaleway/data_source_scaleway_image.go 
b/builtin/providers/scaleway/data_source_scaleway_image.go deleted file mode 100644 index fa79e2c60..000000000 --- a/builtin/providers/scaleway/data_source_scaleway_image.go +++ /dev/null @@ -1,89 +0,0 @@ -package scaleway - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func dataSourceScalewayImage() *schema.Resource { - return &schema.Resource{ - Read: dataSourceScalewayImageRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "name_filter": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "architecture": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // Computed values. - "organization": { - Type: schema.TypeString, - Computed: true, - }, - "public": { - Type: schema.TypeBool, - Computed: true, - }, - "creation_date": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func scalewayImageAttributes(d *schema.ResourceData, img *api.ScalewayImage) error { - d.Set("architecture", img.Arch) - d.Set("organization", img.Organization) - d.Set("public", img.Public) - d.Set("creation_date", img.CreationDate) - d.Set("name", img.Name) - d.SetId(img.Identifier) - - return nil -} - -func dataSourceScalewayImageRead(d *schema.ResourceData, meta interface{}) error { - scaleway := meta.(*Client).scaleway - - var needle string - if name, ok := d.GetOk("name"); ok { - needle = name.(string) - } else if nameFilter, ok := d.GetOk("name_filter"); ok { - needle = nameFilter.(string) - } - - images, err := scaleway.ResolveImage(needle) - if err != nil { - return err - } - images = api.FilterImagesByArch(images, d.Get("architecture").(string)) - images = api.FilterImagesByRegion(images, scaleway.Region) - - if len(images) > 1 { - return fmt.Errorf("The query returned more than one result. 
Please refine your query.") - } - if len(images) == 0 { - return fmt.Errorf("The query returned no result. Please refine your query.") - } - - img, err := scaleway.GetImage(images[0].Identifier) - if err != nil { - return err - } - - return scalewayImageAttributes(d, img) -} diff --git a/builtin/providers/scaleway/data_source_scaleway_image_test.go b/builtin/providers/scaleway/data_source_scaleway_image_test.go deleted file mode 100644 index 41a441289..000000000 --- a/builtin/providers/scaleway/data_source_scaleway_image_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayDataSourceImage_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayImageConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckImageID("data.scaleway_image.ubuntu"), - resource.TestCheckResourceAttr("data.scaleway_image.ubuntu", "architecture", "arm"), - resource.TestCheckResourceAttr("data.scaleway_image.ubuntu", "public", "true"), - resource.TestCheckResourceAttrSet("data.scaleway_image.ubuntu", "organization"), - resource.TestCheckResourceAttrSet("data.scaleway_image.ubuntu", "creation_date"), - ), - }, - }, - }) -} - -func TestAccScalewayDataSourceImage_Filtered(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayImageFilterConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckImageID("data.scaleway_image.ubuntu"), - resource.TestCheckResourceAttr("data.scaleway_image.ubuntu", "name", "Ubuntu Precise (12.04)"), - resource.TestCheckResourceAttr("data.scaleway_image.ubuntu", "architecture", "arm"), - 
resource.TestCheckResourceAttr("data.scaleway_image.ubuntu", "public", "true"), - resource.TestCheckResourceAttrSet("data.scaleway_image.ubuntu", "organization"), - resource.TestCheckResourceAttrSet("data.scaleway_image.ubuntu", "creation_date"), - ), - }, - }, - }) -} - -func testAccCheckImageID(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Can't find image data source: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("image data source ID not set") - } - - scaleway := testAccProvider.Meta().(*Client).scaleway - _, err := scaleway.GetImage(rs.Primary.ID) - - if err != nil { - return err - } - - return nil - } -} - -const testAccCheckScalewayImageConfig = ` -data "scaleway_image" "ubuntu" { - name = "Ubuntu Precise" - architecture = "arm" -} -` - -const testAccCheckScalewayImageFilterConfig = ` -data "scaleway_image" "ubuntu" { - name_filter = "Precise" - architecture = "arm" -} -` diff --git a/builtin/providers/scaleway/helpers.go b/builtin/providers/scaleway/helpers.go deleted file mode 100644 index d9876707a..000000000 --- a/builtin/providers/scaleway/helpers.go +++ /dev/null @@ -1,105 +0,0 @@ -package scaleway - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -// Bool returns a pointer to of the bool value passed in. -func Bool(val bool) *bool { - return &val -} - -// String returns a pointer to of the string value passed in. 
-func String(val string) *string { - return &val -} - -func validateVolumeType(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "l_ssd" { - errors = append(errors, fmt.Errorf("%q must be l_ssd", k)) - } - return -} - -func validateVolumeSize(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 1 || value > 150 { - errors = append(errors, fmt.Errorf("%q be more than 1 and less than 150", k)) - } - return -} - -// deleteRunningServer terminates the server and waits until it is removed. -func deleteRunningServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer) error { - err := scaleway.PostServerAction(server.Identifier, "terminate") - - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - if serr.StatusCode == 404 { - return nil - } - } - - return err - } - - return waitForServerState(scaleway, server.Identifier, "stopped") -} - -// deleteStoppedServer needs to cleanup attached root volumes. 
this is not done -// automatically by Scaleway -func deleteStoppedServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer) error { - if err := scaleway.DeleteServer(server.Identifier); err != nil { - return err - } - - if rootVolume, ok := server.Volumes["0"]; ok { - if err := scaleway.DeleteVolume(rootVolume.Identifier); err != nil { - return err - } - } - return nil -} - -// NOTE copied from github.com/scaleway/scaleway-cli/pkg/api/helpers.go -// the helpers.go file pulls in quite a lot dependencies, and they're just convenience wrappers anyway - -var allStates = []string{"starting", "running", "stopping", "stopped"} - -func waitForServerState(scaleway *api.ScalewayAPI, serverID, targetState string) error { - pending := []string{} - for _, state := range allStates { - if state != targetState { - pending = append(pending, state) - } - } - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{targetState}, - Refresh: func() (interface{}, string, error) { - s, err := scaleway.GetServer(serverID) - - if err == nil { - return 42, s.State, nil - } - - if serr, ok := err.(api.ScalewayAPIError); ok { - if serr.StatusCode == 404 { - return 42, "stopped", nil - } - } - - return 42, s.State, err - }, - Timeout: 60 * time.Minute, - MinTimeout: 5 * time.Second, - Delay: 5 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} diff --git a/builtin/providers/scaleway/import_scaleway_ip_test.go b/builtin/providers/scaleway/import_scaleway_ip_test.go deleted file mode 100644 index 3fac64ad0..000000000 --- a/builtin/providers/scaleway/import_scaleway_ip_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package scaleway - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccScalewayIP_importBasic(t *testing.T) { - resourceName := "scaleway_ip.base" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayIPDestroy, 
- Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayIPConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/scaleway/import_scaleway_security_group_test.go b/builtin/providers/scaleway/import_scaleway_security_group_test.go deleted file mode 100644 index 4d886966e..000000000 --- a/builtin/providers/scaleway/import_scaleway_security_group_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package scaleway - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccScalewaySecurityGroup_importBasic(t *testing.T) { - resourceName := "scaleway_security_group.base" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewaySecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewaySecurityGroupConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/scaleway/import_scaleway_server_test.go b/builtin/providers/scaleway/import_scaleway_server_test.go deleted file mode 100644 index 33f292f29..000000000 --- a/builtin/providers/scaleway/import_scaleway_server_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package scaleway - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccScalewayServer_importBasic(t *testing.T) { - resourceName := "scaleway_server.base" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayServerConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git 
a/builtin/providers/scaleway/import_scaleway_volume_test.go b/builtin/providers/scaleway/import_scaleway_volume_test.go deleted file mode 100644 index 66b646e14..000000000 --- a/builtin/providers/scaleway/import_scaleway_volume_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package scaleway - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccScalewayVolume_importBasic(t *testing.T) { - resourceName := "scaleway_volume.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayVolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayVolumeConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/scaleway/provider.go b/builtin/providers/scaleway/provider.go deleted file mode 100644 index 16069fe21..000000000 --- a/builtin/providers/scaleway/provider.go +++ /dev/null @@ -1,84 +0,0 @@ -package scaleway - -import ( - "sync" - - "github.com/hashicorp/terraform/helper/mutexkv" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var mu = sync.Mutex{} - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "access_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SCALEWAY_ACCESS_KEY", nil), - Deprecated: "Use `token` instead.", - Description: "The API key for Scaleway API operations.", - }, - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "SCALEWAY_TOKEN", - "SCALEWAY_ACCESS_KEY", - }, nil), - Description: "The API key for Scaleway API operations.", - }, - "organization": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("SCALEWAY_ORGANIZATION", nil), - Description: "The Organization ID (a.k.a. 'access key') for Scaleway API operations.", - }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SCALEWAY_REGION", "par1"), - Description: "The Scaleway API region to use.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "scaleway_server": resourceScalewayServer(), - "scaleway_ip": resourceScalewayIP(), - "scaleway_security_group": resourceScalewaySecurityGroup(), - "scaleway_security_group_rule": resourceScalewaySecurityGroupRule(), - "scaleway_volume": resourceScalewayVolume(), - "scaleway_volume_attachment": resourceScalewayVolumeAttachment(), - }, - - DataSourcesMap: map[string]*schema.Resource{ - "scaleway_bootscript": dataSourceScalewayBootscript(), - "scaleway_image": dataSourceScalewayImage(), - }, - - ConfigureFunc: providerConfigure, - } -} - -var scalewayMutexKV = mutexkv.NewMutexKV() - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - apiKey := "" - if v, ok := d.Get("token").(string); ok { - apiKey = v - } else { - if v, ok := d.Get("access_key").(string); ok { - apiKey = v - } - } - - config := Config{ - Organization: d.Get("organization").(string), - APIKey: apiKey, - Region: 
d.Get("region").(string), - } - - return config.Client() -} diff --git a/builtin/providers/scaleway/provider_test.go b/builtin/providers/scaleway/provider_test.go deleted file mode 100644 index 6575ef8fd..000000000 --- a/builtin/providers/scaleway/provider_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package scaleway - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "scaleway": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("SCALEWAY_ORGANIZATION"); v == "" { - t.Fatal("SCALEWAY_ORGANIZATION must be set for acceptance tests") - } - tokenFromAccessKey := os.Getenv("SCALEWAY_ACCESS_KEY") - token := os.Getenv("SCALEWAY_TOKEN") - if token == "" && tokenFromAccessKey == "" { - t.Fatal("SCALEWAY_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/scaleway/resource_scaleway_ip.go b/builtin/providers/scaleway/resource_scaleway_ip.go deleted file mode 100644 index ef9aa9bd9..000000000 --- a/builtin/providers/scaleway/resource_scaleway_ip.go +++ /dev/null @@ -1,104 +0,0 @@ -package scaleway - -import ( - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func resourceScalewayIP() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewayIPCreate, - Read: resourceScalewayIPRead, - Update: resourceScalewayIPUpdate, - Delete: resourceScalewayIPDelete, - Importer: &schema.ResourceImporter{ - State: 
schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "server": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceScalewayIPCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - resp, err := scaleway.NewIP() - mu.Unlock() - if err != nil { - return err - } - - d.SetId(resp.IP.ID) - return resourceScalewayIPUpdate(d, m) -} - -func resourceScalewayIPRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - log.Printf("[DEBUG] Reading IP\n") - - resp, err := scaleway.GetIP(d.Id()) - if err != nil { - log.Printf("[DEBUG] Error reading ip: %q\n", err) - if serr, ok := err.(api.ScalewayAPIError); ok { - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - return err - } - - d.Set("ip", resp.IP.Address) - if resp.IP.Server != nil { - d.Set("server", resp.IP.Server.Identifier) - } - return nil -} - -func resourceScalewayIPUpdate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - if d.HasChange("server") { - if d.Get("server").(string) != "" { - log.Printf("[DEBUG] Attaching IP %q to server %q\n", d.Id(), d.Get("server").(string)) - if err := scaleway.AttachIP(d.Id(), d.Get("server").(string)); err != nil { - return err - } - } else { - log.Printf("[DEBUG] Detaching IP %q\n", d.Id()) - return scaleway.DetachIP(d.Id()) - } - } - - return resourceScalewayIPRead(d, m) -} - -func resourceScalewayIPDelete(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - err := scaleway.DeleteIP(d.Id()) - if err != nil { - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/scaleway/resource_scaleway_ip_test.go b/builtin/providers/scaleway/resource_scaleway_ip_test.go deleted file mode 100644 index 
f3381cedf..000000000 --- a/builtin/providers/scaleway/resource_scaleway_ip_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayIP_Count(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayIPConfig_Count, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayIPExists("scaleway_ip.base.0"), - testAccCheckScalewayIPExists("scaleway_ip.base.1"), - ), - }, - }, - }) -} - -func TestAccScalewayIP_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayIPDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayIPConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayIPExists("scaleway_ip.base"), - ), - }, - resource.TestStep{ - Config: testAccCheckScalewayIPAttachConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayIPExists("scaleway_ip.base"), - testAccCheckScalewayIPAttachment("scaleway_ip.base", func(serverID string) bool { - return serverID != "" - }, "attachment failed"), - ), - }, - resource.TestStep{ - Config: testAccCheckScalewayIPConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayIPExists("scaleway_ip.base"), - testAccCheckScalewayIPAttachment("scaleway_ip.base", func(serverID string) bool { - return serverID == "" - }, "detachment failed"), - ), - }, - }, - }) -} - -func testAccCheckScalewayIPDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - _, err := 
client.GetIP(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("IP still exists") - } - } - - return nil -} - -func testAccCheckScalewayIPAttributes() resource.TestCheckFunc { - return func(s *terraform.State) error { - return nil - } -} - -func testAccCheckScalewayIPExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - ip, err := client.GetIP(rs.Primary.ID) - - if err != nil { - return err - } - - if ip.IP.ID != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - return nil - } -} - -func testAccCheckScalewayIPAttachment(n string, check func(string) bool, msg string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No IP ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - ip, err := client.GetIP(rs.Primary.ID) - - if err != nil { - return err - } - - var serverID = "" - if ip.IP.Server != nil { - serverID = ip.IP.Server.Identifier - } - if !check(serverID) { - return fmt.Errorf("IP check failed: %q", msg) - } - - return nil - } -} - -var testAccCheckScalewayIPConfig = ` -resource "scaleway_ip" "base" { -} -` - -var testAccCheckScalewayIPConfig_Count = ` -resource "scaleway_ip" "base" { - count = 2 -} -` - -var testAccCheckScalewayIPAttachConfig = fmt.Sprintf(` -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - state = "stopped" -} - -resource "scaleway_ip" "base" { - server = "${scaleway_server.base.id}" -} -`, armImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_security_group.go 
b/builtin/providers/scaleway/resource_scaleway_security_group.go deleted file mode 100644 index 0f850d8ac..000000000 --- a/builtin/providers/scaleway/resource_scaleway_security_group.go +++ /dev/null @@ -1,140 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func resourceScalewaySecurityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewaySecurityGroupCreate, - Read: resourceScalewaySecurityGroupRead, - Update: resourceScalewaySecurityGroupUpdate, - Delete: resourceScalewaySecurityGroupDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "description": { - Type: schema.TypeString, - Required: true, - }, - }, - } -} - -func resourceScalewaySecurityGroupCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - req := api.ScalewayNewSecurityGroup{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Organization: scaleway.Organization, - } - - err := scaleway.PostSecurityGroup(req) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error creating security group: %q\n", serr.APIMessage) - } - - return err - } - - resp, err := scaleway.GetSecurityGroups() - if err != nil { - return err - } - - for _, group := range resp.SecurityGroups { - if group.Name == req.Name { - d.SetId(group.ID) - break - } - } - - if d.Id() == "" { - return fmt.Errorf("Failed to find created security group.") - } - - return resourceScalewaySecurityGroupRead(d, m) -} - -func resourceScalewaySecurityGroupRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - resp, err := scaleway.GetASecurityGroup(d.Id()) - - if err != nil { - if serr, ok := 
err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error reading security group: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - - d.Set("name", resp.SecurityGroups.Name) - d.Set("description", resp.SecurityGroups.Description) - - return nil -} - -func resourceScalewaySecurityGroupUpdate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - var req = api.ScalewayUpdateSecurityGroup{ - Organization: scaleway.Organization, - Name: d.Get("name").(string), - Description: d.Get("description").(string), - } - - if err := scaleway.PutSecurityGroup(req, d.Id()); err != nil { - log.Printf("[DEBUG] Error reading security group: %q\n", err) - - return err - } - - return resourceScalewaySecurityGroupRead(d, m) -} - -func resourceScalewaySecurityGroupDelete(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - err := scaleway.DeleteSecurityGroup(d.Id()) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go deleted file mode 100644 index 240bc62a8..000000000 --- a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go +++ /dev/null @@ -1,188 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func resourceScalewaySecurityGroupRule() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewaySecurityGroupRuleCreate, - Read: resourceScalewaySecurityGroupRuleRead, - Update: 
resourceScalewaySecurityGroupRuleUpdate, - Delete: resourceScalewaySecurityGroupRuleDelete, - Schema: map[string]*schema.Schema{ - "security_group": { - Type: schema.TypeString, - Required: true, - }, - "action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "accept" && value != "drop" { - errors = append(errors, fmt.Errorf("%q must be one of 'accept', 'drop'", k)) - } - return - }, - }, - "direction": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "inbound" && value != "outbound" { - errors = append(errors, fmt.Errorf("%q must be one of 'inbound', 'outbound'", k)) - } - return - }, - }, - "ip_range": { - Type: schema.TypeString, - Required: true, - }, - "protocol": { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "ICMP" && value != "TCP" && value != "UDP" { - errors = append(errors, fmt.Errorf("%q must be one of 'ICMP', 'TCP', 'UDP", k)) - } - return - }, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } -} - -func resourceScalewaySecurityGroupRuleCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - req := api.ScalewayNewSecurityGroupRule{ - Action: d.Get("action").(string), - Direction: d.Get("direction").(string), - IPRange: d.Get("ip_range").(string), - Protocol: d.Get("protocol").(string), - DestPortFrom: d.Get("port").(int), - } - - err := scaleway.PostSecurityGroupRule(d.Get("security_group").(string), req) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error creating Security Group Rule: %q\n", serr.APIMessage) - } - - return err - } - - resp, err := 
scaleway.GetSecurityGroupRules(d.Get("security_group").(string)) - if err != nil { - return err - } - - matches := func(rule api.ScalewaySecurityGroupRule) bool { - return rule.Action == req.Action && - rule.Direction == req.Direction && - rule.IPRange == req.IPRange && - rule.Protocol == req.Protocol && - rule.DestPortFrom == req.DestPortFrom - } - - for _, rule := range resp.Rules { - if matches(rule) { - d.SetId(rule.ID) - break - } - } - - if d.Id() == "" { - return fmt.Errorf("Failed to find created security group rule") - } - - return resourceScalewaySecurityGroupRuleRead(d, m) -} - -func resourceScalewaySecurityGroupRuleRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - rule, err := scaleway.GetASecurityGroupRule(d.Get("security_group").(string), d.Id()) - - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - - d.Set("action", rule.Rules.Action) - d.Set("direction", rule.Rules.Direction) - d.Set("ip_range", rule.Rules.IPRange) - d.Set("protocol", rule.Rules.Protocol) - d.Set("port", rule.Rules.DestPortFrom) - - return nil -} - -func resourceScalewaySecurityGroupRuleUpdate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - var req = api.ScalewayNewSecurityGroupRule{ - Action: d.Get("action").(string), - Direction: d.Get("direction").(string), - IPRange: d.Get("ip_range").(string), - Protocol: d.Get("protocol").(string), - DestPortFrom: d.Get("port").(int), - } - - if err := scaleway.PutSecurityGroupRule(req, d.Get("security_group").(string), d.Id()); err != nil { - log.Printf("[DEBUG] error updating Security Group Rule: %q", err) - - return err - } - - return resourceScalewaySecurityGroupRuleRead(d, m) -} - -func resourceScalewaySecurityGroupRuleDelete(d 
*schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - err := scaleway.DeleteSecurityGroupRule(d.Get("security_group").(string), d.Id()) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go deleted file mode 100644 index 8dd2647ec..000000000 --- a/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func TestAccScalewaySecurityGroupRule_Basic(t *testing.T) { - var group api.ScalewaySecurityGroups - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewaySecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewaySecurityGroupRuleConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewaySecurityGroupsExists("scaleway_security_group.base", &group), - resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "action", "accept"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "direction", "inbound"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "ip_range", "0.0.0.0/0"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "protocol", "TCP"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "port", "80"), - 
resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "action", "accept"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "direction", "inbound"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "ip_range", "0.0.0.0/0"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "protocol", "TCP"), - resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "port", "443"), - testAccCheckScalewaySecurityGroupRuleExists("scaleway_security_group_rule.http", &group), - testAccCheckScalewaySecurityGroupRuleAttributes("scaleway_security_group_rule.http", &group), - ), - }, - }, - }) -} - -func testAccCheckScalewaySecurityGroupsExists(n string, group *api.ScalewaySecurityGroups) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Security Group Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group is set") - } - - conn := testAccProvider.Meta().(*Client).scaleway - resp, err := conn.GetASecurityGroup(rs.Primary.ID) - - if err != nil { - return err - } - - if resp.SecurityGroups.ID == rs.Primary.ID { - *group = resp.SecurityGroups - return nil - } - - return fmt.Errorf("Security Group not found") - } -} - -func testAccCheckScalewaySecurityGroupRuleDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - groups, err := client.GetSecurityGroups() - if err != nil { - return err - } - - all_err := true - for _, group := range groups.SecurityGroups { - _, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) - all_err = all_err && err != nil - } - - if !all_err { - return fmt.Errorf("Security Group still exists") - } - } - - return nil -} - -func testAccCheckScalewaySecurityGroupRuleAttributes(n string, group *api.ScalewaySecurityGroups) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - rule, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) - if err != nil { - return err - } - - if rule.Rules.Action != "accept" { - return fmt.Errorf("Wrong rule action") - } - if rule.Rules.Direction != "inbound" { - return fmt.Errorf("wrong rule direction") - } - if rule.Rules.IPRange != "0.0.0.0/0" { - return fmt.Errorf("wrong rule IP Range") - } - if rule.Rules.Protocol != "TCP" { - return fmt.Errorf("wrong rule protocol") - } - if rule.Rules.DestPortFrom != 80 { - return fmt.Errorf("Wrong port") - } - - return nil - } -} - -func testAccCheckScalewaySecurityGroupRuleExists(n string, group *api.ScalewaySecurityGroups) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Security Group Rule Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group Rule ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - rule, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) - - if err != nil { - return err - } - - if rule.Rules.ID != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - return nil - } -} - -var testAccCheckScalewaySecurityGroupRuleConfig = ` -resource "scaleway_security_group" "base" { - name = "public" - description = "public gateway" -} - -resource "scaleway_security_group_rule" "http" { - security_group = "${scaleway_security_group.base.id}" - - action = "accept" - direction = "inbound" - ip_range = "0.0.0.0/0" - protocol = "TCP" - port = 80 -} - -resource "scaleway_security_group_rule" "https" { - security_group = "${scaleway_security_group.base.id}" - - action = "accept" - direction = "inbound" - ip_range = "0.0.0.0/0" - protocol = "TCP" - port = 443 -} -` diff 
--git a/builtin/providers/scaleway/resource_scaleway_security_group_test.go b/builtin/providers/scaleway/resource_scaleway_security_group_test.go deleted file mode 100644 index 22d351305..000000000 --- a/builtin/providers/scaleway/resource_scaleway_security_group_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewaySecurityGroup_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewaySecurityGroupDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewaySecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewaySecurityGroupExists("scaleway_security_group.base"), - testAccCheckScalewaySecurityGroupAttributes("scaleway_security_group.base"), - resource.TestCheckResourceAttr("scaleway_security_group.base", "name", "public"), - resource.TestCheckResourceAttr("scaleway_security_group.base", "description", "public gateway"), - ), - }, - }, - }) -} - -func testAccCheckScalewaySecurityGroupDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - _, err := client.GetASecurityGroup(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Security Group still exists") - } - } - - return nil -} - -func testAccCheckScalewaySecurityGroupAttributes(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - group, err := client.GetASecurityGroup(rs.Primary.ID) - if err != nil { - return err - } - - if group.SecurityGroups.Name != "public" { - return 
fmt.Errorf("Security Group has wrong name") - } - if group.SecurityGroups.Description != "public gateway" { - return fmt.Errorf("Security Group has wrong description") - } - - return nil - } -} - -func testAccCheckScalewaySecurityGroupExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - group, err := client.GetASecurityGroup(rs.Primary.ID) - - if err != nil { - return err - } - - if group.SecurityGroups.ID != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - return nil - } -} - -var testAccCheckScalewaySecurityGroupConfig = ` -resource "scaleway_security_group" "base" { - name = "public" - description = "public gateway" -} -` diff --git a/builtin/providers/scaleway/resource_scaleway_server.go b/builtin/providers/scaleway/resource_scaleway_server.go deleted file mode 100644 index e91dd75e4..000000000 --- a/builtin/providers/scaleway/resource_scaleway_server.go +++ /dev/null @@ -1,330 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func resourceScalewayServer() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewayServerCreate, - Read: resourceScalewayServerRead, - Update: resourceScalewayServerUpdate, - Delete: resourceScalewayServerDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "image": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "bootscript": { - Type: schema.TypeString, - Optional: true, - }, - "tags": { - 
Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Optional: true, - }, - "security_group": { - Type: schema.TypeString, - Optional: true, - }, - "volume": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "size_in_gb": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateVolumeSize, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateVolumeType, - }, - "volume_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "enable_ipv6": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "dynamic_ip_required": { - Type: schema.TypeBool, - Optional: true, - }, - "private_ip": { - Type: schema.TypeString, - Computed: true, - }, - "public_ip": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "public_ipv6": { - Type: schema.TypeString, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "state_detail": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceScalewayServerCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - image := d.Get("image").(string) - var server = api.ScalewayServerDefinition{ - Name: d.Get("name").(string), - Image: String(image), - Organization: scaleway.Organization, - EnableIPV6: d.Get("enable_ipv6").(bool), - SecurityGroup: d.Get("security_group").(string), - } - - server.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) - server.CommercialType = d.Get("type").(string) - - if bootscript, ok := d.GetOk("bootscript"); ok { - server.Bootscript = String(bootscript.(string)) - } - - if vs, ok := d.GetOk("volume"); ok { - server.Volumes = make(map[string]string) - - volumes := vs.([]interface{}) - for i, v := range volumes { - volume := v.(map[string]interface{}) - - volumeID, err 
:= scaleway.PostVolume(api.ScalewayVolumeDefinition{ - Size: uint64(volume["size_in_gb"].(int)) * gb, - Type: volume["type"].(string), - Name: fmt.Sprintf("%s-%d", server.Name, volume["size_in_gb"].(int)), - }) - if err != nil { - return err - } - volume["volume_id"] = volumeID - volumes[i] = volume - server.Volumes[fmt.Sprintf("%d", i+1)] = volumeID - } - d.Set("volume", volumes) - } - - if raw, ok := d.GetOk("tags"); ok { - for _, tag := range raw.([]interface{}) { - server.Tags = append(server.Tags, tag.(string)) - } - } - - id, err := scaleway.PostServer(server) - if err != nil { - return err - } - - d.SetId(id) - if d.Get("state").(string) != "stopped" { - err = scaleway.PostServerAction(id, "poweron") - if err != nil { - return err - } - - err = waitForServerState(scaleway, id, "running") - - if v, ok := d.GetOk("public_ip"); ok { - if ips, err := scaleway.GetIPS(); err != nil { - return err - } else { - for _, ip := range ips.IPS { - if ip.Address == v.(string) { - log.Printf("[DEBUG] Attaching IP %q to server %q\n", ip.ID, d.Id()) - if err := scaleway.AttachIP(ip.ID, d.Id()); err != nil { - return err - } - break - } - } - } - } - } - - if err != nil { - return err - } - - return resourceScalewayServerRead(d, m) -} - -func resourceScalewayServerRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - server, err := scaleway.GetServer(d.Id()) - - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error reading server: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - - d.Set("name", server.Name) - d.Set("image", server.Image.Identifier) - d.Set("type", server.CommercialType) - d.Set("enable_ipv6", server.EnableIPV6) - d.Set("private_ip", server.PrivateIP) - d.Set("public_ip", server.PublicAddress.IP) - - if server.EnableIPV6 { - d.Set("public_ipv6", server.IPV6.Address) - } - - d.Set("state", server.State) - d.Set("state_detail", 
server.StateDetail) - d.Set("tags", server.Tags) - - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": server.PublicAddress.IP, - }) - - return nil -} - -func resourceScalewayServerUpdate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - var req api.ScalewayServerPatchDefinition - if d.HasChange("name") { - name := d.Get("name").(string) - req.Name = &name - } - - if d.HasChange("tags") { - if raw, ok := d.GetOk("tags"); ok { - var tags []string - for _, tag := range raw.([]interface{}) { - tags = append(tags, tag.(string)) - } - req.Tags = &tags - } - } - - if d.HasChange("enable_ipv6") { - req.EnableIPV6 = Bool(d.Get("enable_ipv6").(bool)) - } - - if d.HasChange("dynamic_ip_required") { - req.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) - } - - if d.HasChange("security_group") { - req.SecurityGroup = &api.ScalewaySecurityGroup{ - Identifier: d.Get("security_group").(string), - } - } - - if err := scaleway.PatchServer(d.Id(), req); err != nil { - return fmt.Errorf("Failed patching scaleway server: %q", err) - } - - if d.HasChange("public_ip") { - ips, err := scaleway.GetIPS() - if err != nil { - return err - } - if v, ok := d.GetOk("public_ip"); ok { - for _, ip := range ips.IPS { - if ip.Address == v.(string) { - log.Printf("[DEBUG] Attaching IP %q to server %q\n", ip.ID, d.Id()) - if err := scaleway.AttachIP(ip.ID, d.Id()); err != nil { - return err - } - break - } - } - } else { - for _, ip := range ips.IPS { - if ip.Server != nil && ip.Server.Identifier == d.Id() { - log.Printf("[DEBUG] Detaching IP %q to server %q\n", ip.ID, d.Id()) - if err := scaleway.DetachIP(ip.ID); err != nil { - return err - } - break - } - } - } - } - - return resourceScalewayServerRead(d, m) -} - -func resourceScalewayServerDelete(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - s, err := scaleway.GetServer(d.Id()) - if 
err != nil { - return err - } - - if s.State == "stopped" { - return deleteStoppedServer(scaleway, s) - } - - err = deleteRunningServer(scaleway, s) - - if err == nil { - d.SetId("") - } - - return err -} diff --git a/builtin/providers/scaleway/resource_scaleway_server_test.go b/builtin/providers/scaleway/resource_scaleway_server_test.go deleted file mode 100644 index 468ec32c0..000000000 --- a/builtin/providers/scaleway/resource_scaleway_server_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayServer_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayServerConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerExists("scaleway_server.base"), - testAccCheckScalewayServerAttributes("scaleway_server.base"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "type", "C1"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "name", "test"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "tags.0", "terraform-test"), - ), - }, - resource.TestStep{ - Config: testAccCheckScalewayServerConfig_IPAttachment, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerIPAttachmentAttributes("scaleway_ip.base", "scaleway_server.base"), - ), - }, - resource.TestStep{ - Config: testAccCheckScalewayServerConfig_IPDetachment, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerIPDetachmentAttributes("scaleway_server.base"), - ), - }, - }, - }) -} - -func TestAccScalewayServer_ExistingIP(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckScalewayServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayServerConfig_IPAttachment, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerExists("scaleway_server.base"), - testAccCheckScalewayServerIPAttachmentAttributes("scaleway_ip.base", "scaleway_server.base"), - ), - }, - }, - }) -} - -func TestAccScalewayServer_Volumes(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayServerVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerExists("scaleway_server.base"), - testAccCheckScalewayServerAttributes("scaleway_server.base"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "type", "C1"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "volume.#", "2"), - resource.TestCheckResourceAttrSet( - "scaleway_server.base", "volume.0.volume_id"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "volume.0.type", "l_ssd"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "volume.0.size_in_gb", "20"), - resource.TestCheckResourceAttrSet( - "scaleway_server.base", "volume.1.volume_id"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "volume.1.type", "l_ssd"), - resource.TestCheckResourceAttr( - "scaleway_server.base", "volume.1.size_in_gb", "30"), - ), - }, - }, - }) -} - -func TestAccScalewayServer_SecurityGroup(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayServerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayServerConfig_SecurityGroup, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerExists("scaleway_server.base"), - 
testAccCheckScalewayServerSecurityGroup("scaleway_server.base", "blue"), - ), - }, - resource.TestStep{ - Config: testAccCheckScalewayServerConfig_SecurityGroup_Update, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayServerExists("scaleway_server.base"), - testAccCheckScalewayServerSecurityGroup("scaleway_server.base", "red"), - ), - }, - }, - }) -} - -func testAccCheckScalewayServerDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - _, err := client.GetServer(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Server still exists") - } - } - - return nil -} - -func testAccCheckScalewayServerIPAttachmentAttributes(ipName, serverName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - ip, ok := s.RootModule().Resources[ipName] - if !ok { - return fmt.Errorf("Unknown scaleway_ip resource: %s", ipName) - } - - server, ok := s.RootModule().Resources[serverName] - if !ok { - return fmt.Errorf("Unknown scaleway_server resource: %s", serverName) - } - - client := testAccProvider.Meta().(*Client).scaleway - - res, err := client.GetIP(ip.Primary.ID) - if err != nil { - return err - } - if res.IP.Server == nil || res.IP.Server.Identifier != server.Primary.ID { - return fmt.Errorf("IP %q is not attached to server %q", ip.Primary.ID, server.Primary.ID) - } - - return nil - } -} - -func testAccCheckScalewayServerIPDetachmentAttributes(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - server, err := client.GetServer(rs.Primary.ID) - if err != nil { - return err - } - - if server.PublicAddress.Identifier != "" { - return fmt.Errorf("Expected server to have no public IP but got %q", server.PublicAddress.Identifier) - } - 
return nil - } -} - -func testAccCheckScalewayServerAttributes(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - server, err := client.GetServer(rs.Primary.ID) - - if err != nil { - return err - } - - if server.Name != "test" { - return fmt.Errorf("Server has wrong name") - } - if server.Image.Identifier != armImageIdentifier { - return fmt.Errorf("Wrong server image") - } - if server.CommercialType != "C1" { - return fmt.Errorf("Wrong server type") - } - - return nil - } -} - -func testAccCheckScalewayServerSecurityGroup(n, securityGroupName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - server, err := client.GetServer(rs.Primary.ID) - - if err != nil { - return err - } - - if server.SecurityGroup.Name != securityGroupName { - return fmt.Errorf("Server has wrong security_group") - } - - return nil - } -} - -func testAccCheckScalewayServerExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Server ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - server, err := client.GetServer(rs.Primary.ID) - - if err != nil { - return err - } - - if server.Identifier != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - return nil - } -} - -var armImageIdentifier = "5faef9cd-ea9b-4a63-9171-9e26bec03dbc" - -var testAccCheckScalewayServerConfig = fmt.Sprintf(` -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] -}`, 
armImageIdentifier) - -var testAccCheckScalewayServerConfig_IPAttachment = fmt.Sprintf(` -resource "scaleway_ip" "base" {} - -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] - public_ip = "${scaleway_ip.base.ip}" -}`, armImageIdentifier) - -var testAccCheckScalewayServerConfig_IPDetachment = fmt.Sprintf(` -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] -}`, armImageIdentifier) - -var testAccCheckScalewayServerVolumeConfig = fmt.Sprintf(` -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] - - volume { - size_in_gb = 20 - type = "l_ssd" - } - - volume { - size_in_gb = 30 - type = "l_ssd" - } -}`, armImageIdentifier) - -var testAccCheckScalewayServerConfig_SecurityGroup = fmt.Sprintf(` -resource "scaleway_security_group" "blue" { - name = "blue" - description = "blue" -} - -resource "scaleway_security_group" "red" { - name = "red" - description = "red" -} - -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] - security_group = "${scaleway_security_group.blue.id}" -}`, armImageIdentifier) - -var testAccCheckScalewayServerConfig_SecurityGroup_Update = fmt.Sprintf(` -resource "scaleway_security_group" "blue" { - name = "blue" - description = "blue" -} - -resource "scaleway_security_group" "red" { - name = "red" - description = "red" -} - -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - tags = [ "terraform-test" ] - security_group = "${scaleway_security_group.red.id}" -}`, armImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_volume.go b/builtin/providers/scaleway/resource_scaleway_volume.go deleted file mode 100644 index c9cac0f25..000000000 --- 
a/builtin/providers/scaleway/resource_scaleway_volume.go +++ /dev/null @@ -1,130 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -const gb uint64 = 1000 * 1000 * 1000 - -func resourceScalewayVolume() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewayVolumeCreate, - Read: resourceScalewayVolumeRead, - Update: resourceScalewayVolumeUpdate, - Delete: resourceScalewayVolumeDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "size_in_gb": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateVolumeSize, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateVolumeType, - }, - "server": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceScalewayVolumeCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - size := uint64(d.Get("size_in_gb").(int)) * gb - req := api.ScalewayVolumeDefinition{ - Name: d.Get("name").(string), - Size: size, - Type: d.Get("type").(string), - Organization: scaleway.Organization, - } - volumeID, err := scaleway.PostVolume(req) - if err != nil { - return fmt.Errorf("Error Creating volume: %q", err) - } - d.SetId(volumeID) - return resourceScalewayVolumeRead(d, m) -} - -func resourceScalewayVolumeRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - volume, err := scaleway.GetVolume(d.Id()) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error reading volume: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - - return err - } - d.Set("name", volume.Name) - d.Set("size_in_gb", uint64(volume.Size)/gb) - d.Set("type", 
volume.VolumeType) - d.Set("server", "") - if volume.Server != nil { - d.Set("server", volume.Server.Identifier) - } - return nil -} - -func resourceScalewayVolumeUpdate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - var req api.ScalewayVolumePutDefinition - if d.HasChange("name") { - req.Name = String(d.Get("name").(string)) - } - - if d.HasChange("size_in_gb") { - size := uint64(d.Get("size_in_gb").(int)) * gb - req.Size = &size - } - - scaleway.PutVolume(d.Id(), req) - return resourceScalewayVolumeRead(d, m) -} - -func resourceScalewayVolumeDelete(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - - mu.Lock() - defer mu.Unlock() - - err := scaleway.DeleteVolume(d.Id()) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - return err - } - d.SetId("") - return nil -} diff --git a/builtin/providers/scaleway/resource_scaleway_volume_attachment.go b/builtin/providers/scaleway/resource_scaleway_volume_attachment.go deleted file mode 100644 index 74cc4eebd..000000000 --- a/builtin/providers/scaleway/resource_scaleway_volume_attachment.go +++ /dev/null @@ -1,262 +0,0 @@ -package scaleway - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/scaleway/scaleway-cli/pkg/api" -) - -func resourceScalewayVolumeAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceScalewayVolumeAttachmentCreate, - Read: resourceScalewayVolumeAttachmentRead, - Delete: resourceScalewayVolumeAttachmentDelete, - Schema: map[string]*schema.Schema{ - "server": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "volume": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -var errVolumeAlreadyAttached = fmt.Errorf("Scaleway volume already 
attached") - -func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - scaleway.ClearCache() - - vol, err := scaleway.GetVolume(d.Get("volume").(string)) - if err != nil { - return err - } - if vol.Server != nil { - log.Printf("[DEBUG] Scaleway volume %q already attached to %q.", vol.Identifier, vol.Server.Identifier) - return errVolumeAlreadyAttached - } - - // guard against server shutdown/ startup race conditiond - serverID := d.Get("server").(string) - scalewayMutexKV.Lock(serverID) - defer scalewayMutexKV.Unlock(serverID) - - server, err := scaleway.GetServer(serverID) - if err != nil { - fmt.Printf("Failed getting server: %q", err) - return err - } - - var startServerAgain = false - // volumes can only be modified when the server is powered off - if server.State != "stopped" { - startServerAgain = true - - if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil { - return err - } - } - if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil { - return err - } - - volumes := make(map[string]api.ScalewayVolume) - for i, volume := range server.Volumes { - volumes[i] = volume - } - - volumes[fmt.Sprintf("%d", len(volumes)+1)] = *vol - - // the API request requires most volume attributes to be unset to succeed - for k, v := range volumes { - v.Size = 0 - v.CreationDate = "" - v.Organization = "" - v.ModificationDate = "" - v.VolumeType = "" - v.Server = nil - v.ExportURI = "" - - volumes[k] = v - } - - if err := resource.Retry(5*time.Minute, func() *resource.RetryError { - scaleway.ClearCache() - - var req = api.ScalewayServerPatchDefinition{ - Volumes: &volumes, - } - mu.Lock() - err := scaleway.PatchServer(serverID, req) - mu.Unlock() - - if err == nil { - return nil - } - - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error patching server: %q\n", serr.APIMessage) - - if serr.StatusCode == 400 { - return 
resource.RetryableError(fmt.Errorf("Waiting for server update to succeed: %q", serr.APIMessage)) - } - } - - return resource.NonRetryableError(err) - }); err != nil { - return err - } - - if startServerAgain { - if err := scaleway.PostServerAction(serverID, "poweron"); err != nil { - return err - } - if err := waitForServerState(scaleway, serverID, "running"); err != nil { - return err - } - } - - d.SetId(fmt.Sprintf("scaleway-server:%s/volume/%s", serverID, d.Get("volume").(string))) - - return resourceScalewayVolumeAttachmentRead(d, m) -} - -func resourceScalewayVolumeAttachmentRead(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - scaleway.ClearCache() - - server, err := scaleway.GetServer(d.Get("server").(string)) - if err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error reading server: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - return err - } - - if _, err := scaleway.GetVolume(d.Get("volume").(string)); err != nil { - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error reading volume: %q\n", serr.APIMessage) - - if serr.StatusCode == 404 { - d.SetId("") - return nil - } - } - return err - } - - for _, volume := range server.Volumes { - if volume.Identifier == d.Get("volume").(string) { - return nil - } - } - - log.Printf("[DEBUG] Volume %q not attached to server %q\n", d.Get("volume").(string), d.Get("server").(string)) - d.SetId("") - return nil -} - -func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{}) error { - scaleway := m.(*Client).scaleway - scaleway.ClearCache() - - mu.Lock() - defer mu.Unlock() - - var startServerAgain = false - - // guard against server shutdown/ startup race conditiond - serverID := d.Get("server").(string) - scalewayMutexKV.Lock(serverID) - defer scalewayMutexKV.Unlock(serverID) - - server, err := scaleway.GetServer(serverID) - if err != nil { - return err - 
} - - // volumes can only be modified when the server is powered off - if server.State != "stopped" { - startServerAgain = true - if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil { - return err - } - } - if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil { - return err - } - - volumes := make(map[string]api.ScalewayVolume) - for _, volume := range server.Volumes { - if volume.Identifier != d.Get("volume").(string) { - volumes[fmt.Sprintf("%d", len(volumes))] = volume - } - } - - // the API request requires most volume attributes to be unset to succeed - for k, v := range volumes { - v.Size = 0 - v.CreationDate = "" - v.Organization = "" - v.ModificationDate = "" - v.VolumeType = "" - v.Server = nil - v.ExportURI = "" - - volumes[k] = v - } - - if err := resource.Retry(5*time.Minute, func() *resource.RetryError { - scaleway.ClearCache() - - var req = api.ScalewayServerPatchDefinition{ - Volumes: &volumes, - } - mu.Lock() - err := scaleway.PatchServer(serverID, req) - mu.Unlock() - - if err == nil { - return nil - } - - if serr, ok := err.(api.ScalewayAPIError); ok { - log.Printf("[DEBUG] Error patching server: %q\n", serr.APIMessage) - - if serr.StatusCode == 400 { - return resource.RetryableError(fmt.Errorf("Waiting for server update to succeed: %q", serr.APIMessage)) - } - } - - return resource.NonRetryableError(err) - }); err != nil { - return err - } - - if startServerAgain { - if err := scaleway.PostServerAction(serverID, "poweron"); err != nil { - return err - } - if err := waitForServerState(scaleway, serverID, "running"); err != nil { - return err - } - } - - d.SetId("") - - return nil -} diff --git a/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go b/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go deleted file mode 100644 index 9ea4060e7..000000000 --- a/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go +++ /dev/null @@ -1,91 +0,0 @@ 
-package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayVolumeAttachment_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayVolumeAttachmentDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayVolumeAttachmentConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayVolumeAttachmentExists("scaleway_volume_attachment.test"), - ), - }, - }, - }) -} - -func testAccCheckScalewayVolumeAttachmentDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - s, err := client.GetServer(rs.Primary.Attributes["server"]) - if err != nil { - fmt.Printf("Failed getting server: %q", err) - return err - } - - for _, volume := range s.Volumes { - if volume.Identifier == rs.Primary.Attributes["volume"] { - return fmt.Errorf("Attachment still exists") - } - } - } - - return nil -} - -func testAccCheckScalewayVolumeAttachmentExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - rs, _ := s.RootModule().Resources[n] - - server, err := client.GetServer(rs.Primary.Attributes["server"]) - if err != nil { - fmt.Printf("Failed getting server: %q", err) - return err - } - - for _, volume := range server.Volumes { - if volume.Identifier == rs.Primary.Attributes["volume"] { - return nil - } - } - - return fmt.Errorf("Attachment does not exist") - } -} - -var testAccCheckScalewayVolumeAttachmentConfig = fmt.Sprintf(` -resource "scaleway_server" "base" { - name = "test" - # ubuntu 14.04 - image = "%s" - type = "C1" - # state = "stopped" -} - -resource "scaleway_volume" "test" { - name = "test" - size_in_gb 
= 5 - type = "l_ssd" -} - -resource "scaleway_volume_attachment" "test" { - server = "${scaleway_server.base.id}" - volume = "${scaleway_volume.test.id}" -}`, armImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_volume_test.go b/builtin/providers/scaleway/resource_scaleway_volume_test.go deleted file mode 100644 index 45678a358..000000000 --- a/builtin/providers/scaleway/resource_scaleway_volume_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package scaleway - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccScalewayVolume_Basic(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckScalewayVolumeDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckScalewayVolumeConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckScalewayVolumeExists("scaleway_volume.test"), - testAccCheckScalewayVolumeAttributes("scaleway_volume.test"), - ), - }, - }, - }) -} - -func testAccCheckScalewayVolumeDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).scaleway - - for _, rs := range s.RootModule().Resources { - if rs.Type != "scaleway" { - continue - } - - _, err := client.GetVolume(rs.Primary.ID) - - if err == nil { - return fmt.Errorf("Volume still exists") - } - } - - return nil -} - -func testAccCheckScalewayVolumeAttributes(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Unknown resource: %s", n) - } - - client := testAccProvider.Meta().(*Client).scaleway - volume, err := client.GetVolume(rs.Primary.ID) - - if err != nil { - return err - } - - if volume.Name != "test" { - return fmt.Errorf("volume has wrong name: %q", volume.Name) - } - if volume.Size != 2e+09 { - return fmt.Errorf("volume has wrong size: 
%d", volume.Size) - } - if volume.VolumeType != "l_ssd" { - return fmt.Errorf("volume has volume type: %q", volume.VolumeType) - } - - return nil - } -} - -func testAccCheckScalewayVolumeExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Volume ID is set") - } - - client := testAccProvider.Meta().(*Client).scaleway - volume, err := client.GetVolume(rs.Primary.ID) - - if err != nil { - return err - } - - if volume.Identifier != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - return nil - } -} - -var testAccCheckScalewayVolumeConfig = ` -resource "scaleway_volume" "test" { - name = "test" - size_in_gb = 2 - type = "l_ssd" -} -` diff --git a/builtin/providers/softlayer/config.go b/builtin/providers/softlayer/config.go deleted file mode 100644 index 8fb9d77ba..000000000 --- a/builtin/providers/softlayer/config.go +++ /dev/null @@ -1,39 +0,0 @@ -package softlayer - -import ( - "log" - - slclient "github.com/maximilien/softlayer-go/client" - softlayer "github.com/maximilien/softlayer-go/softlayer" -) - -type Config struct { - Username string - ApiKey string -} - -type Client struct { - virtualGuestService softlayer.SoftLayer_Virtual_Guest_Service - sshKeyService softlayer.SoftLayer_Security_Ssh_Key_Service - productOrderService softlayer.SoftLayer_Product_Order_Service -} - -func (c *Config) Client() (*Client, error) { - slc := slclient.NewSoftLayerClient(c.Username, c.ApiKey) - virtualGuestService, err := slc.GetSoftLayer_Virtual_Guest_Service() - - if err != nil { - return nil, err - } - - sshKeyService, err := slc.GetSoftLayer_Security_Ssh_Key_Service() - - client := &Client{ - virtualGuestService: virtualGuestService, - sshKeyService: sshKeyService, - } - - log.Println("[INFO] Created SoftLayer client") - - return client, nil -} diff --git 
a/builtin/providers/softlayer/provider.go b/builtin/providers/softlayer/provider.go deleted file mode 100644 index ceb62425c..000000000 --- a/builtin/providers/softlayer/provider.go +++ /dev/null @@ -1,41 +0,0 @@ -package softlayer - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("SOFTLAYER_USERNAME", nil), - Description: "The user name for SoftLayer API operations.", - }, - "api_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("SOFTLAYER_API_KEY", nil), - Description: "The API key for SoftLayer API operations.", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "softlayer_virtual_guest": resourceSoftLayerVirtualGuest(), - "softlayer_ssh_key": resourceSoftLayerSSHKey(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Username: d.Get("username").(string), - ApiKey: d.Get("api_key").(string), - } - - return config.Client() -} diff --git a/builtin/providers/softlayer/provider_test.go b/builtin/providers/softlayer/provider_test.go deleted file mode 100644 index 585365193..000000000 --- a/builtin/providers/softlayer/provider_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package softlayer - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "softlayer": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := 
Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("SOFTLAYER_USERNAME"); v == "" { - t.Fatal("SOFTLAYER_USERNAME must be set for acceptance tests") - } - if v := os.Getenv("SOFTLAYER_API_KEY"); v == "" { - t.Fatal("SOFTLAYER_API_KEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/softlayer/resource_softlayer_ssh_key.go b/builtin/providers/softlayer/resource_softlayer_ssh_key.go deleted file mode 100644 index d03fb7f3b..000000000 --- a/builtin/providers/softlayer/resource_softlayer_ssh_key.go +++ /dev/null @@ -1,159 +0,0 @@ -package softlayer - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - datatypes "github.com/maximilien/softlayer-go/data_types" -) - -func resourceSoftLayerSSHKey() *schema.Resource { - return &schema.Resource{ - Create: resourceSoftLayerSSHKeyCreate, - Read: resourceSoftLayerSSHKeyRead, - Update: resourceSoftLayerSSHKeyUpdate, - Delete: resourceSoftLayerSSHKeyDelete, - Exists: resourceSoftLayerSSHKeyExists, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "public_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "notes": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: nil, - }, - }, - } -} - -func resourceSoftLayerSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).sshKeyService - - // Build up our creation options - opts := datatypes.SoftLayer_Security_Ssh_Key{ - Label: d.Get("name").(string), - Key: d.Get("public_key").(string), - } - - if 
notes, ok := d.GetOk("notes"); ok { - opts.Notes = notes.(string) - } - - res, err := client.CreateObject(opts) - if err != nil { - return fmt.Errorf("Error creating SSH Key: %s", err) - } - - d.SetId(strconv.Itoa(res.Id)) - log.Printf("[INFO] SSH Key: %d", res.Id) - - return resourceSoftLayerSSHKeyRead(d, meta) -} - -func resourceSoftLayerSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).sshKeyService - - keyId, _ := strconv.Atoi(d.Id()) - - key, err := client.GetObject(keyId) - if err != nil { - // If the key is somehow already destroyed, mark as - // succesfully gone - if strings.Contains(err.Error(), "404 Not Found") { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving SSH key: %s", err) - } - - d.Set("id", key.Id) - d.Set("name", key.Label) - d.Set("public_key", strings.TrimSpace(key.Key)) - d.Set("fingerprint", key.Fingerprint) - d.Set("notes", key.Notes) - - return nil -} - -func resourceSoftLayerSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).sshKeyService - - keyId, _ := strconv.Atoi(d.Id()) - - key, err := client.GetObject(keyId) - if err != nil { - return fmt.Errorf("Error retrieving SSH key: %s", err) - } - - if d.HasChange("name") { - key.Label = d.Get("name").(string) - } - - if d.HasChange("notes") { - key.Notes = d.Get("notes").(string) - } - - _, err = client.EditObject(keyId, key) - if err != nil { - return fmt.Errorf("Error editing SSH key: %s", err) - } - return nil -} - -func resourceSoftLayerSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).sshKeyService - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Error deleting SSH Key: %s", err) - } - - log.Printf("[INFO] Deleting SSH key: %d", id) - _, err = client.DeleteObject(id) - if err != nil { - return fmt.Errorf("Error deleting SSH key: %s", err) - } - - d.SetId("") - return nil -} - -func resourceSoftLayerSSHKeyExists(d 
*schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*Client).sshKeyService - - if client == nil { - return false, fmt.Errorf("The client was nil.") - } - - keyId, err := strconv.Atoi(d.Id()) - if err != nil { - return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) - } - - result, err := client.GetObject(keyId) - return result.Id == keyId && err == nil, nil -} diff --git a/builtin/providers/softlayer/resource_softlayer_ssh_key_test.go b/builtin/providers/softlayer/resource_softlayer_ssh_key_test.go deleted file mode 100644 index 70f7344fe..000000000 --- a/builtin/providers/softlayer/resource_softlayer_ssh_key_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package softlayer - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - datatypes "github.com/maximilien/softlayer-go/data_types" -) - -func TestAccSoftLayerSSHKey_Basic(t *testing.T) { - var key datatypes.SoftLayer_Security_Ssh_Key - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSoftLayerSSHKeyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckSoftLayerSSHKeyConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSoftLayerSSHKeyExists("softlayer_ssh_key.testacc_foobar", &key), - testAccCheckSoftLayerSSHKeyAttributes(&key), - resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "name", "testacc_foobar"), - resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "public_key", testAccValidPublicKey), - resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "notes", "first_note"), - ), - }, - - resource.TestStep{ - Config: testAccCheckSoftLayerSSHKeyConfig_updated, - Check: resource.ComposeTestCheckFunc( - testAccCheckSoftLayerSSHKeyExists("softlayer_ssh_key.testacc_foobar", &key), - 
resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "name", "changed_name"), - resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "public_key", testAccValidPublicKey), - resource.TestCheckResourceAttr( - "softlayer_ssh_key.testacc_foobar", "notes", "changed_note"), - ), - }, - }, - }) -} - -func testAccCheckSoftLayerSSHKeyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).sshKeyService - - for _, rs := range s.RootModule().Resources { - if rs.Type != "softlayer_ssh_key" { - continue - } - - keyId, _ := strconv.Atoi(rs.Primary.ID) - - // Try to find the key - _, err := client.GetObject(keyId) - - if err == nil { - return fmt.Errorf("SSH key still exists") - } - } - - return nil -} - -func testAccCheckSoftLayerSSHKeyAttributes(key *datatypes.SoftLayer_Security_Ssh_Key) resource.TestCheckFunc { - return func(s *terraform.State) error { - - if key.Label != "testacc_foobar" { - return fmt.Errorf("Bad name: %s", key.Label) - } - - return nil - } -} - -func testAccCheckSoftLayerSSHKeyExists(n string, key *datatypes.SoftLayer_Security_Ssh_Key) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - keyId, _ := strconv.Atoi(rs.Primary.ID) - - client := testAccProvider.Meta().(*Client).sshKeyService - foundKey, err := client.GetObject(keyId) - - if err != nil { - return err - } - - if strconv.Itoa(int(foundKey.Id)) != rs.Primary.ID { - return fmt.Errorf("Record not found") - } - - *key = foundKey - - return nil - } -} - -var testAccCheckSoftLayerSSHKeyConfig_basic = fmt.Sprintf(` -resource "softlayer_ssh_key" "testacc_foobar" { - name = "testacc_foobar" - notes = "first_note" - public_key = "%s" -}`, testAccValidPublicKey) - -var testAccCheckSoftLayerSSHKeyConfig_updated = fmt.Sprintf(` -resource "softlayer_ssh_key" 
"testacc_foobar" { - name = "changed_name" - notes = "changed_note" - public_key = "%s" -}`, testAccValidPublicKey) - -var testAccValidPublicKey = strings.TrimSpace(` -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR -`) diff --git a/builtin/providers/softlayer/resource_softlayer_virtual_guest.go b/builtin/providers/softlayer/resource_softlayer_virtual_guest.go deleted file mode 100644 index 54d4f9ba4..000000000 --- a/builtin/providers/softlayer/resource_softlayer_virtual_guest.go +++ /dev/null @@ -1,545 +0,0 @@ -package softlayer - -import ( - "fmt" - "log" - "strconv" - "time" - - "encoding/base64" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - datatypes "github.com/maximilien/softlayer-go/data_types" - "github.com/maximilien/softlayer-go/softlayer" - "math" - "strings" -) - -func resourceSoftLayerVirtualGuest() *schema.Resource { - return &schema.Resource{ - Create: resourceSoftLayerVirtualGuestCreate, - Read: resourceSoftLayerVirtualGuestRead, - Update: resourceSoftLayerVirtualGuestUpdate, - Delete: resourceSoftLayerVirtualGuestDelete, - Exists: resourceSoftLayerVirtualGuestExists, - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "domain": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "image": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "hourly_billing": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: true, - }, - - "private_network_only": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - 
ForceNew: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cpu": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - // TODO: This fields for now requires recreation, because currently for some reason SoftLayer resets "dedicated_acct_host_only" - // TODO: flag to false, while upgrading CPUs. That problem is reported to SoftLayer team. "ForceNew" can be set back - // TODO: to false as soon as it is fixed at their side. Also corresponding test for virtual guest upgrade will be uncommented. - ForceNew: true, - }, - - "ram": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - memoryInMB := float64(v.(int)) - - // Validate memory to match gigs format - remaining := math.Mod(memoryInMB, 1024) - if remaining > 0 { - suggested := math.Ceil(memoryInMB/1024) * 1024 - errors = append(errors, fmt.Errorf( - "Invalid 'ram' value %d megabytes, must be a multiple of 1024 (e.g. 
use %d)", int(memoryInMB), int(suggested))) - } - - return - }, - }, - - "dedicated_acct_host_only": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "frontend_vlan_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "backend_vlan_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "disks": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - - "public_network_speed": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1000, - }, - - "ipv4_address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ipv4_address_private": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "ssh_keys": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "local_disk": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - ForceNew: true, - }, - - "post_install_script_uri": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: nil, - ForceNew: true, - }, - - "block_device_template_group_gid": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func getNameForBlockDevice(i int) string { - // skip 1, which is reserved for the swap disk. - // so we get 0, 2, 3, 4, 5 ... 
- if i == 0 { - return "0" - } else { - return strconv.Itoa(i + 1) - } -} - -func getBlockDevices(d *schema.ResourceData) []datatypes.BlockDevice { - numBlocks := d.Get("disks.#").(int) - if numBlocks == 0 { - return nil - } else { - blocks := make([]datatypes.BlockDevice, 0, numBlocks) - for i := 0; i < numBlocks; i++ { - blockRef := fmt.Sprintf("disks.%d", i) - name := getNameForBlockDevice(i) - capacity := d.Get(blockRef).(int) - block := datatypes.BlockDevice{ - Device: name, - DiskImage: datatypes.DiskImage{ - Capacity: capacity, - }, - } - blocks = append(blocks, block) - } - return blocks - } -} - -func resourceSoftLayerVirtualGuestCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).virtualGuestService - if client == nil { - return fmt.Errorf("The client was nil.") - } - - dc := datatypes.Datacenter{ - Name: d.Get("region").(string), - } - - networkComponent := datatypes.NetworkComponents{ - MaxSpeed: d.Get("public_network_speed").(int), - } - - privateNetworkOnly := d.Get("private_network_only").(bool) - opts := datatypes.SoftLayer_Virtual_Guest_Template{ - Hostname: d.Get("name").(string), - Domain: d.Get("domain").(string), - HourlyBillingFlag: d.Get("hourly_billing").(bool), - PrivateNetworkOnlyFlag: privateNetworkOnly, - Datacenter: dc, - StartCpus: d.Get("cpu").(int), - MaxMemory: d.Get("ram").(int), - NetworkComponents: []datatypes.NetworkComponents{networkComponent}, - BlockDevices: getBlockDevices(d), - LocalDiskFlag: d.Get("local_disk").(bool), - PostInstallScriptUri: d.Get("post_install_script_uri").(string), - } - - if dedicatedAcctHostOnly, ok := d.GetOk("dedicated_acct_host_only"); ok { - opts.DedicatedAccountHostOnlyFlag = dedicatedAcctHostOnly.(bool) - } - - if globalIdentifier, ok := d.GetOk("block_device_template_group_gid"); ok { - opts.BlockDeviceTemplateGroup = &datatypes.BlockDeviceTemplateGroup{ - GlobalIdentifier: globalIdentifier.(string), - } - } - - if operatingSystemReferenceCode, ok := 
d.GetOk("image"); ok { - opts.OperatingSystemReferenceCode = operatingSystemReferenceCode.(string) - } - - // Apply frontend VLAN if provided - if param, ok := d.GetOk("frontend_vlan_id"); ok { - frontendVlanId, err := strconv.Atoi(param.(string)) - if err != nil { - return fmt.Errorf("Not a valid frontend ID, must be an integer: %s", err) - } - opts.PrimaryNetworkComponent = &datatypes.PrimaryNetworkComponent{ - NetworkVlan: datatypes.NetworkVlan{Id: (frontendVlanId)}, - } - } - - // Apply backend VLAN if provided - if param, ok := d.GetOk("backend_vlan_id"); ok { - backendVlanId, err := strconv.Atoi(param.(string)) - if err != nil { - return fmt.Errorf("Not a valid backend ID, must be an integer: %s", err) - } - opts.PrimaryBackendNetworkComponent = &datatypes.PrimaryBackendNetworkComponent{ - NetworkVlan: datatypes.NetworkVlan{Id: (backendVlanId)}, - } - } - - if userData, ok := d.GetOk("user_data"); ok { - opts.UserData = []datatypes.UserData{ - datatypes.UserData{ - Value: userData.(string), - }, - } - } - - // Get configured ssh_keys - ssh_keys := d.Get("ssh_keys.#").(int) - if ssh_keys > 0 { - opts.SshKeys = make([]datatypes.SshKey, 0, ssh_keys) - for i := 0; i < ssh_keys; i++ { - key := fmt.Sprintf("ssh_keys.%d", i) - id := d.Get(key).(int) - sshKey := datatypes.SshKey{ - Id: id, - } - opts.SshKeys = append(opts.SshKeys, sshKey) - } - } - - log.Printf("[INFO] Creating virtual machine") - - guest, err := client.CreateObject(opts) - - if err != nil { - return fmt.Errorf("Error creating virtual guest: %s", err) - } - - d.SetId(fmt.Sprintf("%d", guest.Id)) - - log.Printf("[INFO] Virtual Machine ID: %s", d.Id()) - - // wait for machine availability - _, err = WaitForNoActiveTransactions(d, meta) - - if err != nil { - return fmt.Errorf( - "Error waiting for virtual machine (%s) to become ready: %s", d.Id(), err) - } - - if !privateNetworkOnly { - _, err = WaitForPublicIpAvailable(d, meta) - if err != nil { - return fmt.Errorf( - "Error waiting for virtual machine 
(%s) public ip to become ready: %s", d.Id(), err) - } - } - - return resourceSoftLayerVirtualGuestRead(d, meta) -} - -func resourceSoftLayerVirtualGuestRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).virtualGuestService - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Not a valid ID, must be an integer: %s", err) - } - result, err := client.GetObject(id) - if err != nil { - return fmt.Errorf("Error retrieving virtual guest: %s", err) - } - - d.Set("name", result.Hostname) - d.Set("domain", result.Domain) - if result.Datacenter != nil { - d.Set("region", result.Datacenter.Name) - } - d.Set("public_network_speed", result.NetworkComponents[0].MaxSpeed) - d.Set("cpu", result.StartCpus) - d.Set("ram", result.MaxMemory) - d.Set("dedicated_acct_host_only", result.DedicatedAccountHostOnlyFlag) - d.Set("has_public_ip", result.PrimaryIpAddress != "") - d.Set("ipv4_address", result.PrimaryIpAddress) - d.Set("ipv4_address_private", result.PrimaryBackendIpAddress) - d.Set("private_network_only", result.PrivateNetworkOnlyFlag) - d.Set("hourly_billing", result.HourlyBillingFlag) - d.Set("local_disk", result.LocalDiskFlag) - d.Set("frontend_vlan_id", result.PrimaryNetworkComponent.NetworkVlan.Id) - d.Set("backend_vlan_id", result.PrimaryBackendNetworkComponent.NetworkVlan.Id) - - userData := result.UserData - if userData != nil && len(userData) > 0 { - data, err := base64.StdEncoding.DecodeString(userData[0].Value) - if err != nil { - log.Printf("Can't base64 decode user data %s. 
error: %s", userData, err) - d.Set("user_data", userData) - } else { - d.Set("user_data", string(data)) - } - } - - return nil -} - -func resourceSoftLayerVirtualGuestUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).virtualGuestService - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Not a valid ID, must be an integer: %s", err) - } - result, err := client.GetObject(id) - if err != nil { - return fmt.Errorf("Error retrieving virtual guest: %s", err) - } - - // Update "name" and "domain" fields if present and changed - // Those are the only fields, which could be updated - if d.HasChange("name") || d.HasChange("domain") { - result.Hostname = d.Get("name").(string) - result.Domain = d.Get("domain").(string) - - _, err = client.EditObject(id, result) - - if err != nil { - return fmt.Errorf("Couldn't update virtual guest: %s", err) - } - } - - // Set user data if provided and not empty - if d.HasChange("user_data") { - client.SetMetadata(id, d.Get("user_data").(string)) - } - - // Upgrade "cpu", "ram" and "nic_speed" if provided and changed - upgradeOptions := softlayer.UpgradeOptions{} - if d.HasChange("cpu") { - upgradeOptions.Cpus = d.Get("cpu").(int) - } - if d.HasChange("ram") { - memoryInMB := float64(d.Get("ram").(int)) - - // Convert memory to GB, as softlayer only allows to upgrade RAM in Gigs - // Must be already validated at this step - upgradeOptions.MemoryInGB = int(memoryInMB / 1024) - } - if d.HasChange("public_network_speed") { - upgradeOptions.NicSpeed = d.Get("public_network_speed").(int) - } - - started, err := client.UpgradeObject(id, &upgradeOptions) - if err != nil { - return fmt.Errorf("Couldn't upgrade virtual guest: %s", err) - } - - if started { - // Wait for softlayer to start upgrading... 
- _, err = WaitForUpgradeTransactionsToAppear(d, meta) - - // Wait for upgrade transactions to finish - _, err = WaitForNoActiveTransactions(d, meta) - } - - return err -} - -func resourceSoftLayerVirtualGuestDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*Client).virtualGuestService - id, err := strconv.Atoi(d.Id()) - if err != nil { - return fmt.Errorf("Not a valid ID, must be an integer: %s", err) - } - - _, err = WaitForNoActiveTransactions(d, meta) - - if err != nil { - return fmt.Errorf("Error deleting virtual guest, couldn't wait for zero active transactions: %s", err) - } - - _, err = client.DeleteObject(id) - - if err != nil { - return fmt.Errorf("Error deleting virtual guest: %s", err) - } - - return nil -} - -func WaitForUpgradeTransactionsToAppear(d *schema.ResourceData, meta interface{}) (interface{}, error) { - - log.Printf("Waiting for server (%s) to have upgrade transactions", d.Id()) - - id, err := strconv.Atoi(d.Id()) - if err != nil { - return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id()) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending_upgrade"}, - Target: []string{"upgrade_started"}, - Refresh: func() (interface{}, string, error) { - client := meta.(*Client).virtualGuestService - transactions, err := client.GetActiveTransactions(id) - if err != nil { - return nil, "", fmt.Errorf("Couldn't fetch active transactions: %s", err) - } - for _, transaction := range transactions { - if strings.Contains(transaction.TransactionStatus.Name, "UPGRADE") { - return transactions, "upgrade_started", nil - } - } - return transactions, "pending_upgrade", nil - }, - Timeout: 5 * time.Minute, - Delay: 5 * time.Second, - MinTimeout: 3 * time.Second, - } - - return stateConf.WaitForState() -} - -func WaitForPublicIpAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { - log.Printf("Waiting for server (%s) to get a public IP", d.Id()) - - stateConf := 
&resource.StateChangeConf{ - Pending: []string{"", "unavailable"}, - Target: []string{"available"}, - Refresh: func() (interface{}, string, error) { - fmt.Println("Refreshing server state...") - client := meta.(*Client).virtualGuestService - id, err := strconv.Atoi(d.Id()) - if err != nil { - return nil, "", fmt.Errorf("Not a valid ID, must be an integer: %s", err) - } - result, err := client.GetObject(id) - if err != nil { - return nil, "", fmt.Errorf("Error retrieving virtual guest: %s", err) - } - if result.PrimaryIpAddress == "" { - return result, "unavailable", nil - } else { - return result, "available", nil - } - }, - Timeout: 30 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - return stateConf.WaitForState() -} - -func WaitForNoActiveTransactions(d *schema.ResourceData, meta interface{}) (interface{}, error) { - log.Printf("Waiting for server (%s) to have zero active transactions", d.Id()) - id, err := strconv.Atoi(d.Id()) - if err != nil { - return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id()) - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{"", "active"}, - Target: []string{"idle"}, - Refresh: func() (interface{}, string, error) { - client := meta.(*Client).virtualGuestService - transactions, err := client.GetActiveTransactions(id) - if err != nil { - return nil, "", fmt.Errorf("Couldn't get active transactions: %s", err) - } - if len(transactions) == 0 { - return transactions, "idle", nil - } else { - return transactions, "active", nil - } - }, - Timeout: 10 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - return stateConf.WaitForState() -} - -func resourceSoftLayerVirtualGuestExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*Client).virtualGuestService - - if client == nil { - return false, fmt.Errorf("The client was nil.") - } - - guestId, err := strconv.Atoi(d.Id()) - if err != nil { - return false, fmt.Errorf("Not a 
valid ID, must be an integer: %s", err) - } - - result, err := client.GetObject(guestId) - return result.Id == guestId && err == nil, nil -} diff --git a/builtin/providers/softlayer/resource_softlayer_virtual_guest_test.go b/builtin/providers/softlayer/resource_softlayer_virtual_guest_test.go deleted file mode 100644 index 43c87e718..000000000 --- a/builtin/providers/softlayer/resource_softlayer_virtual_guest_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package softlayer - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - datatypes "github.com/maximilien/softlayer-go/data_types" -) - -func TestAccSoftLayerVirtualGuest_Basic(t *testing.T) { - var guest datatypes.SoftLayer_Virtual_Guest - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSoftLayerVirtualGuestDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckSoftLayerVirtualGuestConfig_basic, - Destroy: false, - Check: resource.ComposeTestCheckFunc( - testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-1", &guest), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "name", "terraform-test"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "domain", "bar.example.com"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "region", "ams01"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "public_network_speed", "10"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "hourly_billing", "true"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "private_network_only", "false"), - resource.TestCheckResourceAttr( - 
"softlayer_virtual_guest.terraform-acceptance-test-1", "cpu", "1"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "ram", "1024"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "disks.0", "25"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "disks.1", "10"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "disks.2", "20"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "user_data", "{\"value\":\"newvalue\"}"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "local_disk", "false"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "dedicated_acct_host_only", "true"), - - // TODO: As agreed, will be enabled when VLAN support is implemented: https://github.com/TheWeatherCompany/softlayer-go/issues/3 - // resource.TestCheckResourceAttr( - // "softlayer_virtual_guest.terraform-acceptance-test-1", "frontend_vlan_id", "1085155"), - // resource.TestCheckResourceAttr( - // "softlayer_virtual_guest.terraform-acceptance-test-1", "backend_vlan_id", "1085157"), - ), - }, - - resource.TestStep{ - Config: testAccCheckSoftLayerVirtualGuestConfig_userDataUpdate, - Destroy: false, - Check: resource.ComposeTestCheckFunc( - testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-1", &guest), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "user_data", "updatedData"), - ), - }, - - resource.TestStep{ - Config: testAccCheckSoftLayerVirtualGuestConfig_upgradeMemoryNetworkSpeed, - Check: resource.ComposeTestCheckFunc( - testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-1", &guest), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", 
"ram", "2048"), - resource.TestCheckResourceAttr( - "softlayer_virtual_guest.terraform-acceptance-test-1", "public_network_speed", "100"), - ), - }, - - // TODO: currently CPU upgrade test is disabled, due to unexpected behavior of field "dedicated_acct_host_only". - // TODO: For some reason it is reset by SoftLayer to "false". Daniel Bright reported corresponding issue to SoftLayer team. - // resource.TestStep{ - // Config: testAccCheckSoftLayerVirtualGuestConfig_vmUpgradeCPUs, - // Check: resource.ComposeTestCheckFunc( - // testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-1", &guest), - // resource.TestCheckResourceAttr( - // "softlayer_virtual_guest.terraform-acceptance-test-1", "cpu", "2"), - // ), - // }, - - }, - }) -} - -func TestAccSoftLayerVirtualGuest_BlockDeviceTemplateGroup(t *testing.T) { - var guest datatypes.SoftLayer_Virtual_Guest - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSoftLayerVirtualGuestDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckSoftLayerVirtualGuestConfig_blockDeviceTemplateGroup, - Check: resource.ComposeTestCheckFunc( - // block_device_template_group_gid value is hardcoded. If it's valid then virtual guest will be created well - testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-BDTGroup", &guest), - ), - }, - }, - }) -} - -func TestAccSoftLayerVirtualGuest_postInstallScriptUri(t *testing.T) { - var guest datatypes.SoftLayer_Virtual_Guest - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSoftLayerVirtualGuestDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckSoftLayerVirtualGuestConfig_postInstallScriptUri, - Check: resource.ComposeTestCheckFunc( - // block_device_template_group_gid value is hardcoded. 
If it's valid then virtual guest will be created well - testAccCheckSoftLayerVirtualGuestExists("softlayer_virtual_guest.terraform-acceptance-test-pISU", &guest), - ), - }, - }, - }) -} - -func testAccCheckSoftLayerVirtualGuestDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*Client).virtualGuestService - - for _, rs := range s.RootModule().Resources { - if rs.Type != "softlayer_virtual_guest" { - continue - } - - guestId, _ := strconv.Atoi(rs.Primary.ID) - - // Try to find the guest - _, err := client.GetObject(guestId) - - // Wait - - if err != nil && !strings.Contains(err.Error(), "404") { - return fmt.Errorf( - "Error waiting for virtual guest (%s) to be destroyed: %s", - rs.Primary.ID, err) - } - } - - return nil -} - -func testAccCheckSoftLayerVirtualGuestExists(n string, guest *datatypes.SoftLayer_Virtual_Guest) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No virtual guest ID is set") - } - - id, err := strconv.Atoi(rs.Primary.ID) - - if err != nil { - return err - } - - client := testAccProvider.Meta().(*Client).virtualGuestService - retrieveVirtGuest, err := client.GetObject(id) - - if err != nil { - return err - } - - fmt.Printf("The ID is %d", id) - - if retrieveVirtGuest.Id != id { - return fmt.Errorf("Virtual guest not found") - } - - *guest = retrieveVirtGuest - - return nil - } -} - -const testAccCheckSoftLayerVirtualGuestConfig_basic = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-1" { - name = "terraform-test" - domain = "bar.example.com" - image = "DEBIAN_7_64" - region = "ams01" - public_network_speed = 10 - hourly_billing = true - private_network_only = false - cpu = 1 - ram = 1024 - disks = [25, 10, 20] - user_data = "{\"value\":\"newvalue\"}" - dedicated_acct_host_only = true - local_disk = false -} -` - -const 
testAccCheckSoftLayerVirtualGuestConfig_userDataUpdate = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-1" { - name = "terraform-test" - domain = "bar.example.com" - image = "DEBIAN_7_64" - region = "ams01" - public_network_speed = 10 - hourly_billing = true - cpu = 1 - ram = 1024 - disks = [25, 10, 20] - user_data = "updatedData" - dedicated_acct_host_only = true - local_disk = false -} -` - -const testAccCheckSoftLayerVirtualGuestConfig_upgradeMemoryNetworkSpeed = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-1" { - name = "terraform-test" - domain = "bar.example.com" - image = "DEBIAN_7_64" - region = "ams01" - public_network_speed = 100 - hourly_billing = true - cpu = 1 - ram = 2048 - disks = [25, 10, 20] - user_data = "updatedData" - dedicated_acct_host_only = true - local_disk = false -} -` - -const testAccCheckSoftLayerVirtualGuestConfig_vmUpgradeCPUs = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-1" { - name = "terraform-test" - domain = "bar.example.com" - image = "DEBIAN_7_64" - region = "ams01" - public_network_speed = 100 - hourly_billing = true - cpu = 2 - ram = 2048 - disks = [25, 10, 20] - user_data = "updatedData" - dedicated_acct_host_only = true - local_disk = false -} -` - -const testAccCheckSoftLayerVirtualGuestConfig_postInstallScriptUri = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-pISU" { - name = "terraform-test-pISU" - domain = "bar.example.com" - image = "DEBIAN_7_64" - region = "ams01" - public_network_speed = 10 - hourly_billing = true - private_network_only = false - cpu = 1 - ram = 1024 - disks = [25, 10, 20] - user_data = "{\"value\":\"newvalue\"}" - dedicated_acct_host_only = true - local_disk = false - post_install_script_uri = "https://www.google.com" -} -` - -const testAccCheckSoftLayerVirtualGuestConfig_blockDeviceTemplateGroup = ` -resource "softlayer_virtual_guest" "terraform-acceptance-test-BDTGroup" { - name = 
"terraform-test-blockDeviceTemplateGroup" - domain = "bar.example.com" - region = "ams01" - public_network_speed = 10 - hourly_billing = false - cpu = 1 - ram = 1024 - local_disk = false - block_device_template_group_gid = "ac2b413c-9893-4178-8e62-a24cbe2864db" -} -` diff --git a/builtin/providers/spotinst/config.go b/builtin/providers/spotinst/config.go deleted file mode 100755 index 0d597326f..000000000 --- a/builtin/providers/spotinst/config.go +++ /dev/null @@ -1,54 +0,0 @@ -package spotinst - -import ( - "fmt" - "log" - - "github.com/spotinst/spotinst-sdk-go/spotinst" -) - -type Config struct { - Email string - Password string - ClientID string - ClientSecret string - Token string -} - -// Validate returns an error in case of invalid configuration. -func (c *Config) Validate() error { - msg := "%s\n\nNo valid credentials found for Spotinst Provider.\nPlease see https://www.terraform.io/docs/providers/spotinst/index.html\nfor more information on providing credentials for Spotinst Provider." - - if c.Password != "" && c.Token != "" { - err := "ERR_CONFLICT: Both a password and a token were set, only one is required" - return fmt.Errorf(msg, err) - } - - if c.Password != "" && (c.Email == "" || c.ClientID == "" || c.ClientSecret == "") { - err := "ERR_MISSING: A password was set without email, client_id or client_secret" - return fmt.Errorf(msg, err) - } - - if c.Password == "" && c.Token == "" { - err := "ERR_MISSING: A token is required if not using password" - return fmt.Errorf(msg, err) - } - - return nil -} - -// Client returns a new client for accessing Spotinst. -func (c *Config) Client() (*spotinst.Client, error) { - var clientOpts []spotinst.ClientOptionFunc - if c.Token != "" { - clientOpts = append(clientOpts, spotinst.SetToken(c.Token)) - } else { - clientOpts = append(clientOpts, spotinst.SetCredentials(c.Email, c.Password, c.ClientID, c.ClientSecret)) - } - client, err := spotinst.NewClient(clientOpts...) 
- if err != nil { - return nil, fmt.Errorf("Error setting up client: %s", err) - } - log.Printf("[INFO] Spotinst client configured") - return client, nil -} diff --git a/builtin/providers/spotinst/provider.go b/builtin/providers/spotinst/provider.go deleted file mode 100755 index a97ebe6ce..000000000 --- a/builtin/providers/spotinst/provider.go +++ /dev/null @@ -1,70 +0,0 @@ -package spotinst - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SPOTINST_EMAIL", ""), - Description: "Spotinst Email", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SPOTINST_PASSWORD", ""), - Description: "Spotinst Password", - }, - - "client_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SPOTINST_CLIENT_ID", ""), - Description: "Spotinst OAuth Client ID", - }, - - "client_secret": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SPOTINST_CLIENT_SECRET", ""), - Description: "Spotinst OAuth Client Secret", - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("SPOTINST_TOKEN", ""), - Description: "Spotinst Personal API Access Token", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "spotinst_aws_group": resourceSpotinstAwsGroup(), - "spotinst_subscription": resourceSpotinstSubscription(), - "spotinst_healthcheck": resourceSpotinstHealthCheck(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Email: d.Get("email").(string), 
- Password: d.Get("password").(string), - ClientID: d.Get("client_id").(string), - ClientSecret: d.Get("client_secret").(string), - Token: d.Get("token").(string), - } - if err := config.Validate(); err != nil { - return nil, err - } - return config.Client() -} diff --git a/builtin/providers/spotinst/provider_test.go b/builtin/providers/spotinst/provider_test.go deleted file mode 100755 index 557047a19..000000000 --- a/builtin/providers/spotinst/provider_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package spotinst - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "spotinst": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - c := map[string]string{ - "email": os.Getenv("SPOTINST_EMAIL"), - "password": os.Getenv("SPOTINST_PASSWORD"), - "client_id": os.Getenv("SPOTINST_CLIENT_ID"), - "client_secret": os.Getenv("SPOTINST_CLIENT_SECRET"), - "token": os.Getenv("SPOTINST_TOKEN"), - } - if c["password"] != "" && c["token"] != "" { - t.Fatalf("ERR_CONFLICT: Both a password and a token were set, only one is required") - } - if c["password"] != "" && (c["email"] == "" || c["client_id"] == "" || c["client_secret"] == "") { - t.Fatalf("ERR_MISSING: A password was set without email, client_id or client_secret") - } - if c["password"] == "" && c["token"] == "" { - t.Fatalf("ERR_MISSING: A token is required if not using password") - } -} diff --git a/builtin/providers/spotinst/resource_spotinst_aws_group.go 
b/builtin/providers/spotinst/resource_spotinst_aws_group.go deleted file mode 100755 index e838a5737..000000000 --- a/builtin/providers/spotinst/resource_spotinst_aws_group.go +++ /dev/null @@ -1,2334 +0,0 @@ -package spotinst - -import ( - "bytes" - "crypto/sha1" - "encoding/base64" - "encoding/hex" - "fmt" - "log" - "regexp" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/spotinst/spotinst-sdk-go/spotinst" - "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil" -) - -func resourceSpotinstAwsGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceSpotinstAwsGroupCreate, - Read: resourceSpotinstAwsGroupRead, - Update: resourceSpotinstAwsGroupUpdate, - Delete: resourceSpotinstAwsGroupDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "capacity": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "minimum": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "maximum": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "unit": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: hashAwsGroupCapacity, - }, - - "strategy": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "risk": &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - }, - - "ondemand_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "availability_vs_cost": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - 
"draining_timeout": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "utilize_reserved_instances": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "fallback_to_ondemand": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - }, - }, - Set: hashAwsGroupStrategy, - }, - - "scheduled_task": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "task_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "frequency": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "cron_expression": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "scale_target_capacity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "scale_min_capacity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "scale_max_capacity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - - "product": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "instance_types": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ondemand": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "spot": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "signal": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "availability_zone": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - ConflictsWith: []string{"availability_zones"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "availability_zones": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"availability_zone"}, - }, - - "hot_ebs_volume": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "volume_ids": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "load_balancer": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "arn": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Set: hashAwsGroupLoadBalancer, - }, - - "launch_specification": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "load_balancer_names": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "monitoring": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "ebs_optimized": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"image_id"}, - }, - - "key_pair": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "health_check_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "health_check_grace_period": &schema.Schema{ - Type: schema.TypeInt, - 
Optional: true, - }, - - "security_group_ids": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "user_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - StateFunc: func(v interface{}) string { - switch v.(type) { - case string: - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) - default: - return "" - } - }, - }, - - "iam_role": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Deprecated: "Attribute iam_role is deprecated. Use iam_instance_profile instead", - }, - - "iam_instance_profile": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "image_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "elastic_ips": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "tags": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - - "ebs_block_device": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delete_on_termination": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "device_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "encrypted": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "iops": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "snapshot_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "volume_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "volume_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - Set: hashAwsGroupEBSBlockDevice, - }, - - "ephemeral_block_device": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": 
&schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "virtual_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "network_interface": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "device_index": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "secondary_private_ip_address_count": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "associate_public_ip_address": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "delete_on_termination": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "security_group_ids": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "network_interface_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "private_ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "subnet_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - - "scaling_up_policy": scalingPolicySchema(), - - "scaling_down_policy": scalingPolicySchema(), - - "rancher_integration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "master_host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "access_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "secret_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "elastic_beanstalk_integration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - 
}, - }, - }, - }, - - "ec2_container_service_integration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "kubernetes_integration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_server": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - - "mesosphere_integration": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "api_server": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func scalingPolicySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "policy_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "metric_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "statistic": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "unit": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "threshold": &schema.Schema{ - Type: schema.TypeFloat, - Required: true, - }, - - "adjustment": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "min_target_capacity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "max_target_capacity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "namespace": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "operator": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "evaluation_periods": &schema.Schema{ - Type: 
schema.TypeInt, - Required: true, - }, - - "period": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "cooldown": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "dimensions": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - }, - Set: hashAwsGroupScalingPolicy, - } -} - -func resourceSpotinstAwsGroupCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - newAwsGroup, err := buildAwsGroupOpts(d, meta) - if err != nil { - return err - } - log.Printf("[DEBUG] AwsGroup create configuration: %s\n", stringutil.Stringify(newAwsGroup)) - input := &spotinst.CreateAwsGroupInput{Group: newAwsGroup} - resp, err := client.AwsGroupService.Create(input) - if err != nil { - return fmt.Errorf("Error creating group: %s", err) - } - d.SetId(spotinst.StringValue(resp.Group.ID)) - log.Printf("[INFO] AwsGroup created successfully: %s\n", d.Id()) - return resourceSpotinstAwsGroupRead(d, meta) -} - -func resourceSpotinstAwsGroupRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - input := &spotinst.ReadAwsGroupInput{ID: spotinst.String(d.Id())} - resp, err := client.AwsGroupService.Read(input) - if err != nil { - return fmt.Errorf("Error retrieving group: %s", err) - } - if g := resp.Group; g != nil { - d.Set("name", g.Name) - d.Set("description", g.Description) - d.Set("product", g.Compute.Product) - d.Set("tags", tagsToMap(g.Compute.LaunchSpecification.Tags)) - d.Set("elastic_ips", g.Compute.ElasticIPs) - - // Set capacity. - if g.Capacity != nil { - if err := d.Set("capacity", flattenAwsGroupCapacity(g.Capacity)); err != nil { - return fmt.Errorf("Error setting capacity onfiguration: %#v", err) - } - } - - // Set strategy. - if g.Strategy != nil { - if err := d.Set("strategy", flattenAwsGroupStrategy(g.Strategy)); err != nil { - return fmt.Errorf("Error setting strategy configuration: %#v", err) - } - } - - // Set signals. 
- if g.Strategy.Signals != nil { - if err := d.Set("signal", flattenAwsGroupSignals(g.Strategy.Signals)); err != nil { - return fmt.Errorf("Error setting signals configuration: %#v", err) - } - } - - // Set scaling up policies. - if g.Scaling.Up != nil { - if err := d.Set("scaling_up_policy", flattenAwsGroupScalingPolicies(g.Scaling.Up)); err != nil { - return fmt.Errorf("Error setting scaling up policies configuration: %#v", err) - } - } - - // Set scaling down policies. - if g.Scaling.Down != nil { - if err := d.Set("scaling_down_policy", flattenAwsGroupScalingPolicies(g.Scaling.Down)); err != nil { - return fmt.Errorf("Error setting scaling down policies configuration: %#v", err) - } - } - - // Set scheduled tasks. - if g.Scheduling.Tasks != nil { - if err := d.Set("scheduled_task", flattenAwsGroupScheduledTasks(g.Scheduling.Tasks)); err != nil { - return fmt.Errorf("Error setting scheduled tasks configuration: %#v", err) - } - } - - // Set launch specification. - if g.Compute.LaunchSpecification != nil { - imageIDSetInLaunchSpec := true - if v, ok := d.GetOk("image_id"); ok && v != "" { - imageIDSetInLaunchSpec = false - } - if err := d.Set("launch_specification", flattenAwsGroupLaunchSpecification(g.Compute.LaunchSpecification, imageIDSetInLaunchSpec)); err != nil { - return fmt.Errorf("Error setting launch specification configuration: %#v", err) - } - } - - // Set image ID. - if g.Compute.LaunchSpecification.ImageID != nil { - if d.Get("image_id") != nil && d.Get("image_id") != "" { - d.Set("image_id", g.Compute.LaunchSpecification.ImageID) - } - } - - // Set load balancers. - if g.Compute.LaunchSpecification.LoadBalancersConfig != nil { - if err := d.Set("load_balancer", flattenAwsGroupLoadBalancers(g.Compute.LaunchSpecification.LoadBalancersConfig.LoadBalancers)); err != nil { - return fmt.Errorf("Error setting load balancers configuration: %#v", err) - } - } - - // Set EBS volume pool. 
- if g.Compute.EBSVolumePool != nil { - if err := d.Set("hot_ebs_volume", flattenAwsGroupEBSVolumePool(g.Compute.EBSVolumePool)); err != nil { - return fmt.Errorf("Error setting EBS volume pool configuration: %#v", err) - } - } - - // Set network interfaces. - if g.Compute.LaunchSpecification.NetworkInterfaces != nil { - if err := d.Set("network_interface", flattenAwsGroupNetworkInterfaces(g.Compute.LaunchSpecification.NetworkInterfaces)); err != nil { - return fmt.Errorf("Error setting network interfaces configuration: %#v", err) - } - } - - // Set block devices. - if g.Compute.LaunchSpecification.BlockDevices != nil { - if err := d.Set("ebs_block_device", flattenAwsGroupEBSBlockDevices(g.Compute.LaunchSpecification.BlockDevices)); err != nil { - return fmt.Errorf("Error setting EBS block devices configuration: %#v", err) - } - if err := d.Set("ephemeral_block_device", flattenAwsGroupEphemeralBlockDevices(g.Compute.LaunchSpecification.BlockDevices)); err != nil { - return fmt.Errorf("Error setting Ephemeral block devices configuration: %#v", err) - } - } - - // Set Rancher integration. - if g.Integration.Rancher != nil { - if err := d.Set("rancher_integration", flattenAwsGroupRancherIntegration(g.Integration.Rancher)); err != nil { - return fmt.Errorf("Error setting Rancher configuration: %#v", err) - } - } - - // Set Elastic Beanstalk integration. - if g.Integration.ElasticBeanstalk != nil { - if err := d.Set("elastic_beanstalk_integration", flattenAwsGroupElasticBeanstalkIntegration(g.Integration.ElasticBeanstalk)); err != nil { - return fmt.Errorf("Error setting Elastic Beanstalk configuration: %#v", err) - } - } - - // Set EC2 Container Service integration. 
- if g.Integration.EC2ContainerService != nil { - if err := d.Set("ec2_container_service_integration", flattenAwsGroupEC2ContainerServiceIntegration(g.Integration.EC2ContainerService)); err != nil { - return fmt.Errorf("Error setting EC2 Container Service configuration: %#v", err) - } - } - - // Set Kubernetes integration. - if g.Integration.Kubernetes != nil { - if err := d.Set("kubernetes_integration", flattenAwsGroupKubernetesIntegration(g.Integration.Kubernetes)); err != nil { - return fmt.Errorf("Error setting Kubernetes configuration: %#v", err) - } - } - - // Set Mesosphere integration. - if g.Integration.Mesosphere != nil { - if err := d.Set("mesosphere_integration", flattenAwsGroupMesosphereIntegration(g.Integration.Mesosphere)); err != nil { - return fmt.Errorf("Error setting Mesosphere configuration: %#v", err) - } - } - } else { - d.SetId("") - } - return nil -} - -func resourceSpotinstAwsGroupUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - group := &spotinst.AwsGroup{ID: spotinst.String(d.Id())} - update := false - - if d.HasChange("name") { - group.Name = spotinst.String(d.Get("name").(string)) - update = true - } - - if d.HasChange("description") { - group.Description = spotinst.String(d.Get("description").(string)) - update = true - } - - if d.HasChange("capacity") { - if v, ok := d.GetOk("capacity"); ok { - if capacity, err := expandAwsGroupCapacity(v); err != nil { - return err - } else { - group.Capacity = capacity - update = true - } - } - } - - if d.HasChange("strategy") { - if v, ok := d.GetOk("strategy"); ok { - if strategy, err := expandAwsGroupStrategy(v); err != nil { - return err - } else { - group.Strategy = strategy - if v, ok := d.GetOk("signal"); ok { - if signals, err := expandAwsGroupSignals(v); err != nil { - return err - } else { - group.Strategy.Signals = signals - } - } - update = true - } - } - } - - if d.HasChange("launch_specification") { - if v, ok := 
d.GetOk("launch_specification"); ok { - lc, err := expandAwsGroupLaunchSpecification(v) - if err != nil { - return err - } - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.LaunchSpecification = lc - update = true - } - } - - if d.HasChange("image_id") { - if d.Get("image_id") != nil && d.Get("image_id") != "" { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - group.Compute.LaunchSpecification.ImageID = spotinst.String(d.Get("image_id").(string)) - update = true - } - } - - if d.HasChange("load_balancer") { - if v, ok := d.GetOk("load_balancer"); ok { - if lbs, err := expandAwsGroupLoadBalancer(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - if group.Compute.LaunchSpecification.LoadBalancersConfig == nil { - group.Compute.LaunchSpecification.LoadBalancersConfig = &spotinst.AwsGroupComputeLoadBalancersConfig{} - group.Compute.LaunchSpecification.LoadBalancersConfig.LoadBalancers = lbs - update = true - } - } - } - } - - var blockDevicesExpanded bool - - if d.HasChange("ebs_block_device") { - if v, ok := d.GetOk("ebs_block_device"); ok { - if devices, err := expandAwsGroupEBSBlockDevices(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - if len(group.Compute.LaunchSpecification.BlockDevices) > 0 { - group.Compute.LaunchSpecification.BlockDevices = append(group.Compute.LaunchSpecification.BlockDevices, devices...) 
- } else { - if v, ok := d.GetOk("ephemeral_block_device"); ok { - if ephemeral, err := expandAwsGroupEphemeralBlockDevices(v); err != nil { - return err - } else { - devices = append(devices, ephemeral...) - blockDevicesExpanded = true - } - } - group.Compute.LaunchSpecification.BlockDevices = devices - } - update = true - } - } - } - - if d.HasChange("ephemeral_block_device") && !blockDevicesExpanded { - if v, ok := d.GetOk("ephemeral_block_device"); ok { - if devices, err := expandAwsGroupEphemeralBlockDevices(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - if len(group.Compute.LaunchSpecification.BlockDevices) > 0 { - group.Compute.LaunchSpecification.BlockDevices = append(group.Compute.LaunchSpecification.BlockDevices, devices...) - } else { - if v, ok := d.GetOk("ebs_block_device"); ok { - if ebs, err := expandAwsGroupEBSBlockDevices(v); err != nil { - return err - } else { - devices = append(devices, ebs...) 
- } - } - group.Compute.LaunchSpecification.BlockDevices = devices - } - update = true - } - } - } - - if d.HasChange("network_interface") { - if v, ok := d.GetOk("network_interface"); ok { - if interfaces, err := expandAwsGroupNetworkInterfaces(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - group.Compute.LaunchSpecification.NetworkInterfaces = interfaces - update = true - } - } - } - - if d.HasChange("availability_zone") { - if v, ok := d.GetOk("availability_zone"); ok { - if zones, err := expandAwsGroupAvailabilityZones(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.AvailabilityZones = zones - update = true - } - } - } - - if d.HasChange("availability_zones") { - if v, ok := d.GetOk("availability_zones"); ok { - if zones, err := expandAwsGroupAvailabilityZonesSlice(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.AvailabilityZones = zones - update = true - } - } - } - - if d.HasChange("hot_ebs_volume") { - if v, ok := d.GetOk("hot_ebs_volume"); ok { - if ebsVolumePool, err := expandAwsGroupEBSVolumePool(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.EBSVolumePool = ebsVolumePool - update = true - } - } - } - - if d.HasChange("signal") { - if v, ok := d.GetOk("signal"); ok { - if signals, err := expandAwsGroupSignals(v); err != nil { - return err - } else { - if group.Strategy == nil { - group.Strategy = &spotinst.AwsGroupStrategy{} - } - group.Strategy.Signals = signals - update = true - } - } - } - - if d.HasChange("instance_types") { - if v, ok := d.GetOk("instance_types"); ok { - if types, err := 
expandAwsGroupInstanceTypes(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.InstanceTypes = types - update = true - } - } - } - - if d.HasChange("tags") { - if v, ok := d.GetOk("tags"); ok { - if tags, err := expandAwsGroupTags(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - if group.Compute.LaunchSpecification == nil { - group.Compute.LaunchSpecification = &spotinst.AwsGroupComputeLaunchSpecification{} - } - group.Compute.LaunchSpecification.Tags = tags - update = true - } - } - } - - if d.HasChange("elastic_ips") { - if v, ok := d.GetOk("elastic_ips"); ok { - if eips, err := expandAwsGroupElasticIPs(v); err != nil { - return err - } else { - if group.Compute == nil { - group.Compute = &spotinst.AwsGroupCompute{} - } - group.Compute.ElasticIPs = eips - update = true - } - } - } - - if d.HasChange("scheduled_task") { - if v, ok := d.GetOk("scheduled_task"); ok { - if tasks, err := expandAwsGroupScheduledTasks(v); err != nil { - return err - } else { - if group.Scheduling == nil { - group.Scheduling = &spotinst.AwsGroupScheduling{} - } - group.Scheduling.Tasks = tasks - update = true - } - } - } - - if d.HasChange("scaling_up_policy") { - if v, ok := d.GetOk("scaling_up_policy"); ok { - if policies, err := expandAwsGroupScalingPolicies(v); err != nil { - return err - } else { - if group.Scaling == nil { - group.Scaling = &spotinst.AwsGroupScaling{} - } - group.Scaling.Up = policies - update = true - } - } - } - - if d.HasChange("scaling_down_policy") { - if v, ok := d.GetOk("scaling_down_policy"); ok { - if policies, err := expandAwsGroupScalingPolicies(v); err != nil { - return err - } else { - if group.Scaling == nil { - group.Scaling = &spotinst.AwsGroupScaling{} - } - group.Scaling.Down = policies - update = true - } - } - } - - if d.HasChange("rancher_integration") { - if v, ok := 
d.GetOk("rancher_integration"); ok { - if integration, err := expandAwsGroupRancherIntegration(v); err != nil { - return err - } else { - if group.Integration == nil { - group.Integration = &spotinst.AwsGroupIntegration{} - } - group.Integration.Rancher = integration - update = true - } - } - } - - if d.HasChange("elastic_eanstalk_integration") { - if v, ok := d.GetOk("elastic_beanstalk_integration"); ok { - if integration, err := expandAwsGroupElasticBeanstalkIntegration(v); err != nil { - return err - } else { - if group.Integration == nil { - group.Integration = &spotinst.AwsGroupIntegration{} - } - group.Integration.ElasticBeanstalk = integration - update = true - } - } - } - - if d.HasChange("ec2_container_service_integration") { - if v, ok := d.GetOk("ec2_container_service_integration"); ok { - if integration, err := expandAwsGroupEC2ContainerServiceIntegration(v); err != nil { - return err - } else { - if group.Integration == nil { - group.Integration = &spotinst.AwsGroupIntegration{} - } - group.Integration.EC2ContainerService = integration - update = true - } - } - } - - if d.HasChange("kubernetes_integration") { - if v, ok := d.GetOk("kubernetes_integration"); ok { - if integration, err := expandAwsGroupKubernetesIntegration(v); err != nil { - return err - } else { - if group.Integration == nil { - group.Integration = &spotinst.AwsGroupIntegration{} - } - group.Integration.Kubernetes = integration - update = true - } - } - } - - if d.HasChange("mesosphere_integration") { - if v, ok := d.GetOk("mesosphere_integration"); ok { - if integration, err := expandAwsGroupMesosphereIntegration(v); err != nil { - return err - } else { - if group.Integration == nil { - group.Integration = &spotinst.AwsGroupIntegration{} - } - group.Integration.Mesosphere = integration - update = true - } - } - } - - if update { - log.Printf("[DEBUG] AwsGroup update configuration: %s\n", stringutil.Stringify(group)) - input := &spotinst.UpdateAwsGroupInput{Group: group} - if _, err := 
client.AwsGroupService.Update(input); err != nil { - return fmt.Errorf("Error updating group %s: %s", d.Id(), err) - } - } - - return resourceSpotinstAwsGroupRead(d, meta) -} - -func resourceSpotinstAwsGroupDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - log.Printf("[INFO] Deleting group: %s\n", d.Id()) - input := &spotinst.DeleteAwsGroupInput{ID: spotinst.String(d.Id())} - if _, err := client.AwsGroupService.Delete(input); err != nil { - return fmt.Errorf("Error deleting group: %s", err) - } - d.SetId("") - return nil -} - -func flattenAwsGroupCapacity(capacity *spotinst.AwsGroupCapacity) []interface{} { - result := make(map[string]interface{}) - result["target"] = spotinst.IntValue(capacity.Target) - result["minimum"] = spotinst.IntValue(capacity.Minimum) - result["maximum"] = spotinst.IntValue(capacity.Maximum) - result["unit"] = spotinst.StringValue(capacity.Unit) - return []interface{}{result} -} - -func flattenAwsGroupStrategy(strategy *spotinst.AwsGroupStrategy) []interface{} { - result := make(map[string]interface{}) - result["risk"] = spotinst.Float64Value(strategy.Risk) - result["ondemand_count"] = spotinst.IntValue(strategy.OnDemandCount) - result["availability_vs_cost"] = spotinst.StringValue(strategy.AvailabilityVsCost) - result["draining_timeout"] = spotinst.IntValue(strategy.DrainingTimeout) - result["utilize_reserved_instances"] = spotinst.BoolValue(strategy.UtilizeReservedInstances) - result["fallback_to_ondemand"] = spotinst.BoolValue(strategy.FallbackToOnDemand) - return []interface{}{result} -} - -func flattenAwsGroupLaunchSpecification(lspec *spotinst.AwsGroupComputeLaunchSpecification, includeImageID bool) []interface{} { - result := make(map[string]interface{}) - result["health_check_grace_period"] = spotinst.IntValue(lspec.HealthCheckGracePeriod) - result["health_check_type"] = spotinst.StringValue(lspec.HealthCheckType) - if includeImageID { - result["image_id"] = 
spotinst.StringValue(lspec.ImageID) - } - result["key_pair"] = spotinst.StringValue(lspec.KeyPair) - if lspec.UserData != nil && spotinst.StringValue(lspec.UserData) != "" { - decodedUserData, _ := base64.StdEncoding.DecodeString(spotinst.StringValue(lspec.UserData)) - result["user_data"] = string(decodedUserData) - } else { - result["user_data"] = "" - } - result["monitoring"] = spotinst.BoolValue(lspec.Monitoring) - result["ebs_optimized"] = spotinst.BoolValue(lspec.EBSOptimized) - result["load_balancer_names"] = lspec.LoadBalancerNames - result["security_group_ids"] = lspec.SecurityGroupIDs - if lspec.IamInstanceProfile != nil { - if lspec.IamInstanceProfile.Arn != nil { - result["iam_instance_profile"] = spotinst.StringValue(lspec.IamInstanceProfile.Arn) - } else { - result["iam_instance_profile"] = spotinst.StringValue(lspec.IamInstanceProfile.Name) - } - } - return []interface{}{result} -} - -func flattenAwsGroupLoadBalancers(balancers []*spotinst.AwsGroupComputeLoadBalancer) []interface{} { - result := make([]interface{}, 0, len(balancers)) - for _, b := range balancers { - m := make(map[string]interface{}) - m["name"] = spotinst.StringValue(b.Name) - m["arn"] = spotinst.StringValue(b.Arn) - m["type"] = strings.ToLower(spotinst.StringValue(b.Type)) - result = append(result, m) - } - return result -} - -func flattenAwsGroupEBSVolumePool(volumes []*spotinst.AwsGroupComputeEBSVolume) []interface{} { - result := make([]interface{}, 0, len(volumes)) - for _, v := range volumes { - m := make(map[string]interface{}) - m["device_name"] = spotinst.StringValue(v.DeviceName) - m["volume_ids"] = v.VolumeIDs - result = append(result, m) - } - return result -} - -func flattenAwsGroupSignals(signals []*spotinst.AwsGroupStrategySignal) []interface{} { - result := make([]interface{}, 0, len(signals)) - for _, s := range signals { - m := make(map[string]interface{}) - m["name"] = strings.ToLower(spotinst.StringValue(s.Name)) - result = append(result, m) - } - return result -} 
- -func flattenAwsGroupScheduledTasks(tasks []*spotinst.AwsGroupScheduledTask) []interface{} { - result := make([]interface{}, 0, len(tasks)) - for _, t := range tasks { - m := make(map[string]interface{}) - m["task_type"] = spotinst.StringValue(t.TaskType) - m["cron_expression"] = spotinst.StringValue(t.CronExpression) - m["frequency"] = spotinst.StringValue(t.Frequency) - m["scale_target_capacity"] = spotinst.IntValue(t.ScaleTargetCapacity) - m["scale_min_capacity"] = spotinst.IntValue(t.ScaleMinCapacity) - m["scale_max_capacity"] = spotinst.IntValue(t.ScaleMaxCapacity) - result = append(result, m) - } - return result -} - -func flattenAwsGroupScalingPolicies(policies []*spotinst.AwsGroupScalingPolicy) []interface{} { - result := make([]interface{}, 0, len(policies)) - for _, p := range policies { - m := make(map[string]interface{}) - m["adjustment"] = spotinst.IntValue(p.Adjustment) - m["cooldown"] = spotinst.IntValue(p.Cooldown) - m["evaluation_periods"] = spotinst.IntValue(p.EvaluationPeriods) - m["min_target_capacity"] = spotinst.IntValue(p.MinTargetCapacity) - m["max_target_capacity"] = spotinst.IntValue(p.MaxTargetCapacity) - m["metric_name"] = spotinst.StringValue(p.MetricName) - m["namespace"] = spotinst.StringValue(p.Namespace) - m["operator"] = spotinst.StringValue(p.Operator) - m["period"] = spotinst.IntValue(p.Period) - m["policy_name"] = spotinst.StringValue(p.PolicyName) - m["statistic"] = spotinst.StringValue(p.Statistic) - m["threshold"] = spotinst.Float64Value(p.Threshold) - m["unit"] = spotinst.StringValue(p.Unit) - if len(p.Dimensions) > 0 { - flatDims := make(map[string]interface{}) - for _, d := range p.Dimensions { - flatDims[spotinst.StringValue(d.Name)] = *d.Value - } - m["dimensions"] = flatDims - } - result = append(result, m) - } - return result -} - -func flattenAwsGroupNetworkInterfaces(ifaces []*spotinst.AwsGroupComputeNetworkInterface) []interface{} { - result := make([]interface{}, 0, len(ifaces)) - for _, iface := range ifaces { - 
m := make(map[string]interface{}) - m["associate_public_ip_address"] = spotinst.BoolValue(iface.AssociatePublicIPAddress) - m["delete_on_termination"] = spotinst.BoolValue(iface.DeleteOnTermination) - m["description"] = spotinst.StringValue(iface.Description) - m["device_index"] = spotinst.IntValue(iface.DeviceIndex) - m["network_interface_id"] = spotinst.StringValue(iface.ID) - m["private_ip_address"] = spotinst.StringValue(iface.PrivateIPAddress) - m["secondary_private_ip_address_count"] = spotinst.IntValue(iface.SecondaryPrivateIPAddressCount) - m["subnet_id"] = spotinst.StringValue(iface.SubnetID) - m["security_group_ids"] = iface.SecurityGroupsIDs - result = append(result, m) - } - return result -} - -func flattenAwsGroupEBSBlockDevices(devices []*spotinst.AwsGroupComputeBlockDevice) []interface{} { - result := make([]interface{}, 0, len(devices)) - for _, dev := range devices { - if dev.EBS != nil { - m := make(map[string]interface{}) - m["device_name"] = spotinst.StringValue(dev.DeviceName) - m["delete_on_termination"] = spotinst.BoolValue(dev.EBS.DeleteOnTermination) - m["encrypted"] = spotinst.BoolValue(dev.EBS.Encrypted) - m["iops"] = spotinst.IntValue(dev.EBS.IOPS) - m["snapshot_id"] = spotinst.StringValue(dev.EBS.SnapshotID) - m["volume_type"] = spotinst.StringValue(dev.EBS.VolumeType) - m["volume_size"] = spotinst.IntValue(dev.EBS.VolumeSize) - result = append(result, m) - } - } - return result -} - -func flattenAwsGroupEphemeralBlockDevices(devices []*spotinst.AwsGroupComputeBlockDevice) []interface{} { - result := make([]interface{}, 0, len(devices)) - for _, dev := range devices { - if dev.EBS == nil { - m := make(map[string]interface{}) - m["device_name"] = spotinst.StringValue(dev.DeviceName) - m["virtual_name"] = spotinst.StringValue(dev.VirtualName) - result = append(result, m) - } - } - return result -} - -func flattenAwsGroupRancherIntegration(integration *spotinst.AwsGroupRancherIntegration) []interface{} { - result := 
make(map[string]interface{}) - result["master_host"] = spotinst.StringValue(integration.MasterHost) - result["access_key"] = spotinst.StringValue(integration.AccessKey) - result["secret_key"] = spotinst.StringValue(integration.SecretKey) - return []interface{}{result} -} - -func flattenAwsGroupElasticBeanstalkIntegration(integration *spotinst.AwsGroupElasticBeanstalkIntegration) []interface{} { - result := make(map[string]interface{}) - result["environment_id"] = spotinst.StringValue(integration.EnvironmentID) - return []interface{}{result} -} - -func flattenAwsGroupEC2ContainerServiceIntegration(integration *spotinst.AwsGroupEC2ContainerServiceIntegration) []interface{} { - result := make(map[string]interface{}) - result["cluster_name"] = spotinst.StringValue(integration.ClusterName) - return []interface{}{result} -} - -func flattenAwsGroupKubernetesIntegration(integration *spotinst.AwsGroupKubernetesIntegration) []interface{} { - result := make(map[string]interface{}) - result["api_server"] = spotinst.StringValue(integration.Server) - result["token"] = spotinst.StringValue(integration.Token) - return []interface{}{result} -} - -func flattenAwsGroupMesosphereIntegration(integration *spotinst.AwsGroupMesosphereIntegration) []interface{} { - result := make(map[string]interface{}) - result["api_server"] = spotinst.StringValue(integration.Server) - return []interface{}{result} -} - -// buildAwsGroupOpts builds the Spotinst AWS Group options. 
-func buildAwsGroupOpts(d *schema.ResourceData, meta interface{}) (*spotinst.AwsGroup, error) { - group := &spotinst.AwsGroup{ - Name: spotinst.String(d.Get("name").(string)), - Description: spotinst.String(d.Get("description").(string)), - Scaling: &spotinst.AwsGroupScaling{}, - Scheduling: &spotinst.AwsGroupScheduling{}, - Integration: &spotinst.AwsGroupIntegration{}, - Compute: &spotinst.AwsGroupCompute{ - Product: spotinst.String(d.Get("product").(string)), - LaunchSpecification: &spotinst.AwsGroupComputeLaunchSpecification{}, - }, - } - - if v, ok := d.GetOk("capacity"); ok { - if capacity, err := expandAwsGroupCapacity(v); err != nil { - return nil, err - } else { - group.Capacity = capacity - } - } - - if v, ok := d.GetOk("strategy"); ok { - if strategy, err := expandAwsGroupStrategy(v); err != nil { - return nil, err - } else { - group.Strategy = strategy - } - } - - if v, ok := d.GetOk("scaling_up_policy"); ok { - if policies, err := expandAwsGroupScalingPolicies(v); err != nil { - return nil, err - } else { - group.Scaling.Up = policies - } - } - - if v, ok := d.GetOk("scaling_down_policy"); ok { - if policies, err := expandAwsGroupScalingPolicies(v); err != nil { - return nil, err - } else { - group.Scaling.Down = policies - } - } - - if v, ok := d.GetOk("scheduled_task"); ok { - if tasks, err := expandAwsGroupScheduledTasks(v); err != nil { - return nil, err - } else { - group.Scheduling.Tasks = tasks - } - } - - if v, ok := d.GetOk("instance_types"); ok { - if types, err := expandAwsGroupInstanceTypes(v); err != nil { - return nil, err - } else { - group.Compute.InstanceTypes = types - } - } - - if v, ok := d.GetOk("elastic_ips"); ok { - if eips, err := expandAwsGroupElasticIPs(v); err != nil { - return nil, err - } else { - group.Compute.ElasticIPs = eips - } - } - - if v, ok := d.GetOk("availability_zone"); ok { - if zones, err := expandAwsGroupAvailabilityZones(v); err != nil { - return nil, err - } else { - group.Compute.AvailabilityZones = zones - 
} - } - - if v, ok := d.GetOk("availability_zones"); ok { - if zones, err := expandAwsGroupAvailabilityZonesSlice(v); err != nil { - return nil, err - } else { - group.Compute.AvailabilityZones = zones - } - } - - if v, ok := d.GetOk("hot_ebs_volume"); ok { - if ebsVolumePool, err := expandAwsGroupEBSVolumePool(v); err != nil { - return nil, err - } else { - group.Compute.EBSVolumePool = ebsVolumePool - } - } - - if v, ok := d.GetOk("signal"); ok { - if signals, err := expandAwsGroupSignals(v); err != nil { - return nil, err - } else { - group.Strategy.Signals = signals - } - } - - if v, ok := d.GetOk("launch_specification"); ok { - if lc, err := expandAwsGroupLaunchSpecification(v); err != nil { - return nil, err - } else { - group.Compute.LaunchSpecification = lc - } - } - - if v, ok := d.GetOk("image_id"); ok { - group.Compute.LaunchSpecification.ImageID = spotinst.String(v.(string)) - } - - if v, ok := d.GetOk("load_balancer"); ok { - if lbs, err := expandAwsGroupLoadBalancer(v); err != nil { - return nil, err - } else { - if group.Compute.LaunchSpecification.LoadBalancersConfig == nil { - group.Compute.LaunchSpecification.LoadBalancersConfig = &spotinst.AwsGroupComputeLoadBalancersConfig{} - } - group.Compute.LaunchSpecification.LoadBalancersConfig.LoadBalancers = lbs - } - } - - if v, ok := d.GetOk("tags"); ok { - if tags, err := expandAwsGroupTags(v); err != nil { - return nil, err - } else { - group.Compute.LaunchSpecification.Tags = tags - } - } - - if v, ok := d.GetOk("network_interface"); ok { - if interfaces, err := expandAwsGroupNetworkInterfaces(v); err != nil { - return nil, err - } else { - group.Compute.LaunchSpecification.NetworkInterfaces = interfaces - } - } - - if v, ok := d.GetOk("ebs_block_device"); ok { - if devices, err := expandAwsGroupEBSBlockDevices(v); err != nil { - return nil, err - } else { - group.Compute.LaunchSpecification.BlockDevices = devices - } - } - - if v, ok := d.GetOk("ephemeral_block_device"); ok { - if devices, err := 
expandAwsGroupEphemeralBlockDevices(v); err != nil { - return nil, err - } else { - if len(group.Compute.LaunchSpecification.BlockDevices) > 0 { - group.Compute.LaunchSpecification.BlockDevices = append(group.Compute.LaunchSpecification.BlockDevices, devices...) - } else { - group.Compute.LaunchSpecification.BlockDevices = devices - } - } - } - - if v, ok := d.GetOk("rancher_integration"); ok { - if integration, err := expandAwsGroupRancherIntegration(v); err != nil { - return nil, err - } else { - group.Integration.Rancher = integration - } - } - - if v, ok := d.GetOk("elastic_beanstalk_integration"); ok { - if integration, err := expandAwsGroupElasticBeanstalkIntegration(v); err != nil { - return nil, err - } else { - group.Integration.ElasticBeanstalk = integration - } - } - - if v, ok := d.GetOk("ec2_container_service_integration"); ok { - if integration, err := expandAwsGroupEC2ContainerServiceIntegration(v); err != nil { - return nil, err - } else { - group.Integration.EC2ContainerService = integration - } - } - - if v, ok := d.GetOk("kubernetes_integration"); ok { - if integration, err := expandAwsGroupKubernetesIntegration(v); err != nil { - return nil, err - } else { - group.Integration.Kubernetes = integration - } - } - - return group, nil -} - -// expandAwsGroupCapacity expands the Capacity block. 
-func expandAwsGroupCapacity(data interface{}) (*spotinst.AwsGroupCapacity, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - capacity := &spotinst.AwsGroupCapacity{} - - if v, ok := m["minimum"].(int); ok && v >= 0 { - capacity.Minimum = spotinst.Int(v) - } - - if v, ok := m["maximum"].(int); ok && v >= 0 { - capacity.Maximum = spotinst.Int(v) - } - - if v, ok := m["target"].(int); ok && v >= 0 { - capacity.Target = spotinst.Int(v) - } - - if v, ok := m["unit"].(string); ok && v != "" { - capacity.Unit = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup capacity configuration: %s\n", stringutil.Stringify(capacity)) - return capacity, nil -} - -// expandAwsGroupStrategy expands the Strategy block. -func expandAwsGroupStrategy(data interface{}) (*spotinst.AwsGroupStrategy, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - strategy := &spotinst.AwsGroupStrategy{} - - if v, ok := m["risk"].(float64); ok && v >= 0 { - strategy.Risk = spotinst.Float64(v) - } - - if v, ok := m["ondemand_count"].(int); ok && v >= 0 && spotinst.Float64Value(strategy.Risk) == 0 { - strategy.OnDemandCount = spotinst.Int(v) - strategy.Risk = nil - } - - if v, ok := m["availability_vs_cost"].(string); ok && v != "" { - strategy.AvailabilityVsCost = spotinst.String(v) - } - - if v, ok := m["draining_timeout"].(int); ok && v > 0 { - strategy.DrainingTimeout = spotinst.Int(v) - } - - if v, ok := m["utilize_reserved_instances"].(bool); ok { - strategy.UtilizeReservedInstances = spotinst.Bool(v) - } - - if v, ok := m["fallback_to_ondemand"].(bool); ok { - strategy.FallbackToOnDemand = spotinst.Bool(v) - } - - log.Printf("[DEBUG] AwsGroup strategy configuration: %s\n", stringutil.Stringify(strategy)) - return strategy, nil -} - -// expandAwsGroupScalingPolicies expands the Scaling Policy block. 
-func expandAwsGroupScalingPolicies(data interface{}) ([]*spotinst.AwsGroupScalingPolicy, error) { - list := data.(*schema.Set).List() - policies := make([]*spotinst.AwsGroupScalingPolicy, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - policy := &spotinst.AwsGroupScalingPolicy{} - - if v, ok := m["policy_name"].(string); ok && v != "" { - policy.PolicyName = spotinst.String(v) - } - - if v, ok := m["metric_name"].(string); ok && v != "" { - policy.MetricName = spotinst.String(v) - } - - if v, ok := m["statistic"].(string); ok && v != "" { - policy.Statistic = spotinst.String(v) - } - - if v, ok := m["unit"].(string); ok && v != "" { - policy.Unit = spotinst.String(v) - } - - if v, ok := m["threshold"].(float64); ok && v > 0 { - policy.Threshold = spotinst.Float64(v) - } - - if v, ok := m["adjustment"].(int); ok && v > 0 { - policy.Adjustment = spotinst.Int(v) - } - - if v, ok := m["min_target_capacity"].(int); ok && v > 0 { - policy.MinTargetCapacity = spotinst.Int(v) - } - - if v, ok := m["max_target_capacity"].(int); ok && v > 0 { - policy.MaxTargetCapacity = spotinst.Int(v) - } - - if v, ok := m["namespace"].(string); ok && v != "" { - policy.Namespace = spotinst.String(v) - } - - if v, ok := m["operator"].(string); ok && v != "" { - policy.Operator = spotinst.String(v) - } - - if v, ok := m["period"].(int); ok && v > 0 { - policy.Period = spotinst.Int(v) - } - - if v, ok := m["evaluation_periods"].(int); ok && v > 0 { - policy.EvaluationPeriods = spotinst.Int(v) - } - - if v, ok := m["cooldown"].(int); ok && v > 0 { - policy.Cooldown = spotinst.Int(v) - } - - if v, ok := m["dimensions"]; ok { - dimensions := expandAwsGroupScalingPolicyDimensions(v.(map[string]interface{})) - policy.Dimensions = dimensions - } - - if v, ok := m["namespace"].(string); ok && v != "" { - log.Printf("[DEBUG] AwsGroup scaling policy configuration: %s\n", stringutil.Stringify(policy)) - policies = append(policies, policy) - } - } - - return 
policies, nil -} - -func expandAwsGroupScalingPolicyDimensions(list map[string]interface{}) []*spotinst.AwsGroupScalingPolicyDimension { - dimensions := make([]*spotinst.AwsGroupScalingPolicyDimension, 0, len(list)) - for name, val := range list { - dimension := &spotinst.AwsGroupScalingPolicyDimension{ - Name: spotinst.String(name), - Value: spotinst.String(val.(string)), - } - log.Printf("[DEBUG] AwsGroup scaling policy dimension: %s\n", stringutil.Stringify(dimension)) - dimensions = append(dimensions, dimension) - } - return dimensions -} - -// expandAwsGroupScheduledTasks expands the Scheduled Task block. -func expandAwsGroupScheduledTasks(data interface{}) ([]*spotinst.AwsGroupScheduledTask, error) { - list := data.(*schema.Set).List() - tasks := make([]*spotinst.AwsGroupScheduledTask, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - task := &spotinst.AwsGroupScheduledTask{} - - if v, ok := m["task_type"].(string); ok && v != "" { - task.TaskType = spotinst.String(v) - } - - if v, ok := m["frequency"].(string); ok && v != "" { - task.Frequency = spotinst.String(v) - } - - if v, ok := m["cron_expression"].(string); ok && v != "" { - task.CronExpression = spotinst.String(v) - } - - if v, ok := m["scale_target_capacity"].(int); ok && v > 0 { - task.ScaleTargetCapacity = spotinst.Int(v) - } - - if v, ok := m["scale_min_capacity"].(int); ok && v > 0 { - task.ScaleMinCapacity = spotinst.Int(v) - } - - if v, ok := m["scale_max_capacity"].(int); ok && v > 0 { - task.ScaleMaxCapacity = spotinst.Int(v) - } - - log.Printf("[DEBUG] AwsGroup scheduled task configuration: %s\n", stringutil.Stringify(task)) - tasks = append(tasks, task) - } - - return tasks, nil -} - -// expandAwsGroupAvailabilityZones expands the Availability Zone block. 
-func expandAwsGroupAvailabilityZones(data interface{}) ([]*spotinst.AwsGroupComputeAvailabilityZone, error) { - list := data.(*schema.Set).List() - zones := make([]*spotinst.AwsGroupComputeAvailabilityZone, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - zone := &spotinst.AwsGroupComputeAvailabilityZone{} - - if v, ok := m["name"].(string); ok && v != "" { - zone.Name = spotinst.String(v) - } - - if v, ok := m["subnet_id"].(string); ok && v != "" { - zone.SubnetID = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup availability zone configuration: %s\n", stringutil.Stringify(zone)) - zones = append(zones, zone) - } - - return zones, nil -} - -// expandAwsGroupAvailabilityZonesSlice expands the Availability Zone block when provided as a slice. -func expandAwsGroupAvailabilityZonesSlice(data interface{}) ([]*spotinst.AwsGroupComputeAvailabilityZone, error) { - list := data.([]interface{}) - zones := make([]*spotinst.AwsGroupComputeAvailabilityZone, 0, len(list)) - for _, str := range list { - if s, ok := str.(string); ok { - parts := strings.Split(s, ":") - zone := &spotinst.AwsGroupComputeAvailabilityZone{} - if len(parts) >= 1 && parts[0] != "" { - zone.Name = spotinst.String(parts[0]) - } - if len(parts) == 2 && parts[1] != "" { - zone.SubnetID = spotinst.String(parts[1]) - } - log.Printf("[DEBUG] AwsGroup availability zone configuration: %s\n", stringutil.Stringify(zone)) - zones = append(zones, zone) - } - } - - return zones, nil -} - -// expandAwsGroupEBSVolumePool expands the EBS Volume Pool block. 
-func expandAwsGroupEBSVolumePool(data interface{}) ([]*spotinst.AwsGroupComputeEBSVolume, error) { - list := data.(*schema.Set).List() - volumes := make([]*spotinst.AwsGroupComputeEBSVolume, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - volume := &spotinst.AwsGroupComputeEBSVolume{} - - if v, ok := m["device_name"].(string); ok && v != "" { - volume.DeviceName = spotinst.String(v) - } - - if v, ok := m["volume_ids"].([]interface{}); ok { - ids := make([]string, len(v)) - for i, j := range v { - ids[i] = j.(string) - } - volume.VolumeIDs = ids - } - - log.Printf("[DEBUG] AwsGroup EBS volume (pool) configuration: %s\n", stringutil.Stringify(volume)) - volumes = append(volumes, volume) - } - - return volumes, nil -} - -// expandAwsGroupSignals expands the Signal block. -func expandAwsGroupSignals(data interface{}) ([]*spotinst.AwsGroupStrategySignal, error) { - list := data.(*schema.Set).List() - signals := make([]*spotinst.AwsGroupStrategySignal, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - signal := &spotinst.AwsGroupStrategySignal{} - - if v, ok := m["name"].(string); ok && v != "" { - signal.Name = spotinst.String(strings.ToUpper(v)) - } - - log.Printf("[DEBUG] AwsGroup signal configuration: %s\n", stringutil.Stringify(signal)) - signals = append(signals, signal) - } - - return signals, nil -} - -// expandAwsGroupInstanceTypes expands the Instance Types block. 
-func expandAwsGroupInstanceTypes(data interface{}) (*spotinst.AwsGroupComputeInstanceType, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - types := &spotinst.AwsGroupComputeInstanceType{} - if v, ok := m["ondemand"].(string); ok && v != "" { - types.OnDemand = spotinst.String(v) - } - if v, ok := m["spot"].([]interface{}); ok { - it := make([]string, len(v)) - for i, j := range v { - it[i] = j.(string) - } - types.Spot = it - } - - log.Printf("[DEBUG] AwsGroup instance types configuration: %s\n", stringutil.Stringify(types)) - return types, nil -} - -// expandAwsGroupNetworkInterfaces expands the Elastic Network Interface block. -func expandAwsGroupNetworkInterfaces(data interface{}) ([]*spotinst.AwsGroupComputeNetworkInterface, error) { - list := data.(*schema.Set).List() - interfaces := make([]*spotinst.AwsGroupComputeNetworkInterface, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - iface := &spotinst.AwsGroupComputeNetworkInterface{} - - if v, ok := m["network_interface_id"].(string); ok && v != "" { - iface.ID = spotinst.String(v) - } - - if v, ok := m["description"].(string); ok && v != "" { - iface.Description = spotinst.String(v) - } - - if v, ok := m["device_index"].(int); ok && v >= 0 { - iface.DeviceIndex = spotinst.Int(v) - } - - if v, ok := m["secondary_private_ip_address_count"].(int); ok && v > 0 { - iface.SecondaryPrivateIPAddressCount = spotinst.Int(v) - } - - if v, ok := m["associate_public_ip_address"].(bool); ok { - iface.AssociatePublicIPAddress = spotinst.Bool(v) - } - - if v, ok := m["delete_on_termination"].(bool); ok { - iface.DeleteOnTermination = spotinst.Bool(v) - } - - if v, ok := m["private_ip_address"].(string); ok && v != "" { - iface.PrivateIPAddress = spotinst.String(v) - } - - if v, ok := m["subnet_id"].(string); ok && v != "" { - iface.SubnetID = spotinst.String(v) - } - - if v, ok := m["security_group_ids"].([]interface{}); ok { - ids := make([]string, 
len(v)) - for i, j := range v { - ids[i] = j.(string) - } - iface.SecurityGroupsIDs = ids - } - - log.Printf("[DEBUG] AwsGroup network interface configuration: %s\n", stringutil.Stringify(iface)) - interfaces = append(interfaces, iface) - } - - return interfaces, nil -} - -// expandAwsGroupEphemeralBlockDevice expands the Ephemeral Block Device block. -func expandAwsGroupEphemeralBlockDevices(data interface{}) ([]*spotinst.AwsGroupComputeBlockDevice, error) { - list := data.(*schema.Set).List() - devices := make([]*spotinst.AwsGroupComputeBlockDevice, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - device := &spotinst.AwsGroupComputeBlockDevice{} - - if v, ok := m["device_name"].(string); ok && v != "" { - device.DeviceName = spotinst.String(v) - } - - if v, ok := m["virtual_name"].(string); ok && v != "" { - device.VirtualName = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup ephemeral block device configuration: %s\n", stringutil.Stringify(device)) - devices = append(devices, device) - } - - return devices, nil -} - -// expandAwsGroupEBSBlockDevices expands the EBS Block Device block. 
-func expandAwsGroupEBSBlockDevices(data interface{}) ([]*spotinst.AwsGroupComputeBlockDevice, error) { - list := data.(*schema.Set).List() - devices := make([]*spotinst.AwsGroupComputeBlockDevice, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - device := &spotinst.AwsGroupComputeBlockDevice{EBS: &spotinst.AwsGroupComputeEBS{}} - - if v, ok := m["device_name"].(string); ok && v != "" { - device.DeviceName = spotinst.String(v) - } - - if v, ok := m["delete_on_termination"].(bool); ok { - device.EBS.DeleteOnTermination = spotinst.Bool(v) - } - - if v, ok := m["encrypted"].(bool); ok { - device.EBS.Encrypted = spotinst.Bool(v) - } - - if v, ok := m["snapshot_id"].(string); ok && v != "" { - device.EBS.SnapshotID = spotinst.String(v) - } - - if v, ok := m["volume_type"].(string); ok && v != "" { - device.EBS.VolumeType = spotinst.String(v) - } - - if v, ok := m["volume_size"].(int); ok && v > 0 { - device.EBS.VolumeSize = spotinst.Int(v) - } - - if v, ok := m["iops"].(int); ok && v > 0 { - device.EBS.IOPS = spotinst.Int(v) - } - - log.Printf("[DEBUG] AwsGroup elastic block device configuration: %s\n", stringutil.Stringify(device)) - devices = append(devices, device) - } - - return devices, nil -} - -// iprofArnRE is a regular expression for matching IAM instance profile ARNs. -var iprofArnRE = regexp.MustCompile(`arn:aws:iam::\d{12}:instance-profile/?[a-zA-Z_0-9+=,.@\-_/]+`) - -// expandAwsGroupLaunchSpecification expands the launch Specification block. 
-func expandAwsGroupLaunchSpecification(data interface{}) (*spotinst.AwsGroupComputeLaunchSpecification, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - lc := &spotinst.AwsGroupComputeLaunchSpecification{} - - if v, ok := m["monitoring"].(bool); ok { - lc.Monitoring = spotinst.Bool(v) - } - - if v, ok := m["ebs_optimized"].(bool); ok { - lc.EBSOptimized = spotinst.Bool(v) - } - - if v, ok := m["image_id"].(string); ok && v != "" { - lc.ImageID = spotinst.String(v) - } - - if v, ok := m["key_pair"].(string); ok && v != "" { - lc.KeyPair = spotinst.String(v) - } - - if v, ok := m["health_check_type"].(string); ok && v != "" { - lc.HealthCheckType = spotinst.String(v) - } - - if v, ok := m["health_check_grace_period"].(int); ok && v > 0 { - lc.HealthCheckGracePeriod = spotinst.Int(v) - } - - if v, ok := m["iam_instance_profile"].(string); ok && v != "" { - iprof := &spotinst.AwsGroupComputeIamInstanceProfile{} - if iprofArnRE.MatchString(v) { - iprof.Arn = spotinst.String(v) - } else { - iprof.Name = spotinst.String(v) - } - lc.IamInstanceProfile = iprof - } - - if v, ok := m["user_data"].(string); ok && v != "" { - lc.UserData = spotinst.String(base64.StdEncoding.EncodeToString([]byte(v))) - } - - if v, ok := m["security_group_ids"].([]interface{}); ok { - ids := make([]string, len(v)) - for i, j := range v { - ids[i] = j.(string) - } - lc.SecurityGroupIDs = ids - } - - if v, ok := m["load_balancer_names"].([]interface{}); ok { - var names []string - for _, j := range v { - if name, ok := j.(string); ok && name != "" { - names = append(names, name) - } - } - lc.LoadBalancerNames = names - } - - log.Printf("[DEBUG] AwsGroup launch specification configuration: %s\n", stringutil.Stringify(lc)) - return lc, nil -} - -// expandAwsGroupLoadBalancer expands the Load Balancer block. 
-func expandAwsGroupLoadBalancer(data interface{}) ([]*spotinst.AwsGroupComputeLoadBalancer, error) { - list := data.(*schema.Set).List() - lbs := make([]*spotinst.AwsGroupComputeLoadBalancer, 0, len(list)) - for _, item := range list { - m := item.(map[string]interface{}) - lb := &spotinst.AwsGroupComputeLoadBalancer{} - - if v, ok := m["name"].(string); ok && v != "" { - lb.Name = spotinst.String(v) - } - - if v, ok := m["arn"].(string); ok && v != "" { - lb.Arn = spotinst.String(v) - } - - if v, ok := m["type"].(string); ok && v != "" { - lb.Type = spotinst.String(strings.ToUpper(v)) - } - - log.Printf("[DEBUG] AwsGroup load balancer configuration: %s\n", stringutil.Stringify(lb)) - lbs = append(lbs, lb) - } - - return lbs, nil -} - -// expandAwsGroupRancherIntegration expands the Rancher Integration block. -func expandAwsGroupRancherIntegration(data interface{}) (*spotinst.AwsGroupRancherIntegration, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - i := &spotinst.AwsGroupRancherIntegration{} - - if v, ok := m["master_host"].(string); ok && v != "" { - i.MasterHost = spotinst.String(v) - } - - if v, ok := m["access_key"].(string); ok && v != "" { - i.AccessKey = spotinst.String(v) - } - - if v, ok := m["secret_key"].(string); ok && v != "" { - i.SecretKey = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup Rancher integration configuration: %s\n", stringutil.Stringify(i)) - return i, nil -} - -// expandAwsGroupElasticBeanstalkIntegration expands the Elastic Beanstalk Integration block. 
-func expandAwsGroupElasticBeanstalkIntegration(data interface{}) (*spotinst.AwsGroupElasticBeanstalkIntegration, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - i := &spotinst.AwsGroupElasticBeanstalkIntegration{} - - if v, ok := m["environment_id"].(string); ok && v != "" { - i.EnvironmentID = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup Elastic Beanstalk integration configuration: %s\n", stringutil.Stringify(i)) - return i, nil -} - -// expandAwsGroupEC2ContainerServiceIntegration expands the EC2 Container Service Integration block. -func expandAwsGroupEC2ContainerServiceIntegration(data interface{}) (*spotinst.AwsGroupEC2ContainerServiceIntegration, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - i := &spotinst.AwsGroupEC2ContainerServiceIntegration{} - - if v, ok := m["cluster_name"].(string); ok && v != "" { - i.ClusterName = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup ECS integration configuration: %s\n", stringutil.Stringify(i)) - return i, nil -} - -// expandAwsGroupKubernetesIntegration expands the Kubernetes Integration block. -func expandAwsGroupKubernetesIntegration(data interface{}) (*spotinst.AwsGroupKubernetesIntegration, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - i := &spotinst.AwsGroupKubernetesIntegration{} - - if v, ok := m["api_server"].(string); ok && v != "" { - i.Server = spotinst.String(v) - } - - if v, ok := m["token"].(string); ok && v != "" { - i.Token = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup Kubernetes integration configuration: %s\n", stringutil.Stringify(i)) - return i, nil -} - -// expandAwsGroupMesosphereIntegration expands the Mesosphere Integration block. 
-func expandAwsGroupMesosphereIntegration(data interface{}) (*spotinst.AwsGroupMesosphereIntegration, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - i := &spotinst.AwsGroupMesosphereIntegration{} - - if v, ok := m["api_server"].(string); ok && v != "" { - i.Server = spotinst.String(v) - } - - log.Printf("[DEBUG] AwsGroup Mesosphere integration configuration: %s\n", stringutil.Stringify(i)) - return i, nil -} - -// expandAwsGroupElasticIPs expands the Elastic IPs block. -func expandAwsGroupElasticIPs(data interface{}) ([]string, error) { - list := data.([]interface{}) - eips := make([]string, 0, len(list)) - for _, str := range list { - if eip, ok := str.(string); ok { - log.Printf("[DEBUG] AwsGroup elastic IP configuration: %s\n", stringutil.Stringify(eip)) - eips = append(eips, eip) - } - } - - return eips, nil -} - -// expandAwsGroupTags expands the Tags block. -func expandAwsGroupTags(data interface{}) ([]*spotinst.AwsGroupComputeTag, error) { - list := data.(map[string]interface{}) - tags := make([]*spotinst.AwsGroupComputeTag, 0, len(list)) - for k, v := range list { - tag := &spotinst.AwsGroupComputeTag{ - Key: spotinst.String(k), - Value: spotinst.String(v.(string)), - } - - log.Printf("[DEBUG] AwsGroup tag configuration: %s\n", stringutil.Stringify(tag)) - tags = append(tags, tag) - } - - return tags, nil -} - -func hashAwsGroupCapacity(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%d-", m["target"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["minimum"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["maximum"].(int))) - - return hashcode.String(buf.String()) -} - -func hashAwsGroupStrategy(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%f-", m["risk"].(float64))) - buf.WriteString(fmt.Sprintf("%d-", m["ondemand_count"].(int))) - buf.WriteString(fmt.Sprintf("%t-", 
m["utilize_reserved_instances"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["fallback_to_ondemand"].(bool))) - - return hashcode.String(buf.String()) -} - -func hashAwsGroupLoadBalancer(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) - if v, ok := m["arn"].(string); ok && len(v) > 0 { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - - return hashcode.String(buf.String()) -} - -func hashAwsGroupEBSBlockDevice(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int))) - buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["encrypted"].(bool))) - buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int))) - - return hashcode.String(buf.String()) -} - -func hashAwsGroupScalingPolicy(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%d-", m["adjustment"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["cooldown"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["evaluation_periods"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["metric_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["namespace"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["period"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["policy_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["statistic"].(string))) - buf.WriteString(fmt.Sprintf("%f-", m["threshold"].(float64))) - buf.WriteString(fmt.Sprintf("%s-", m["unit"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["min_target_capacity"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["max_target_capacity"].(int))) - - // if v, ok := m["operator"].(string); ok && 
len(v) > 0 { - // buf.WriteString(fmt.Sprintf("%s-", v)) - // } - - if d, ok := m["dimensions"]; ok { - if len(d.(map[string]interface{})) > 0 { - e := d.(map[string]interface{}) - for k, v := range e { - buf.WriteString(fmt.Sprintf("%s:%s-", k, v.(string))) - } - } - } - - return hashcode.String(buf.String()) -} - -// tagsToMap turns the list of tags into a map. -func tagsToMap(ts []*spotinst.AwsGroupComputeTag) map[string]string { - result := make(map[string]string) - for _, t := range ts { - result[spotinst.StringValue(t.Key)] = spotinst.StringValue(t.Value) - } - return result -} diff --git a/builtin/providers/spotinst/resource_spotinst_aws_group_test.go b/builtin/providers/spotinst/resource_spotinst_aws_group_test.go deleted file mode 100755 index 703f66329..000000000 --- a/builtin/providers/spotinst/resource_spotinst_aws_group_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package spotinst - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/spotinst/spotinst-sdk-go/spotinst" -) - -func TestAccSpotinstGroup_Basic(t *testing.T) { - var group spotinst.AwsGroup - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSpotinstGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstGroupConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstGroupExists("spotinst_aws_group.foo", &group), - testAccCheckSpotinstGroupAttributes(&group), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "name", "terraform"), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "description", "terraform"), - ), - }, - }, - }) -} - -func TestAccSpotinstGroup_Updated(t *testing.T) { - var group spotinst.AwsGroup - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckSpotinstGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstGroupConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstGroupExists("spotinst_aws_group.foo", &group), - testAccCheckSpotinstGroupAttributes(&group), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "name", "terraform"), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "description", "terraform"), - ), - }, - { - Config: testAccCheckSpotinstGroupConfigNewValue, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstGroupExists("spotinst_aws_group.foo", &group), - testAccCheckSpotinstGroupAttributesUpdated(&group), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "name", "terraform_updated"), - resource.TestCheckResourceAttr("spotinst_aws_group.foo", "description", "terraform_updated"), - ), - }, - }, - }) -} - -func testAccCheckSpotinstGroupDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*spotinst.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "spotinst_aws_group" { - continue - } - input := &spotinst.ReadAwsGroupInput{ID: spotinst.String(rs.Primary.ID)} - resp, err := client.AwsGroupService.Read(input) - if err == nil && resp != nil && resp.Group != nil { - return fmt.Errorf("Group still exists") - } - } - return nil -} - -func testAccCheckSpotinstGroupAttributes(group *spotinst.AwsGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if spotinst.StringValue(group.Name) != "terraform" { - return fmt.Errorf("Bad content: %v", group.Name) - } - return nil - } -} - -func testAccCheckSpotinstGroupAttributesUpdated(group *spotinst.AwsGroup) resource.TestCheckFunc { - return func(s *terraform.State) error { - if spotinst.StringValue(group.Name) != "terraform_updated" { - return fmt.Errorf("Bad content: %v", group.Name) - } - return nil - } -} - -func testAccCheckSpotinstGroupExists(n string, group *spotinst.AwsGroup) resource.TestCheckFunc { - 
return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No resource ID is set") - } - client := testAccProvider.Meta().(*spotinst.Client) - input := &spotinst.ReadAwsGroupInput{ID: spotinst.String(rs.Primary.ID)} - resp, err := client.AwsGroupService.Read(input) - if err != nil { - return err - } - if spotinst.StringValue(resp.Group.Name) != rs.Primary.Attributes["name"] { - return fmt.Errorf("Group not found: %+v,\n %+v\n", resp.Group, rs.Primary.Attributes) - } - *group = *resp.Group - return nil - } -} - -const testAccCheckSpotinstGroupConfigBasic = ` -resource "spotinst_aws_group" "foo" { - name = "terraform" - description = "terraform" - product = "Linux/UNIX" - - capacity { - target = 0 - minimum = 0 - maximum = 5 - } - - strategy { - risk = 100 - } - - instance_types { - ondemand = "c3.large" - spot = ["c3.large", "m4.xlarge"] - } - - availability_zone { - name = "us-west-2b" - } - - launch_specification { - monitoring = false - image_id = "ami-f0091d91" - key_pair = "east" - security_group_ids = ["default"] - } - - scaling_up_policy { - policy_name = "Scaling Policy 1" - metric_name = "CPUUtilization" - statistic = "average" - unit = "percent" - threshold = 80 - adjustment = 1 - namespace = "AWS/EC2" - operator = "gte" - period = 300 - evaluation_periods = 2 - cooldown = 300 - dimensions { - env = "prod" - } - } -}` - -const testAccCheckSpotinstGroupConfigNewValue = ` -resource "spotinst_aws_group" "foo" { - name = "terraform_updated" - description = "terraform_updated" - product = "Linux/UNIX" - - capacity { - target = 0 - minimum = 0 - maximum = 5 - } - - strategy { - risk = 100 - } - - instance_types { - ondemand = "c3.large" - spot = ["c3.large", "m4.xlarge"] - } - - availability_zone { - name = "us-west-2b" - } - - launch_specification { - monitoring = false - image_id = "ami-f0091d91" - key_pair = "east" - security_group_ids = 
["default"] - } - - scaling_up_policy { - policy_name = "Scaling Policy 2" - metric_name = "CPUUtilization" - statistic = "average" - unit = "percent" - threshold = 80 - adjustment = 1 - namespace = "AWS/EC2" - operator = "gte" - period = 300 - evaluation_periods = 2 - cooldown = 300 - dimensions { - env = "dev" - } - } -}` diff --git a/builtin/providers/spotinst/resource_spotinst_healthcheck.go b/builtin/providers/spotinst/resource_spotinst_healthcheck.go deleted file mode 100755 index f599c79d5..000000000 --- a/builtin/providers/spotinst/resource_spotinst_healthcheck.go +++ /dev/null @@ -1,333 +0,0 @@ -package spotinst - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/spotinst/spotinst-sdk-go/spotinst" - "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil" -) - -func resourceSpotinstHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceSpotinstHealthCheckCreate, - Update: resourceSpotinstHealthCheckUpdate, - Read: resourceSpotinstHealthCheckRead, - Delete: resourceSpotinstHealthCheckDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "resource_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "check": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "interval": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "timeout": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - - "threshold": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"healthy": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "unhealthy": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - - "proxy": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "addr": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "port": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - }, - } -} - -func resourceSpotinstHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - newHealthCheck, err := buildHealthCheckOpts(d, meta) - if err != nil { - return err - } - log.Printf("[DEBUG] HealthCheck create configuration: %#v\n", newHealthCheck) - input := &spotinst.CreateHealthCheckInput{HealthCheck: newHealthCheck} - resp, err := client.HealthCheckService.Create(input) - if err != nil { - return fmt.Errorf("Error creating health check: %s", err) - } - d.SetId(spotinst.StringValue(resp.HealthCheck.ID)) - log.Printf("[INFO] HealthCheck created successfully: %s\n", d.Id()) - return resourceSpotinstHealthCheckRead(d, meta) -} - -func resourceSpotinstHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - input := &spotinst.ReadHealthCheckInput{ID: spotinst.String(d.Id())} - resp, err := client.HealthCheckService.Read(input) - if err != nil { - return fmt.Errorf("Error retrieving health check: %s", err) - } - if hc := resp.HealthCheck; hc != nil { - d.Set("name", hc.Name) - d.Set("resource_id", hc.ResourceID) - - // Set the check. - check := make([]map[string]interface{}, 0, 1) - check = append(check, map[string]interface{}{ - "protocol": hc.Check.Protocol, - "endpoint": hc.Check.Endpoint, - "port": hc.Check.Port, - "interval": hc.Check.Interval, - "timeout": hc.Check.Timeout, - }) - d.Set("check", check) - - // Set the threshold. 
- threshold := make([]map[string]interface{}, 0, 1) - threshold = append(threshold, map[string]interface{}{ - "healthy": hc.Check.Healthy, - "unhealthy": hc.Check.Unhealthy, - }) - d.Set("threshold", threshold) - - // Set the proxy. - proxy := make([]map[string]interface{}, 0, 1) - proxy = append(proxy, map[string]interface{}{ - "addr": hc.Addr, - "port": hc.Port, - }) - d.Set("proxy", proxy) - } else { - d.SetId("") - } - return nil -} - -func resourceSpotinstHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - healthCheck := &spotinst.HealthCheck{ID: spotinst.String(d.Id())} - update := false - - if d.HasChange("name") { - healthCheck.Name = spotinst.String(d.Get("name").(string)) - update = true - } - - if d.HasChange("resource_id") { - healthCheck.ResourceID = spotinst.String(d.Get("resource_id").(string)) - update = true - } - - if d.HasChange("check") { - if v, ok := d.GetOk("check"); ok { - if check, err := expandHealthCheckConfig(v); err != nil { - return err - } else { - healthCheck.Check = check - update = true - } - } - } - - if d.HasChange("threshold") { - if v, ok := d.GetOk("threshold"); ok { - if threshold, err := expandHealthCheckThreshold(v); err != nil { - return err - } else { - healthCheck.Check.HealthCheckThreshold = threshold - update = true - } - } - } - - if d.HasChange("proxy") { - if v, ok := d.GetOk("proxy"); ok { - if proxy, err := expandHealthCheckProxy(v); err != nil { - return err - } else { - healthCheck.HealthCheckProxy = proxy - update = true - } - } - } - - if update { - log.Printf("[DEBUG] HealthCheck update configuration: %s\n", stringutil.Stringify(healthCheck)) - input := &spotinst.UpdateHealthCheckInput{HealthCheck: healthCheck} - if _, err := client.HealthCheckService.Update(input); err != nil { - return fmt.Errorf("Error updating health check %s: %s", d.Id(), err) - } - } - - return resourceSpotinstHealthCheckRead(d, meta) -} - -func resourceSpotinstHealthCheckDelete(d 
*schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - log.Printf("[INFO] Deleting health check: %s\n", d.Id()) - input := &spotinst.DeleteHealthCheckInput{ID: spotinst.String(d.Id())} - if _, err := client.HealthCheckService.Delete(input); err != nil { - return fmt.Errorf("Error deleting health check: %s", err) - } - d.SetId("") - return nil -} - -// buildHealthCheckOpts builds the Spotinst HealthCheck options. -func buildHealthCheckOpts(d *schema.ResourceData, meta interface{}) (*spotinst.HealthCheck, error) { - healthCheck := &spotinst.HealthCheck{ - Name: spotinst.String(d.Get("name").(string)), - ResourceID: spotinst.String(d.Get("resource_id").(string)), - } - - if v, ok := d.GetOk("check"); ok { - if check, err := expandHealthCheckConfig(v); err != nil { - return nil, err - } else { - healthCheck.Check = check - } - } - - if v, ok := d.GetOk("threshold"); ok { - if threshold, err := expandHealthCheckThreshold(v); err != nil { - return nil, err - } else { - healthCheck.Check.HealthCheckThreshold = threshold - } - } - - if v, ok := d.GetOk("proxy"); ok { - if proxy, err := expandHealthCheckProxy(v); err != nil { - return nil, err - } else { - healthCheck.HealthCheckProxy = proxy - } - } - - return healthCheck, nil -} - -// expandHealthCheckConfig expands the Check block. 
-func expandHealthCheckConfig(data interface{}) (*spotinst.HealthCheckConfig, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - check := &spotinst.HealthCheckConfig{} - - if v, ok := m["protocol"].(string); ok && v != "" { - check.Protocol = spotinst.String(v) - } - - if v, ok := m["endpoint"].(string); ok && v != "" { - check.Endpoint = spotinst.String(v) - } - - if v, ok := m["port"].(int); ok && v >= 0 { - check.Port = spotinst.Int(v) - } - - if v, ok := m["interval"].(int); ok && v >= 0 { - check.Interval = spotinst.Int(v) - } - - if v, ok := m["timeout"].(int); ok && v >= 0 { - check.Timeout = spotinst.Int(v) - } - - log.Printf("[DEBUG] HealthCheck check configuration: %s\n", stringutil.Stringify(check)) - return check, nil -} - -// expandHealthCheckThreshold expands the Threshold block. -func expandHealthCheckThreshold(data interface{}) (*spotinst.HealthCheckThreshold, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - threshold := &spotinst.HealthCheckThreshold{} - - if v, ok := m["healthy"].(int); ok && v >= 0 { - threshold.Healthy = spotinst.Int(v) - } - - if v, ok := m["unhealthy"].(int); ok && v >= 0 { - threshold.Unhealthy = spotinst.Int(v) - } - - log.Printf("[DEBUG] HealthCheck threshold configuration: %s\n", stringutil.Stringify(threshold)) - return threshold, nil -} - -// expandHealthCheckProxy expands the Proxy block. 
-func expandHealthCheckProxy(data interface{}) (*spotinst.HealthCheckProxy, error) { - list := data.(*schema.Set).List() - m := list[0].(map[string]interface{}) - proxy := &spotinst.HealthCheckProxy{} - - if v, ok := m["addr"].(string); ok && v != "" { - proxy.Addr = spotinst.String(v) - } - - if v, ok := m["port"].(int); ok && v > 0 { - proxy.Port = spotinst.Int(v) - } - - log.Printf("[DEBUG] HealthCheck proxy configuration: %s\n", stringutil.Stringify(proxy)) - return proxy, nil -} diff --git a/builtin/providers/spotinst/resource_spotinst_healthcheck_test.go b/builtin/providers/spotinst/resource_spotinst_healthcheck_test.go deleted file mode 100755 index 20075ef7b..000000000 --- a/builtin/providers/spotinst/resource_spotinst_healthcheck_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package spotinst - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/spotinst/spotinst-sdk-go/spotinst" -) - -func TestAccSpotinstHealthCheck_Basic(t *testing.T) { - var healthCheck spotinst.HealthCheck - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSpotinstHealthCheckDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstHealthCheckConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstHealthCheckExists("spotinst_healthcheck.foo", &healthCheck), - testAccCheckSpotinstHealthCheckAttributes(&healthCheck), - resource.TestCheckResourceAttr("spotinst_healthcheck.foo", "name", "hc-foo"), - ), - }, - }, - }) -} - -func TestAccSpotinstHealthCheck_Updated(t *testing.T) { - var healthCheck spotinst.HealthCheck - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckSpotinstHealthCheckDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstHealthCheckConfigBasic, - Check: 
resource.ComposeTestCheckFunc( - testAccCheckSpotinstHealthCheckExists("spotinst_healthcheck.foo", &healthCheck), - testAccCheckSpotinstHealthCheckAttributes(&healthCheck), - resource.TestCheckResourceAttr("spotinst_healthcheck.foo", "name", "hc-foo"), - ), - }, - { - Config: testAccCheckSpotinstHealthCheckConfigNewValue, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstHealthCheckExists("spotinst_healthcheck.foo", &healthCheck), - testAccCheckSpotinstHealthCheckAttributesUpdated(&healthCheck), - resource.TestCheckResourceAttr("spotinst_healthcheck.foo", "name", "hc-bar"), - ), - }, - }, - }) -} - -func testAccCheckSpotinstHealthCheckDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*spotinst.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "spotinst_healthcheck" { - continue - } - input := &spotinst.ReadHealthCheckInput{ID: spotinst.String(rs.Primary.ID)} - resp, err := client.HealthCheckService.Read(input) - if err == nil && resp != nil && resp.HealthCheck != nil { - return fmt.Errorf("HealthCheck still exists") - } - } - return nil -} - -func testAccCheckSpotinstHealthCheckAttributes(healthCheck *spotinst.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if p := spotinst.StringValue(healthCheck.Check.Protocol); p != "http" { - return fmt.Errorf("Bad content: %s", p) - } - if e := spotinst.StringValue(healthCheck.Check.Endpoint); e != "http://endpoint.com" { - return fmt.Errorf("Bad content: %s", e) - } - return nil - } -} - -func testAccCheckSpotinstHealthCheckAttributesUpdated(healthCheck *spotinst.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if p := spotinst.StringValue(healthCheck.Check.Protocol); p != "https" { - return fmt.Errorf("Bad content: %s", p) - } - if e := spotinst.StringValue(healthCheck.Check.Endpoint); e != "https://endpoint.com" { - return fmt.Errorf("Bad content: %s", e) - } - return nil - } -} - -func 
testAccCheckSpotinstHealthCheckExists(n string, healthCheck *spotinst.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No resource ID is set") - } - client := testAccProvider.Meta().(*spotinst.Client) - input := &spotinst.ReadHealthCheckInput{ID: spotinst.String(rs.Primary.ID)} - resp, err := client.HealthCheckService.Read(input) - if err != nil { - return err - } - if spotinst.StringValue(resp.HealthCheck.ID) != rs.Primary.Attributes["id"] { - return fmt.Errorf("HealthCheck not found: %+v,\n %+v\n", resp.HealthCheck, rs.Primary.Attributes) - } - *healthCheck = *resp.HealthCheck - return nil - } -} - -const testAccCheckSpotinstHealthCheckConfigBasic = ` -resource "spotinst_healthcheck" "foo" { - name = "hc-foo" - resource_id = "sig-foo" - check { - protocol = "http" - endpoint = "http://endpoint.com" - port = 1337 - interval = 10 - timeout = 10 - } - threshold { - healthy = 1 - unhealthy = 1 - } - proxy { - addr = "http://proxy.com" - port = 80 - } -}` - -const testAccCheckSpotinstHealthCheckConfigNewValue = ` -resource "spotinst_healthcheck" "foo" { - name = "hc-bar" - resource_id = "sig-foo" - check { - protocol = "https" - endpoint = "https://endpoint.com" - port = 3000 - interval = 10 - timeout = 10 - } - threshold { - healthy = 2 - unhealthy = 2 - } - proxy { - addr = "http://proxy.com" - port = 8080 - } -}` diff --git a/builtin/providers/spotinst/resource_spotinst_subscription.go b/builtin/providers/spotinst/resource_spotinst_subscription.go deleted file mode 100755 index 7ae2827b9..000000000 --- a/builtin/providers/spotinst/resource_spotinst_subscription.go +++ /dev/null @@ -1,145 +0,0 @@ -package spotinst - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/spotinst/spotinst-sdk-go/spotinst" - 
"github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil" -) - -func resourceSpotinstSubscription() *schema.Resource { - return &schema.Resource{ - Create: resourceSpotinstSubscriptionCreate, - Update: resourceSpotinstSubscriptionUpdate, - Read: resourceSpotinstSubscriptionRead, - Delete: resourceSpotinstSubscriptionDelete, - - Schema: map[string]*schema.Schema{ - "resource_id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "event_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - StateFunc: func(v interface{}) string { - value := v.(string) - return strings.ToUpper(value) - }, - }, - - "protocol": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "format": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - } -} - -func resourceSpotinstSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - newSubscription, err := buildSubscriptionOpts(d, meta) - if err != nil { - return err - } - log.Printf("[DEBUG] Subscription create configuration: %s\n", stringutil.Stringify(newSubscription)) - input := &spotinst.CreateSubscriptionInput{Subscription: newSubscription} - resp, err := client.SubscriptionService.Create(input) - if err != nil { - return fmt.Errorf("Error creating subscription: %s", err) - } - d.SetId(spotinst.StringValue(resp.Subscription.ID)) - log.Printf("[INFO] Subscription created successfully: %s\n", d.Id()) - return resourceSpotinstSubscriptionRead(d, meta) -} - -func resourceSpotinstSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - input := &spotinst.ReadSubscriptionInput{ID: spotinst.String(d.Id())} - resp, err := client.SubscriptionService.Read(input) - if err != nil { - return fmt.Errorf("Error retrieving subscription: %s", err) - } - if s := resp.Subscription; s != nil { - 
d.Set("resource_id", s.ResourceID) - d.Set("event_type", s.EventType) - d.Set("protocol", s.Protocol) - d.Set("endpoint", s.Endpoint) - d.Set("format", s.Format) - } else { - d.SetId("") - } - return nil -} - -func resourceSpotinstSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*spotinst.Client) - subscription := &spotinst.Subscription{ID: spotinst.String(d.Id())} - update := false - - if d.HasChange("resource_id") { - subscription.ResourceID = spotinst.String(d.Get("resource_id").(string)) - update = true - } - - if d.HasChange("event_type") { - subscription.EventType = spotinst.String(d.Get("event_type").(string)) - update = true - } - - if d.HasChange("protocol") { - subscription.Protocol = spotinst.String(d.Get("protocol").(string)) - update = true - } - - if d.HasChange("endpoint") { - subscription.Endpoint = spotinst.String(d.Get("endpoint").(string)) - update = true - } - - if d.HasChange("format") { - subscription.Format = d.Get("format").(map[string]interface{}) - update = true - } - - if update { - log.Printf("[DEBUG] Subscription update configuration: %s\n", stringutil.Stringify(subscription)) - input := &spotinst.UpdateSubscriptionInput{Subscription: subscription} - if _, err := client.SubscriptionService.Update(input); err != nil { - return fmt.Errorf("Error updating subscription %s: %s", d.Id(), err) - } - } - - return resourceSpotinstSubscriptionRead(d, meta) -} - -func resourceSpotinstSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -// buildSubscriptionOpts builds the Spotinst Subscription options. 
-func buildSubscriptionOpts(d *schema.ResourceData, meta interface{}) (*spotinst.Subscription, error) { - subscription := &spotinst.Subscription{ - ResourceID: spotinst.String(d.Get("resource_id").(string)), - EventType: spotinst.String(strings.ToUpper(d.Get("event_type").(string))), - Protocol: spotinst.String(d.Get("protocol").(string)), - Endpoint: spotinst.String(d.Get("endpoint").(string)), - Format: d.Get("format").(map[string]interface{}), - } - return subscription, nil -} diff --git a/builtin/providers/spotinst/resource_spotinst_subscription_test.go b/builtin/providers/spotinst/resource_spotinst_subscription_test.go deleted file mode 100755 index eea71080e..000000000 --- a/builtin/providers/spotinst/resource_spotinst_subscription_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package spotinst - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/spotinst/spotinst-sdk-go/spotinst" -) - -func TestAccSpotinstSubscription_Basic(t *testing.T) { - var subscription spotinst.Subscription - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - //CheckDestroy: testAccCheckSpotinstSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstSubscriptionConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstSubscriptionExists("spotinst_subscription.foo", &subscription), - testAccCheckSpotinstSubscriptionAttributes(&subscription), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "protocol", "http"), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "endpoint", "http://endpoint.com"), - ), - }, - }, - }) -} - -func TestAccSpotinstSubscription_Updated(t *testing.T) { - var subscription spotinst.Subscription - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - //CheckDestroy: 
testAccCheckSpotinstSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckSpotinstSubscriptionConfigBasic, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstSubscriptionExists("spotinst_subscription.foo", &subscription), - testAccCheckSpotinstSubscriptionAttributes(&subscription), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "protocol", "http"), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "endpoint", "http://endpoint.com"), - ), - }, - { - Config: testAccCheckSpotinstSubscriptionConfigNewValue, - Check: resource.ComposeTestCheckFunc( - testAccCheckSpotinstSubscriptionExists("spotinst_subscription.foo", &subscription), - testAccCheckSpotinstSubscriptionAttributesUpdated(&subscription), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "protocol", "https"), - resource.TestCheckResourceAttr("spotinst_subscription.foo", "endpoint", "https://endpoint.com"), - ), - }, - }, - }) -} - -func testAccCheckSpotinstSubscriptionDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*spotinst.Client) - for _, rs := range s.RootModule().Resources { - if rs.Type != "spotinst_subscription" { - continue - } - input := &spotinst.ReadSubscriptionInput{ID: spotinst.String(rs.Primary.ID)} - if _, err := client.SubscriptionService.Read(input); err == nil { - return fmt.Errorf("Subscription still exists") - } - } - return nil -} - -func testAccCheckSpotinstSubscriptionAttributes(subscription *spotinst.Subscription) resource.TestCheckFunc { - return func(s *terraform.State) error { - if p := spotinst.StringValue(subscription.Protocol); p != "http" { - return fmt.Errorf("Bad content: %s", p) - } - if e := spotinst.StringValue(subscription.Endpoint); e != "http://endpoint.com" { - return fmt.Errorf("Bad content: %s", e) - } - return nil - } -} - -func testAccCheckSpotinstSubscriptionAttributesUpdated(subscription *spotinst.Subscription) resource.TestCheckFunc { - return func(s *terraform.State) 
error { - if p := spotinst.StringValue(subscription.Protocol); p != "https" { - return fmt.Errorf("Bad content: %s", p) - } - if e := spotinst.StringValue(subscription.Endpoint); e != "https://endpoint.com" { - return fmt.Errorf("Bad content: %s", e) - } - return nil - } -} - -func testAccCheckSpotinstSubscriptionExists(n string, subscription *spotinst.Subscription) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No resource ID is set") - } - client := testAccProvider.Meta().(*spotinst.Client) - input := &spotinst.ReadSubscriptionInput{ID: spotinst.String(rs.Primary.ID)} - resp, err := client.SubscriptionService.Read(input) - if err != nil { - return err - } - if spotinst.StringValue(resp.Subscription.ID) != rs.Primary.Attributes["id"] { - return fmt.Errorf("Subscription not found: %+v,\n %+v\n", resp.Subscription, rs.Primary.Attributes) - } - *subscription = *resp.Subscription - return nil - } -} - -const testAccCheckSpotinstSubscriptionConfigBasic = ` -resource "spotinst_subscription" "foo" { - resource_id = "sig-foo" - event_type = "aws_ec2_instance_launch" - protocol = "http" - endpoint = "http://endpoint.com" - format = { - instance_id = "%instance-id%" - tags = "foo,baz,baz" - } -}` - -const testAccCheckSpotinstSubscriptionConfigNewValue = ` -resource "spotinst_subscription" "foo" { - resource_id = "sig-foo" - event_type = "aws_ec2_instance_launch" - protocol = "https" - endpoint = "https://endpoint.com" - format = { - instance_id = "%instance-id%" - tags = "foo,baz,baz" - } -}` diff --git a/builtin/providers/statuscake/provider.go b/builtin/providers/statuscake/provider.go deleted file mode 100644 index abca37698..000000000 --- a/builtin/providers/statuscake/provider.go +++ /dev/null @@ -1,40 +0,0 @@ -package statuscake - -import ( - "github.com/DreamItGetIT/statuscake" - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("STATUSCAKE_USERNAME", nil), - Description: "Username for StatusCake Account.", - }, - "apikey": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("STATUSCAKE_APIKEY", nil), - Description: "API Key for StatusCake", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "statuscake_test": resourceStatusCakeTest(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - auth := statuscake.Auth{ - Username: d.Get("username").(string), - Apikey: d.Get("apikey").(string), - } - return statuscake.New(auth) -} diff --git a/builtin/providers/statuscake/provider_test.go b/builtin/providers/statuscake/provider_test.go deleted file mode 100644 index 83045d06f..000000000 --- a/builtin/providers/statuscake/provider_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package statuscake - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "statuscake": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("STATUSCAKE_USERNAME"); v == "" { - t.Fatal("STATUSCAKE_USERNAME must be set for acceptance 
tests") - } - if v := os.Getenv("STATUSCAKE_APIKEY"); v == "" { - t.Fatal("STATUSCAKE_APIKEY must be set for acceptance tests") - } -} diff --git a/builtin/providers/statuscake/resource_statuscaketest.go b/builtin/providers/statuscake/resource_statuscaketest.go deleted file mode 100644 index 101ee8358..000000000 --- a/builtin/providers/statuscake/resource_statuscaketest.go +++ /dev/null @@ -1,216 +0,0 @@ -package statuscake - -import ( - "fmt" - "strconv" - - "log" - - "github.com/DreamItGetIT/statuscake" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceStatusCakeTest() *schema.Resource { - return &schema.Resource{ - Create: CreateTest, - Update: UpdateTest, - Delete: DeleteTest, - Read: ReadTest, - - Schema: map[string]*schema.Schema{ - "test_id": { - Type: schema.TypeString, - Computed: true, - }, - - "website_name": { - Type: schema.TypeString, - Required: true, - }, - - "website_url": { - Type: schema.TypeString, - Required: true, - }, - - "contact_id": { - Type: schema.TypeInt, - Optional: true, - }, - - "check_rate": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - }, - - "test_type": { - Type: schema.TypeString, - Required: true, - }, - - "paused": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 40, - }, - - "confirmations": { - Type: schema.TypeInt, - Optional: true, - }, - - "port": { - Type: schema.TypeInt, - Optional: true, - }, - - "trigger_rate": { - Type: schema.TypeInt, - Optional: true, - Default: 5, - }, - }, - } -} - -func CreateTest(d *schema.ResourceData, meta interface{}) error { - client := meta.(*statuscake.Client) - - newTest := &statuscake.Test{ - WebsiteName: d.Get("website_name").(string), - WebsiteURL: d.Get("website_url").(string), - CheckRate: d.Get("check_rate").(int), - TestType: d.Get("test_type").(string), - Paused: d.Get("paused").(bool), - Timeout: d.Get("timeout").(int), - ContactID: 
d.Get("contact_id").(int), - Confirmation: d.Get("confirmations").(int), - Port: d.Get("port").(int), - TriggerRate: d.Get("trigger_rate").(int), - } - - log.Printf("[DEBUG] Creating new StatusCake Test: %s", d.Get("website_name").(string)) - - response, err := client.Tests().Update(newTest) - if err != nil { - return fmt.Errorf("Error creating StatusCake Test: %s", err.Error()) - } - - d.Set("test_id", fmt.Sprintf("%d", response.TestID)) - d.SetId(fmt.Sprintf("%d", response.TestID)) - - return ReadTest(d, meta) -} - -func UpdateTest(d *schema.ResourceData, meta interface{}) error { - client := meta.(*statuscake.Client) - - params := getStatusCakeTestInput(d) - - log.Printf("[DEBUG] StatusCake Test Update for %s", d.Id()) - _, err := client.Tests().Update(params) - if err != nil { - return fmt.Errorf("Error Updating StatusCake Test: %s", err.Error()) - } - return nil -} - -func DeleteTest(d *schema.ResourceData, meta interface{}) error { - client := meta.(*statuscake.Client) - - testId, parseErr := strconv.Atoi(d.Id()) - if parseErr != nil { - return parseErr - } - log.Printf("[DEBUG] Deleting StatusCake Test: %s", d.Id()) - err := client.Tests().Delete(testId) - if err != nil { - return err - } - - return nil -} - -func ReadTest(d *schema.ResourceData, meta interface{}) error { - client := meta.(*statuscake.Client) - - testId, parseErr := strconv.Atoi(d.Id()) - if parseErr != nil { - return parseErr - } - testResp, err := client.Tests().Detail(testId) - if err != nil { - return fmt.Errorf("Error Getting StatusCake Test Details for %s: Error: %s", d.Id(), err) - } - d.Set("website_name", testResp.WebsiteName) - d.Set("website_url", testResp.WebsiteURL) - d.Set("check_rate", testResp.CheckRate) - d.Set("test_type", testResp.TestType) - d.Set("paused", testResp.Paused) - d.Set("timeout", testResp.Timeout) - d.Set("contact_id", testResp.ContactID) - d.Set("confirmations", testResp.Confirmation) - d.Set("port", testResp.Port) - d.Set("trigger_rate", 
testResp.TriggerRate) - - return nil -} - -func getStatusCakeTestInput(d *schema.ResourceData) *statuscake.Test { - testId, parseErr := strconv.Atoi(d.Id()) - if parseErr != nil { - log.Printf("[DEBUG] Error Parsing StatusCake TestID: %s", d.Id()) - } - test := &statuscake.Test{ - TestID: testId, - } - if v, ok := d.GetOk("website_name"); ok { - test.WebsiteName = v.(string) - } - if v, ok := d.GetOk("website_url"); ok { - test.WebsiteURL = v.(string) - } - if v, ok := d.GetOk("check_rate"); ok { - test.CheckRate = v.(int) - } - if v, ok := d.GetOk("contact_id"); ok { - test.ContactID = v.(int) - } - if v, ok := d.GetOk("test_type"); ok { - test.TestType = v.(string) - } - if v, ok := d.GetOk("paused"); ok { - test.Paused = v.(bool) - } - if v, ok := d.GetOk("timeout"); ok { - test.Timeout = v.(int) - } - if v, ok := d.GetOk("contact_id"); ok { - test.ContactID = v.(int) - } - if v, ok := d.GetOk("confirmations"); ok { - test.Confirmation = v.(int) - } - if v, ok := d.GetOk("port"); ok { - test.Port = v.(int) - } - if v, ok := d.GetOk("trigger_rate"); ok { - test.TriggerRate = v.(int) - } - - defaultStatusCodes := "204, 205, 206, 303, 400, 401, 403, 404, 405, 406, " + - "408, 410, 413, 444, 429, 494, 495, 496, 499, 500, 501, 502, 503, " + - "504, 505, 506, 507, 508, 509, 510, 511, 521, 522, 523, 524, 520, " + - "598, 599" - - test.StatusCodes = defaultStatusCodes - - return test -} diff --git a/builtin/providers/statuscake/resource_statuscaketest_test.go b/builtin/providers/statuscake/resource_statuscaketest_test.go deleted file mode 100644 index f07fcc55a..000000000 --- a/builtin/providers/statuscake/resource_statuscaketest_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package statuscake - -import ( - "fmt" - "strconv" - "testing" - - "github.com/DreamItGetIT/statuscake" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccStatusCake_basic(t *testing.T) { - var test statuscake.Test - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTestCheckDestroy(&test), - Steps: []resource.TestStep{ - { - Config: testAccTestConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccTestCheckExists("statuscake_test.google", &test), - testAccTestCheckAttributes("statuscake_test.google", &test), - ), - }, - }, - }) -} - -func TestAccStatusCake_tcp(t *testing.T) { - var test statuscake.Test - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTestCheckDestroy(&test), - Steps: []resource.TestStep{ - { - Config: testAccTestConfig_tcp, - Check: resource.ComposeTestCheckFunc( - testAccTestCheckExists("statuscake_test.google", &test), - testAccTestCheckAttributes("statuscake_test.google", &test), - ), - }, - }, - }) -} - -func TestAccStatusCake_withUpdate(t *testing.T) { - var test statuscake.Test - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTestCheckDestroy(&test), - Steps: []resource.TestStep{ - { - Config: testAccTestConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccTestCheckExists("statuscake_test.google", &test), - ), - }, - - { - Config: testAccTestConfig_update, - Check: resource.ComposeTestCheckFunc( - testAccTestCheckExists("statuscake_test.google", &test), - testAccTestCheckAttributes("statuscake_test.google", &test), - resource.TestCheckResourceAttr("statuscake_test.google", "check_rate", "500"), - resource.TestCheckResourceAttr("statuscake_test.google", "paused", "true"), - resource.TestCheckResourceAttr("statuscake_test.google", "timeout", "40"), - resource.TestCheckResourceAttr("statuscake_test.google", "contact_id", "0"), - resource.TestCheckResourceAttr("statuscake_test.google", "confirmations", "0"), - resource.TestCheckResourceAttr("statuscake_test.google", "trigger_rate", "20"), - ), - }, - }, 
- }) -} - -func testAccTestCheckExists(rn string, test *statuscake.Test) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[rn] - if !ok { - return fmt.Errorf("resource not found: %s", rn) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("TestID not set") - } - - client := testAccProvider.Meta().(*statuscake.Client) - testId, parseErr := strconv.Atoi(rs.Primary.ID) - if parseErr != nil { - return fmt.Errorf("error in statuscake test CheckExists: %s", parseErr) - } - - gotTest, err := client.Tests().Detail(testId) - if err != nil { - return fmt.Errorf("error getting test: %s", err) - } - - *test = *gotTest - - return nil - } -} - -func testAccTestCheckAttributes(rn string, test *statuscake.Test) resource.TestCheckFunc { - return func(s *terraform.State) error { - attrs := s.RootModule().Resources[rn].Primary.Attributes - - check := func(key, stateValue, testValue string) error { - if testValue != stateValue { - return fmt.Errorf("different values for %s in state (%s) and in statuscake (%s)", - key, stateValue, testValue) - } - return nil - } - - for key, value := range attrs { - var err error - - switch key { - case "website_name": - err = check(key, value, test.WebsiteName) - case "website_url": - err = check(key, value, test.WebsiteURL) - case "check_rate": - err = check(key, value, strconv.Itoa(test.CheckRate)) - case "test_type": - err = check(key, value, test.TestType) - case "paused": - err = check(key, value, strconv.FormatBool(test.Paused)) - case "timeout": - err = check(key, value, strconv.Itoa(test.Timeout)) - case "contact_id": - err = check(key, value, strconv.Itoa(test.ContactID)) - case "confirmations": - err = check(key, value, strconv.Itoa(test.Confirmation)) - case "trigger_rate": - err = check(key, value, strconv.Itoa(test.TriggerRate)) - } - - if err != nil { - return err - } - } - return nil - } -} - -func testAccTestCheckDestroy(test *statuscake.Test) resource.TestCheckFunc { - return 
func(s *terraform.State) error { - client := testAccProvider.Meta().(*statuscake.Client) - err := client.Tests().Delete(test.TestID) - if err == nil { - return fmt.Errorf("test still exists") - } - - return nil - } -} - -const testAccTestConfig_basic = ` -resource "statuscake_test" "google" { - website_name = "google.com" - website_url = "www.google.com" - test_type = "HTTP" - check_rate = 300 - timeout = 10 - contact_id = 43402 - confirmations = 1 - trigger_rate = 10 -} -` - -const testAccTestConfig_update = ` -resource "statuscake_test" "google" { - website_name = "google.com" - website_url = "www.google.com" - test_type = "HTTP" - check_rate = 500 - paused = true - trigger_rate = 20 -} -` - -const testAccTestConfig_tcp = ` -resource "statuscake_test" "google" { - website_name = "google.com" - website_url = "www.google.com" - test_type = "TCP" - check_rate = 300 - timeout = 10 - contact_id = 43402 - confirmations = 1 - port = 80 -} -` diff --git a/builtin/providers/template/datasource_cloudinit_config.go b/builtin/providers/template/datasource_cloudinit_config.go deleted file mode 100644 index 8a24c329a..000000000 --- a/builtin/providers/template/datasource_cloudinit_config.go +++ /dev/null @@ -1,185 +0,0 @@ -package template - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "fmt" - "io" - "mime/multipart" - "net/textproto" - "strconv" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceCloudinitConfig() *schema.Resource { - return &schema.Resource{ - Read: dataSourceCloudinitConfigRead, - - Schema: map[string]*schema.Schema{ - "part": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content_type": { - Type: schema.TypeString, - Optional: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - }, - "filename": { - Type: schema.TypeString, - Optional: true, - }, - "merge_type": { - Type: 
schema.TypeString, - Optional: true, - }, - }, - }, - }, - "gzip": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "base64_encode": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "rendered": { - Type: schema.TypeString, - Computed: true, - Description: "rendered cloudinit configuration", - }, - }, - } -} - -func dataSourceCloudinitConfigRead(d *schema.ResourceData, meta interface{}) error { - rendered, err := renderCloudinitConfig(d) - if err != nil { - return err - } - - d.Set("rendered", rendered) - d.SetId(strconv.Itoa(hashcode.String(rendered))) - return nil -} - -func renderCloudinitConfig(d *schema.ResourceData) (string, error) { - gzipOutput := d.Get("gzip").(bool) - base64Output := d.Get("base64_encode").(bool) - - partsValue, hasParts := d.GetOk("part") - if !hasParts { - return "", fmt.Errorf("No parts found in the cloudinit resource declaration") - } - - cloudInitParts := make(cloudInitParts, len(partsValue.([]interface{}))) - for i, v := range partsValue.([]interface{}) { - p, castOk := v.(map[string]interface{}) - if !castOk { - return "", fmt.Errorf("Unable to parse parts in cloudinit resource declaration") - } - - part := cloudInitPart{} - if p, ok := p["content_type"]; ok { - part.ContentType = p.(string) - } - if p, ok := p["content"]; ok { - part.Content = p.(string) - } - if p, ok := p["merge_type"]; ok { - part.MergeType = p.(string) - } - if p, ok := p["filename"]; ok { - part.Filename = p.(string) - } - cloudInitParts[i] = part - } - - var buffer bytes.Buffer - - var err error - if gzipOutput { - gzipWriter := gzip.NewWriter(&buffer) - err = renderPartsToWriter(cloudInitParts, gzipWriter) - gzipWriter.Close() - } else { - err = renderPartsToWriter(cloudInitParts, &buffer) - } - if err != nil { - return "", err - } - - output := "" - if base64Output { - output = base64.StdEncoding.EncodeToString(buffer.Bytes()) - } else { - output = buffer.String() - } - - return output, nil -} - -func 
renderPartsToWriter(parts cloudInitParts, writer io.Writer) error { - mimeWriter := multipart.NewWriter(writer) - defer mimeWriter.Close() - - // we need to set the boundary explictly, otherwise the boundary is random - // and this causes terraform to complain about the resource being different - if err := mimeWriter.SetBoundary("MIMEBOUNDARY"); err != nil { - return err - } - - writer.Write([]byte(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\n", mimeWriter.Boundary()))) - writer.Write([]byte("MIME-Version: 1.0\r\n\r\n")) - - for _, part := range parts { - header := textproto.MIMEHeader{} - if part.ContentType == "" { - header.Set("Content-Type", "text/plain") - } else { - header.Set("Content-Type", part.ContentType) - } - - header.Set("MIME-Version", "1.0") - header.Set("Content-Transfer-Encoding", "7bit") - - if part.Filename != "" { - header.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, part.Filename)) - } - - if part.MergeType != "" { - header.Set("X-Merge-Type", part.MergeType) - } - - partWriter, err := mimeWriter.CreatePart(header) - if err != nil { - return err - } - - _, err = partWriter.Write([]byte(part.Content)) - if err != nil { - return err - } - } - - return nil -} - -type cloudInitPart struct { - ContentType string - MergeType string - Filename string - Content string -} - -type cloudInitParts []cloudInitPart diff --git a/builtin/providers/template/datasource_cloudinit_config_test.go b/builtin/providers/template/datasource_cloudinit_config_test.go deleted file mode 100644 index 7ea49ca7d..000000000 --- a/builtin/providers/template/datasource_cloudinit_config_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package template - -import ( - "regexp" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" -) - -func TestRender(t *testing.T) { - testCases := []struct { - ResourceBlock string - Expected string - }{ - { - `data "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - 
content_type = "text/x-shellscript" - content = "baz" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n", - }, - { - `data "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - filename = "foobar.sh" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n\r\n--MIMEBOUNDARY\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n", - }, - { - `data "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - } - part { - content_type = "text/x-shellscript" - content = "ffbaz" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nffbaz\r\n--MIMEBOUNDARY--\r\n", - }, - } - - for _, tt := range testCases { - r.UnitTest(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - { - Config: tt.ResourceBlock, - Check: r.ComposeTestCheckFunc( - r.TestCheckResourceAttr("data.template_cloudinit_config.foo", "rendered", tt.Expected), - ), - }, - }, - }) - } -} - -// From GH-13572, Correctly handle panic on a misconfigured cloudinit part -func TestRender_handlePanic(t *testing.T) { - r.UnitTest(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - { - Config: testCloudInitConfig_misconfiguredParts, - ExpectError: regexp.MustCompile("Unable to parse parts in 
cloudinit resource declaration"), - }, - }, - }) -} - -var testCloudInitConfig_misconfiguredParts = ` -data "template_cloudinit_config" "foo" { - part { - content = "" - } -} -` diff --git a/builtin/providers/template/datasource_template_file.go b/builtin/providers/template/datasource_template_file.go deleted file mode 100644 index 28e5f6033..000000000 --- a/builtin/providers/template/datasource_template_file.go +++ /dev/null @@ -1,165 +0,0 @@ -package template - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" -) - -func dataSourceFile() *schema.Resource { - return &schema.Resource{ - Read: dataSourceFileRead, - - Schema: map[string]*schema.Schema{ - "template": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Contents of the template", - ConflictsWith: []string{"filename"}, - }, - "filename": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "file to read template from", - // Make a "best effort" attempt to relativize the file path. 
- StateFunc: func(v interface{}) string { - if v == nil || v.(string) == "" { - return "" - } - pwd, err := os.Getwd() - if err != nil { - return v.(string) - } - rel, err := filepath.Rel(pwd, v.(string)) - if err != nil { - return v.(string) - } - return rel - }, - Deprecated: "Use the 'template' attribute instead.", - ConflictsWith: []string{"template"}, - }, - "vars": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Default: make(map[string]interface{}), - Description: "variables to substitute", - ValidateFunc: validateVarsAttribute, - }, - "rendered": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "rendered template", - }, - }, - } -} - -func dataSourceFileRead(d *schema.ResourceData, meta interface{}) error { - rendered, err := renderFile(d) - if err != nil { - return err - } - d.Set("rendered", rendered) - d.SetId(hash(rendered)) - return nil -} - -type templateRenderError error - -func renderFile(d *schema.ResourceData) (string, error) { - template := d.Get("template").(string) - filename := d.Get("filename").(string) - vars := d.Get("vars").(map[string]interface{}) - - contents := template - if template == "" && filename != "" { - data, _, err := pathorcontents.Read(filename) - if err != nil { - return "", err - } - - contents = data - } - - rendered, err := execute(contents, vars) - if err != nil { - return "", templateRenderError( - fmt.Errorf("failed to render %v: %v", filename, err), - ) - } - - return rendered, nil -} - -// execute parses and executes a template using vars. -func execute(s string, vars map[string]interface{}) (string, error) { - root, err := hil.Parse(s) - if err != nil { - return "", err - } - - varmap := make(map[string]ast.Variable) - for k, v := range vars { - // As far as I can tell, v is always a string. - // If it's not, tell the user gracefully. 
- s, ok := v.(string) - if !ok { - return "", fmt.Errorf("unexpected type for variable %q: %T", k, v) - } - varmap[k] = ast.Variable{ - Value: s, - Type: ast.TypeString, - } - } - - cfg := hil.EvalConfig{ - GlobalScope: &ast.BasicScope{ - VarMap: varmap, - FuncMap: config.Funcs(), - }, - } - - result, err := hil.Eval(root, &cfg) - if err != nil { - return "", err - } - if result.Type != hil.TypeString { - return "", fmt.Errorf("unexpected output hil.Type: %v", result.Type) - } - - return result.Value.(string), nil -} - -func hash(s string) string { - sha := sha256.Sum256([]byte(s)) - return hex.EncodeToString(sha[:]) -} - -func validateVarsAttribute(v interface{}, key string) (ws []string, es []error) { - // vars can only be primitives right now - var badVars []string - for k, v := range v.(map[string]interface{}) { - switch v.(type) { - case []interface{}: - badVars = append(badVars, fmt.Sprintf("%s (list)", k)) - case map[string]interface{}: - badVars = append(badVars, fmt.Sprintf("%s (map)", k)) - } - } - if len(badVars) > 0 { - es = append(es, fmt.Errorf( - "%s: cannot contain non-primitives; bad keys: %s", - key, strings.Join(badVars, ", "))) - } - return -} diff --git a/builtin/providers/template/datasource_template_file_test.go b/builtin/providers/template/datasource_template_file_test.go deleted file mode 100644 index da9c1d98a..000000000 --- a/builtin/providers/template/datasource_template_file_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package template - -import ( - "fmt" - "strings" - "sync" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -var testProviders = map[string]terraform.ResourceProvider{ - "template": Provider(), -} - -func TestTemplateRendering(t *testing.T) { - var cases = []struct { - vars string - template string - want string - }{ - {`{}`, `ABC`, `ABC`}, - {`{a="foo"}`, `$${a}`, `foo`}, - {`{a="hello"}`, `$${replace(a, "ello", "i")}`, `hi`}, - {`{}`, `${1+2+3}`, `6`}, - 
{`{}`, `/`, `/`}, - } - - for _, tt := range cases { - r.UnitTest(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: testTemplateConfig(tt.template, tt.vars), - Check: func(s *terraform.State) error { - got := s.RootModule().Outputs["rendered"] - if tt.want != got.Value { - return fmt.Errorf("template:\n%s\nvars:\n%s\ngot:\n%s\nwant:\n%s\n", tt.template, tt.vars, got, tt.want) - } - return nil - }, - }, - }, - }) - } -} - -func TestValidateVarsAttribute(t *testing.T) { - cases := map[string]struct { - Vars map[string]interface{} - ExpectErr string - }{ - "lists are invalid": { - map[string]interface{}{ - "list": []interface{}{}, - }, - `vars: cannot contain non-primitives`, - }, - "maps are invalid": { - map[string]interface{}{ - "map": map[string]interface{}{}, - }, - `vars: cannot contain non-primitives`, - }, - "strings, integers, floats, and bools are AOK": { - map[string]interface{}{ - "string": "foo", - "int": 1, - "bool": true, - "float": float64(1.0), - }, - ``, - }, - } - - for tn, tc := range cases { - _, es := validateVarsAttribute(tc.Vars, "vars") - if len(es) > 0 { - if tc.ExpectErr == "" { - t.Fatalf("%s: expected no err, got: %#v", tn, es) - } - if !strings.Contains(es[0].Error(), tc.ExpectErr) { - t.Fatalf("%s: expected\n%s\nto contain\n%s", tn, es[0], tc.ExpectErr) - } - } else if tc.ExpectErr != "" { - t.Fatalf("%s: expected err containing %q, got none!", tn, tc.ExpectErr) - } - } -} - -// This test covers a panic due to config.Func formerly being a -// shared map, causing multiple template_file resources to try and -// accessing it parallel during their lang.Eval() runs. -// -// Before fix, test fails under `go test -race` -func TestTemplateSharedMemoryRace(t *testing.T) { - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func(t *testing.T, i int) { - out, err := execute("don't panic!", map[string]interface{}{}) - if err != nil { - t.Fatalf("err: %s", err) - } - if out != "don't panic!" 
{ - t.Fatalf("bad output: %s", out) - } - wg.Done() - }(t, i) - } - wg.Wait() -} - -func testTemplateConfig(template, vars string) string { - return fmt.Sprintf(` - data "template_file" "t0" { - template = "%s" - vars = %s - } - output "rendered" { - value = "${data.template_file.t0.rendered}" - }`, template, vars) -} diff --git a/builtin/providers/template/provider.go b/builtin/providers/template/provider.go deleted file mode 100644 index fb340754d..000000000 --- a/builtin/providers/template/provider.go +++ /dev/null @@ -1,26 +0,0 @@ -package template - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ - "template_file": dataSourceFile(), - "template_cloudinit_config": dataSourceCloudinitConfig(), - }, - ResourcesMap: map[string]*schema.Resource{ - "template_file": schema.DataSourceResourceShim( - "template_file", - dataSourceFile(), - ), - "template_cloudinit_config": schema.DataSourceResourceShim( - "template_cloudinit_config", - dataSourceCloudinitConfig(), - ), - "template_dir": resourceDir(), - }, - } -} diff --git a/builtin/providers/template/provider_test.go b/builtin/providers/template/provider_test.go deleted file mode 100644 index 37c02bb4a..000000000 --- a/builtin/providers/template/provider_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package template - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" -) - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/builtin/providers/template/resource_template_dir.go b/builtin/providers/template/resource_template_dir.go deleted file mode 100644 index 63a2f18dc..000000000 --- a/builtin/providers/template/resource_template_dir.go +++ /dev/null @@ -1,234 +0,0 @@ -package template - -import ( - "archive/tar" - "bytes" 
- "crypto/sha1" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - - "github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceDir() *schema.Resource { - return &schema.Resource{ - Create: resourceTemplateDirCreate, - Read: resourceTemplateDirRead, - Delete: resourceTemplateDirDelete, - - Schema: map[string]*schema.Schema{ - "source_dir": { - Type: schema.TypeString, - Description: "Path to the directory where the files to template reside", - Required: true, - ForceNew: true, - }, - "vars": { - Type: schema.TypeMap, - Optional: true, - Default: make(map[string]interface{}), - Description: "Variables to substitute", - ValidateFunc: validateVarsAttribute, - ForceNew: true, - }, - "destination_dir": { - Type: schema.TypeString, - Description: "Path to the directory where the templated files will be written", - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceTemplateDirRead(d *schema.ResourceData, meta interface{}) error { - sourceDir := d.Get("source_dir").(string) - destinationDir := d.Get("destination_dir").(string) - - // If the output doesn't exist, mark the resource for creation. - if _, err := os.Stat(destinationDir); os.IsNotExist(err) { - d.SetId("") - return nil - } - - // If the combined hash of the input and output directories is different from - // the stored one, mark the resource for re-creation. - // - // The output directory is technically enough for the general case, but by - // hashing the input directory as well, we make development much easier: when - // a developer modifies one of the input files, the generation is - // re-triggered. 
- hash, err := generateID(sourceDir, destinationDir) - if err != nil { - return err - } - if hash != d.Id() { - d.SetId("") - return nil - } - - return nil -} - -func resourceTemplateDirCreate(d *schema.ResourceData, meta interface{}) error { - sourceDir := d.Get("source_dir").(string) - destinationDir := d.Get("destination_dir").(string) - vars := d.Get("vars").(map[string]interface{}) - - // Always delete the output first, otherwise files that got deleted from the - // input directory might still be present in the output afterwards. - if err := resourceTemplateDirDelete(d, meta); err != nil { - return err - } - - // Create the destination directory and any other intermediate directories - // leading to it. - if _, err := os.Stat(destinationDir); err != nil { - if err := os.MkdirAll(destinationDir, 0777); err != nil { - return err - } - } - - // Recursively crawl the input files/directories and generate the output ones. - err := filepath.Walk(sourceDir, func(p string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - if f.IsDir() { - return nil - } - - relPath, _ := filepath.Rel(sourceDir, p) - return generateDirFile(p, path.Join(destinationDir, relPath), f, vars) - }) - if err != nil { - return err - } - - // Compute ID. 
- hash, err := generateID(sourceDir, destinationDir) - if err != nil { - return err - } - d.SetId(hash) - - return nil -} - -func resourceTemplateDirDelete(d *schema.ResourceData, _ interface{}) error { - d.SetId("") - - destinationDir := d.Get("destination_dir").(string) - if _, err := os.Stat(destinationDir); os.IsNotExist(err) { - return nil - } - - if err := os.RemoveAll(destinationDir); err != nil { - return fmt.Errorf("could not delete directory %q: %s", destinationDir, err) - } - - return nil -} - -func generateDirFile(sourceDir, destinationDir string, f os.FileInfo, vars map[string]interface{}) error { - inputContent, _, err := pathorcontents.Read(sourceDir) - if err != nil { - return err - } - - outputContent, err := execute(inputContent, vars) - if err != nil { - return templateRenderError(fmt.Errorf("failed to render %v: %v", sourceDir, err)) - } - - outputDir := path.Dir(destinationDir) - if _, err := os.Stat(outputDir); err != nil { - if err := os.MkdirAll(outputDir, 0777); err != nil { - return err - } - } - - err = ioutil.WriteFile(destinationDir, []byte(outputContent), f.Mode()) - if err != nil { - return err - } - - return nil -} - -func generateID(sourceDir, destinationDir string) (string, error) { - inputHash, err := generateDirHash(sourceDir) - if err != nil { - return "", err - } - outputHash, err := generateDirHash(destinationDir) - if err != nil { - return "", err - } - checksum := sha1.Sum([]byte(inputHash + outputHash)) - return hex.EncodeToString(checksum[:]), nil -} - -func generateDirHash(directoryPath string) (string, error) { - tarData, err := tarDir(directoryPath) - if err != nil { - return "", fmt.Errorf("could not generate output checksum: %s", err) - } - - checksum := sha1.Sum(tarData) - return hex.EncodeToString(checksum[:]), nil -} - -func tarDir(directoryPath string) ([]byte, error) { - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - - writeFile := func(p string, f os.FileInfo, err error) error { - if err != nil { - return 
err - } - - var header *tar.Header - var file *os.File - - header, err = tar.FileInfoHeader(f, f.Name()) - if err != nil { - return err - } - relPath, _ := filepath.Rel(directoryPath, p) - header.Name = relPath - - if err := tw.WriteHeader(header); err != nil { - return err - } - - if f.IsDir() { - return nil - } - - file, err = os.Open(p) - if err != nil { - return err - } - defer file.Close() - - _, err = io.Copy(tw, file) - return err - } - - if err := filepath.Walk(directoryPath, writeFile); err != nil { - return []byte{}, err - } - if err := tw.Flush(); err != nil { - return []byte{}, err - } - - return buf.Bytes(), nil -} diff --git a/builtin/providers/template/resource_template_dir_test.go b/builtin/providers/template/resource_template_dir_test.go deleted file mode 100644 index 716a5f0af..000000000 --- a/builtin/providers/template/resource_template_dir_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package template - -import ( - "fmt" - "testing" - - "errors" - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "io/ioutil" - "os" - "path/filepath" -) - -const templateDirRenderingConfig = ` -resource "template_dir" "dir" { - source_dir = "%s" - destination_dir = "%s" - vars = %s -}` - -type testTemplate struct { - template string - want string -} - -func testTemplateDirWriteFiles(files map[string]testTemplate) (in, out string, err error) { - in, err = ioutil.TempDir(os.TempDir(), "terraform_template_dir") - if err != nil { - return - } - - for name, file := range files { - path := filepath.Join(in, name) - - err = os.MkdirAll(filepath.Dir(path), 0777) - if err != nil { - return - } - - err = ioutil.WriteFile(path, []byte(file.template), 0777) - if err != nil { - return - } - } - - out = fmt.Sprintf("%s.out", in) - return -} - -func TestTemplateDirRendering(t *testing.T) { - var cases = []struct { - vars string - files map[string]testTemplate - }{ - { - files: map[string]testTemplate{ - "foo.txt": {"${bar}", "bar"}, - 
"nested/monkey.txt": {"ooh-ooh-ooh-eee-eee", "ooh-ooh-ooh-eee-eee"}, - "maths.txt": {"${1+2+3}", "6"}, - }, - vars: `{bar = "bar"}`, - }, - } - - for _, tt := range cases { - // Write the desired templates in a temporary directory. - in, out, err := testTemplateDirWriteFiles(tt.files) - if err != nil { - t.Skipf("could not write templates to temporary directory: %s", err) - continue - } - defer os.RemoveAll(in) - defer os.RemoveAll(out) - - // Run test case. - r.UnitTest(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - { - Config: fmt.Sprintf(templateDirRenderingConfig, in, out, tt.vars), - Check: func(s *terraform.State) error { - for name, file := range tt.files { - content, err := ioutil.ReadFile(filepath.Join(out, name)) - if err != nil { - return fmt.Errorf("template:\n%s\nvars:\n%s\ngot:\n%s\nwant:\n%s\n", file.template, tt.vars, err, file.want) - } - if string(content) != file.want { - return fmt.Errorf("template:\n%s\nvars:\n%s\ngot:\n%s\nwant:\n%s\n", file.template, tt.vars, content, file.want) - } - } - return nil - }, - }, - }, - CheckDestroy: func(*terraform.State) error { - if _, err := os.Stat(out); os.IsNotExist(err) { - return nil - } - return errors.New("template_dir did not get destroyed") - }, - }) - } -} diff --git a/builtin/providers/terraform/data_source_state.go b/builtin/providers/terraform/data_source_state.go deleted file mode 100644 index c7f0170a3..000000000 --- a/builtin/providers/terraform/data_source_state.go +++ /dev/null @@ -1,124 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/backend" - backendinit "github.com/hashicorp/terraform/backend/init" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func dataSourceRemoteState() *schema.Resource { - return &schema.Resource{ - Read: dataSourceRemoteStateRead, - - Schema: map[string]*schema.Schema{ - "backend": { - Type: 
schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - if vStr, ok := v.(string); ok && vStr == "_local" { - ws = append(ws, "Use of the %q backend is now officially "+ - "supported as %q. Please update your configuration to ensure "+ - "compatibility with future versions of Terraform.", - "_local", "local") - } - - return - }, - }, - - "config": { - Type: schema.TypeMap, - Optional: true, - }, - - "environment": { - Type: schema.TypeString, - Optional: true, - - ConflictsWith: []string{"workspace"}, - Deprecated: "use the \"workspace\" argument instead, with the same value", - }, - - "workspace": { - Type: schema.TypeString, - Optional: true, - Default: backend.DefaultStateName, - }, - - "__has_dynamic_attributes": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceRemoteStateRead(d *schema.ResourceData, meta interface{}) error { - backend := d.Get("backend").(string) - - // Get the configuration in a type we want. - rawConfig, err := config.NewRawConfig(d.Get("config").(map[string]interface{})) - if err != nil { - return fmt.Errorf("error initializing backend: %s", err) - } - - // Don't break people using the old _local syntax - but note warning above - if backend == "_local" { - log.Println(`[INFO] Switching old (unsupported) backend "_local" to "local"`) - backend = "local" - } - - // Create the client to access our remote state - log.Printf("[DEBUG] Initializing remote state backend: %s", backend) - f := backendinit.Backend(backend) - if f == nil { - return fmt.Errorf("Unknown backend type: %s", backend) - } - b := f() - - // Configure the backend - if err := b.Configure(terraform.NewResourceConfig(rawConfig)); err != nil { - return fmt.Errorf("error initializing backend: %s", err) - } - - // Get the state - workspace := d.Get("environment").(string) - if workspace == "" { - // This is actually the main path, since "environment" is deprecated. 
- workspace = d.Get("workspace").(string) - } - state, err := b.State(workspace) - if err != nil { - return fmt.Errorf("error loading the remote state: %s", err) - } - if err := state.RefreshState(); err != nil { - return err - } - - d.SetId(time.Now().UTC().String()) - - outputMap := make(map[string]interface{}) - - remoteState := state.State() - if remoteState.Empty() { - log.Println("[DEBUG] empty remote state") - return nil - } - - for key, val := range remoteState.RootModule().Outputs { - outputMap[key] = val.Value - } - - mappedOutputs := remoteStateFlatten(outputMap) - - for key, val := range mappedOutputs { - d.UnsafeSetFieldRaw(key, val) - } - return nil -} diff --git a/builtin/providers/terraform/data_source_state_test.go b/builtin/providers/terraform/data_source_state_test.go deleted file mode 100644 index c4d8af64e..000000000 --- a/builtin/providers/terraform/data_source_state_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" - - backendinit "github.com/hashicorp/terraform/backend/init" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestState_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccState_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue( - "data.terraform_remote_state.foo", "foo", "bar"), - ), - }, - }, - }) -} - -func TestState_backends(t *testing.T) { - backendinit.Set("_ds_test", backendinit.Backend("local")) - defer backendinit.Set("_ds_test", nil) - - resource.UnitTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccState_backend, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue( - "data.terraform_remote_state.foo", "foo", "bar"), - ), - }, - }, - }) -} - -func 
TestState_complexOutputs(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccState_complexOutputs, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue("terraform_remote_state.foo", "backend", "local"), - testAccCheckStateValue("terraform_remote_state.foo", "config.path", "./test-fixtures/complex_outputs.tfstate"), - testAccCheckStateValue("terraform_remote_state.foo", "computed_set.#", "2"), - testAccCheckStateValue("terraform_remote_state.foo", `map.%`, "2"), - testAccCheckStateValue("terraform_remote_state.foo", `map.key`, "test"), - ), - }, - }, - }) -} - -func TestState_workspace(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccState_workspace, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue( - "data.terraform_remote_state.foo", "foo", "bar"), - ), - }, - }, - }) -} - -func TestState_legacyEnvironment(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccState_legacyEnvironment, - Check: resource.ComposeTestCheckFunc( - testAccCheckStateValue( - "data.terraform_remote_state.foo", "foo", "bar"), - ), - }, - }, - }) -} - -func testAccCheckStateValue(id, name, value string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[id] - if !ok { - return fmt.Errorf("Not found: %s", id) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - v := rs.Primary.Attributes[name] - if v != value { - return fmt.Errorf( - "Value for %s is %s, not %s", name, v, value) - } - - return nil - } -} - -const testAccState_basic = ` -data "terraform_remote_state" "foo" { - backend = "local" - - config 
{ - path = "./test-fixtures/basic.tfstate" - } -}` - -const testAccState_backend = ` -data "terraform_remote_state" "foo" { - backend = "_ds_test" - - config { - path = "./test-fixtures/basic.tfstate" - } -}` - -const testAccState_complexOutputs = ` -resource "terraform_remote_state" "foo" { - backend = "local" - - config { - path = "./test-fixtures/complex_outputs.tfstate" - } -}` - -const testAccState_workspace = ` -data "terraform_remote_state" "foo" { - backend = "local" - workspace = "test" - - config { - workspace_dir = "./test-fixtures/workspaces" - } -}` - -const testAccState_legacyEnvironment = ` -data "terraform_remote_state" "foo" { - backend = "local" - environment = "test" # old, deprecated name for "workspace" - - config { - workspace_dir = "./test-fixtures/workspaces" - } -}` diff --git a/builtin/providers/terraform/flatten.go b/builtin/providers/terraform/flatten.go deleted file mode 100644 index 4766a4f5e..000000000 --- a/builtin/providers/terraform/flatten.go +++ /dev/null @@ -1,76 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" -) - -// remoteStateFlatten takes a structure and turns into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. 
-// -// The difference between this version and the version in package flatmap is that -// we add the count key for maps in this version, and return a normal -// map[string]string instead of a flatmap.Map -func remoteStateFlatten(thing map[string]interface{}) map[string]string { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return result -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - mapKeys := v.MapKeys() - - result[fmt.Sprintf("%s.%%", prefix)] = fmt.Sprintf("%d", len(mapKeys)) - for _, k := range mapKeys { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." 
- - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/builtin/providers/terraform/provider.go b/builtin/providers/terraform/provider.go deleted file mode 100644 index 1bdf2c1d0..000000000 --- a/builtin/providers/terraform/provider.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "terraform_remote_state": schema.DataSourceResourceShim( - "terraform_remote_state", - dataSourceRemoteState(), - ), - }, - DataSourcesMap: map[string]*schema.Resource{ - "terraform_remote_state": dataSourceRemoteState(), - }, - } -} diff --git a/builtin/providers/terraform/provider_test.go b/builtin/providers/terraform/provider_test.go deleted file mode 100644 index 65f3ce4ad..000000000 --- a/builtin/providers/terraform/provider_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "terraform": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { -} diff --git a/builtin/providers/terraform/test-fixtures/basic.tfstate b/builtin/providers/terraform/test-fixtures/basic.tfstate deleted file mode 100644 index 
a10b2b6b1..000000000 --- a/builtin/providers/terraform/test-fixtures/basic.tfstate +++ /dev/null @@ -1,7 +0,0 @@ -{ - "version": 1, - "modules": [{ - "path": ["root"], - "outputs": { "foo": "bar" } - }] -} diff --git a/builtin/providers/terraform/test-fixtures/complex_outputs.tfstate b/builtin/providers/terraform/test-fixtures/complex_outputs.tfstate deleted file mode 100644 index ab50e427f..000000000 --- a/builtin/providers/terraform/test-fixtures/complex_outputs.tfstate +++ /dev/null @@ -1,88 +0,0 @@ -{ - "version": 3, - "terraform_version": "0.7.0", - "serial": 3, - "modules": [ - { - "path": [ - "root" - ], - "outputs": { - "computed_map": { - "sensitive": false, - "type": "map", - "value": { - "key1": "value1" - } - }, - "computed_set": { - "sensitive": false, - "type": "list", - "value": [ - "setval1", - "setval2" - ] - }, - "map": { - "sensitive": false, - "type": "map", - "value": { - "key": "test", - "test": "test" - } - }, - "set": { - "sensitive": false, - "type": "list", - "value": [ - "test1", - "test2" - ] - } - }, - "resources": { - "test_resource.main": { - "type": "test_resource", - "primary": { - "id": "testId", - "attributes": { - "computed_list.#": "2", - "computed_list.0": "listval1", - "computed_list.1": "listval2", - "computed_map.%": "1", - "computed_map.key1": "value1", - "computed_read_only": "value_from_api", - "computed_read_only_force_new": "value_from_api", - "computed_set.#": "2", - "computed_set.2337322984": "setval1", - "computed_set.307881554": "setval2", - "id": "testId", - "list_of_map.#": "2", - "list_of_map.0.%": "2", - "list_of_map.0.key1": "value1", - "list_of_map.0.key2": "value2", - "list_of_map.1.%": "2", - "list_of_map.1.key3": "value3", - "list_of_map.1.key4": "value4", - "map.%": "2", - "map.key": "test", - "map.test": "test", - "map_that_look_like_set.%": "2", - "map_that_look_like_set.12352223": "hello", - "map_that_look_like_set.36234341": "world", - "optional_computed_map.%": "0", - "required": "Hello World", - 
"required_map.%": "3", - "required_map.key1": "value1", - "required_map.key2": "value2", - "required_map.key3": "value3", - "set.#": "2", - "set.2326977762": "test1", - "set.331058520": "test2" - } - } - } - } - } - ] -} diff --git a/builtin/providers/terraform/test-fixtures/workspaces/test/terraform.tfstate b/builtin/providers/terraform/test-fixtures/workspaces/test/terraform.tfstate deleted file mode 100644 index a10b2b6b1..000000000 --- a/builtin/providers/terraform/test-fixtures/workspaces/test/terraform.tfstate +++ /dev/null @@ -1,7 +0,0 @@ -{ - "version": 1, - "modules": [{ - "path": ["root"], - "outputs": { "foo": "bar" } - }] -} diff --git a/builtin/providers/tls/provider.go b/builtin/providers/tls/provider.go deleted file mode 100644 index e6c1d6198..000000000 --- a/builtin/providers/tls/provider.go +++ /dev/null @@ -1,112 +0,0 @@ -package tls - -import ( - "crypto/sha1" - "crypto/x509/pkix" - "encoding/hex" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "tls_private_key": resourcePrivateKey(), - "tls_locally_signed_cert": resourceLocallySignedCert(), - "tls_self_signed_cert": resourceSelfSignedCert(), - "tls_cert_request": resourceCertRequest(), - }, - } -} - -func hashForState(value string) string { - if value == "" { - return "" - } - hash := sha1.Sum([]byte(strings.TrimSpace(value))) - return hex.EncodeToString(hash[:]) -} - -func nameFromResourceData(nameMap map[string]interface{}) (*pkix.Name, error) { - result := &pkix.Name{} - - if value := nameMap["common_name"]; value != nil { - result.CommonName = value.(string) - } - if value := nameMap["organization"]; value != nil { - result.Organization = []string{value.(string)} - } - if value := nameMap["organizational_unit"]; value != nil { - result.OrganizationalUnit = []string{value.(string)} - } - if value := 
nameMap["street_address"]; value != nil { - valueI := value.([]interface{}) - result.StreetAddress = make([]string, len(valueI)) - for i, vi := range valueI { - result.StreetAddress[i] = vi.(string) - } - } - if value := nameMap["locality"]; value != nil { - result.Locality = []string{value.(string)} - } - if value := nameMap["province"]; value != nil { - result.Province = []string{value.(string)} - } - if value := nameMap["country"]; value != nil { - result.Country = []string{value.(string)} - } - if value := nameMap["postal_code"]; value != nil { - result.PostalCode = []string{value.(string)} - } - if value := nameMap["serial_number"]; value != nil { - result.SerialNumber = value.(string) - } - - return result, nil -} - -var nameSchema *schema.Resource = &schema.Resource{ - Schema: map[string]*schema.Schema{ - "organization": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "common_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "organizational_unit": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "street_address": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "locality": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "province": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "country": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "postal_code": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "serial_number": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, -} diff --git a/builtin/providers/tls/provider_test.go b/builtin/providers/tls/provider_test.go deleted file mode 100644 index 7dc7af0d2..000000000 --- a/builtin/providers/tls/provider_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package tls - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func 
TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -var testProviders = map[string]terraform.ResourceProvider{ - "tls": Provider(), -} - -var testPrivateKey = ` ------BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQDPLaq43D9C596ko9yQipWUf2FbRhFs18D3wBDBqXLIoP7W3rm5 -S292/JiNPa+mX76IYFF416zTBGG9J5w4d4VFrROn8IuMWqHgdXsCUf2szN7EnJcV -BsBzTxxWqz4DjX315vbm/PFOLlKzC0Ngs4h1iDiCD9Hk2MajZuFnJiqj1QIDAQAB -AoGAG6eQ3lQn7Zpd0cQ9sN2O0d+e8zwLH2g9TdTJZ9Bijf1Phwb764vyOQPGqTPO -unqVSEbzGRpQ62nuUf1zkOYDV+gKMNO3mj9Zu+qPNr/nQPHIaGZksPdD34qDUnBl -eRWVGNTyEGQsRPNN0RtFj8ifa4+OWiE30n95PBq2bUGZj4ECQQDZvS5X/4jYxnzw -CscaL4vO9OCVd/Fzdpfak0DQE/KCVmZxzcXu6Q8WuhybCynX84WKHQxuFAo+nBvr -kgtWXX7dAkEA85Vs5ehuDujBKCu3NJYI2R5ie49L9fEMFJVZK9FpkKacoAkET5BZ -UzaZrx4Fg3Zhcv1TssZKSyle+2lYiIydWQJBAMW8/aJi6WdcUsg4MXrBZSlsz6xO -AhOGxv90LS8KfnkJd/2wDyoZs19DY4kWSUjZ2hOEr+4j+u3DHcQAnJUxUW0CQGXP -DrUJcPbKUfF4VBqmmwwkpwT938Hr/iCcS6kE3hqXiN9a5XJb4vnk2FdZNPS9hf2J -5HHUbzj7EbgDT/3CyAECQG0qv6LNQaQMm2lmQKmqpi43Bqj9wvx0xGai1qCOvSeL -rpxCHbX0xSJh0s8j7exRHMF8W16DHjjkc265YdWPXWo= ------END RSA PRIVATE KEY----- -` - -var testCertRequest = ` ------BEGIN CERTIFICATE REQUEST----- -MIICYDCCAckCAQAwgcUxFDASBgNVBAMMC2V4YW1wbGUuY29tMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVBpcmF0ZSBIYXJib3IxGTAXBgNVBAkM -EDU4NzkgQ290dG9uIExpbmsxEzARBgNVBBEMCjk1NTU5LTEyMjcxFTATBgNVBAoM -DEV4YW1wbGUsIEluYzEoMCYGA1UECwwfRGVwYXJ0bWVudCBvZiBUZXJyYWZvcm0g -VGVzdGluZzEKMAgGA1UEBRMBMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA -qLFq7Tpmlt0uDCCn5bA/oTj4v16/pXXaD+Ice2bS4rBH2UUM2gca5U4j8QCxrIxh -91mBvloE4VS5xrIGotAwoMgwK3E2md5kzQJToDve/hm8JNOcms+OAOjfjajPc40e -+ue9roT8VjWGU0wz7ttQNuao56GXYr5kOpcfiZMs7RcCAwEAAaBaMFgGCSqGSIb3 -DQEJDjFLMEkwLwYDVR0RBCgwJoILZXhhbXBsZS5jb22CC2V4YW1wbGUubmV0hwR/ -AAABhwR/AAACMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMA0GCSqGSIb3DQEBBQUA -A4GBAGEDWUYnGygtnvScamz3o4PuVMFubBfqIdWCu02hBgzL3Hi3/UkOEsV028GM 
-M3YMB+it7U8eDdT2XjzBDlvpxWT1hXWnmJFu6z6B8N/JFk8fOkaP7U6YjZlG5N9m -L1A4WtQz0SgXcnIujKisqIaymYrvpANnm4IsqTKsnwZD7CsQ ------END CERTIFICATE REQUEST----- -` - -var testCAPrivateKey = ` ------BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQC7QNFtw54heoD9KL2s2Qr7utKZFM/8GXYHh3Y5/Zis9USlJ7Mc -Lorbmm9Lopnr5zUBZULAxAgX51X0FbifK8Re3JIZvpFRyxNw8aWYBnOk/sX7UhUH -pI139dSAhkNAMkRQd1ySpDP+4okCptgZPs7h0bXwoYmWMNFKlaRZHuAQLQIDAQAB -AoGAQ/YwjLAU8n2t1zQ0M0nLDLYvvVOqcQskpXLq2/1Irm2OborMHQxfZXjVsBPh -3ZbazBjec2wyq8pQjfhcO5j8+fj9zLtRNDpWEa9t/VDky0MSGezQyLL1J5+htFDJ -JDCkKK441IWKGCMC31hoVP6PvE/3G2+vWAkrkT4U7ekLQVkCQQD1/RKMxDFJ57Qr -Zlu1y72dnGLsGqoxeNaco6G5JXAEEcWTx8qXghKQX0uHxooeRYQRupOGLBo1Js1p -/AZDR8inAkEAwt/J0GDsojV89RbpJ0h7C1kcxNULooCYQZs/rmJcVXSs6pUIIFdI -oYQIEGnRsfQUPo6EUUGMKh8sSEjF6R8nCwJBAMKYuoT7a9aAYwp2RhTSIaW+oo8P -JRZP9s8hr31tPWkqufeHdSBYOOFXUcQObxM1gR4ZUD0zRGRJ1vSB+F5fOj8CQEuG -HZnTpoHrBuWZnnyp+33XaG3kP2EYQ2nRuClmV3CLCmTTo1WdXjmyiMmLqUg1Vw8z -fpZbN+4vLKNLCOCjQScCQDWmNDrie4Omd5wWKV5B+LVZO8/xMlub6IEioZpMfDGZ -q1Ov/Qw2ge3yumfO+6GzKG0k13yYEn1AcatF5lP8BYY= ------END RSA PRIVATE KEY----- -` - -var testCACert = ` ------BEGIN CERTIFICATE----- -MIIDVTCCAr6gAwIBAgIJALLsVgWAcCvxMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNUGlyYXRlIEhhcmJvcjEVMBMG -A1UEChMMRXhhbXBsZSwgSW5jMSEwHwYDVQQLExhEZXBhcnRtZW50IG9mIENBIFRl -c3RpbmcxDTALBgNVBAMTBHJvb3QwHhcNMTUxMTE0MTY1MTQ0WhcNMTUxMjE0MTY1 -MTQ0WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVBpcmF0 -ZSBIYXJib3IxFTATBgNVBAoTDEV4YW1wbGUsIEluYzEhMB8GA1UECxMYRGVwYXJ0 -bWVudCBvZiBDQSBUZXN0aW5nMQ0wCwYDVQQDEwRyb290MIGfMA0GCSqGSIb3DQEB -AQUAA4GNADCBiQKBgQC7QNFtw54heoD9KL2s2Qr7utKZFM/8GXYHh3Y5/Zis9USl -J7McLorbmm9Lopnr5zUBZULAxAgX51X0FbifK8Re3JIZvpFRyxNw8aWYBnOk/sX7 -UhUHpI139dSAhkNAMkRQd1ySpDP+4okCptgZPs7h0bXwoYmWMNFKlaRZHuAQLQID -AQABo4HgMIHdMB0GA1UdDgQWBBQyrsMhTd85ATqm9vNybTtAbwnGkDCBrQYDVR0j -BIGlMIGigBQyrsMhTd85ATqm9vNybTtAbwnGkKF/pH0wezELMAkGA1UEBhMCVVMx -CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1QaXJhdGUgSGFyYm9yMRUwEwYDVQQKEwxF 
-eGFtcGxlLCBJbmMxITAfBgNVBAsTGERlcGFydG1lbnQgb2YgQ0EgVGVzdGluZzEN -MAsGA1UEAxMEcm9vdIIJALLsVgWAcCvxMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN -AQEFBQADgYEAuJ7JGZlSzbQOuAFz2t3c1pQzUIiS74blFbg6RPvNPSSjoBg3Ly61 -FbliR8P3qiSWA/X03/XSMTH1XkHU8re+P0uILUzLJkKBkdHJfdwfk8kifDjdO14+ -tffPaqAEFUkwhbiQUoj9aeTOOS6kEjbMV6+o7fsz5pPUHbj/l4idys0= ------END CERTIFICATE----- -` diff --git a/builtin/providers/tls/resource_cert_request.go b/builtin/providers/tls/resource_cert_request.go deleted file mode 100644 index 267f0db39..000000000 --- a/builtin/providers/tls/resource_cert_request.go +++ /dev/null @@ -1,127 +0,0 @@ -package tls - -import ( - "crypto/rand" - "crypto/x509" - "encoding/pem" - "fmt" - "net" - - "github.com/hashicorp/terraform/helper/schema" -) - -const pemCertReqType = "CERTIFICATE REQUEST" - -func resourceCertRequest() *schema.Resource { - return &schema.Resource{ - Create: CreateCertRequest, - Delete: DeleteCertRequest, - Read: ReadCertRequest, - - Schema: map[string]*schema.Schema{ - - "dns_names": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "List of DNS names to use as subjects of the certificate", - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "ip_addresses": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "List of IP addresses to use as subjects of the certificate", - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "key_algorithm": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Name of the algorithm to use to generate the certificate's private key", - ForceNew: true, - }, - - "private_key_pem": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "PEM-encoded private key that the certificate will belong to", - ForceNew: true, - StateFunc: func(v interface{}) string { - return hashForState(v.(string)) - }, - }, - - "subject": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: 
nameSchema, - ForceNew: true, - }, - - "cert_request_pem": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func CreateCertRequest(d *schema.ResourceData, meta interface{}) error { - key, err := parsePrivateKey(d, "private_key_pem", "key_algorithm") - if err != nil { - return err - } - - subjectConfs := d.Get("subject").([]interface{}) - if len(subjectConfs) != 1 { - return fmt.Errorf("must have exactly one 'subject' block") - } - subjectConf := subjectConfs[0].(map[string]interface{}) - subject, err := nameFromResourceData(subjectConf) - if err != nil { - return fmt.Errorf("invalid subject block: %s", err) - } - - certReq := x509.CertificateRequest{ - Subject: *subject, - } - - dnsNamesI := d.Get("dns_names").([]interface{}) - for _, nameI := range dnsNamesI { - certReq.DNSNames = append(certReq.DNSNames, nameI.(string)) - } - ipAddressesI := d.Get("ip_addresses").([]interface{}) - for _, ipStrI := range ipAddressesI { - ip := net.ParseIP(ipStrI.(string)) - if ip == nil { - return fmt.Errorf("invalid IP address %#v", ipStrI.(string)) - } - certReq.IPAddresses = append(certReq.IPAddresses, ip) - } - - certReqBytes, err := x509.CreateCertificateRequest(rand.Reader, &certReq, key) - if err != nil { - return fmt.Errorf("Error creating certificate request: %s", err) - } - certReqPem := string(pem.EncodeToMemory(&pem.Block{Type: pemCertReqType, Bytes: certReqBytes})) - - d.SetId(hashForState(string(certReqBytes))) - d.Set("cert_request_pem", certReqPem) - - return nil -} - -func DeleteCertRequest(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func ReadCertRequest(d *schema.ResourceData, meta interface{}) error { - return nil -} diff --git a/builtin/providers/tls/resource_cert_request_test.go b/builtin/providers/tls/resource_cert_request_test.go deleted file mode 100644 index c31b8d6a1..000000000 --- a/builtin/providers/tls/resource_cert_request_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package tls - 
-import ( - "crypto/x509" - "encoding/pem" - "fmt" - "strings" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestCertRequest(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: fmt.Sprintf(` - resource "tls_cert_request" "test" { - subject { - common_name = "example.com" - organization = "Example, Inc" - organizational_unit = "Department of Terraform Testing" - street_address = ["5879 Cotton Link"] - locality = "Pirate Harbor" - province = "CA" - country = "US" - postal_code = "95559-1227" - serial_number = "2" - } - - dns_names = [ - "example.com", - "example.net", - ] - - ip_addresses = [ - "127.0.0.1", - "127.0.0.2", - ] - - key_algorithm = "RSA" - private_key_pem = < (2 * time.Minute) { - return fmt.Errorf("certificate validity begins more than two minutes in the past") - } - if cert.NotAfter.Sub(cert.NotBefore) != time.Hour { - return fmt.Errorf("certificate validity is not one hour") - } - - caBlock, _ := pem.Decode([]byte(testCACert)) - caCert, err := x509.ParseCertificate(caBlock.Bytes) - if err != nil { - return fmt.Errorf("error parsing ca cert: %s", err) - } - certPool := x509.NewCertPool() - - // Verify certificate - _, err = cert.Verify(x509.VerifyOptions{Roots: certPool}) - if err == nil { - return errors.New("incorrectly verified certificate") - } else if _, ok := err.(x509.UnknownAuthorityError); !ok { - return fmt.Errorf("incorrect verify error: expected UnknownAuthorityError, got %v", err) - } - certPool.AddCert(caCert) - if _, err = cert.Verify(x509.VerifyOptions{Roots: certPool}); err != nil { - return fmt.Errorf("verify failed: %s", err) - } - - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/tls/resource_private_key.go b/builtin/providers/tls/resource_private_key.go deleted file mode 100644 index 8270cc624..000000000 --- a/builtin/providers/tls/resource_private_key.go +++ /dev/null @@ 
-1,178 +0,0 @@ -package tls - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - - "golang.org/x/crypto/ssh" - - "github.com/hashicorp/terraform/helper/schema" -) - -type keyAlgo func(d *schema.ResourceData) (interface{}, error) -type keyParser func([]byte) (interface{}, error) - -var keyAlgos map[string]keyAlgo = map[string]keyAlgo{ - "RSA": func(d *schema.ResourceData) (interface{}, error) { - rsaBits := d.Get("rsa_bits").(int) - return rsa.GenerateKey(rand.Reader, rsaBits) - }, - "ECDSA": func(d *schema.ResourceData) (interface{}, error) { - curve := d.Get("ecdsa_curve").(string) - switch curve { - case "P224": - return ecdsa.GenerateKey(elliptic.P224(), rand.Reader) - case "P256": - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case "P384": - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case "P521": - return ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - default: - return nil, fmt.Errorf("invalid ecdsa_curve; must be P224, P256, P384 or P521") - } - }, -} - -var keyParsers map[string]keyParser = map[string]keyParser{ - "RSA": func(der []byte) (interface{}, error) { - return x509.ParsePKCS1PrivateKey(der) - }, - "ECDSA": func(der []byte) (interface{}, error) { - return x509.ParseECPrivateKey(der) - }, -} - -func resourcePrivateKey() *schema.Resource { - return &schema.Resource{ - Create: CreatePrivateKey, - Delete: DeletePrivateKey, - Read: ReadPrivateKey, - - Schema: map[string]*schema.Schema{ - "algorithm": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Name of the algorithm to use to generate the private key", - ForceNew: true, - }, - - "rsa_bits": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Description: "Number of bits to use when generating an RSA key", - ForceNew: true, - Default: 2048, - }, - - "ecdsa_curve": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "ECDSA curve to use when 
generating a key", - ForceNew: true, - Default: "P224", - }, - - "private_key_pem": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "public_key_pem": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "public_key_openssh": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func CreatePrivateKey(d *schema.ResourceData, meta interface{}) error { - keyAlgoName := d.Get("algorithm").(string) - var keyFunc keyAlgo - var ok bool - if keyFunc, ok = keyAlgos[keyAlgoName]; !ok { - return fmt.Errorf("invalid key_algorithm %#v", keyAlgoName) - } - - key, err := keyFunc(d) - if err != nil { - return err - } - - var keyPemBlock *pem.Block - switch k := key.(type) { - case *rsa.PrivateKey: - keyPemBlock = &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(k), - } - case *ecdsa.PrivateKey: - keyBytes, err := x509.MarshalECPrivateKey(k) - if err != nil { - return fmt.Errorf("error encoding key to PEM: %s", err) - } - keyPemBlock = &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: keyBytes, - } - default: - return fmt.Errorf("unsupported private key type") - } - keyPem := string(pem.EncodeToMemory(keyPemBlock)) - - pubKey := publicKey(key) - pubKeyBytes, err := x509.MarshalPKIXPublicKey(pubKey) - if err != nil { - return fmt.Errorf("failed to marshal public key: %s", err) - } - pubKeyPemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: pubKeyBytes, - } - - d.SetId(hashForState(string((pubKeyBytes)))) - d.Set("private_key_pem", keyPem) - d.Set("public_key_pem", string(pem.EncodeToMemory(pubKeyPemBlock))) - - sshPubKey, err := ssh.NewPublicKey(pubKey) - if err == nil { - // Not all EC types can be SSH keys, so we'll produce this only - // if an appropriate type was selected. 
- sshPubKeyBytes := ssh.MarshalAuthorizedKey(sshPubKey) - d.Set("public_key_openssh", string(sshPubKeyBytes)) - } else { - d.Set("public_key_openssh", "") - } - - return nil -} - -func DeletePrivateKey(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func ReadPrivateKey(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func publicKey(priv interface{}) interface{} { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &k.PublicKey - case *ecdsa.PrivateKey: - return &k.PublicKey - default: - return nil - } -} diff --git a/builtin/providers/tls/resource_private_key_test.go b/builtin/providers/tls/resource_private_key_test.go deleted file mode 100644 index 5c25be87d..000000000 --- a/builtin/providers/tls/resource_private_key_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package tls - -import ( - "fmt" - "strings" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestPrivateKeyRSA(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: ` - resource "tls_private_key" "test" { - algorithm = "RSA" - } - output "private_key_pem" { - value = "${tls_private_key.test.private_key_pem}" - } - output "public_key_pem" { - value = "${tls_private_key.test.public_key_pem}" - } - output "public_key_openssh" { - value = "${tls_private_key.test.public_key_openssh}" - } - `, - Check: func(s *terraform.State) error { - gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"].Value - gotPrivate, ok := gotPrivateUntyped.(string) - if !ok { - return fmt.Errorf("output for \"private_key_pem\" is not a string") - } - - if !strings.HasPrefix(gotPrivate, "-----BEGIN RSA PRIVATE KEY----") { - return fmt.Errorf("private key is missing RSA key PEM preamble") - } - if len(gotPrivate) > 1700 { - return fmt.Errorf("private key PEM looks too long for a 2048-bit key (got %v characters)", len(gotPrivate)) - } - - 
gotPublicUntyped := s.RootModule().Outputs["public_key_pem"].Value - gotPublic, ok := gotPublicUntyped.(string) - if !ok { - return fmt.Errorf("output for \"public_key_pem\" is not a string") - } - if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { - return fmt.Errorf("public key is missing public key PEM preamble") - } - - gotPublicSSHUntyped := s.RootModule().Outputs["public_key_openssh"].Value - gotPublicSSH, ok := gotPublicSSHUntyped.(string) - if !ok { - return fmt.Errorf("output for \"public_key_openssh\" is not a string") - } - if !strings.HasPrefix(gotPublicSSH, "ssh-rsa ") { - return fmt.Errorf("SSH public key is missing ssh-rsa prefix") - } - - return nil - }, - }, - r.TestStep{ - Config: ` - resource "tls_private_key" "test" { - algorithm = "RSA" - rsa_bits = 4096 - } - output "key_pem" { - value = "${tls_private_key.test.private_key_pem}" - } - `, - Check: func(s *terraform.State) error { - gotUntyped := s.RootModule().Outputs["key_pem"].Value - got, ok := gotUntyped.(string) - if !ok { - return fmt.Errorf("output for \"key_pem\" is not a string") - } - if !strings.HasPrefix(got, "-----BEGIN RSA PRIVATE KEY----") { - return fmt.Errorf("key is missing RSA key PEM preamble") - } - if len(got) < 1700 { - return fmt.Errorf("key PEM looks too short for a 4096-bit key (got %v characters)", len(got)) - } - return nil - }, - }, - }, - }) -} - -func TestPrivateKeyECDSA(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: ` - resource "tls_private_key" "test" { - algorithm = "ECDSA" - } - output "private_key_pem" { - value = "${tls_private_key.test.private_key_pem}" - } - output "public_key_pem" { - value = "${tls_private_key.test.public_key_pem}" - } - output "public_key_openssh" { - value = "${tls_private_key.test.public_key_openssh}" - } - `, - Check: func(s *terraform.State) error { - gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"].Value - gotPrivate, ok := 
gotPrivateUntyped.(string) - if !ok { - return fmt.Errorf("output for \"private_key_pem\" is not a string") - } - - if !strings.HasPrefix(gotPrivate, "-----BEGIN EC PRIVATE KEY----") { - return fmt.Errorf("Private key is missing EC key PEM preamble") - } - - gotPublicUntyped := s.RootModule().Outputs["public_key_pem"].Value - gotPublic, ok := gotPublicUntyped.(string) - if !ok { - return fmt.Errorf("output for \"public_key_pem\" is not a string") - } - - if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { - return fmt.Errorf("public key is missing public key PEM preamble") - } - - gotPublicSSH := s.RootModule().Outputs["public_key_openssh"].Value.(string) - if gotPublicSSH != "" { - return fmt.Errorf("P224 EC key should not generate OpenSSH public key") - } - - return nil - }, - }, - r.TestStep{ - Config: ` - resource "tls_private_key" "test" { - algorithm = "ECDSA" - ecdsa_curve = "P256" - } - output "private_key_pem" { - value = "${tls_private_key.test.private_key_pem}" - } - output "public_key_pem" { - value = "${tls_private_key.test.public_key_pem}" - } - output "public_key_openssh" { - value = "${tls_private_key.test.public_key_openssh}" - } - `, - Check: func(s *terraform.State) error { - gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"].Value - gotPrivate, ok := gotPrivateUntyped.(string) - if !ok { - return fmt.Errorf("output for \"private_key_pem\" is not a string") - } - if !strings.HasPrefix(gotPrivate, "-----BEGIN EC PRIVATE KEY----") { - return fmt.Errorf("Private key is missing EC key PEM preamble") - } - - gotPublicUntyped := s.RootModule().Outputs["public_key_pem"].Value - gotPublic, ok := gotPublicUntyped.(string) - if !ok { - return fmt.Errorf("output for \"public_key_pem\" is not a string") - } - if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { - return fmt.Errorf("public key is missing public key PEM preamble") - } - - gotPublicSSHUntyped := s.RootModule().Outputs["public_key_openssh"].Value - 
gotPublicSSH, ok := gotPublicSSHUntyped.(string) - if !ok { - return fmt.Errorf("output for \"public_key_openssh\" is not a string") - } - if !strings.HasPrefix(gotPublicSSH, "ecdsa-sha2-nistp256 ") { - return fmt.Errorf("P256 SSH public key is missing ecdsa prefix") - } - - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/tls/resource_self_signed_cert.go b/builtin/providers/tls/resource_self_signed_cert.go deleted file mode 100644 index 29e04154d..000000000 --- a/builtin/providers/tls/resource_self_signed_cert.go +++ /dev/null @@ -1,101 +0,0 @@ -package tls - -import ( - "crypto/x509" - "fmt" - "net" - - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceSelfSignedCert() *schema.Resource { - s := resourceCertificateCommonSchema() - - s["subject"] = &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: nameSchema, - ForceNew: true, - } - - s["dns_names"] = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "List of DNS names to use as subjects of the certificate", - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - } - - s["ip_addresses"] = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Description: "List of IP addresses to use as subjects of the certificate", - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - } - - s["key_algorithm"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Name of the algorithm to use to generate the certificate's private key", - ForceNew: true, - } - - s["private_key_pem"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "PEM-encoded private key that the certificate will belong to", - ForceNew: true, - StateFunc: func(v interface{}) string { - return hashForState(v.(string)) - }, - } - - return &schema.Resource{ - Create: CreateSelfSignedCert, - Delete: DeleteCertificate, - Read: ReadCertificate, - Schema: s, - } -} - -func CreateSelfSignedCert(d 
*schema.ResourceData, meta interface{}) error { - key, err := parsePrivateKey(d, "private_key_pem", "key_algorithm") - if err != nil { - return err - } - - subjectConfs := d.Get("subject").([]interface{}) - if len(subjectConfs) != 1 { - return fmt.Errorf("must have exactly one 'subject' block") - } - subjectConf := subjectConfs[0].(map[string]interface{}) - subject, err := nameFromResourceData(subjectConf) - if err != nil { - return fmt.Errorf("invalid subject block: %s", err) - } - - cert := x509.Certificate{ - Subject: *subject, - BasicConstraintsValid: true, - } - - dnsNamesI := d.Get("dns_names").([]interface{}) - for _, nameI := range dnsNamesI { - cert.DNSNames = append(cert.DNSNames, nameI.(string)) - } - ipAddressesI := d.Get("ip_addresses").([]interface{}) - for _, ipStrI := range ipAddressesI { - ip := net.ParseIP(ipStrI.(string)) - if ip == nil { - return fmt.Errorf("invalid IP address %#v", ipStrI.(string)) - } - cert.IPAddresses = append(cert.IPAddresses, ip) - } - - return createCertificate(d, &cert, &cert, publicKey(key), key) -} diff --git a/builtin/providers/tls/resource_self_signed_cert_test.go b/builtin/providers/tls/resource_self_signed_cert_test.go deleted file mode 100644 index 22cc66032..000000000 --- a/builtin/providers/tls/resource_self_signed_cert_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package tls - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "strings" - "testing" - "time" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestSelfSignedCert(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: fmt.Sprintf(` - resource "tls_self_signed_cert" "test" { - subject { - common_name = "example.com" - organization = "Example, Inc" - organizational_unit = "Department of Terraform Testing" - street_address = ["5879 Cotton Link"] - locality = "Pirate Harbor" - province = "CA" - country = "US" - postal_code = "95559-1227" 
- serial_number = "2" - } - - dns_names = [ - "example.com", - "example.net", - ] - - ip_addresses = [ - "127.0.0.1", - "127.0.0.2", - ] - - validity_period_hours = 1 - - allowed_uses = [ - "key_encipherment", - "digital_signature", - "server_auth", - "client_auth", - ] - - key_algorithm = "RSA" - private_key_pem = < (2 * time.Minute) { - return fmt.Errorf("certificate validity begins more than two minutes in the past") - } - if cert.NotAfter.Sub(cert.NotBefore) != time.Hour { - return fmt.Errorf("certificate validity is not one hour") - } - - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/tls/util.go b/builtin/providers/tls/util.go deleted file mode 100644 index b1ff32e5b..000000000 --- a/builtin/providers/tls/util.go +++ /dev/null @@ -1,76 +0,0 @@ -package tls - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func decodePEM(d *schema.ResourceData, pemKey, pemType string) (*pem.Block, error) { - block, _ := pem.Decode([]byte(d.Get(pemKey).(string))) - if block == nil { - return nil, fmt.Errorf("no PEM block found in %s", pemKey) - } - if pemType != "" && block.Type != pemType { - return nil, fmt.Errorf("invalid PEM type in %s: %s", pemKey, block.Type) - } - - return block, nil -} - -func parsePrivateKey(d *schema.ResourceData, pemKey, algoKey string) (interface{}, error) { - algoName := d.Get(algoKey).(string) - - keyFunc, ok := keyParsers[algoName] - if !ok { - return nil, fmt.Errorf("invalid %s: %#v", algoKey, algoName) - } - - block, err := decodePEM(d, pemKey, "") - if err != nil { - return nil, err - } - - key, err := keyFunc(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to decode %s: %s", pemKey, err) - } - - return key, nil -} - -func parseCertificate(d *schema.ResourceData, pemKey string) (*x509.Certificate, error) { - block, err := decodePEM(d, pemKey, "") - if err != nil { - return nil, err - } - - certs, err := x509.ParseCertificates(block.Bytes) - if err != 
nil { - return nil, fmt.Errorf("failed to parse %s: %s", pemKey, err) - } - if len(certs) < 1 { - return nil, fmt.Errorf("no certificates found in %s", pemKey) - } - if len(certs) > 1 { - return nil, fmt.Errorf("multiple certificates found in %s", pemKey) - } - - return certs[0], nil -} - -func parseCertificateRequest(d *schema.ResourceData, pemKey string) (*x509.CertificateRequest, error) { - block, err := decodePEM(d, pemKey, pemCertReqType) - if err != nil { - return nil, err - } - - certReq, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to parse %s: %s", pemKey, err) - } - - return certReq, nil -} diff --git a/builtin/providers/triton/provider.go b/builtin/providers/triton/provider.go deleted file mode 100644 index 8a56b5dc4..000000000 --- a/builtin/providers/triton/provider.go +++ /dev/null @@ -1,180 +0,0 @@ -package triton - -import ( - "crypto/md5" - "encoding/base64" - "errors" - "sort" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" - "github.com/joyent/triton-go/authentication" -) - -// Provider returns a terraform.ResourceProvider. 
-func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "account": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_ACCOUNT", "SDC_ACCOUNT"}, ""), - }, - - "url": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_URL", "SDC_URL"}, "https://us-west-1.api.joyentcloud.com"), - }, - - "key_material": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_MATERIAL", "SDC_KEY_MATERIAL"}, ""), - }, - - "key_id": { - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_ID", "SDC_KEY_ID"}, ""), - }, - - "insecure_skip_tls_verify": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("TRITON_SKIP_TLS_VERIFY", ""), - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "triton_firewall_rule": resourceFirewallRule(), - "triton_machine": resourceMachine(), - "triton_key": resourceKey(), - "triton_vlan": resourceVLAN(), - "triton_fabric": resourceFabric(), - }, - ConfigureFunc: providerConfigure, - } -} - -type Config struct { - Account string - KeyMaterial string - KeyID string - URL string - InsecureSkipTLSVerify bool -} - -func (c Config) validate() error { - var err *multierror.Error - - if c.URL == "" { - err = multierror.Append(err, errors.New("URL must be configured for the Triton provider")) - } - if c.KeyID == "" { - err = multierror.Append(err, errors.New("Key ID must be configured for the Triton provider")) - } - if c.Account == "" { - err = multierror.Append(err, errors.New("Account must be configured for the Triton provider")) - } - - return err.ErrorOrNil() -} - -func (c Config) getTritonClient() (*triton.Client, error) { - var signer authentication.Signer - var err error - if c.KeyMaterial == "" { - signer, err = authentication.NewSSHAgentSigner(c.KeyID, 
c.Account) - if err != nil { - return nil, errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err) - } - } else { - signer, err = authentication.NewPrivateKeySigner(c.KeyID, []byte(c.KeyMaterial), c.Account) - if err != nil { - return nil, errwrap.Wrapf("Error Creating SSH Private Key Signer: {{err}}", err) - } - } - - client, err := triton.NewClient(c.URL, c.Account, signer) - if err != nil { - return nil, errwrap.Wrapf("Error Creating Triton Client: {{err}}", err) - } - - if c.InsecureSkipTLSVerify { - client.InsecureSkipTLSVerify() - } - - return client, nil -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Account: d.Get("account").(string), - URL: d.Get("url").(string), - KeyID: d.Get("key_id").(string), - - InsecureSkipTLSVerify: d.Get("insecure_skip_tls_verify").(bool), - } - - if keyMaterial, ok := d.GetOk("key_material"); ok { - config.KeyMaterial = keyMaterial.(string) - } - - if err := config.validate(); err != nil { - return nil, err - } - - client, err := config.getTritonClient() - if err != nil { - return nil, err - } - - return client, nil -} - -func resourceExists(resource interface{}, err error) (bool, error) { - if err != nil { - if triton.IsResourceNotFound(err) { - return false, nil - } - - return false, err - } - - return resource != nil, nil -} - -func stableMapHash(input map[string]string) string { - keys := make([]string, 0, len(input)) - for k := range input { - keys = append(keys, k) - } - sort.Strings(keys) - - hash := md5.New() - for _, key := range keys { - hash.Write([]byte(key)) - hash.Write([]byte(input[key])) - } - - return base64.StdEncoding.EncodeToString(hash.Sum([]byte{})) -} - -var fastResourceTimeout = &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(1 * time.Minute), - Read: schema.DefaultTimeout(30 * time.Second), - Update: schema.DefaultTimeout(1 * time.Minute), - Delete: schema.DefaultTimeout(1 * time.Minute), -} - -var slowResourceTimeout = 
&schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Read: schema.DefaultTimeout(30 * time.Second), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), -} diff --git a/builtin/providers/triton/provider_test.go b/builtin/providers/triton/provider_test.go deleted file mode 100644 index 11b962aca..000000000 --- a/builtin/providers/triton/provider_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package triton - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "triton": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - sdcURL := os.Getenv("SDC_URL") - account := os.Getenv("SDC_ACCOUNT") - keyID := os.Getenv("SDC_KEY_ID") - - if sdcURL == "" { - sdcURL = "https://us-west-1.api.joyentcloud.com" - } - - if sdcURL == "" || account == "" || keyID == "" { - t.Fatal("SDC_ACCOUNT and SDC_KEY_ID must be set for acceptance tests. 
To test with the SSH" + - " private key signer, SDC_KEY_MATERIAL must also be set.") - } -} diff --git a/builtin/providers/triton/resource_fabric.go b/builtin/providers/triton/resource_fabric.go deleted file mode 100644 index 9b296d40a..000000000 --- a/builtin/providers/triton/resource_fabric.go +++ /dev/null @@ -1,179 +0,0 @@ -package triton - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/joyent/triton-go" -) - -func resourceFabric() *schema.Resource { - return &schema.Resource{ - Create: resourceFabricCreate, - Exists: resourceFabricExists, - Read: resourceFabricRead, - Delete: resourceFabricDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Description: "Network name", - Required: true, - ForceNew: true, - Type: schema.TypeString, - }, - "public": { - Description: "Whether or not this is an RFC1918 network", - Computed: true, - Type: schema.TypeBool, - }, - "fabric": { - Description: "Whether or not this network is on a fabric", - Computed: true, - Type: schema.TypeBool, - }, - "description": { - Description: "Description of network", - Optional: true, - ForceNew: true, - Type: schema.TypeString, - }, - "subnet": { - Description: "CIDR formatted string describing network address space", - Required: true, - ForceNew: true, - Type: schema.TypeString, - }, - "provision_start_ip": { - Description: "First IP on the network that can be assigned", - Required: true, - ForceNew: true, - Type: schema.TypeString, - }, - "provision_end_ip": { - Description: "Last assignable IP on the network", - Required: true, - ForceNew: true, - Type: schema.TypeString, - }, - "gateway": { - Description: "Gateway IP", - Optional: true, - ForceNew: true, - Type: schema.TypeString, - }, - "resolvers": { - Description: "List of IP addresses for DNS resolvers", - Optional: true, - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "routes": { - Description: "Map of CIDR block to Gateway IP 
address", - Computed: true, - Optional: true, - ForceNew: true, - Type: schema.TypeMap, - }, - "internet_nat": { - Description: "Whether or not a NAT zone is provisioned at the Gateway IP address", - Computed: true, - Optional: true, - ForceNew: true, - Type: schema.TypeBool, - }, - "vlan_id": { - Description: "VLAN on which the network exists", - Required: true, - ForceNew: true, - Type: schema.TypeInt, - }, - }, - } -} - -func resourceFabricCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - var resolvers []string - for _, resolver := range d.Get("resolvers").([]interface{}) { - resolvers = append(resolvers, resolver.(string)) - } - - routes := map[string]string{} - for cidr, v := range d.Get("routes").(map[string]interface{}) { - ip, ok := v.(string) - if !ok { - return fmt.Errorf(`Cannot use "%v" as an IP address`, v) - } - routes[cidr] = ip - } - - fabric, err := client.Fabrics().CreateFabricNetwork(context.Background(), &triton.CreateFabricNetworkInput{ - FabricVLANID: d.Get("vlan_id").(int), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Subnet: d.Get("subnet").(string), - ProvisionStartIP: d.Get("provision_start_ip").(string), - ProvisionEndIP: d.Get("provision_end_ip").(string), - Gateway: d.Get("gateway").(string), - Resolvers: resolvers, - Routes: routes, - InternetNAT: d.Get("internet_nat").(bool), - }, - ) - if err != nil { - return err - } - - d.SetId(fabric.Id) - - return resourceFabricRead(d, meta) -} - -func resourceFabricExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*triton.Client) - - return resourceExists(client.Fabrics().GetFabricNetwork(context.Background(), &triton.GetFabricNetworkInput{ - FabricVLANID: d.Get("vlan_id").(int), - NetworkID: d.Id(), - })) -} - -func resourceFabricRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - fabric, err := 
client.Fabrics().GetFabricNetwork(context.Background(), &triton.GetFabricNetworkInput{ - FabricVLANID: d.Get("vlan_id").(int), - NetworkID: d.Id(), - }) - if err != nil { - return err - } - - d.SetId(fabric.Id) - d.Set("name", fabric.Name) - d.Set("public", fabric.Public) - d.Set("fabric", fabric.Fabric) - d.Set("description", fabric.Description) - d.Set("subnet", fabric.Subnet) - d.Set("provision_start_ip", fabric.ProvisioningStartIP) - d.Set("provision_end_ip", fabric.ProvisioningEndIP) - d.Set("gateway", fabric.Gateway) - d.Set("resolvers", fabric.Resolvers) - d.Set("routes", fabric.Routes) - d.Set("internet_nat", fabric.InternetNAT) - d.Set("vlan_id", d.Get("vlan_id").(int)) - - return nil -} - -func resourceFabricDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - return client.Fabrics().DeleteFabricNetwork(context.Background(), &triton.DeleteFabricNetworkInput{ - FabricVLANID: d.Get("vlan_id").(int), - NetworkID: d.Id(), - }) -} diff --git a/builtin/providers/triton/resource_fabric_test.go b/builtin/providers/triton/resource_fabric_test.go deleted file mode 100644 index 48198da23..000000000 --- a/builtin/providers/triton/resource_fabric_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" -) - -func TestAccTritonFabric_basic(t *testing.T) { - fabricName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - config := fmt.Sprintf(testAccTritonFabric_basic, acctest.RandIntRange(3, 2049), fabricName, fabricName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonFabricDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckTritonFabricExists("triton_fabric.test"), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - ), - }, - }, - }) -} - -func testCheckTritonFabricExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - conn := testAccProvider.Meta().(*triton.Client) - - vlanID, err := strconv.Atoi(rs.Primary.Attributes["vlan_id"]) - if err != nil { - return err - } - - exists, err := resourceExists(conn.Fabrics().GetFabricNetwork(context.Background(), &triton.GetFabricNetworkInput{ - FabricVLANID: vlanID, - NetworkID: rs.Primary.ID, - })) - if err != nil { - return fmt.Errorf("Error: Check Fabric Exists: %s", err) - } - - if exists { - return nil - } - - return fmt.Errorf("Error: Fabric %q (VLAN %d) Does Not Exist", rs.Primary.ID, vlanID) - } -} - -func testCheckTritonFabricDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*triton.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "triton_fabric" { - continue - } - - vlanID, err := strconv.Atoi(rs.Primary.Attributes["vlan_id"]) - if err != nil { - return err - } - - exists, err := resourceExists(conn.Fabrics().GetFabricNetwork(context.Background(), &triton.GetFabricNetworkInput{ - FabricVLANID: vlanID, - NetworkID: rs.Primary.ID, - })) - if err != nil { - return nil - } - - if exists { - return fmt.Errorf("Error: Fabric %q (VLAN %d) Still Exists", rs.Primary.ID, vlanID) - } - - return nil - } - - return nil -} - -var testAccTritonFabric_basic = ` -resource "triton_vlan" "test" { - vlan_id = "%d" - name = "%s" - description = "testAccTritonFabric_basic" -} - -resource "triton_fabric" "test" { - name = "%s" - description = "test network" - vlan_id = "${triton_vlan.test.id}" - - subnet = "10.0.0.0/22" - gateway = "10.0.0.1" - provision_start_ip = "10.0.0.5" - provision_end_ip = "10.0.3.250" - - resolvers = ["8.8.8.8", 
"8.8.4.4"] -} -` diff --git a/builtin/providers/triton/resource_firewall_rule.go b/builtin/providers/triton/resource_firewall_rule.go deleted file mode 100644 index 06abde0e2..000000000 --- a/builtin/providers/triton/resource_firewall_rule.go +++ /dev/null @@ -1,113 +0,0 @@ -package triton - -import ( - "context" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/joyent/triton-go" -) - -func resourceFirewallRule() *schema.Resource { - return &schema.Resource{ - Create: resourceFirewallRuleCreate, - Exists: resourceFirewallRuleExists, - Read: resourceFirewallRuleRead, - Update: resourceFirewallRuleUpdate, - Delete: resourceFirewallRuleDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "rule": { - Description: "firewall rule text", - Type: schema.TypeString, - Required: true, - }, - "enabled": { - Description: "Indicates if the rule is enabled", - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "description": { - Description: "Human-readable description of the rule", - Type: schema.TypeString, - Optional: true, - }, - "global": { - Description: "Indicates whether or not the rule is global", - Type: schema.TypeBool, - Computed: true, - }, - }, - } -} - -func resourceFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - rule, err := client.Firewall().CreateFirewallRule(context.Background(), &triton.CreateFirewallRuleInput{ - Rule: d.Get("rule").(string), - Enabled: d.Get("enabled").(bool), - Description: d.Get("description").(string), - }) - if err != nil { - return err - } - - d.SetId(rule.ID) - - return resourceFirewallRuleRead(d, meta) -} - -func resourceFirewallRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*triton.Client) - - return resourceExists(client.Firewall().GetFirewallRule(context.Background(), &triton.GetFirewallRuleInput{ - ID: d.Id(), - })) -} - -func 
resourceFirewallRuleRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - rule, err := client.Firewall().GetFirewallRule(context.Background(), &triton.GetFirewallRuleInput{ - ID: d.Id(), - }) - if err != nil { - return err - } - - d.SetId(rule.ID) - d.Set("rule", rule.Rule) - d.Set("enabled", rule.Enabled) - d.Set("global", rule.Global) - d.Set("description", rule.Description) - - return nil -} - -func resourceFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - _, err := client.Firewall().UpdateFirewallRule(context.Background(), &triton.UpdateFirewallRuleInput{ - ID: d.Id(), - Rule: d.Get("rule").(string), - Enabled: d.Get("enabled").(bool), - Description: d.Get("description").(string), - }) - if err != nil { - return err - } - - return resourceFirewallRuleRead(d, meta) -} - -func resourceFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - return client.Firewall().DeleteFirewallRule(context.Background(), &triton.DeleteFirewallRuleInput{ - ID: d.Id(), - }) -} diff --git a/builtin/providers/triton/resource_firewall_rule_test.go b/builtin/providers/triton/resource_firewall_rule_test.go deleted file mode 100644 index fbaa5d7ee..000000000 --- a/builtin/providers/triton/resource_firewall_rule_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" -) - -func TestAccTritonFirewallRule_basic(t *testing.T) { - config := testAccTritonFirewallRule_basic - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonFirewallRuleDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - 
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"), - ), - }, - }, - }) -} - -func TestAccTritonFirewallRule_update(t *testing.T) { - preConfig := testAccTritonFirewallRule_basic - postConfig := testAccTritonFirewallRule_update - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonFirewallRuleDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonFirewallRuleExists("triton_firewall_rule.test"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonFirewallRuleExists("triton_firewall_rule.test"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" BLOCK tcp PORT 80"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"), - ), - }, - }, - }) -} - -func TestAccTritonFirewallRule_enable(t *testing.T) { - preConfig := testAccTritonFirewallRule_basic - postConfig := testAccTritonFirewallRule_enable - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonFirewallRuleDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonFirewallRuleExists("triton_firewall_rule.test"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonFirewallRuleExists("triton_firewall_rule.test"), - 
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"), - resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"), - ), - }, - }, - }) -} - -func testCheckTritonFirewallRuleExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - conn := testAccProvider.Meta().(*triton.Client) - - resp, err := conn.Firewall().GetFirewallRule(context.Background(), &triton.GetFirewallRuleInput{ - ID: rs.Primary.ID, - }) - if err != nil && triton.IsResourceNotFound(err) { - return fmt.Errorf("Bad: Check Firewall Rule Exists: %s", err) - } else if err != nil { - return err - } - - if resp == nil { - return fmt.Errorf("Bad: Firewall Rule %q does not exist", rs.Primary.ID) - } - - return nil - } -} - -func testCheckTritonFirewallRuleDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*triton.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "triton_firewall_rule" { - continue - } - - resp, err := conn.Firewall().GetFirewallRule(context.Background(), &triton.GetFirewallRuleInput{ - ID: rs.Primary.ID, - }) - if triton.IsResourceNotFound(err) { - return nil - } else if err != nil { - return err - } - - if resp != nil { - return fmt.Errorf("Bad: Firewall Rule %q still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccTritonFirewallRule_basic = ` -resource "triton_firewall_rule" "test" { - rule = "FROM any TO tag \"www\" ALLOW tcp PORT 80" - enabled = false -} -` - -var testAccTritonFirewallRule_update = ` -resource "triton_firewall_rule" "test" { - rule = "FROM any TO tag \"www\" BLOCK tcp PORT 80" - enabled = true -} -` - -var testAccTritonFirewallRule_enable = ` -resource "triton_firewall_rule" "test" { - rule = "FROM any TO tag \"www\" ALLOW tcp PORT 80" - 
enabled = true -} -` diff --git a/builtin/providers/triton/resource_key.go b/builtin/providers/triton/resource_key.go deleted file mode 100644 index 21b13de8a..000000000 --- a/builtin/providers/triton/resource_key.go +++ /dev/null @@ -1,101 +0,0 @@ -package triton - -import ( - "context" - "errors" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/joyent/triton-go" -) - -func resourceKey() *schema.Resource { - return &schema.Resource{ - Create: resourceKeyCreate, - Exists: resourceKeyExists, - Read: resourceKeyRead, - Delete: resourceKeyDelete, - Timeouts: fastResourceTimeout, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Description: "Name of the key (generated from the key comment if not set)", - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "key": { - Description: "Content of public key from disk in OpenSSH format", - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - } -} - -func resourceKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - if keyName := d.Get("name").(string); keyName == "" { - parts := strings.SplitN(d.Get("key").(string), " ", 3) - if len(parts) == 3 { - d.Set("name", parts[2]) - } else { - return errors.New("No key name specified, and key material has no comment") - } - } - - _, err := client.Keys().CreateKey(context.Background(), &triton.CreateKeyInput{ - Name: d.Get("name").(string), - Key: d.Get("key").(string), - }) - if err != nil { - return err - } - - d.SetId(d.Get("name").(string)) - - return resourceKeyRead(d, meta) -} - -func resourceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*triton.Client) - - _, err := client.Keys().GetKey(context.Background(), &triton.GetKeyInput{ - KeyName: d.Id(), - }) - if err != nil { - return false, err - } - - return true, nil -} - -func 
resourceKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - key, err := client.Keys().GetKey(context.Background(), &triton.GetKeyInput{ - KeyName: d.Id(), - }) - if err != nil { - return err - } - - d.Set("name", key.Name) - d.Set("key", key.Key) - - return nil -} - -func resourceKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - return client.Keys().DeleteKey(context.Background(), &triton.DeleteKeyInput{ - KeyName: d.Id(), - }) -} diff --git a/builtin/providers/triton/resource_key_test.go b/builtin/providers/triton/resource_key_test.go deleted file mode 100644 index ae97eb25b..000000000 --- a/builtin/providers/triton/resource_key_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" -) - -func TestAccTritonKey_basic(t *testing.T) { - keyName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - publicKeyMaterial, _, err := acctest.RandSSHKeyPair("TestAccTritonKey_basic@terraform") - if err != nil { - t.Fatalf("Cannot generate test SSH key pair: %s", err) - } - config := testAccTritonKey_basic(keyName, publicKeyMaterial) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonKeyDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonKeyExists("triton_key.test"), - resource.TestCheckResourceAttr("triton_key.test", "name", keyName), - resource.TestCheckResourceAttr("triton_key.test", "key", publicKeyMaterial), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - ), - }, - }, - }) -} - -func TestAccTritonKey_noKeyName(t *testing.T) { - keyComment := 
fmt.Sprintf("acctest_%d@terraform", acctest.RandInt()) - keyMaterial, _, err := acctest.RandSSHKeyPair(keyComment) - if err != nil { - t.Fatalf("Cannot generate test SSH key pair: %s", err) - } - config := testAccTritonKey_noKeyName(keyMaterial) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonKeyDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonKeyExists("triton_key.test"), - resource.TestCheckResourceAttr("triton_key.test", "name", keyComment), - resource.TestCheckResourceAttr("triton_key.test", "key", keyMaterial), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - ), - }, - }, - }) -} - -func testCheckTritonKeyExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - conn := testAccProvider.Meta().(*triton.Client) - - key, err := conn.Keys().GetKey(context.Background(), &triton.GetKeyInput{ - KeyName: rs.Primary.ID, - }) - if err != nil { - return fmt.Errorf("Bad: Check Key Exists: %s", err) - } - - if key == nil { - return fmt.Errorf("Bad: Key %q does not exist", rs.Primary.ID) - } - - return nil - } -} - -func testCheckTritonKeyDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*triton.Client) - - return resource.Retry(1*time.Minute, func() *resource.RetryError { - for _, rs := range s.RootModule().Resources { - if rs.Type != "triton_key" { - continue - } - - key, err := conn.Keys().GetKey(context.Background(), &triton.GetKeyInput{ - KeyName: rs.Primary.ID, - }) - if err != nil { - return nil - } - - if key != nil { - return resource.RetryableError(fmt.Errorf("Bad: Key %q still exists", rs.Primary.ID)) - } - } - - return nil - }) -} - -var 
testAccTritonKey_basic = func(keyName string, keyMaterial string) string { - return fmt.Sprintf(`resource "triton_key" "test" { - name = "%s" - key = "%s" - } - `, keyName, keyMaterial) -} - -var testAccTritonKey_noKeyName = func(keyMaterial string) string { - return fmt.Sprintf(`resource "triton_key" "test" { - key = "%s" - } - `, keyMaterial) -} diff --git a/builtin/providers/triton/resource_machine.go b/builtin/providers/triton/resource_machine.go deleted file mode 100644 index 32505746d..000000000 --- a/builtin/providers/triton/resource_machine.go +++ /dev/null @@ -1,654 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "regexp" - "time" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/joyent/triton-go" -) - -var ( - machineStateRunning = "running" - machineStateDeleted = "deleted" - - machineStateChangeTimeout = 10 * time.Minute - - resourceMachineMetadataKeys = map[string]string{ - // semantics: "schema_name": "metadata_name" - "root_authorized_keys": "root_authorized_keys", - "user_script": "user-script", - "user_data": "user-data", - "administrator_pw": "administrator-pw", - "cloud_config": "cloud-init:user-data", - } -) - -func resourceMachine() *schema.Resource { - return &schema.Resource{ - Create: resourceMachineCreate, - Exists: resourceMachineExists, - Read: resourceMachineRead, - Update: resourceMachineUpdate, - Delete: resourceMachineDelete, - Timeouts: slowResourceTimeout, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Description: "Friendly name for machine", - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resourceMachineValidateName, - }, - "type": { - Description: "Machine type (smartmachine or virtualmachine)", - Type: schema.TypeString, - Computed: true, - }, - "dataset": { - Description: "Dataset URN 
with which the machine was provisioned", - Type: schema.TypeString, - Computed: true, - }, - "memory": { - Description: "Amount of memory allocated to the machine (in Mb)", - Type: schema.TypeInt, - Computed: true, - }, - "disk": { - Description: "Amount of disk allocated to the machine (in Gb)", - Type: schema.TypeInt, - Computed: true, - }, - "ips": { - Description: "IP addresses assigned to the machine", - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "tags": { - Description: "Machine tags", - Type: schema.TypeMap, - Optional: true, - }, - "created": { - Description: "When the machine was created", - Type: schema.TypeString, - Computed: true, - }, - "updated": { - Description: "When the machine was updated", - Type: schema.TypeString, - Computed: true, - }, - "package": { - Description: "The package for use for provisioning", - Type: schema.TypeString, - Required: true, - }, - "image": { - Description: "UUID of the image", - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "primaryip": { - Description: "Primary (public) IP address for the machine", - Type: schema.TypeString, - Computed: true, - }, - "nic": { - Description: "Network interface", - Type: schema.TypeSet, - Computed: true, - Optional: true, - Set: func(v interface{}) int { - m := v.(map[string]interface{}) - return hashcode.String(m["network"].(string)) - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip": { - Description: "NIC's IPv4 address", - Computed: true, - Type: schema.TypeString, - }, - "mac": { - Description: "NIC's MAC address", - Computed: true, - Type: schema.TypeString, - }, - "primary": { - Description: "Whether this is the machine's primary NIC", - Computed: true, - Type: schema.TypeBool, - }, - "netmask": { - Description: "IPv4 netmask", - Computed: true, - Type: schema.TypeString, - }, - "gateway": { - Description: "IPv4 gateway", - Computed: true, - Type: schema.TypeString, - }, - 
"network": { - Description: "ID of the network to which the NIC is attached", - Required: true, - Type: schema.TypeString, - }, - "state": { - Description: "Provisioning state of the NIC", - Computed: true, - Type: schema.TypeString, - }, - }, - }, - }, - "firewall_enabled": { - Description: "Whether to enable the firewall for this machine", - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "domain_names": { - Description: "List of domain names from Triton CNS", - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - // computed resources from metadata - "root_authorized_keys": { - Description: "Authorized keys for the root user on this machine", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "user_script": { - Description: "User script to run on boot (every boot on SmartMachines)", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "cloud_config": { - Description: "copied to machine on boot", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "user_data": { - Description: "Data copied to machine on boot", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "administrator_pw": { - Description: "Administrator's initial password (Windows only)", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - // deprecated fields - "networks": { - Description: "Desired network IDs", - Type: schema.TypeList, - Optional: true, - Computed: true, - Deprecated: "Networks is deprecated, please use `nic`", - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func resourceMachineCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - var networks []string - for _, network := range d.Get("networks").([]interface{}) { - networks = append(networks, network.(string)) - } - nics := d.Get("nic").(*schema.Set) - for _, nicI := range nics.List() { - nic := 
nicI.(map[string]interface{}) - networks = append(networks, nic["network"].(string)) - } - - metadata := map[string]string{} - for schemaName, metadataKey := range resourceMachineMetadataKeys { - if v, ok := d.GetOk(schemaName); ok { - metadata[metadataKey] = v.(string) - } - } - - tags := map[string]string{} - for k, v := range d.Get("tags").(map[string]interface{}) { - tags[k] = v.(string) - } - - machine, err := client.Machines().CreateMachine(context.Background(), &triton.CreateMachineInput{ - Name: d.Get("name").(string), - Package: d.Get("package").(string), - Image: d.Get("image").(string), - Networks: networks, - Metadata: metadata, - Tags: tags, - FirewallEnabled: d.Get("firewall_enabled").(bool), - }) - if err != nil { - return err - } - - d.SetId(machine.ID) - stateConf := &resource.StateChangeConf{ - Target: []string{fmt.Sprintf(machineStateRunning)}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return nil, "", err - } - - return getResp, getResp.State, nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - if err != nil { - return err - } - - // refresh state after it provisions - return resourceMachineRead(d, meta) -} - -func resourceMachineExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*triton.Client) - - return resourceExists(client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - })) -} - -func resourceMachineRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - machine, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return err - } - - nics, err := client.Machines().ListNICs(context.Background(), &triton.ListNICsInput{ - MachineID: 
d.Id(), - }) - if err != nil { - return err - } - - d.Set("name", machine.Name) - d.Set("type", machine.Type) - d.Set("state", machine.State) - d.Set("dataset", machine.Image) - d.Set("image", machine.Image) - d.Set("memory", machine.Memory) - d.Set("disk", machine.Disk) - d.Set("ips", machine.IPs) - d.Set("tags", machine.Tags) - d.Set("created", machine.Created) - d.Set("updated", machine.Updated) - d.Set("package", machine.Package) - d.Set("image", machine.Image) - d.Set("primaryip", machine.PrimaryIP) - d.Set("firewall_enabled", machine.FirewallEnabled) - d.Set("domain_names", machine.DomainNames) - - // create and update NICs - var ( - machineNICs []map[string]interface{} - networks []string - ) - for _, nic := range nics { - machineNICs = append( - machineNICs, - map[string]interface{}{ - "ip": nic.IP, - "mac": nic.MAC, - "primary": nic.Primary, - "netmask": nic.Netmask, - "gateway": nic.Gateway, - "state": nic.State, - "network": nic.Network, - }, - ) - networks = append(networks, nic.Network) - } - d.Set("nic", machineNICs) - d.Set("networks", networks) - - // computed attributes from metadata - for schemaName, metadataKey := range resourceMachineMetadataKeys { - d.Set(schemaName, machine.Metadata[metadataKey]) - } - - return nil -} - -func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - d.Partial(true) - - if d.HasChange("name") { - oldNameInterface, newNameInterface := d.GetChange("name") - oldName := oldNameInterface.(string) - newName := newNameInterface.(string) - - err := client.Machines().RenameMachine(context.Background(), &triton.RenameMachineInput{ - ID: d.Id(), - Name: newName, - }) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Pending: []string{oldName}, - Target: []string{newName}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err 
!= nil { - return nil, "", err - } - - return getResp, getResp.Name, nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetPartial("name") - } - - if d.HasChange("tags") { - tags := map[string]string{} - for k, v := range d.Get("tags").(map[string]interface{}) { - tags[k] = v.(string) - } - - var err error - if len(tags) == 0 { - err = client.Machines().DeleteMachineTags(context.Background(), &triton.DeleteMachineTagsInput{ - ID: d.Id(), - }) - } else { - err = client.Machines().ReplaceMachineTags(context.Background(), &triton.ReplaceMachineTagsInput{ - ID: d.Id(), - Tags: tags, - }) - } - if err != nil { - return err - } - - expectedTagsMD5 := stableMapHash(tags) - stateConf := &resource.StateChangeConf{ - Target: []string{expectedTagsMD5}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return nil, "", err - } - - return getResp, stableMapHash(getResp.Tags), nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetPartial("tags") - } - - if d.HasChange("package") { - newPackage := d.Get("package").(string) - - err := client.Machines().ResizeMachine(context.Background(), &triton.ResizeMachineInput{ - ID: d.Id(), - Package: newPackage, - }) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Target: []string{fmt.Sprintf("%s@%s", newPackage, "running")}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return nil, "", err - } - - return getResp, fmt.Sprintf("%s@%s", getResp.Package, getResp.State), nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * 
time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetPartial("package") - } - - if d.HasChange("firewall_enabled") { - enable := d.Get("firewall_enabled").(bool) - - var err error - if enable { - err = client.Machines().EnableMachineFirewall(context.Background(), &triton.EnableMachineFirewallInput{ - ID: d.Id(), - }) - } else { - err = client.Machines().DisableMachineFirewall(context.Background(), &triton.DisableMachineFirewallInput{ - ID: d.Id(), - }) - } - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Target: []string{fmt.Sprintf("%t", enable)}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return nil, "", err - } - - return getResp, fmt.Sprintf("%t", getResp.FirewallEnabled), nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - d.SetPartial("firewall_enabled") - } - - if d.HasChange("nic") { - o, n := d.GetChange("nic") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - oldNICs := o.(*schema.Set) - newNICs := n.(*schema.Set) - - for _, nicI := range newNICs.Difference(oldNICs).List() { - nic := nicI.(map[string]interface{}) - if _, err := client.Machines().AddNIC(context.Background(), &triton.AddNICInput{ - MachineID: d.Id(), - Network: nic["network"].(string), - }); err != nil { - return err - } - } - - for _, nicI := range oldNICs.Difference(newNICs).List() { - nic := nicI.(map[string]interface{}) - if err := client.Machines().RemoveNIC(context.Background(), &triton.RemoveNICInput{ - MachineID: d.Id(), - MAC: nic["mac"].(string), - }); err != nil { - return err - } - } - - d.SetPartial("nic") - } - - metadata := map[string]string{} - for schemaName, metadataKey := range resourceMachineMetadataKeys { - if 
d.HasChange(schemaName) { - metadata[metadataKey] = d.Get(schemaName).(string) - } - } - if len(metadata) > 0 { - if _, err := client.Machines().UpdateMachineMetadata(context.Background(), &triton.UpdateMachineMetadataInput{ - ID: d.Id(), - Metadata: metadata, - }); err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Target: []string{"converged"}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - return nil, "", err - } - - for k, v := range metadata { - if upstream, ok := getResp.Metadata[k]; !ok || v != upstream { - return getResp, "converging", nil - } - } - - return getResp, "converged", nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err := stateConf.WaitForState() - if err != nil { - return err - } - - for schemaName := range resourceMachineMetadataKeys { - if d.HasChange(schemaName) { - d.SetPartial(schemaName) - } - } - } - - d.Partial(false) - - return resourceMachineRead(d, meta) -} - -func resourceMachineDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - err := client.Machines().DeleteMachine(context.Background(), &triton.DeleteMachineInput{ - ID: d.Id(), - }) - if err != nil { - return err - } - - stateConf := &resource.StateChangeConf{ - Target: []string{machineStateDeleted}, - Refresh: func() (interface{}, string, error) { - getResp, err := client.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: d.Id(), - }) - if err != nil { - if triton.IsResourceNotFound(err) { - return getResp, "deleted", nil - } - return nil, "", err - } - - return getResp, getResp.State, nil - }, - Timeout: machineStateChangeTimeout, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return err - } - - return nil -} - -func resourceMachineValidateName(value interface{}, name 
string) (warnings []string, errors []error) { - warnings = []string{} - errors = []error{} - - r := regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9\_\.\-]*$`) - if !r.Match([]byte(value.(string))) { - errors = append(errors, fmt.Errorf(`"%s" is not a valid %s`, value.(string), name)) - } - - return warnings, errors -} diff --git a/builtin/providers/triton/resource_machine_test.go b/builtin/providers/triton/resource_machine_test.go deleted file mode 100644 index 8c49f739e..000000000 --- a/builtin/providers/triton/resource_machine_test.go +++ /dev/null @@ -1,465 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "log" - "regexp" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" -) - -func TestAccTritonMachine_basic(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - config := fmt.Sprintf(testAccTritonMachine_basic, machineName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - ), - }, - }, - }) -} - -func TestAccTritonMachine_dns(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - dns_output := fmt.Sprintf(testAccTritonMachine_dns, machineName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: dns_output, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - func(state *terraform.State) error { - 
time.Sleep(10 * time.Second) - log.Printf("[DEBUG] %s", spew.Sdump(state)) - return nil - }, - resource.TestMatchOutput("domain_names", regexp.MustCompile(".*acctest-.*")), - ), - }, - }, - }) -} - -func TestAccTritonMachine_nic(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - config := testAccTritonMachine_singleNIC(machineName, acctest.RandIntRange(1024, 2048), acctest.RandIntRange(0, 256)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - testCheckTritonMachineHasFabric("triton_machine.test", "triton_fabric.test"), - ), - }, - }, - }) -} - -func TestAccTritonMachine_addNIC(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - vlanNumber := acctest.RandIntRange(1024, 2048) - subnetNumber := acctest.RandIntRange(0, 256) - - singleNICConfig := testAccTritonMachine_singleNIC(machineName, vlanNumber, subnetNumber) - dualNICConfig := testAccTritonMachine_dualNIC(machineName, vlanNumber, subnetNumber) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: singleNICConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - func(*terraform.State) error { - time.Sleep(10 * time.Second) - return nil - }, - ), - }, - { - Config: dualNICConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - testCheckTritonMachineHasFabric("triton_machine.test", "triton_fabric.test_add"), - ), - }, - }, - }) -} - -func testCheckTritonMachineExists(name string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - conn := testAccProvider.Meta().(*triton.Client) - - machine, err := conn.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: rs.Primary.ID, - }) - if err != nil { - return fmt.Errorf("Bad: Check Machine Exists: %s", err) - } - - if machine == nil { - return fmt.Errorf("Bad: Machine %q does not exist", rs.Primary.ID) - } - - return nil - } -} - -func testCheckTritonMachineHasFabric(name, fabricName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - machine, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - network, ok := s.RootModule().Resources[fabricName] - if !ok { - return fmt.Errorf("Not found: %s", fabricName) - } - conn := testAccProvider.Meta().(*triton.Client) - - nics, err := conn.Machines().ListNICs(context.Background(), &triton.ListNICsInput{ - MachineID: machine.Primary.ID, - }) - if err != nil { - return fmt.Errorf("Bad: Check NICs Exist: %s", err) - } - - for _, nic := range nics { - if nic.Network == network.Primary.ID { - return nil - } - } - - return fmt.Errorf("Bad: Machine %q does not have Fabric %q", machine.Primary.ID, network.Primary.ID) - } -} - -func testCheckTritonMachineDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*triton.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "triton_machine" { - continue - } - - resp, err := conn.Machines().GetMachine(context.Background(), &triton.GetMachineInput{ - ID: rs.Primary.ID, - }) - if err != nil { - if triton.IsResourceNotFound(err) { - return nil - } - return err - } - - if resp != nil && resp.State != machineStateDeleted { - return fmt.Errorf("Bad: Machine 
%q still exists", rs.Primary.ID) - } - } - - return nil -} - -func TestAccTritonMachine_firewall(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - disabled_config := fmt.Sprintf(testAccTritonMachine_firewall_0, machineName) - enabled_config := fmt.Sprintf(testAccTritonMachine_firewall_1, machineName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: enabled_config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", "firewall_enabled", "true"), - ), - }, - { - Config: disabled_config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", "firewall_enabled", "false"), - ), - }, - { - Config: enabled_config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", "firewall_enabled", "true"), - ), - }, - }, - }) -} - -func TestAccTritonMachine_metadata(t *testing.T) { - machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) - basic := fmt.Sprintf(testAccTritonMachine_metadata_1, machineName) - add_metadata := fmt.Sprintf(testAccTritonMachine_metadata_1, machineName) - add_metadata_2 := fmt.Sprintf(testAccTritonMachine_metadata_2, machineName) - add_metadata_3 := fmt.Sprintf(testAccTritonMachine_metadata_3, machineName) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonMachineDestroy, - Steps: []resource.TestStep{ - { - Config: basic, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - ), - }, - { - Config: add_metadata, - Check: 
resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", "user_data", "hello"), - ), - }, - { - Config: add_metadata_2, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", - "tags.triton.cns.services", "test-cns-service"), - ), - }, - { - Config: add_metadata_3, - Check: resource.ComposeTestCheckFunc( - testCheckTritonMachineExists("triton_machine.test"), - resource.TestCheckResourceAttr( - "triton_machine.test", - "tags.triton.cns.services", "test-cns-service"), - ), - }, - }, - }) -} - -var testAccTritonMachine_basic = ` -resource "triton_machine" "test" { - name = "%s" - package = "g4-general-4G" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - tags = { - test = "hello!" - } -} -` - -var testAccTritonMachine_firewall_0 = ` -resource "triton_machine" "test" { - name = "%s" - package = "g4-general-4G" - image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" - - firewall_enabled = 0 -} -` -var testAccTritonMachine_firewall_1 = ` -resource "triton_machine" "test" { - name = "%s" - package = "g4-general-4G" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - firewall_enabled = 1 -} -` - -var testAccTritonMachine_metadata_1 = ` -resource "triton_machine" "test" { - name = "%s" - package = "g4-general-4G" - image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" - - user_data = "hello" - - tags = { - test = "hello!" - } -} -` -var testAccTritonMachine_metadata_2 = ` -variable "tags" { - default = { - test = "hello!" 
- triton.cns.services = "test-cns-service" - } -} -resource "triton_machine" "test" { - name = "%s" - package = "g4-highcpu-128M" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - user_data = "hello" - - tags = "${var.tags}" -} -` -var testAccTritonMachine_metadata_3 = ` -resource "triton_machine" "test" { - name = "%s" - package = "g4-highcpu-128M" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - user_data = "hello" - - tags = { - test = "hello!" - triton.cns.services = "test-cns-service" - } -} -` -var testAccTritonMachine_singleNIC = func(name string, vlanNumber int, subnetNumber int) string { - return fmt.Sprintf(`resource "triton_vlan" "test" { - vlan_id = %d - name = "%s-vlan" - description = "test vlan" -} - -resource "triton_fabric" "test" { - name = "%s-network" - description = "test network" - vlan_id = "${triton_vlan.test.vlan_id}" - - subnet = "10.%d.0.0/24" - gateway = "10.%d.0.1" - provision_start_ip = "10.%d.0.10" - provision_end_ip = "10.%d.0.250" - - resolvers = ["8.8.8.8", "8.8.4.4"] -} - -resource "triton_machine" "test" { - name = "%s-instance" - package = "g4-highcpu-128M" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - tags = { - test = "Test" - } - - nic { - network = "${triton_fabric.test.id}" - } -}`, vlanNumber, name, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name) -} - -var testAccTritonMachine_dualNIC = func(name string, vlanNumber, subnetNumber int) string { - return fmt.Sprintf(`resource "triton_vlan" "test" { - vlan_id = %d - name = "%s-vlan" - description = "test vlan" -} - -resource "triton_fabric" "test" { - name = "%s-network" - description = "test network" - vlan_id = "${triton_vlan.test.vlan_id}" - - subnet = "10.%d.0.0/24" - gateway = "10.%d.0.1" - provision_start_ip = "10.%d.0.10" - provision_end_ip = "10.%d.0.250" - - resolvers = ["8.8.8.8", "8.8.4.4"] -} - -resource "triton_fabric" "test_add" { - name = "%s-network-2" - description = "test network 2" - vlan_id = "${triton_vlan.test.vlan_id}" - 
- subnet = "172.23.%d.0/24" - gateway = "172.23.%d.1" - provision_start_ip = "172.23.%d.10" - provision_end_ip = "172.23.%d.250" - - resolvers = ["8.8.8.8", "8.8.4.4"] -} - -resource "triton_machine" "test" { - name = "%s-instance" - package = "g4-highcpu-128M" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" - - tags = { - test = "Test" - } - - nic { - network = "${triton_fabric.test.id}" - } - nic { - network = "${triton_fabric.test_add.id}" - } -}`, vlanNumber, name, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name, subnetNumber, subnetNumber, subnetNumber, subnetNumber, name) -} - -var testAccTritonMachine_dns = ` -provider "triton" { -} - -resource "triton_machine" "test" { - name = "%s" - package = "g4-highcpu-128M" - image = "fb5fe970-e6e4-11e6-9820-4b51be190db9" -} - -output "domain_names" { - value = "${join(", ", triton_machine.test.domain_names)}" -} -` diff --git a/builtin/providers/triton/resource_vlan.go b/builtin/providers/triton/resource_vlan.go deleted file mode 100644 index 6cf246bc2..000000000 --- a/builtin/providers/triton/resource_vlan.go +++ /dev/null @@ -1,139 +0,0 @@ -package triton - -import ( - "context" - "errors" - "strconv" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/joyent/triton-go" -) - -func resourceVLAN() *schema.Resource { - return &schema.Resource{ - Create: resourceVLANCreate, - Exists: resourceVLANExists, - Read: resourceVLANRead, - Update: resourceVLANUpdate, - Delete: resourceVLANDelete, - Timeouts: fastResourceTimeout, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "vlan_id": { - Description: "Number between 0-4095 indicating VLAN ID", - Required: true, - ForceNew: true, - Type: schema.TypeInt, - ValidateFunc: func(val interface{}, field string) (warn []string, err []error) { - value := val.(int) - if value < 0 || value > 4095 { - err = append(err, errors.New("vlan_id must be between 0 and 4095")) - } - return 
- }, - }, - "name": { - Description: "Unique name to identify VLAN", - Required: true, - Type: schema.TypeString, - }, - "description": { - Description: "Description of the VLAN", - Optional: true, - Type: schema.TypeString, - }, - }, - } -} - -func resourceVLANCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - vlan, err := client.Fabrics().CreateFabricVLAN(context.Background(), &triton.CreateFabricVLANInput{ - ID: d.Get("vlan_id").(int), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - }) - if err != nil { - return err - } - - d.SetId(strconv.Itoa(vlan.ID)) - return resourceVLANRead(d, meta) -} - -func resourceVLANExists(d *schema.ResourceData, meta interface{}) (bool, error) { - client := meta.(*triton.Client) - - id, err := resourceVLANIDInt(d.Id()) - if err != nil { - return false, err - } - - return resourceExists(client.Fabrics().GetFabricVLAN(context.Background(), &triton.GetFabricVLANInput{ - ID: id, - })) -} - -func resourceVLANRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - id, err := resourceVLANIDInt(d.Id()) - if err != nil { - return err - } - - vlan, err := client.Fabrics().GetFabricVLAN(context.Background(), &triton.GetFabricVLANInput{ - ID: id, - }) - if err != nil { - return err - } - - d.Set("vlan_id", vlan.ID) - d.Set("name", vlan.Name) - d.Set("description", vlan.Description) - - return nil -} - -func resourceVLANUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*triton.Client) - - vlan, err := client.Fabrics().UpdateFabricVLAN(context.Background(), &triton.UpdateFabricVLANInput{ - ID: d.Get("vlan_id").(int), - Name: d.Get("name").(string), - Description: d.Get("description").(string), - }) - if err != nil { - return err - } - - d.SetId(strconv.Itoa(vlan.ID)) - return resourceVLANRead(d, meta) -} - -func resourceVLANDelete(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*triton.Client) - - id, err := resourceVLANIDInt(d.Id()) - if err != nil { - return err - } - - return client.Fabrics().DeleteFabricVLAN(context.Background(), &triton.DeleteFabricVLANInput{ - ID: id, - }) -} - -func resourceVLANIDInt(id string) (int, error) { - result, err := strconv.ParseInt(id, 10, 32) - if err != nil { - return -1, err - } - - return int(result), nil -} diff --git a/builtin/providers/triton/resource_vlan_test.go b/builtin/providers/triton/resource_vlan_test.go deleted file mode 100644 index 1c2af5865..000000000 --- a/builtin/providers/triton/resource_vlan_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package triton - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/joyent/triton-go" -) - -func TestAccTritonVLAN_basic(t *testing.T) { - config := testAccTritonVLAN_basic(acctest.RandIntRange(3, 2048)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonVLANDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testCheckTritonVLANExists("triton_vlan.test"), - ), - }, - }, - }) -} - -func TestAccTritonVLAN_update(t *testing.T) { - vlanNumber := acctest.RandIntRange(3, 2048) - preConfig := testAccTritonVLAN_basic(vlanNumber) - postConfig := testAccTritonVLAN_update(vlanNumber) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckTritonVLANDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonVLANExists("triton_vlan.test"), - resource.TestCheckResourceAttr("triton_vlan.test", "vlan_id", strconv.Itoa(vlanNumber)), - resource.TestCheckResourceAttr("triton_vlan.test", "name", "test-vlan"), - 
resource.TestCheckResourceAttr("triton_vlan.test", "description", "test vlan"), - ), - }, - - { - Config: postConfig, - Check: resource.ComposeTestCheckFunc( - testCheckTritonVLANExists("triton_vlan.test"), - resource.TestCheckResourceAttr("triton_vlan.test", "vlan_id", strconv.Itoa(vlanNumber)), - resource.TestCheckResourceAttr("triton_vlan.test", "name", "test-vlan-2"), - resource.TestCheckResourceAttr("triton_vlan.test", "description", "test vlan 2"), - ), - }, - }, - }) -} - -func testCheckTritonVLANExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Ensure we have enough information in state to look up in API - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - conn := testAccProvider.Meta().(*triton.Client) - - id, err := resourceVLANIDInt(rs.Primary.ID) - if err != nil { - return err - } - - resp, err := conn.Fabrics().GetFabricVLAN(context.Background(), &triton.GetFabricVLANInput{ - ID: id, - }) - if err != nil && triton.IsResourceNotFound(err) { - return fmt.Errorf("Bad: Check VLAN Exists: %s", err) - } else if err != nil { - return err - } - - if resp == nil { - return fmt.Errorf("Bad: VLAN %q does not exist", rs.Primary.ID) - } - - return nil - } -} - -func testCheckTritonVLANDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*triton.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "triton_vlan" { - continue - } - - id, err := resourceVLANIDInt(rs.Primary.ID) - if err != nil { - return err - } - - resp, err := conn.Fabrics().GetFabricVLAN(context.Background(), &triton.GetFabricVLANInput{ - ID: id, - }) - if triton.IsResourceNotFound(err) { - return nil - } else if err != nil { - return err - } - - if resp != nil { - return fmt.Errorf("Bad: VLAN %q still exists", rs.Primary.ID) - } - } - - return nil -} - -var testAccTritonVLAN_basic = func(vlanID int) string { - return fmt.Sprintf(`resource "triton_vlan" "test" { - vlan_id 
= %d - name = "test-vlan" - description = "test vlan" - }`, vlanID) -} - -var testAccTritonVLAN_update = func(vlanID int) string { - return fmt.Sprintf(`resource "triton_vlan" "test" { - vlan_id = %d - name = "test-vlan-2" - description = "test vlan 2" - }`, vlanID) -} diff --git a/builtin/providers/ultradns/common.go b/builtin/providers/ultradns/common.go deleted file mode 100644 index 1512e7fb0..000000000 --- a/builtin/providers/ultradns/common.go +++ /dev/null @@ -1,198 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -// Conversion helper functions -type rRSetResource struct { - OwnerName string - RRType string - RData []string - TTL int - Profile udnssdk.RawProfile - Zone string -} - -// profileAttrSchemaMap is a map from each ultradns_tcpool attribute name onto its respective ProfileSchema URI -var profileAttrSchemaMap = map[string]udnssdk.ProfileSchema{ - "dirpool_profile": udnssdk.DirPoolSchema, - "rdpool_profile": udnssdk.RDPoolSchema, - "sbpool_profile": udnssdk.SBPoolSchema, - "tcpool_profile": udnssdk.TCPoolSchema, -} - -func (r rRSetResource) RRSetKey() udnssdk.RRSetKey { - return udnssdk.RRSetKey{ - Zone: r.Zone, - Type: r.RRType, - Name: r.OwnerName, - } -} - -func (r rRSetResource) RRSet() udnssdk.RRSet { - return udnssdk.RRSet{ - OwnerName: r.OwnerName, - RRType: r.RRType, - RData: r.RData, - TTL: r.TTL, - Profile: r.Profile, - } -} - -func (r rRSetResource) ID() string { - return fmt.Sprintf("%s.%s", r.OwnerName, r.Zone) -} - -func unzipRdataHosts(configured []interface{}) []string { - hs := make([]string, 0, len(configured)) - for _, rRaw := range configured { - data := rRaw.(map[string]interface{}) - h := data["host"].(string) - hs = append(hs, h) - } - return hs -} - -func schemaPingProbe() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "packets": &schema.Schema{ - 
Type: schema.TypeInt, - Optional: true, - Default: 3, - }, - "packet_size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 56, - }, - "limit": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: hashLimits, - Elem: resourceProbeLimits(), - }, - }, - } -} - -func resourceProbeLimits() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "warning": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "critical": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - "fail": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - }, - } -} - -type probeResource struct { - Name string - Zone string - ID string - - Agents []string - Interval string - PoolRecord string - Threshold int - Type udnssdk.ProbeType - - Details *udnssdk.ProbeDetailsDTO -} - -func (p probeResource) RRSetKey() udnssdk.RRSetKey { - return p.Key().RRSetKey() -} - -func (p probeResource) ProbeInfoDTO() udnssdk.ProbeInfoDTO { - return udnssdk.ProbeInfoDTO{ - ID: p.ID, - PoolRecord: p.PoolRecord, - ProbeType: p.Type, - Interval: p.Interval, - Agents: p.Agents, - Threshold: p.Threshold, - Details: p.Details, - } -} - -func (p probeResource) Key() udnssdk.ProbeKey { - return udnssdk.ProbeKey{ - Zone: p.Zone, - Name: p.Name, - ID: p.ID, - } -} - -func mapFromLimit(name string, l udnssdk.ProbeDetailsLimitDTO) map[string]interface{} { - return map[string]interface{}{ - "name": name, - "warning": l.Warning, - "critical": l.Critical, - "fail": l.Fail, - } -} - -// hashLimits generates a hashcode for a limits block -func hashLimits(v interface{}) int { - m := v.(map[string]interface{}) - h := hashcode.String(m["name"].(string)) - log.Printf("[INFO] hashLimits(): %v -> %v", m["name"].(string), h) - return h -} - -// makeSetFromLimits encodes an array of Limits into a -// *schema.Set in the appropriate structure for the schema -func 
makeSetFromLimits(ls map[string]udnssdk.ProbeDetailsLimitDTO) *schema.Set { - s := &schema.Set{F: hashLimits} - for name, l := range ls { - s.Add(mapFromLimit(name, l)) - } - return s -} - -func makeProbeDetailsLimit(configured interface{}) *udnssdk.ProbeDetailsLimitDTO { - l := configured.(map[string]interface{}) - return &udnssdk.ProbeDetailsLimitDTO{ - Warning: l["warning"].(int), - Critical: l["critical"].(int), - Fail: l["fail"].(int), - } -} - -// makeSetFromStrings encodes an []string into a -// *schema.Set in the appropriate structure for the schema -func makeSetFromStrings(ss []string) *schema.Set { - st := &schema.Set{F: schema.HashString} - for _, s := range ss { - st.Add(s) - } - return st -} - -// hashRdata generates a hashcode for an Rdata block -func hashRdatas(v interface{}) int { - m := v.(map[string]interface{}) - h := hashcode.String(m["host"].(string)) - log.Printf("[DEBUG] hashRdatas(): %v -> %v", m["host"].(string), h) - return h -} diff --git a/builtin/providers/ultradns/common_test.go b/builtin/providers/ultradns/common_test.go deleted file mode 100644 index 05823fdcd..000000000 --- a/builtin/providers/ultradns/common_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package ultradns - -import ( - "fmt" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func testAccRdpoolCheckDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*udnssdk.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ultradns_rdpool" { - continue - } - - k := udnssdk.RRSetKey{ - Zone: rs.Primary.Attributes["zone"], - Name: rs.Primary.Attributes["name"], - Type: rs.Primary.Attributes["type"], - } - - _, err := client.RRSets.Select(k) - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccTcpoolCheckDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*udnssdk.Client) - - for _, rs := range 
s.RootModule().Resources { - if rs.Type != "ultradns_tcpool" { - continue - } - - k := udnssdk.RRSetKey{ - Zone: rs.Primary.Attributes["zone"], - Name: rs.Primary.Attributes["name"], - Type: rs.Primary.Attributes["type"], - } - - _, err := client.RRSets.Select(k) - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckUltradnsRecordExists(n string, record *udnssdk.RRSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*udnssdk.Client) - k := udnssdk.RRSetKey{ - Zone: rs.Primary.Attributes["zone"], - Name: rs.Primary.Attributes["name"], - Type: rs.Primary.Attributes["type"], - } - - foundRecord, err := client.RRSets.Select(k) - - if err != nil { - return err - } - - if foundRecord[0].OwnerName != rs.Primary.Attributes["hostname"] { - return fmt.Errorf("Record not found: %+v,\n %+v\n", foundRecord, rs.Primary.Attributes) - } - - *record = foundRecord[0] - - return nil - } -} diff --git a/builtin/providers/ultradns/config.go b/builtin/providers/ultradns/config.go deleted file mode 100644 index d3b22d618..000000000 --- a/builtin/providers/ultradns/config.go +++ /dev/null @@ -1,28 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - - "github.com/Ensighten/udnssdk" -) - -// Config collects the connection service-endpoint and credentials -type Config struct { - Username string - Password string - BaseURL string -} - -// Client returns a new client for accessing UltraDNS. 
-func (c *Config) Client() (*udnssdk.Client, error) { - client, err := udnssdk.NewClient(c.Username, c.Password, c.BaseURL) - - if err != nil { - return nil, fmt.Errorf("Error setting up client: %s", err) - } - - log.Printf("[INFO] UltraDNS Client configured for user: %s", c.Username) - - return client, nil -} diff --git a/builtin/providers/ultradns/provider.go b/builtin/providers/ultradns/provider.go deleted file mode 100644 index 70f0dbed7..000000000 --- a/builtin/providers/ultradns/provider.go +++ /dev/null @@ -1,56 +0,0 @@ -package ultradns - -import ( - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ULTRADNS_USERNAME", nil), - Description: "UltraDNS Username.", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("ULTRADNS_PASSWORD", nil), - Description: "UltraDNS User Password", - }, - "baseurl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("ULTRADNS_BASEURL", nil), - Default: udnssdk.DefaultLiveBaseURL, - Description: "UltraDNS Base URL", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "ultradns_dirpool": resourceUltradnsDirpool(), - "ultradns_probe_http": resourceUltradnsProbeHTTP(), - "ultradns_probe_ping": resourceUltradnsProbePing(), - "ultradns_record": resourceUltradnsRecord(), - "ultradns_tcpool": resourceUltradnsTcpool(), - "ultradns_rdpool": resourceUltradnsRdpool(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - config := Config{ - Username: d.Get("username").(string), - Password: 
d.Get("password").(string), - BaseURL: d.Get("baseurl").(string), - } - - return config.Client() -} diff --git a/builtin/providers/ultradns/provider_test.go b/builtin/providers/ultradns/provider_test.go deleted file mode 100644 index 8b5f92904..000000000 --- a/builtin/providers/ultradns/provider_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package ultradns - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "ultradns": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("ULTRADNS_USERNAME"); v == "" { - t.Fatal("ULTRADNS_USERNAME must be set for acceptance tests") - } - - if v := os.Getenv("ULTRADNS_PASSWORD"); v == "" { - t.Fatal("ULTRADNS_PASSWORD must be set for acceptance tests") - } - - if v := os.Getenv("ULTRADNS_DOMAIN"); v == "" { - t.Fatal("ULTRADNS_DOMAIN must be set for acceptance tests. 
The domain is used to create and destroy record against.") - } -} diff --git a/builtin/providers/ultradns/resource_ultradns_dirpool.go b/builtin/providers/ultradns/resource_ultradns_dirpool.go deleted file mode 100644 index fb8df98a9..000000000 --- a/builtin/providers/ultradns/resource_ultradns_dirpool.go +++ /dev/null @@ -1,627 +0,0 @@ -package ultradns - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "strings" - - "github.com/Ensighten/udnssdk" - "github.com/fatih/structs" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/mitchellh/mapstructure" -) - -func resourceUltradnsDirpool() *schema.Resource { - return &schema.Resource{ - Create: resourceUltradnsDirpoolCreate, - Read: resourceUltradnsDirpoolRead, - Update: resourceUltradnsDirpoolUpdate, - Delete: resourceUltradnsDirpoolDelete, - - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "'description' too long, must be less than 255 characters")) - } - return - }, - }, - "rdata": &schema.Schema{ - // UltraDNS API does not respect rdata ordering - Type: schema.TypeSet, - Set: hashRdatas, - Required: true, - // Valid: len(rdataInfo) == len(rdata) - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "all_non_configured": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "geo_info": &schema.Schema{ 
- Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "is_account_level": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "codes": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - "ip_info": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "is_account_level": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "ips": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: hashIPInfoIPs, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"cidr", "address"}, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"cidr", "address"}, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"start", "end", "address"}, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"start", "end", "cidr"}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - // Optional - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 3600, - }, - "conflict_resolve": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "GEO", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "GEO" && value != "IP" { - errors = append(errors, fmt.Errorf( - "only 'GEO', and 'IP' are supported values for 'conflict_resolve'")) - } - return - }, - }, - "no_response": &schema.Schema{ - Type: 
schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "all_non_configured": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "geo_info": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "is_account_level": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "codes": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - "ip_info": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "is_account_level": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "ips": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: hashIPInfoIPs, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"cidr", "address"}, - }, - "end": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"cidr", "address"}, - }, - "cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"start", "end", "address"}, - }, - "address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // ConflictsWith: []string{"start", "end", "cidr"}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - // Computed - "hostname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// CRUD Operations - -func resourceUltradnsDirpoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := 
makeDirpoolRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_dirpool create: %#v", r) - _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) - if err != nil { - // FIXME: remove the json from log - marshalled, _ := json.Marshal(r) - ms := string(marshalled) - return fmt.Errorf("create failed: %#v [[[[ %v ]]]] -> %v", r, ms, err) - } - - d.SetId(r.ID()) - log.Printf("[INFO] ultradns_dirpool.id: %v", d.Id()) - - return resourceUltradnsDirpoolRead(d, meta) -} - -func resourceUltradnsDirpoolRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - rr, err := makeDirpoolRRSetResource(d) - if err != nil { - return err - } - - rrsets, err := client.RRSets.Select(rr.RRSetKey()) - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, resps := range uderr.Responses { - // 70002 means Records Not Found - if resps.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("resource not found: %v", err) - } - } - return fmt.Errorf("resource not found: %v", err) - } - - r := rrsets[0] - - return populateResourceFromDirpool(d, &r) -} - -func resourceUltradnsDirpoolUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeDirpoolRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_dirpool update: %+v", r) - _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("resource update failed: %v", err) - } - - return resourceUltradnsDirpoolRead(d, meta) -} - -func resourceUltradnsDirpoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeDirpoolRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_dirpool delete: %+v", r) - _, err = client.RRSets.Delete(r.RRSetKey()) - if err != nil { - return fmt.Errorf("resource delete failed: %v", err) - } - - return nil -} - -// 
Resource Helpers - -// makeDirpoolRRSetResource converts ResourceData into an rRSetResource -// ready for use in any CRUD operation -func makeDirpoolRRSetResource(d *schema.ResourceData) (rRSetResource, error) { - rDataRaw := d.Get("rdata").(*schema.Set).List() - res := rRSetResource{ - RRType: d.Get("type").(string), - Zone: d.Get("zone").(string), - OwnerName: d.Get("name").(string), - TTL: d.Get("ttl").(int), - RData: unzipRdataHosts(rDataRaw), - } - - profile := udnssdk.DirPoolProfile{ - Context: udnssdk.DirPoolSchema, - Description: d.Get("description").(string), - ConflictResolve: d.Get("conflict_resolve").(string), - } - - ri, err := makeDirpoolRdataInfos(rDataRaw) - if err != nil { - return res, err - } - profile.RDataInfo = ri - - noResponseRaw := d.Get("no_response").([]interface{}) - if len(noResponseRaw) >= 1 { - if len(noResponseRaw) > 1 { - return res, fmt.Errorf("no_response: only 0 or 1 blocks alowed, got: %#v", len(noResponseRaw)) - } - nr, err := makeDirpoolRdataInfo(noResponseRaw[0]) - if err != nil { - return res, err - } - profile.NoResponse = nr - } - - res.Profile = profile.RawProfile() - - return res, nil -} - -// populateResourceFromDirpool takes an RRSet and populates the ResourceData -func populateResourceFromDirpool(d *schema.ResourceData, r *udnssdk.RRSet) error { - // TODO: fix from tcpool to dirpool - zone := d.Get("zone") - // ttl - d.Set("ttl", r.TTL) - // hostname - if r.OwnerName == "" { - d.Set("hostname", zone) - } else { - if strings.HasSuffix(r.OwnerName, ".") { - d.Set("hostname", r.OwnerName) - } else { - d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) - } - } - - // And now... the Profile! 
- if r.Profile == nil { - return fmt.Errorf("RRSet.profile missing: invalid DirPool schema in: %#v", r) - } - p, err := r.Profile.DirPoolProfile() - if err != nil { - return fmt.Errorf("RRSet.profile could not be unmarshalled: %v\n", err) - } - - // Set simple values - d.Set("description", p.Description) - - // Ensure default looks like "GEO", even when nothing is returned - if p.ConflictResolve == "" { - d.Set("conflict_resolve", "GEO") - } else { - d.Set("conflict_resolve", p.ConflictResolve) - } - - rd := makeSetFromDirpoolRdata(r.RData, p.RDataInfo) - err = d.Set("rdata", rd) - if err != nil { - return fmt.Errorf("rdata set failed: %v, from %#v", err, rd) - } - return nil -} - -// makeDirpoolRdataInfos converts []map[string]interface{} from rdata -// blocks into []DPRDataInfo -func makeDirpoolRdataInfos(configured []interface{}) ([]udnssdk.DPRDataInfo, error) { - res := make([]udnssdk.DPRDataInfo, 0, len(configured)) - for _, r := range configured { - ri, err := makeDirpoolRdataInfo(r) - if err != nil { - return res, err - } - res = append(res, ri) - } - return res, nil -} - -// makeDirpoolRdataInfo converts a map[string]interface{} from -// an rdata or no_response block into an DPRDataInfo -func makeDirpoolRdataInfo(configured interface{}) (udnssdk.DPRDataInfo, error) { - data := configured.(map[string]interface{}) - res := udnssdk.DPRDataInfo{ - AllNonConfigured: data["all_non_configured"].(bool), - } - // IPInfo - ipInfo := data["ip_info"].([]interface{}) - if len(ipInfo) >= 1 { - if len(ipInfo) > 1 { - return res, fmt.Errorf("ip_info: only 0 or 1 blocks alowed, got: %#v", len(ipInfo)) - } - ii, err := makeIPInfo(ipInfo[0]) - if err != nil { - return res, fmt.Errorf("%v ip_info: %#v", err, ii) - } - res.IPInfo = &ii - } - // GeoInfo - geoInfo := data["geo_info"].([]interface{}) - if len(geoInfo) >= 1 { - if len(geoInfo) > 1 { - return res, fmt.Errorf("geo_info: only 0 or 1 blocks alowed, got: %#v", len(geoInfo)) - } - gi, err := makeGeoInfo(geoInfo[0]) - if 
err != nil { - return res, fmt.Errorf("%v geo_info: %#v GeoInfo: %#v", err, geoInfo[0], gi) - } - res.GeoInfo = &gi - } - return res, nil -} - -// makeGeoInfo converts a map[string]interface{} from an geo_info block -// into an GeoInfo -func makeGeoInfo(configured interface{}) (udnssdk.GeoInfo, error) { - var res udnssdk.GeoInfo - c := configured.(map[string]interface{}) - err := mapDecode(c, &res) - if err != nil { - return res, err - } - - rawCodes := c["codes"].(*schema.Set).List() - res.Codes = make([]string, 0, len(rawCodes)) - for _, i := range rawCodes { - res.Codes = append(res.Codes, i.(string)) - } - return res, err -} - -// makeIPInfo converts a map[string]interface{} from an ip_info block -// into an IPInfo -func makeIPInfo(configured interface{}) (udnssdk.IPInfo, error) { - var res udnssdk.IPInfo - c := configured.(map[string]interface{}) - err := mapDecode(c, &res) - if err != nil { - return res, err - } - - rawIps := c["ips"].(*schema.Set).List() - res.Ips = make([]udnssdk.IPAddrDTO, 0, len(rawIps)) - for _, rawIa := range rawIps { - var i udnssdk.IPAddrDTO - err = mapDecode(rawIa, &i) - if err != nil { - return res, err - } - res.Ips = append(res.Ips, i) - } - return res, nil -} - -// collate and zip RData and RDataInfo into []map[string]interface{} -func zipDirpoolRData(rds []string, rdis []udnssdk.DPRDataInfo) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(rds)) - for i, rdi := range rdis { - r := map[string]interface{}{ - "host": rds[i], - "all_non_configured": rdi.AllNonConfigured, - "ip_info": mapFromIPInfos(rdi.IPInfo), - "geo_info": mapFromGeoInfos(rdi.GeoInfo), - } - result = append(result, r) - } - return result -} - -// makeSetFromDirpoolRdata encodes an array of Rdata into a -// *schema.Set in the appropriate structure for the schema -func makeSetFromDirpoolRdata(rds []string, rdis []udnssdk.DPRDataInfo) *schema.Set { - s := &schema.Set{F: hashRdatas} - rs := zipDirpoolRData(rds, rdis) - for _, r := range rs 
{ - s.Add(r) - } - return s -} - -// mapFromIPInfos encodes 0 or 1 IPInfos into a []map[string]interface{} -// in the appropriate structure for the schema -func mapFromIPInfos(rdi *udnssdk.IPInfo) []map[string]interface{} { - res := make([]map[string]interface{}, 0, 1) - if rdi != nil { - m := map[string]interface{}{ - "name": rdi.Name, - "is_account_level": rdi.IsAccountLevel, - "ips": makeSetFromIPAddrDTOs(rdi.Ips), - } - res = append(res, m) - } - return res -} - -// makeSetFromIPAddrDTOs encodes an array of IPAddrDTO into a -// *schema.Set in the appropriate structure for the schema -func makeSetFromIPAddrDTOs(ias []udnssdk.IPAddrDTO) *schema.Set { - s := &schema.Set{F: hashIPInfoIPs} - for _, ia := range ias { - s.Add(mapEncode(ia)) - } - return s -} - -// mapFromGeoInfos encodes 0 or 1 GeoInfos into a []map[string]interface{} -// in the appropriate structure for the schema -func mapFromGeoInfos(gi *udnssdk.GeoInfo) []map[string]interface{} { - res := make([]map[string]interface{}, 0, 1) - if gi != nil { - m := mapEncode(gi) - m["codes"] = makeSetFromStrings(gi.Codes) - res = append(res, m) - } - return res -} - -// hashIPInfoIPs generates a hashcode for an ip_info.ips block -func hashIPInfoIPs(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["start"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["end"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["cidr"].(string))) - buf.WriteString(fmt.Sprintf("%s", m["address"].(string))) - - h := hashcode.String(buf.String()) - log.Printf("[DEBUG] hashIPInfoIPs(): %v -> %v", buf.String(), h) - return h -} - -// Map <-> Struct transcoding -// Ideally, we sould be able to handle almost all the type conversion -// in this resource using the following helpers. 
Unfortunately, some -// issues remain: -// - schema.Set values cannot be naively assigned, and must be -// manually converted -// - ip_info and geo_info come in as []map[string]interface{}, but are -// in DPRDataInfo as singluar. - -// mapDecode takes a map[string]interface{} and uses reflection to -// convert it into the given Go native structure. val must be a pointer -// to a struct. This is identical to mapstructure.Decode, but uses the -// `terraform:` tag instead of `mapstructure:` -func mapDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - Metadata: nil, - TagName: "terraform", - Result: rawVal, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -func mapEncode(rawVal interface{}) map[string]interface{} { - s := structs.New(rawVal) - s.TagName = "terraform" - return s.Map() -} diff --git a/builtin/providers/ultradns/resource_ultradns_dirpool_test.go b/builtin/providers/ultradns/resource_ultradns_dirpool_test.go deleted file mode 100644 index f92c82fcd..000000000 --- a/builtin/providers/ultradns/resource_ultradns_dirpool_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestAccUltradnsDirpool(t *testing.T) { - var record udnssdk.RRSet - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccDirpoolCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgDirpoolMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_dirpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_dirpool.it", "zone", domain), - 
resource.TestCheckResourceAttr("ultradns_dirpool.it", "name", "test-dirpool-minimal"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "type", "A"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "ttl", "300"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "description", "Minimal directional pool"), - // hashRdatas(): 10.1.0.1 -> 463398947 - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.463398947.host", "10.1.0.1"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.463398947.all_non_configured", "true"), - // Generated - resource.TestCheckResourceAttr("ultradns_dirpool.it", "id", "test-dirpool-minimal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "hostname", "test-dirpool-minimal.ultradns.phinze.com."), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgDirpoolMaximal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_dirpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_dirpool.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "name", "test-dirpool-maximal"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "type", "A"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "ttl", "300"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "description", "Description of pool"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "conflict_resolve", "GEO"), - - // hashRdatas(): 10.1.1.1 -> 442270228 - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.442270228.host", "10.1.1.1"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.442270228.all_non_configured", "true"), - // hashRdatas(): 10.1.1.2 -> 2203440046 - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.2203440046.host", "10.1.1.2"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.2203440046.geo_info.0.name", "North America"), - // hashRdatas(): 
10.1.1.3 -> 4099072824 - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.4099072824.host", "10.1.1.3"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "rdata.4099072824.ip_info.0.name", "some Ips"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "no_response.0.geo_info.0.name", "nrGeo"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "no_response.0.ip_info.0.name", "nrIP"), - // Generated - resource.TestCheckResourceAttr("ultradns_dirpool.it", "id", "test-dirpool-maximal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_dirpool.it", "hostname", "test-dirpool-maximal.ultradns.phinze.com."), - ), - }, - }, - }) -} - -func testAccDirpoolCheckDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*udnssdk.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ultradns_dirpool" { - continue - } - - k := udnssdk.RRSetKey{ - Zone: rs.Primary.Attributes["zone"], - Name: rs.Primary.Attributes["name"], - Type: rs.Primary.Attributes["type"], - } - - _, err := client.RRSets.Select(k) - - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -const testCfgDirpoolMinimal = ` -resource "ultradns_dirpool" "it" { - zone = "%s" - name = "test-dirpool-minimal" - type = "A" - ttl = 300 - description = "Minimal directional pool" - - rdata { - host = "10.1.0.1" - all_non_configured = true - } -} -` - -const testCfgDirpoolMaximal = ` -resource "ultradns_dirpool" "it" { - zone = "%s" - name = "test-dirpool-maximal" - type = "A" - ttl = 300 - description = "Description of pool" - - conflict_resolve = "GEO" - - rdata { - host = "10.1.1.1" - all_non_configured = true - } - - rdata { - host = "10.1.1.2" - - geo_info { - name = "North America" - - codes = [ - "US-OK", - "US-DC", - "US-MA", - ] - } - } - - rdata { - host = "10.1.1.3" - - ip_info { - name = "some Ips" - - ips { - start = "200.20.0.1" - end = "200.20.0.10" - } - - ips { - cidr = "20.20.20.0/24" - } - - ips 
{ - address = "50.60.70.80" - } - } - } - -# rdata { -# host = "10.1.1.4" -# -# geo_info { -# name = "accountGeoGroup" -# is_account_level = true -# } -# -# ip_info { -# name = "accountIPGroup" -# is_account_level = true -# } -# } - - no_response { - geo_info { - name = "nrGeo" - - codes = [ - "Z4", - ] - } - - ip_info { - name = "nrIP" - - ips { - address = "197.231.41.3" - } - } - } -} -` diff --git a/builtin/providers/ultradns/resource_ultradns_probe_http.go b/builtin/providers/ultradns/resource_ultradns_probe_http.go deleted file mode 100644 index 4149ee6fb..000000000 --- a/builtin/providers/ultradns/resource_ultradns_probe_http.go +++ /dev/null @@ -1,316 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUltradnsProbeHTTP() *schema.Resource { - return &schema.Resource{ - Create: resourceUltradnsProbeHTTPCreate, - Read: resourceUltradnsProbeHTTPRead, - Update: resourceUltradnsProbeHTTPUpdate, - Delete: resourceUltradnsProbeHTTPDelete, - - Schema: map[string]*schema.Schema{ - // Key - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "pool_record": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - // Required - "agents": &schema.Schema{ - Type: schema.TypeSet, - Set: schema.HashString, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "threshold": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - // Optional - "interval": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "FIVE_MINUTES", - }, - "http_probe": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: schemaHTTPProbe(), - }, - // Computed - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func schemaHTTPProbe() 
*schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "transaction": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "method": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "transmitted_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "follow_redirects": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "limit": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Set: hashLimits, - Elem: resourceProbeLimits(), - }, - }, - }, - }, - "total_limits": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "warning": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "critical": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "fail": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func resourceUltradnsProbeHTTPCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeHTTPProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_http configuration: %v", err) - } - - log.Printf("[INFO] ultradns_probe_http create: %#v, detail: %#v", r, r.Details.Detail) - resp, err := client.Probes.Create(r.Key().RRSetKey(), r.ProbeInfoDTO()) - if err != nil { - return fmt.Errorf("create failed: %v", err) - } - - uri := resp.Header.Get("Location") - d.Set("uri", uri) - d.SetId(uri) - log.Printf("[INFO] ultradns_probe_http.id: %v", d.Id()) - - return resourceUltradnsProbeHTTPRead(d, meta) -} - -func resourceUltradnsProbeHTTPRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeHTTPProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load 
ultradns_probe_http configuration: %v", err) - } - - log.Printf("[DEBUG] ultradns_probe_http read: %#v", r) - probe, _, err := client.Probes.Find(r.Key()) - log.Printf("[DEBUG] ultradns_probe_http response: %#v", probe) - - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, r := range uderr.Responses { - // 70002 means Probes Not Found - if r.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("not found: %s", err) - } - } - return fmt.Errorf("not found: %s", err) - } - - return populateResourceDataFromHTTPProbe(probe, d) -} - -func resourceUltradnsProbeHTTPUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeHTTPProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_http configuration: %v", err) - } - - log.Printf("[INFO] ultradns_probe_http update: %+v", r) - _, err = client.Probes.Update(r.Key(), r.ProbeInfoDTO()) - if err != nil { - return fmt.Errorf("update failed: %s", err) - } - - return resourceUltradnsProbeHTTPRead(d, meta) -} - -func resourceUltradnsProbeHTTPDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makeHTTPProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_http configuration: %s", err) - } - - log.Printf("[INFO] ultradns_probe_http delete: %+v", r) - _, err = client.Probes.Delete(r.Key()) - if err != nil { - return fmt.Errorf("delete failed: %s", err) - } - - return nil -} - -// Resource Helpers - -func makeHTTPProbeResource(d *schema.ResourceData) (probeResource, error) { - p := probeResource{} - p.Zone = d.Get("zone").(string) - p.Name = d.Get("name").(string) - p.ID = d.Id() - p.Interval = d.Get("interval").(string) - p.PoolRecord = d.Get("pool_record").(string) - p.Threshold = d.Get("threshold").(int) - for _, a := range d.Get("agents").(*schema.Set).List() { - p.Agents = append(p.Agents, a.(string)) - } - - p.Type 
= udnssdk.HTTPProbeType - hps := d.Get("http_probe").([]interface{}) - if len(hps) >= 1 { - if len(hps) > 1 { - return p, fmt.Errorf("http_probe: only 0 or 1 blocks alowed, got: %#v", len(hps)) - } - p.Details = makeHTTPProbeDetails(hps[0]) - } - - return p, nil -} - -func makeHTTPProbeDetails(configured interface{}) *udnssdk.ProbeDetailsDTO { - data := configured.(map[string]interface{}) - // Convert limits from flattened set format to mapping. - d := udnssdk.HTTPProbeDetailsDTO{} - - ts := []udnssdk.Transaction{} - for _, rt := range data["transaction"].([]interface{}) { - mt := rt.(map[string]interface{}) - ls := make(map[string]udnssdk.ProbeDetailsLimitDTO) - for _, limit := range mt["limit"].(*schema.Set).List() { - l := limit.(map[string]interface{}) - name := l["name"].(string) - ls[name] = *makeProbeDetailsLimit(l) - } - t := udnssdk.Transaction{ - Method: mt["method"].(string), - URL: mt["url"].(string), - TransmittedData: mt["transmitted_data"].(string), - FollowRedirects: mt["follow_redirects"].(bool), - Limits: ls, - } - ts = append(ts, t) - } - d.Transactions = ts - rawLims := data["total_limits"].([]interface{}) - if len(rawLims) >= 1 { - // TODO: validate 0 or 1 total_limits - // if len(rawLims) > 1 { - // return nil, fmt.Errorf("total_limits: only 0 or 1 blocks alowed, got: %#v", len(rawLims)) - // } - d.TotalLimits = makeProbeDetailsLimit(rawLims[0]) - } - res := udnssdk.ProbeDetailsDTO{ - Detail: d, - } - return &res -} - -func populateResourceDataFromHTTPProbe(p udnssdk.ProbeInfoDTO, d *schema.ResourceData) error { - d.SetId(p.ID) - d.Set("pool_record", p.PoolRecord) - d.Set("interval", p.Interval) - d.Set("agents", makeSetFromStrings(p.Agents)) - d.Set("threshold", p.Threshold) - - hp := map[string]interface{}{} - hd, err := p.Details.HTTPProbeDetails() - if err != nil { - return fmt.Errorf("ProbeInfo.details could not be unmarshalled: %v, Details: %#v", err, p.Details) - } - ts := make([]map[string]interface{}, 0, len(hd.Transactions)) - for _, 
rt := range hd.Transactions { - t := map[string]interface{}{ - "method": rt.Method, - "url": rt.URL, - "transmitted_data": rt.TransmittedData, - "follow_redirects": rt.FollowRedirects, - "limit": makeSetFromLimits(rt.Limits), - } - ts = append(ts, t) - } - hp["transaction"] = ts - - tls := []map[string]interface{}{} - rawtl := hd.TotalLimits - if rawtl != nil { - tl := map[string]interface{}{ - "warning": rawtl.Warning, - "critical": rawtl.Critical, - "fail": rawtl.Fail, - } - tls = append(tls, tl) - } - hp["total_limits"] = tls - - err = d.Set("http_probe", []map[string]interface{}{hp}) - if err != nil { - return fmt.Errorf("http_probe set failed: %v, from %#v", err, hp) - } - return nil -} diff --git a/builtin/providers/ultradns/resource_ultradns_probe_http_test.go b/builtin/providers/ultradns/resource_ultradns_probe_http_test.go deleted file mode 100644 index deb7c3d4b..000000000 --- a/builtin/providers/ultradns/resource_ultradns_probe_http_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccUltradnsProbeHTTP(t *testing.T) { - var record udnssdk.RRSet - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTcpoolCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgProbeHTTPMinimal, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.test-probe-http-minimal", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_probe_http.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "name", "test-probe-http-minimal"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "pool_record", "10.2.0.1"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "agents.4091180299", 
"DALLAS"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "agents.2144410488", "AMSTERDAM"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "interval", "ONE_MINUTE"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "threshold", "2"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.method", "GET"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.url", "http://localhost/index"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.#", "2"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.name", "connect"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.warning", "20"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.critical", "20"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.fail", "20"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.name", "run"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.warning", "60"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.critical", "60"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.fail", "60"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgProbeHTTPMaximal, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.test-probe-http-maximal", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_probe_http.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "name", "test-probe-http-maximal"), - 
resource.TestCheckResourceAttr("ultradns_probe_http.it", "pool_record", "10.2.1.1"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "agents.4091180299", "DALLAS"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "agents.2144410488", "AMSTERDAM"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "interval", "ONE_MINUTE"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "threshold", "2"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.method", "POST"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.url", "http://localhost/index"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.#", "4"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.name", "run"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.warning", "1"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.critical", "2"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1349952704.fail", "3"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.2720402232.name", "avgConnect"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.2720402232.warning", "4"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.2720402232.critical", "5"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.2720402232.fail", "6"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.896769211.name", "avgRun"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.896769211.warning", "7"), - 
resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.896769211.critical", "8"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.896769211.fail", "9"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.name", "connect"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.warning", "10"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.critical", "11"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.transaction.0.limit.1959786783.fail", "12"), - - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.total_limits.0.warning", "13"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.total_limits.0.critical", "14"), - resource.TestCheckResourceAttr("ultradns_probe_http.it", "http_probe.0.total_limits.0.fail", "15"), - ), - }, - }, - }) -} - -const testCfgProbeHTTPMinimal = ` -resource "ultradns_tcpool" "test-probe-http-minimal" { - zone = "%s" - name = "test-probe-http-minimal" - - ttl = 30 - description = "traffic controller pool with probes" - - run_probes = true - act_on_probes = true - max_to_lb = 2 - - rdata { - host = "10.2.0.1" - - state = "NORMAL" - run_probes = true - priority = 1 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - rdata { - host = "10.2.0.2" - - state = "NORMAL" - run_probes = true - priority = 2 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - backup_record_rdata = "10.2.0.3" -} - -resource "ultradns_probe_http" "it" { - zone = "%s" - name = "test-probe-http-minimal" - - pool_record = "10.2.0.1" - - agents = ["DALLAS", "AMSTERDAM"] - - interval = "ONE_MINUTE" - threshold = 2 - - http_probe { - transaction { - method = "GET" - url = "http://localhost/index" - - limit { - name = "run" - warning = 60 - critical = 60 - fail 
= 60 - } - - limit { - name = "connect" - warning = 20 - critical = 20 - fail = 20 - } - } - } - - depends_on = ["ultradns_tcpool.test-probe-http-minimal"] -} -` - -const testCfgProbeHTTPMaximal = ` -resource "ultradns_tcpool" "test-probe-http-maximal" { - zone = "%s" - name = "test-probe-http-maximal" - - ttl = 30 - description = "traffic controller pool with probes" - - run_probes = true - act_on_probes = true - max_to_lb = 2 - - rdata { - host = "10.2.1.1" - - state = "NORMAL" - run_probes = true - priority = 1 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - rdata { - host = "10.2.1.2" - - state = "NORMAL" - run_probes = true - priority = 2 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - backup_record_rdata = "10.2.1.3" -} - -resource "ultradns_probe_http" "it" { - zone = "%s" - name = "test-probe-http-maximal" - - pool_record = "10.2.1.1" - - agents = ["DALLAS", "AMSTERDAM"] - - interval = "ONE_MINUTE" - threshold = 2 - - http_probe { - transaction { - method = "POST" - url = "http://localhost/index" - transmitted_data = "{}" - follow_redirects = true - - limit { - name = "run" - - warning = 1 - critical = 2 - fail = 3 - } - limit { - name = "avgConnect" - - warning = 4 - critical = 5 - fail = 6 - } - limit { - name = "avgRun" - - warning = 7 - critical = 8 - fail = 9 - } - limit { - name = "connect" - - warning = 10 - critical = 11 - fail = 12 - } - } - - total_limits { - warning = 13 - critical = 14 - fail = 15 - } - } - - depends_on = ["ultradns_tcpool.test-probe-http-maximal"] -} -` diff --git a/builtin/providers/ultradns/resource_ultradns_probe_ping.go b/builtin/providers/ultradns/resource_ultradns_probe_ping.go deleted file mode 100644 index 44445d5b2..000000000 --- a/builtin/providers/ultradns/resource_ultradns_probe_ping.go +++ /dev/null @@ -1,218 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUltradnsProbePing() 
*schema.Resource { - return &schema.Resource{ - Create: resourceUltradnsProbePingCreate, - Read: resourceUltradnsProbePingRead, - Update: resourceUltradnsProbePingUpdate, - Delete: resourceUltradnsProbePingDelete, - - Schema: map[string]*schema.Schema{ - // Key - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "pool_record": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - // Required - "agents": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "threshold": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - // Optional - "interval": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "FIVE_MINUTES", - }, - "ping_probe": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: schemaPingProbe(), - }, - // Computed - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceUltradnsProbePingCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makePingProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_ping configuration: %v", err) - } - - log.Printf("[INFO] ultradns_probe_ping create: %#v, detail: %#v", r, r.Details.Detail) - resp, err := client.Probes.Create(r.Key().RRSetKey(), r.ProbeInfoDTO()) - if err != nil { - return fmt.Errorf("create failed: %v", err) - } - - uri := resp.Header.Get("Location") - d.Set("uri", uri) - d.SetId(uri) - log.Printf("[INFO] ultradns_probe_ping.id: %v", d.Id()) - - return resourceUltradnsProbePingRead(d, meta) -} - -func resourceUltradnsProbePingRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makePingProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load 
ultradns_probe_ping configuration: %v", err) - } - - log.Printf("[DEBUG] ultradns_probe_ping read: %#v", r) - probe, _, err := client.Probes.Find(r.Key()) - log.Printf("[DEBUG] ultradns_probe_ping response: %#v", probe) - - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, r := range uderr.Responses { - // 70002 means Probes Not Found - if r.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("not found: %s", err) - } - } - return fmt.Errorf("not found: %s", err) - } - - return populateResourceDataFromPingProbe(probe, d) -} - -func resourceUltradnsProbePingUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makePingProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_ping configuration: %v", err) - } - - log.Printf("[INFO] ultradns_probe_ping update: %+v", r) - _, err = client.Probes.Update(r.Key(), r.ProbeInfoDTO()) - if err != nil { - return fmt.Errorf("update failed: %s", err) - } - - return resourceUltradnsProbePingRead(d, meta) -} - -func resourceUltradnsProbePingDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := makePingProbeResource(d) - if err != nil { - return fmt.Errorf("Could not load ultradns_probe_ping configuration: %s", err) - } - - log.Printf("[INFO] ultradns_probe_ping delete: %+v", r) - _, err = client.Probes.Delete(r.Key()) - if err != nil { - return fmt.Errorf("delete failed: %s", err) - } - - return nil -} - -// Resource Helpers - -func makePingProbeResource(d *schema.ResourceData) (probeResource, error) { - p := probeResource{} - p.Zone = d.Get("zone").(string) - p.Name = d.Get("name").(string) - p.ID = d.Id() - p.Interval = d.Get("interval").(string) - p.PoolRecord = d.Get("pool_record").(string) - p.Threshold = d.Get("threshold").(int) - for _, a := range d.Get("agents").([]interface{}) { - p.Agents = append(p.Agents, a.(string)) - } - - p.Type = 
udnssdk.PingProbeType - pps := d.Get("ping_probe").([]interface{}) - if len(pps) >= 1 { - if len(pps) > 1 { - return p, fmt.Errorf("ping_probe: only 0 or 1 blocks alowed, got: %#v", len(pps)) - } - p.Details = makePingProbeDetails(pps[0]) - } - - return p, nil -} - -func makePingProbeDetails(configured interface{}) *udnssdk.ProbeDetailsDTO { - data := configured.(map[string]interface{}) - // Convert limits from flattened set format to mapping. - ls := make(map[string]udnssdk.ProbeDetailsLimitDTO) - for _, limit := range data["limit"].(*schema.Set).List() { - l := limit.(map[string]interface{}) - name := l["name"].(string) - ls[name] = *makeProbeDetailsLimit(l) - } - res := udnssdk.ProbeDetailsDTO{ - Detail: udnssdk.PingProbeDetailsDTO{ - Limits: ls, - PacketSize: data["packet_size"].(int), - Packets: data["packets"].(int), - }, - } - return &res -} - -func populateResourceDataFromPingProbe(p udnssdk.ProbeInfoDTO, d *schema.ResourceData) error { - d.SetId(p.ID) - d.Set("pool_record", p.PoolRecord) - d.Set("interval", p.Interval) - d.Set("agents", p.Agents) - d.Set("threshold", p.Threshold) - - pd, err := p.Details.PingProbeDetails() - if err != nil { - return fmt.Errorf("ProbeInfo.details could not be unmarshalled: %v, Details: %#v", err, p.Details) - } - pp := map[string]interface{}{ - "packets": pd.Packets, - "packet_size": pd.PacketSize, - "limit": makeSetFromLimits(pd.Limits), - } - - err = d.Set("ping_probe", []map[string]interface{}{pp}) - if err != nil { - return fmt.Errorf("ping_probe set failed: %v, from %#v", err, pp) - } - return nil -} diff --git a/builtin/providers/ultradns/resource_ultradns_probe_ping_test.go b/builtin/providers/ultradns/resource_ultradns_probe_ping_test.go deleted file mode 100644 index 3c7b39eef..000000000 --- a/builtin/providers/ultradns/resource_ultradns_probe_ping_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - 
"github.com/hashicorp/terraform/helper/resource" -) - -func TestAccUltradnsProbePing(t *testing.T) { - var record udnssdk.RRSet - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTcpoolCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgProbePingRecord, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.test-probe-ping-record", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "name", "test-probe-ping-record"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "pool_record", "10.3.0.1"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "agents.0", "DALLAS"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "agents.1", "AMSTERDAM"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "interval", "ONE_MINUTE"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "threshold", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.packets", "15"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.packet_size", "56"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.#", "2"), - - // hashLimits(): lossPercent -> 3375621462 - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.name", "lossPercent"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.warning", "1"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.critical", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.fail", "3"), - - // hashLimits(): total -> 3257917790 - 
resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.name", "total"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.warning", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.critical", "3"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.fail", "4"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgProbePingPool, domain, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.test-probe-ping-pool", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "name", "test-probe-ping-pool"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "agents.0", "DALLAS"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "agents.1", "AMSTERDAM"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "interval", "ONE_MINUTE"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "threshold", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.packets", "15"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.packet_size", "56"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.#", "2"), - - // hashLimits(): lossPercent -> 3375621462 - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.name", "lossPercent"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.warning", "1"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.critical", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3375621462.fail", "3"), - - // hashLimits(): total -> 3257917790 - 
resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.name", "total"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.warning", "2"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.critical", "3"), - resource.TestCheckResourceAttr("ultradns_probe_ping.it", "ping_probe.0.limit.3257917790.fail", "4"), - ), - }, - }, - }) -} - -const testCfgProbePingRecord = ` -resource "ultradns_tcpool" "test-probe-ping-record" { - zone = "%s" - name = "test-probe-ping-record" - - ttl = 30 - description = "traffic controller pool with probes" - - run_probes = true - act_on_probes = true - max_to_lb = 2 - - rdata { - host = "10.3.0.1" - - state = "NORMAL" - run_probes = true - priority = 1 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - rdata { - host = "10.3.0.2" - - state = "NORMAL" - run_probes = true - priority = 2 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - backup_record_rdata = "10.3.0.3" -} - -resource "ultradns_probe_ping" "it" { - zone = "%s" - name = "test-probe-ping-record" - - pool_record = "10.3.0.1" - - agents = ["DALLAS", "AMSTERDAM"] - - interval = "ONE_MINUTE" - threshold = 2 - - ping_probe { - packets = 15 - packet_size = 56 - - limit { - name = "lossPercent" - warning = 1 - critical = 2 - fail = 3 - } - - limit { - name = "total" - warning = 2 - critical = 3 - fail = 4 - } - } - - depends_on = ["ultradns_tcpool.test-probe-ping-record"] -} -` - -const testCfgProbePingPool = ` -resource "ultradns_tcpool" "test-probe-ping-pool" { - zone = "%s" - name = "test-probe-ping-pool" - - ttl = 30 - description = "traffic controller pool with probes" - - run_probes = true - act_on_probes = true - max_to_lb = 2 - - rdata { - host = "10.3.0.1" - - state = "NORMAL" - run_probes = true - priority = 1 - failover_delay = 0 - threshold = 1 - weight = 2 - } - - rdata { - host = "10.3.0.2" - - state = "NORMAL" - run_probes = true - priority = 2 - 
failover_delay = 0 - threshold = 1 - weight = 2 - } - - backup_record_rdata = "10.3.0.3" -} - -resource "ultradns_probe_ping" "it" { - zone = "%s" - name = "test-probe-ping-pool" - - agents = ["DALLAS", "AMSTERDAM"] - - interval = "ONE_MINUTE" - threshold = 2 - - ping_probe { - packets = 15 - packet_size = 56 - - limit { - name = "lossPercent" - warning = 1 - critical = 2 - fail = 3 - } - - limit { - name = "total" - warning = 2 - critical = 3 - fail = 4 - } - } - - depends_on = ["ultradns_tcpool.test-probe-ping-pool"] -} -` diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool.go b/builtin/providers/ultradns/resource_ultradns_rdpool.go deleted file mode 100644 index 5f8bbff6c..000000000 --- a/builtin/providers/ultradns/resource_ultradns_rdpool.go +++ /dev/null @@ -1,218 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - "strings" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" -) - -func resourceUltradnsRdpool() *schema.Resource { - return &schema.Resource{ - Create: resourceUltradnsRdpoolCreate, - Read: resourceUltradnsRdpoolRead, - Update: resourceUltradnsRdpoolUpdate, - Delete: resourceUltradnsRdpoolDelete, - - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "rdata": &schema.Schema{ - Type: schema.TypeSet, - Set: schema.HashString, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // Optional - "order": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "ROUND_ROBIN", - ValidateFunc: validation.StringInSlice([]string{ - "ROUND_ROBIN", - "FIXED", - "RANDOM", - }, false), - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 255), - }, - "ttl": 
&schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 3600, - }, - // Computed - "hostname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// CRUD Operations - -func resourceUltradnsRdpoolCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] ultradns_rdpool create") - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromRdpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_rdpool create: %#v", r) - _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("create failed: %#v -> %v", r, err) - } - - d.SetId(r.ID()) - log.Printf("[INFO] ultradns_rdpool.id: %v", d.Id()) - - return resourceUltradnsRdpoolRead(d, meta) -} - -func resourceUltradnsRdpoolRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] ultradns_rdpool read") - client := meta.(*udnssdk.Client) - - rr, err := newRRSetResourceFromRdpool(d) - if err != nil { - return err - } - - rrsets, err := client.RRSets.Select(rr.RRSetKey()) - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, resps := range uderr.Responses { - // 70002 means Records Not Found - if resps.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("resource not found: %v", err) - } - } - return fmt.Errorf("resource not found: %v", err) - } - - r := rrsets[0] - - zone := d.Get("zone") - - // hostname - if r.OwnerName == "" { - d.Set("hostname", zone) - } else { - if strings.HasSuffix(r.OwnerName, ".") { - d.Set("hostname", r.OwnerName) - } else { - d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) - } - } - - // And now... the Profile! 
- if r.Profile == nil { - return fmt.Errorf("RRSet.profile missing: invalid RDPool schema in: %#v", r) - } - p, err := r.Profile.RDPoolProfile() - if err != nil { - return fmt.Errorf("RRSet.profile could not be unmarshalled: %v\n", err) - } - - // Set simple values - d.Set("ttl", r.TTL) - d.Set("description", p.Description) - d.Set("order", p.Order) - - err = d.Set("rdata", makeSetFromStrings(r.RData)) - if err != nil { - return fmt.Errorf("rdata set failed: %#v", err) - } - return nil -} - -func resourceUltradnsRdpoolUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] ultradns_rdpool update") - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromRdpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_rdpool update: %+v", r) - _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("resource update failed: %v", err) - } - - return resourceUltradnsRdpoolRead(d, meta) -} - -func resourceUltradnsRdpoolDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] ultradns_rdpool delete") - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromRdpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_rdpool delete: %+v", r) - _, err = client.RRSets.Delete(r.RRSetKey()) - if err != nil { - return fmt.Errorf("resource delete failed: %v", err) - } - - return nil -} - -// Resource Helpers - -func newRRSetResourceFromRdpool(d *schema.ResourceData) (rRSetResource, error) { - //rDataRaw := d.Get("rdata").(*schema.Set).List() - r := rRSetResource{ - // "The only valid rrtype value for RDpools is A" - // per https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf - RRType: "A", - Zone: d.Get("zone").(string), - OwnerName: d.Get("name").(string), - TTL: d.Get("ttl").(int), - } - if attr, ok := d.GetOk("rdata"); ok { - rdata := attr.(*schema.Set).List() - r.RData = make([]string, len(rdata)) - for i, j := range rdata { - 
r.RData[i] = j.(string) - } - } - - profile := udnssdk.RDPoolProfile{ - Context: udnssdk.RDPoolSchema, - Order: d.Get("order").(string), - Description: d.Get("description").(string), - } - - rp := profile.RawProfile() - r.Profile = rp - - return r, nil -} diff --git a/builtin/providers/ultradns/resource_ultradns_rdpool_test.go b/builtin/providers/ultradns/resource_ultradns_rdpool_test.go deleted file mode 100644 index 1ddd9c025..000000000 --- a/builtin/providers/ultradns/resource_ultradns_rdpool_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccUltradnsRdpool(t *testing.T) { - var record udnssdk.RRSet - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccRdpoolCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgRdpoolMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-minimal"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), - - // hashRdatas(): 10.6.0.1 -> 2847814707 - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.host", "10.6.0.1"), - // Defaults - resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "Minimal RD Pool"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2847814707.priority", "1"), - // Generated - resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-minimal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-minimal.ultradns.phinze.com."), - ), - }, - resource.TestStep{ - 
Config: fmt.Sprintf(testCfgRdpoolMaximal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_rdpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_rdpool.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "name", "test-rdpool-maximal"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "ttl", "300"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "description", "traffic controller pool with all settings tuned"), - - resource.TestCheckResourceAttr("ultradns_rdpool.it", "act_on_probes", "false"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "max_to_lb", "2"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "run_probes", "false"), - - // hashRdatas(): 10.6.1.1 -> 2826722820 - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.host", "10.6.1.1"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.2826722820.priority", "1"), - - // hashRdatas(): 10.6.1.2 -> 829755326 - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.host", "10.6.1.2"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "rdata.829755326.priority", "2"), - - // Generated - resource.TestCheckResourceAttr("ultradns_rdpool.it", "id", "test-rdpool-maximal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_rdpool.it", "hostname", "test-rdpool-maximal.ultradns.phinze.com."), - ), - }, - }, - }) -} - -const testCfgRdpoolMinimal = ` -resource "ultradns_rdpool" "it" { - zone = "%s" - name = "test-rdpool-minimal" - ttl = 300 - description = "Minimal RD Pool" - - rdata { - host = "10.6.0.1" - } -} -` - -const testCfgRdpoolMaximal = ` -resource "ultradns_rdpool" "it" { - zone = "%s" - name = "test-rdpool-maximal" - order = "ROUND_ROBIN" - ttl = 300 - description = "traffic controller pool with all settings tuned" - rdata { - host = "10.6.1.1" - priority = 1 - } - - rdata { - host = "10.6.1.2" - priority = 2 - } -} -` diff 
--git a/builtin/providers/ultradns/resource_ultradns_record.go b/builtin/providers/ultradns/resource_ultradns_record.go deleted file mode 100644 index 4d4181c3a..000000000 --- a/builtin/providers/ultradns/resource_ultradns_record.go +++ /dev/null @@ -1,214 +0,0 @@ -package ultradns - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - "strings" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" -) - -func newRRSetResource(d *schema.ResourceData) (rRSetResource, error) { - r := rRSetResource{} - - // TODO: return error if required attributes aren't ok - - if attr, ok := d.GetOk("name"); ok { - r.OwnerName = attr.(string) - } - - if attr, ok := d.GetOk("type"); ok { - r.RRType = attr.(string) - } - - if attr, ok := d.GetOk("zone"); ok { - r.Zone = attr.(string) - } - - if attr, ok := d.GetOk("rdata"); ok { - rdata := attr.(*schema.Set).List() - r.RData = make([]string, len(rdata)) - for i, j := range rdata { - r.RData[i] = j.(string) - } - } - - if attr, ok := d.GetOk("ttl"); ok { - r.TTL, _ = strconv.Atoi(attr.(string)) - } - - return r, nil -} - -func populateResourceDataFromRRSet(r udnssdk.RRSet, d *schema.ResourceData) error { - zone := d.Get("zone") - typ := d.Get("type") - // ttl - d.Set("ttl", r.TTL) - // rdata - rdata := r.RData - - // UltraDNS API returns answers double-encoded like JSON, so we must decode. This is their bug. 
- if typ == "TXT" { - rdata = make([]string, len(r.RData)) - for i := range r.RData { - var s string - err := json.Unmarshal([]byte(r.RData[i]), &s) - if err != nil { - log.Printf("[INFO] TXT answer parse error: %+v", err) - s = r.RData[i] - } - rdata[i] = s - - } - } - - err := d.Set("rdata", makeSetFromStrings(rdata)) - if err != nil { - return fmt.Errorf("ultradns_record.rdata set failed: %#v", err) - } - // hostname - if r.OwnerName == "" { - d.Set("hostname", zone) - } else { - if strings.HasSuffix(r.OwnerName, ".") { - d.Set("hostname", r.OwnerName) - } else { - d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) - } - } - return nil -} - -func resourceUltradnsRecord() *schema.Resource { - return &schema.Resource{ - Create: resourceUltraDNSRecordCreate, - Read: resourceUltraDNSRecordRead, - Update: resourceUltraDNSRecordUpdate, - Delete: resourceUltraDNSRecordDelete, - - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "rdata": &schema.Schema{ - Type: schema.TypeSet, - Set: schema.HashString, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // Optional - "ttl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "3600", - }, - // Computed - "hostname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// CRUD Operations - -func resourceUltraDNSRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_record create: %+v", r) - _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("create failed: %v", err) - } - - 
d.SetId(r.ID()) - log.Printf("[INFO] ultradns_record.id: %v", d.Id()) - - return resourceUltraDNSRecordRead(d, meta) -} - -func resourceUltraDNSRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResource(d) - if err != nil { - return err - } - - rrsets, err := client.RRSets.Select(r.RRSetKey()) - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, r := range uderr.Responses { - // 70002 means Records Not Found - if r.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("not found: %v", err) - } - } - return fmt.Errorf("not found: %v", err) - } - rec := rrsets[0] - return populateResourceDataFromRRSet(rec, d) -} - -func resourceUltraDNSRecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_record update: %+v", r) - _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("update failed: %v", err) - } - - return resourceUltraDNSRecordRead(d, meta) -} - -func resourceUltraDNSRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResource(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_record delete: %+v", r) - _, err = client.RRSets.Delete(r.RRSetKey()) - if err != nil { - return fmt.Errorf("delete failed: %v", err) - } - - return nil -} - -// Conversion helper functions diff --git a/builtin/providers/ultradns/resource_ultradns_record_test.go b/builtin/providers/ultradns/resource_ultradns_record_test.go deleted file mode 100644 index 56073b298..000000000 --- a/builtin/providers/ultradns/resource_ultradns_record_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" - 
"github.com/hashicorp/terraform/terraform" -) - -func TestAccUltradnsRecord(t *testing.T) { - var record udnssdk.RRSet - // domain := os.Getenv("ULTRADNS_DOMAIN") - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccRecordCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgRecordMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_record.it", &record), - resource.TestCheckResourceAttr("ultradns_record.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_record.it", "name", "test-record"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.3994963683", "10.5.0.1"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgRecordMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_record.it", &record), - resource.TestCheckResourceAttr("ultradns_record.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_record.it", "name", "test-record"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.3994963683", "10.5.0.1"), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgRecordUpdated, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_record.it", &record), - resource.TestCheckResourceAttr("ultradns_record.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_record.it", "name", "test-record"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.1998004057", "10.5.0.2"), - ), - }, - }, - }) -} - -func TestAccUltradnsRecordTXT(t *testing.T) { - var record udnssdk.RRSet - // domain := os.Getenv("ULTRADNS_DOMAIN") - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccRecordCheckDestroy, - 
Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgRecordTXTMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_record.it", &record), - resource.TestCheckResourceAttr("ultradns_record.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_record.it", "name", "test-record-txt"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.1447448707", "simple answer"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.3337444205", "backslash answer \\"), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.3135730072", "quote answer \""), - resource.TestCheckResourceAttr("ultradns_record.it", "rdata.126343430", "complex answer \\ \""), - ), - }, - }, - }) -} - -func testAccRecordCheckDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*udnssdk.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "ultradns_record" { - continue - } - - k := udnssdk.RRSetKey{ - Zone: rs.Primary.Attributes["zone"], - Name: rs.Primary.Attributes["name"], - Type: rs.Primary.Attributes["type"], - } - - _, err := client.RRSets.Select(k) - - if err == nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -const testCfgRecordMinimal = ` -resource "ultradns_record" "it" { - zone = "%s" - name = "test-record" - - rdata = ["10.5.0.1"] - type = "A" - ttl = 3600 -} -` - -const testCfgRecordUpdated = ` -resource "ultradns_record" "it" { - zone = "%s" - name = "test-record" - - rdata = ["10.5.0.2"] - type = "A" - ttl = 3600 -} -` - -const testCfgRecordTXTMinimal = ` -resource "ultradns_record" "it" { - zone = "%s" - name = "test-record-txt" - - rdata = [ - "simple answer", - "backslash answer \\", - "quote answer \"", - "complex answer \\ \"", - ] - type = "TXT" - ttl = 3600 -} -` diff --git a/builtin/providers/ultradns/resource_ultradns_tcpool.go b/builtin/providers/ultradns/resource_ultradns_tcpool.go deleted file mode 100644 
index a497a65b6..000000000 --- a/builtin/providers/ultradns/resource_ultradns_tcpool.go +++ /dev/null @@ -1,331 +0,0 @@ -package ultradns - -import ( - "fmt" - "log" - "strings" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/schema" -) - -func resourceUltradnsTcpool() *schema.Resource { - return &schema.Resource{ - Create: resourceUltradnsTcpoolCreate, - Read: resourceUltradnsTcpoolRead, - Update: resourceUltradnsTcpoolUpdate, - Delete: resourceUltradnsTcpoolDelete, - - Schema: map[string]*schema.Schema{ - // Required - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Required: true, - // 0-255 char - }, - "rdata": &schema.Schema{ - Type: schema.TypeSet, - Set: hashRdatas, - Required: true, - // Valid: len(rdataInfo) == len(rdata) - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Required - "host": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - // Optional - "failover_delay": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 0, - // Valid: 0-30 - // Units: Minutes - }, - "priority": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - "run_probes": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "state": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "NORMAL", - }, - "threshold": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - "weight": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 2, - // Valid: i%2 == 0 && 2 <= i <= 100 - }, - }, - }, - }, - // Optional - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 3600, - }, - "run_probes": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - 
"act_on_probes": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "max_to_lb": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - // Valid: 0 <= i <= len(rdata) - }, - "backup_record_rdata": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - // Valid: IPv4 address or CNAME - }, - "backup_record_failover_delay": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - // Valid: 0-30 - // Units: Minutes - }, - // Computed - "hostname": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -// CRUD Operations - -func resourceUltradnsTcpoolCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromTcpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_tcpool create: %#v", r) - _, err = client.RRSets.Create(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("create failed: %#v -> %v", r, err) - } - - d.SetId(r.ID()) - log.Printf("[INFO] ultradns_tcpool.id: %v", d.Id()) - - return resourceUltradnsTcpoolRead(d, meta) -} - -func resourceUltradnsTcpoolRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - rr, err := newRRSetResourceFromTcpool(d) - if err != nil { - return err - } - - rrsets, err := client.RRSets.Select(rr.RRSetKey()) - if err != nil { - uderr, ok := err.(*udnssdk.ErrorResponseList) - if ok { - for _, resps := range uderr.Responses { - // 70002 means Records Not Found - if resps.ErrorCode == 70002 { - d.SetId("") - return nil - } - return fmt.Errorf("resource not found: %v", err) - } - } - return fmt.Errorf("resource not found: %v", err) - } - - r := rrsets[0] - - zone := d.Get("zone") - // ttl - d.Set("ttl", r.TTL) - // hostname - if r.OwnerName == "" { - d.Set("hostname", zone) - } else { - if strings.HasSuffix(r.OwnerName, ".") { - d.Set("hostname", r.OwnerName) - } else { - d.Set("hostname", fmt.Sprintf("%s.%s", r.OwnerName, zone)) - 
} - } - - // And now... the Profile! - if r.Profile == nil { - return fmt.Errorf("RRSet.profile missing: invalid TCPool schema in: %#v", r) - } - p, err := r.Profile.TCPoolProfile() - if err != nil { - return fmt.Errorf("RRSet.profile could not be unmarshalled: %v\n", err) - } - - // Set simple values - d.Set("description", p.Description) - d.Set("run_probes", p.RunProbes) - d.Set("act_on_probes", p.ActOnProbes) - d.Set("max_to_lb", p.MaxToLB) - if p.BackupRecord != nil { - d.Set("backup_record_rdata", p.BackupRecord.RData) - d.Set("backup_record_failover_delay", p.BackupRecord.FailoverDelay) - } - - // TODO: rigorously test this to see if we can remove the error handling - err = d.Set("rdata", makeSetFromRdata(r.RData, p.RDataInfo)) - if err != nil { - return fmt.Errorf("rdata set failed: %#v", err) - } - return nil -} - -func resourceUltradnsTcpoolUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromTcpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_tcpool update: %+v", r) - _, err = client.RRSets.Update(r.RRSetKey(), r.RRSet()) - if err != nil { - return fmt.Errorf("resource update failed: %v", err) - } - - return resourceUltradnsTcpoolRead(d, meta) -} - -func resourceUltradnsTcpoolDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*udnssdk.Client) - - r, err := newRRSetResourceFromTcpool(d) - if err != nil { - return err - } - - log.Printf("[INFO] ultradns_tcpool delete: %+v", r) - _, err = client.RRSets.Delete(r.RRSetKey()) - if err != nil { - return fmt.Errorf("resource delete failed: %v", err) - } - - return nil -} - -// Resource Helpers - -func newRRSetResourceFromTcpool(d *schema.ResourceData) (rRSetResource, error) { - rDataRaw := d.Get("rdata").(*schema.Set).List() - r := rRSetResource{ - // "The only valid rrtype value for SiteBacker or Traffic Controller pools is A" - // per 
https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf - RRType: "A", - Zone: d.Get("zone").(string), - OwnerName: d.Get("name").(string), - TTL: d.Get("ttl").(int), - RData: unzipRdataHosts(rDataRaw), - } - - profile := udnssdk.TCPoolProfile{ - Context: udnssdk.TCPoolSchema, - ActOnProbes: d.Get("act_on_probes").(bool), - Description: d.Get("description").(string), - MaxToLB: d.Get("max_to_lb").(int), - RunProbes: d.Get("run_probes").(bool), - RDataInfo: unzipRdataInfos(rDataRaw), - } - - // Only send BackupRecord if present - br := d.Get("backup_record_rdata").(string) - if br != "" { - profile.BackupRecord = &udnssdk.BackupRecord{ - RData: d.Get("backup_record_rdata").(string), - FailoverDelay: d.Get("backup_record_failover_delay").(int), - } - } - - rp := profile.RawProfile() - r.Profile = rp - - return r, nil -} - -func unzipRdataInfos(configured []interface{}) []udnssdk.SBRDataInfo { - rdataInfos := make([]udnssdk.SBRDataInfo, 0, len(configured)) - for _, rRaw := range configured { - data := rRaw.(map[string]interface{}) - r := udnssdk.SBRDataInfo{ - FailoverDelay: data["failover_delay"].(int), - Priority: data["priority"].(int), - RunProbes: data["run_probes"].(bool), - State: data["state"].(string), - Threshold: data["threshold"].(int), - Weight: data["weight"].(int), - } - rdataInfos = append(rdataInfos, r) - } - return rdataInfos -} - -// collate and zip RData and RDataInfo into []map[string]interface{} -func zipRData(rds []string, rdis []udnssdk.SBRDataInfo) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(rds)) - for i, rdi := range rdis { - r := map[string]interface{}{ - "host": rds[i], - "failover_delay": rdi.FailoverDelay, - "priority": rdi.Priority, - "run_probes": rdi.RunProbes, - "state": rdi.State, - "threshold": rdi.Threshold, - "weight": rdi.Weight, - } - result = append(result, r) - } - return result -} - -// makeSetFromRdatas encodes an array of Rdata into a -// *schema.Set in the appropriate structure for 
the schema -func makeSetFromRdata(rds []string, rdis []udnssdk.SBRDataInfo) *schema.Set { - s := &schema.Set{F: hashRdatas} - rs := zipRData(rds, rdis) - for _, r := range rs { - s.Add(r) - } - return s -} diff --git a/builtin/providers/ultradns/resource_ultradns_tcpool_test.go b/builtin/providers/ultradns/resource_ultradns_tcpool_test.go deleted file mode 100644 index 72c7e2a42..000000000 --- a/builtin/providers/ultradns/resource_ultradns_tcpool_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package ultradns - -import ( - "fmt" - "testing" - - "github.com/Ensighten/udnssdk" - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccUltradnsTcpool(t *testing.T) { - var record udnssdk.RRSet - domain := "ultradns.phinze.com" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccTcpoolCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf(testCfgTcpoolMinimal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_tcpool.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "name", "test-tcpool-minimal"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "ttl", "300"), - - // hashRdatas(): 10.6.0.1 -> 2847814707 - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.host", "10.6.0.1"), - // Defaults - resource.TestCheckResourceAttr("ultradns_tcpool.it", "act_on_probes", "true"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "description", "Minimal TC Pool"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "max_to_lb", "0"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "run_probes", "true"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.failover_delay", "0"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", 
"rdata.2847814707.priority", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.run_probes", "true"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.state", "NORMAL"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.threshold", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2847814707.weight", "2"), - // Generated - resource.TestCheckResourceAttr("ultradns_tcpool.it", "id", "test-tcpool-minimal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "hostname", "test-tcpool-minimal.ultradns.phinze.com."), - ), - }, - resource.TestStep{ - Config: fmt.Sprintf(testCfgTcpoolMaximal, domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckUltradnsRecordExists("ultradns_tcpool.it", &record), - // Specified - resource.TestCheckResourceAttr("ultradns_tcpool.it", "zone", domain), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "name", "test-tcpool-maximal"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "ttl", "300"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "description", "traffic controller pool with all settings tuned"), - - resource.TestCheckResourceAttr("ultradns_tcpool.it", "act_on_probes", "false"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "max_to_lb", "2"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "run_probes", "false"), - - // hashRdatas(): 10.6.1.1 -> 2826722820 - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.host", "10.6.1.1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.failover_delay", "30"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.priority", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.run_probes", "true"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.state", "ACTIVE"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", 
"rdata.2826722820.threshold", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.2826722820.weight", "2"), - - // hashRdatas(): 10.6.1.2 -> 829755326 - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.host", "10.6.1.2"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.failover_delay", "30"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.priority", "2"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.run_probes", "true"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.state", "INACTIVE"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.threshold", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.829755326.weight", "4"), - - // hashRdatas(): 10.6.1.3 -> 1181892392 - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.host", "10.6.1.3"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.failover_delay", "30"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.priority", "3"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.run_probes", "false"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.state", "NORMAL"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.threshold", "1"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "rdata.1181892392.weight", "8"), - // Generated - resource.TestCheckResourceAttr("ultradns_tcpool.it", "id", "test-tcpool-maximal.ultradns.phinze.com"), - resource.TestCheckResourceAttr("ultradns_tcpool.it", "hostname", "test-tcpool-maximal.ultradns.phinze.com."), - ), - }, - }, - }) -} - -const testCfgTcpoolMinimal = ` -resource "ultradns_tcpool" "it" { - zone = "%s" - name = "test-tcpool-minimal" - ttl = 300 - description = "Minimal TC Pool" - - rdata { - host = "10.6.0.1" - } -} -` - -const testCfgTcpoolMaximal = ` -resource 
"ultradns_tcpool" "it" { - zone = "%s" - name = "test-tcpool-maximal" - ttl = 300 - description = "traffic controller pool with all settings tuned" - - act_on_probes = false - max_to_lb = 2 - run_probes = false - - rdata { - host = "10.6.1.1" - - failover_delay = 30 - priority = 1 - run_probes = true - state = "ACTIVE" - threshold = 1 - weight = 2 - } - - rdata { - host = "10.6.1.2" - - failover_delay = 30 - priority = 2 - run_probes = true - state = "INACTIVE" - threshold = 1 - weight = 4 - } - - rdata { - host = "10.6.1.3" - - failover_delay = 30 - priority = 3 - run_probes = false - state = "NORMAL" - threshold = 1 - weight = 8 - } - - backup_record_rdata = "10.6.1.4" - backup_record_failover_delay = 30 -} -` diff --git a/builtin/providers/vault/data_source_generic_secret.go b/builtin/providers/vault/data_source_generic_secret.go deleted file mode 100644 index 34f353707..000000000 --- a/builtin/providers/vault/data_source_generic_secret.go +++ /dev/null @@ -1,109 +0,0 @@ -package vault - -import ( - "encoding/json" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/hashicorp/vault/api" -) - -func genericSecretDataSource() *schema.Resource { - return &schema.Resource{ - Read: genericSecretDataSourceRead, - - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: "Full path from which a secret will be read.", - }, - - "data_json": { - Type: schema.TypeString, - Computed: true, - Description: "JSON-encoded secret data read from Vault.", - }, - - "data": { - Type: schema.TypeMap, - Computed: true, - Description: "Map of strings read from Vault.", - }, - - "lease_id": { - Type: schema.TypeString, - Computed: true, - Description: "Lease identifier assigned by vault.", - }, - - "lease_duration": { - Type: schema.TypeInt, - Computed: true, - Description: "Lease duration in seconds relative to the time in lease_start_time.", - }, - - "lease_start_time": { - Type: 
schema.TypeString, - Computed: true, - Description: "Time at which the lease was read, using the clock of the system where Terraform was running", - }, - - "lease_renewable": { - Type: schema.TypeBool, - Computed: true, - Description: "True if the duration of this lease can be extended through renewal.", - }, - }, - } -} - -func genericSecretDataSourceRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - path := d.Get("path").(string) - - log.Printf("[DEBUG] Reading %s from Vault", path) - secret, err := client.Logical().Read(path) - if err != nil { - return fmt.Errorf("error reading from Vault: %s", err) - } - if secret == nil { - return fmt.Errorf("No secret found at %q", path) - } - - d.SetId(secret.RequestID) - - // Ignoring error because this value came from JSON in the - // first place so no reason why it should fail to re-encode. - jsonDataBytes, _ := json.Marshal(secret.Data) - d.Set("data_json", string(jsonDataBytes)) - - // Since our "data" map can only contain string values, we - // will take strings from Data and write them in as-is, - // and write everything else in as a JSON serialization of - // whatever value we get so that complex types can be - // passed around and processed elsewhere if desired. - dataMap := map[string]string{} - for k, v := range secret.Data { - if vs, ok := v.(string); ok { - dataMap[k] = vs - } else { - // Again ignoring error because we know this value - // came from JSON in the first place and so must be valid. 
- vBytes, _ := json.Marshal(v) - dataMap[k] = string(vBytes) - } - } - d.Set("data", dataMap) - - d.Set("lease_id", secret.LeaseID) - d.Set("lease_duration", secret.LeaseDuration) - d.Set("lease_start_time", time.Now().Format("RFC3339")) - d.Set("lease_renewable", secret.Renewable) - - return nil -} diff --git a/builtin/providers/vault/data_source_generic_secret_test.go b/builtin/providers/vault/data_source_generic_secret_test.go deleted file mode 100644 index 00a5fbb17..000000000 --- a/builtin/providers/vault/data_source_generic_secret_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package vault - -import ( - "fmt" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDataSourceGenericSecret(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - r.TestStep{ - Config: testDataSourceGenericSecret_config, - Check: testDataSourceGenericSecret_check, - }, - }, - }) -} - -var testDataSourceGenericSecret_config = ` - -resource "vault_generic_secret" "test" { - path = "secret/foo" - data_json = < 1 { - return nil, fmt.Errorf("client_auth block may appear only once") - } - - clientAuthCert := "" - clientAuthKey := "" - if len(clientAuthI) == 1 { - clientAuth := clientAuthI[0].(map[string]interface{}) - clientAuthCert = clientAuth["cert_file"].(string) - clientAuthKey = clientAuth["key_file"].(string) - } - - err := config.ConfigureTLS(&api.TLSConfig{ - CACert: d.Get("ca_cert_file").(string), - CAPath: d.Get("ca_cert_dir").(string), - Insecure: d.Get("skip_tls_verify").(bool), - - ClientCert: clientAuthCert, - ClientKey: clientAuthKey, - }) - if err != nil { - return nil, fmt.Errorf("failed to configure TLS for Vault API: %s", err) - } - - client, err := api.NewClient(config) - if err != nil { - return nil, fmt.Errorf("failed to configure Vault API: %s", err) - } - - token := d.Get("token").(string) - if token == "" { - // Use 
the vault CLI's token, if present. - homePath, err := homedir.Dir() - if err != nil { - return nil, fmt.Errorf("Can't find home directory when looking for ~/.vault-token: %s", err) - } - tokenBytes, err := ioutil.ReadFile(homePath + "/.vault-token") - if err != nil { - return nil, fmt.Errorf("No vault token found: %s", err) - } - - token = strings.TrimSpace(string(tokenBytes)) - } - - // In order to enforce our relatively-short lease TTL, we derive a - // temporary child token that inherits all of the policies of the - // token we were given but expires after max_lease_ttl_seconds. - // - // The intent here is that Terraform will need to re-fetch any - // secrets on each run and so we limit the exposure risk of secrets - // that end up stored in the Terraform state, assuming that they are - // credentials that Vault is able to revoke. - // - // Caution is still required with state files since not all secrets - // can explicitly be revoked, and this limited scope won't apply to - // any secrets that are *written* by Terraform to Vault. 
- - client.SetToken(token) - renewable := false - childTokenLease, err := client.Auth().Token().Create(&api.TokenCreateRequest{ - DisplayName: "terraform", - TTL: fmt.Sprintf("%ds", d.Get("max_lease_ttl_seconds").(int)), - ExplicitMaxTTL: fmt.Sprintf("%ds", d.Get("max_lease_ttl_seconds").(int)), - Renewable: &renewable, - }) - if err != nil { - return nil, fmt.Errorf("failed to create limited child token: %s", err) - } - - childToken := childTokenLease.Auth.ClientToken - policies := childTokenLease.Auth.Policies - - log.Printf("[INFO] Using Vault token with the following policies: %s", strings.Join(policies, ", ")) - - client.SetToken(childToken) - - return client, nil -} diff --git a/builtin/providers/vault/provider_test.go b/builtin/providers/vault/provider_test.go deleted file mode 100644 index f26d163e1..000000000 --- a/builtin/providers/vault/provider_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package vault - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// How to run the acceptance tests for this provider: -// -// - Obtain an official Vault release from the Vault website at -// https://vaultproject.io/ and extract the "vault" binary -// somewhere. -// -// - Run the following to start the Vault server in development mode: -// vault server -dev -// -// - Take the "Root Token" value printed by Vault as the server started -// up and set it as the value of the VAULT_TOKEN environment variable -// in a new shell whose current working directory is the root of the -// Terraform repository. -// -// - As directed by the Vault server output, set the VAULT_ADDR environment -// variable. 
e.g.: -// export VAULT_ADDR='http://127.0.0.1:8200' -// -// - Run the Terraform acceptance tests as usual: -// make testacc TEST=./builtin/providers/vault -// -// The tests expect to be run in a fresh, empty Vault and thus do not attempt -// to randomize or otherwise make the generated resource paths unique on -// each run. In case of weird behavior, restart the Vault dev server to -// start over with a fresh Vault. (Remember to reset VAULT_TOKEN.) - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -var testProvider *schema.Provider -var testProviders map[string]terraform.ResourceProvider - -func init() { - testProvider = Provider().(*schema.Provider) - testProviders = map[string]terraform.ResourceProvider{ - "vault": testProvider, - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("VAULT_ADDR"); v == "" { - t.Fatal("VAULT_ADDR must be set for acceptance tests") - } - if v := os.Getenv("VAULT_TOKEN"); v == "" { - t.Fatal("VAULT_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/vault/resource_auth_backend.go b/builtin/providers/vault/resource_auth_backend.go deleted file mode 100644 index 800155040..000000000 --- a/builtin/providers/vault/resource_auth_backend.go +++ /dev/null @@ -1,121 +0,0 @@ -package vault - -import ( - "errors" - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/vault/api" -) - -func authBackendResource() *schema.Resource { - return &schema.Resource{ - Create: authBackendWrite, - Delete: authBackendDelete, - Read: authBackendRead, - - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the auth backend", - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "path to mount the backend. 
This defaults to the type.", - ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) { - value := v.(string) - if strings.HasSuffix(value, "/") { - errs = append(errs, errors.New("cannot write to a path ending in '/'")) - } - return - }, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "The description of the auth backend", - }, - }, - } -} - -func authBackendWrite(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - name := d.Get("type").(string) - desc := d.Get("description").(string) - path := d.Get("path").(string) - - log.Printf("[DEBUG] Writing auth %s to Vault", name) - - var err error - - if path == "" { - path = name - err = d.Set("path", name) - if err != nil { - return fmt.Errorf("unable to set state: %s", err) - } - } - - err = client.Sys().EnableAuth(path, name, desc) - - if err != nil { - return fmt.Errorf("error writing to Vault: %s", err) - } - - d.SetId(name) - - return nil -} - -func authBackendDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - name := d.Id() - - log.Printf("[DEBUG] Deleting auth %s from Vault", name) - - err := client.Sys().DisableAuth(name) - - if err != nil { - return fmt.Errorf("error disabling auth from Vault: %s", err) - } - - return nil -} - -func authBackendRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - name := d.Id() - - auths, err := client.Sys().ListAuth() - - if err != nil { - return fmt.Errorf("error reading from Vault: %s", err) - } - - for path, auth := range auths { - configuredPath := d.Get("path").(string) - - vaultPath := configuredPath + "/" - if auth.Type == name && path == vaultPath { - return nil - } - } - - // If we fell out here then we didn't find our Auth in the list. 
- d.SetId("") - return nil -} diff --git a/builtin/providers/vault/resource_auth_backend_test.go b/builtin/providers/vault/resource_auth_backend_test.go deleted file mode 100644 index 344eafbd5..000000000 --- a/builtin/providers/vault/resource_auth_backend_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package vault - -import ( - "fmt" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/vault/api" -) - -func TestResourceAuth(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - r.TestStep{ - Config: testResourceAuth_initialConfig, - Check: testResourceAuth_initialCheck, - }, - r.TestStep{ - Config: testResourceAuth_updateConfig, - Check: testResourceAuth_updateCheck, - }, - }, - }) -} - -var testResourceAuth_initialConfig = ` - -resource "vault_auth_backend" "test" { - type = "github" -} - -` - -func testResourceAuth_initialCheck(s *terraform.State) error { - resourceState := s.Modules[0].Resources["vault_auth_backend.test"] - if resourceState == nil { - return fmt.Errorf("resource not found in state") - } - - instanceState := resourceState.Primary - if instanceState == nil { - return fmt.Errorf("resource has no primary instance") - } - - name := instanceState.ID - - if name != instanceState.Attributes["type"] { - return fmt.Errorf("id doesn't match name") - } - - if name != "github" { - return fmt.Errorf("unexpected auth name %s", name) - } - - client := testProvider.Meta().(*api.Client) - auths, err := client.Sys().ListAuth() - - if err != nil { - return fmt.Errorf("error reading back auth: %s", err) - } - - found := false - for _, auth := range auths { - if auth.Type == name { - found = true - break - } - } - - if !found { - return fmt.Errorf("could not find auth backend %s in %+v", name, auths) - } - - return nil -} - -var testResourceAuth_updateConfig = ` - -resource "vault_auth_backend" "test" { - type 
= "ldap" -} - -` - -func testResourceAuth_updateCheck(s *terraform.State) error { - resourceState := s.Modules[0].Resources["vault_auth_backend.test"] - if resourceState == nil { - return fmt.Errorf("resource not found in state") - } - - instanceState := resourceState.Primary - if instanceState == nil { - return fmt.Errorf("resource has no primary instance") - } - - name := instanceState.ID - - if name != instanceState.Attributes["type"] { - return fmt.Errorf("id doesn't match name") - } - - if name != "ldap" { - return fmt.Errorf("unexpected auth name") - } - - client := testProvider.Meta().(*api.Client) - auths, err := client.Sys().ListAuth() - - if err != nil { - return fmt.Errorf("error reading back auth: %s", err) - } - - found := false - for _, auth := range auths { - if auth.Type == name { - found = true - break - } - } - - if !found { - return fmt.Errorf("could not find auth backend %s in %+v", name, auths) - } - - return nil -} diff --git a/builtin/providers/vault/resource_generic_secret.go b/builtin/providers/vault/resource_generic_secret.go deleted file mode 100644 index cbb8deda0..000000000 --- a/builtin/providers/vault/resource_generic_secret.go +++ /dev/null @@ -1,139 +0,0 @@ -package vault - -import ( - "encoding/json" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - - "github.com/hashicorp/vault/api" -) - -func genericSecretResource() *schema.Resource { - return &schema.Resource{ - Create: genericSecretResourceWrite, - Update: genericSecretResourceWrite, - Delete: genericSecretResourceDelete, - Read: genericSecretResourceRead, - - Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Full path where the generic secret will be written.", - }, - - // Data is passed as JSON so that an arbitrary structure is - // possible, rather than forcing e.g. all values to be strings. 
- "data_json": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "JSON-encoded secret data to write.", - // We rebuild the attached JSON string to a simple singleline - // string. This makes terraform not want to change when an extra - // space is included in the JSON string. It is also necesarry - // when allow_read is true for comparing values. - StateFunc: NormalizeDataJSON, - ValidateFunc: ValidateDataJSON, - }, - - "allow_read": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "True if the provided token is allowed to read the secret from vault", - }, - }, - } -} - -func ValidateDataJSON(configI interface{}, k string) ([]string, []error) { - dataJSON := configI.(string) - dataMap := map[string]interface{}{} - err := json.Unmarshal([]byte(dataJSON), &dataMap) - if err != nil { - return nil, []error{err} - } - return nil, nil -} - -func NormalizeDataJSON(configI interface{}) string { - dataJSON := configI.(string) - - dataMap := map[string]interface{}{} - err := json.Unmarshal([]byte(dataJSON), &dataMap) - if err != nil { - // The validate function should've taken care of this. - return "" - } - - ret, err := json.Marshal(dataMap) - if err != nil { - // Should never happen. 
- return dataJSON - } - - return string(ret) -} - -func genericSecretResourceWrite(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - path := d.Get("path").(string) - - var data map[string]interface{} - err := json.Unmarshal([]byte(d.Get("data_json").(string)), &data) - if err != nil { - return fmt.Errorf("data_json %#v syntax error: %s", d.Get("data_json"), err) - } - - log.Printf("[DEBUG] Writing generic Vault secret to %s", path) - _, err = client.Logical().Write(path, data) - if err != nil { - return fmt.Errorf("error writing to Vault: %s", err) - } - - d.SetId(path) - - return nil -} - -func genericSecretResourceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*api.Client) - - path := d.Id() - - log.Printf("[DEBUG] Deleting generic Vault from %s", path) - _, err := client.Logical().Delete(path) - if err != nil { - return fmt.Errorf("error deleting from Vault: %s", err) - } - - return nil -} - -func genericSecretResourceRead(d *schema.ResourceData, meta interface{}) error { - allowed_to_read := d.Get("allow_read").(bool) - path := d.Get("path").(string) - - if allowed_to_read { - client := meta.(*api.Client) - - log.Printf("[DEBUG] Reading %s from Vault", path) - secret, err := client.Logical().Read(path) - if err != nil { - return fmt.Errorf("error reading from Vault: %s", err) - } - - // Ignoring error because this value came from JSON in the - // first place so no reason why it should fail to re-encode. 
- jsonDataBytes, _ := json.Marshal(secret.Data) - d.Set("data_json", string(jsonDataBytes)) - } - - d.SetId(path) - log.Printf("[WARN] vault_generic_secret does not automatically refresh if allow_read is set to false") - return nil -} diff --git a/builtin/providers/vault/resource_generic_secret_test.go b/builtin/providers/vault/resource_generic_secret_test.go deleted file mode 100644 index 5acaac9b1..000000000 --- a/builtin/providers/vault/resource_generic_secret_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package vault - -import ( - "fmt" - "testing" - - r "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - - "github.com/hashicorp/vault/api" -) - -func TestResourceGenericSecret(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - PreCheck: func() { testAccPreCheck(t) }, - Steps: []r.TestStep{ - r.TestStep{ - Config: testResourceGenericSecret_initialConfig, - Check: testResourceGenericSecret_initialCheck, - }, - r.TestStep{ - Config: testResourceGenericSecret_updateConfig, - Check: testResourceGenericSecret_updateCheck, - }, - }, - }) -} - -var testResourceGenericSecret_initialConfig = ` - -resource "vault_generic_secret" "test" { - path = "secret/foo" - allow_read = true - data_json = < Access Control -> Roles -Click on "+" icon (Create role action), give it appropraite name and select following privileges: - * Datastore - - Allocate space - - Browse datastore - - Low level file operations - - Remove file - - Update virtual machine files - - Update virtual machine metadata - - * Folder (all) - - Create folder - - Delete folder - - Move folder - - Rename folder - - * Network - - Assign network - - * Resource - - Apply recommendation - - Assign virtual machine to resource pool - - * Virtual Machine - - Configuration (all) - for now - - Guest Operations (all) - for now - - Interaction (all) - - Inventory (all) - - Provisioning (all) - -These settings were tested with [vSphere 
6.0](https://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.vsphere.security.doc%2FGUID-18071E9A-EED1-4968-8D51-E0B4F526FDA3.html) and [vSphere 5.5](https://pubs.vmware.com/vsphere-55/index.jsp?topic=%2Fcom.vmware.vsphere.security.doc%2FGUID-18071E9A-EED1-4968-8D51-E0B4F526FDA3.html). For additional information on roles and permissions, please refer to official VMware documentation. - -This section is a work in progress and additional contributions are more than welcome. - diff --git a/builtin/providers/vsphere/config.go b/builtin/providers/vsphere/config.go deleted file mode 100644 index 759268bda..000000000 --- a/builtin/providers/vsphere/config.go +++ /dev/null @@ -1,85 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/vim25/debug" - "golang.org/x/net/context" -) - -type Config struct { - User string - Password string - VSphereServer string - InsecureFlag bool - Debug bool - DebugPath string - DebugPathRun string -} - -// Client() returns a new client for accessing VMWare vSphere. -func (c *Config) Client() (*govmomi.Client, error) { - u, err := url.Parse("https://" + c.VSphereServer + "/sdk") - if err != nil { - return nil, fmt.Errorf("Error parse url: %s", err) - } - - u.User = url.UserPassword(c.User, c.Password) - - err = c.EnableDebug() - if err != nil { - return nil, fmt.Errorf("Error setting up client debug: %s", err) - } - - client, err := govmomi.NewClient(context.TODO(), u, c.InsecureFlag) - if err != nil { - return nil, fmt.Errorf("Error setting up client: %s", err) - } - - log.Printf("[INFO] VMWare vSphere Client configured for URL: %s", c.VSphereServer) - - return client, nil -} - -func (c *Config) EnableDebug() error { - if !c.Debug { - return nil - } - - // Base path for storing debug logs. 
- r := c.DebugPath - if r == "" { - r = filepath.Join(os.Getenv("HOME"), ".govmomi") - } - r = filepath.Join(r, "debug") - - // Path for this particular run. - run := c.DebugPathRun - if run == "" { - now := time.Now().Format("2006-01-02T15-04-05.999999999") - r = filepath.Join(r, now) - } else { - // reuse the same path - r = filepath.Join(r, run) - _ = os.RemoveAll(r) - } - - err := os.MkdirAll(r, 0700) - if err != nil { - log.Printf("[ERROR] Client debug setup failed: %v", err) - return err - } - - p := debug.FileProvider{ - Path: r, - } - - debug.SetProvider(&p) - return nil -} diff --git a/builtin/providers/vsphere/provider.go b/builtin/providers/vsphere/provider.go deleted file mode 100644 index be46b50d6..000000000 --- a/builtin/providers/vsphere/provider.go +++ /dev/null @@ -1,103 +0,0 @@ -package vsphere - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -// Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "user": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_USER", nil), - Description: "The user name for vSphere API operations.", - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_PASSWORD", nil), - Description: "The user password for vSphere API operations.", - }, - - "vsphere_server": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_SERVER", nil), - Description: "The vSphere Server name for vSphere API operations.", - }, - "allow_unverified_ssl": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_ALLOW_UNVERIFIED_SSL", false), - Description: "If set, VMware vSphere client will permit unverifiable SSL certificates.", - }, - 
"vcenter_server": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VCENTER", nil), - Deprecated: "This field has been renamed to vsphere_server.", - }, - "client_debug": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG", false), - Description: "govomomi debug", - }, - "client_debug_path_run": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG_PATH_RUN", ""), - Description: "govomomi debug path for a single run", - }, - "client_debug_path": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("VSPHERE_CLIENT_DEBUG_PATH", ""), - Description: "govomomi debug path for debug", - }, - }, - - ResourcesMap: map[string]*schema.Resource{ - "vsphere_file": resourceVSphereFile(), - "vsphere_folder": resourceVSphereFolder(), - "vsphere_virtual_disk": resourceVSphereVirtualDisk(), - "vsphere_virtual_machine": resourceVSphereVirtualMachine(), - }, - - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - // Handle backcompat support for vcenter_server; once that is removed, - // vsphere_server can just become a Required field that is referenced inline - // in Config below. 
- server := d.Get("vsphere_server").(string) - - if server == "" { - server = d.Get("vcenter_server").(string) - } - - if server == "" { - return nil, fmt.Errorf( - "One of vsphere_server or [deprecated] vcenter_server must be provided.") - } - - config := Config{ - User: d.Get("user").(string), - Password: d.Get("password").(string), - InsecureFlag: d.Get("allow_unverified_ssl").(bool), - VSphereServer: server, - Debug: d.Get("client_debug").(bool), - DebugPathRun: d.Get("client_debug_path_run").(string), - DebugPath: d.Get("client_debug_path").(string), - } - - return config.Client() -} diff --git a/builtin/providers/vsphere/provider_test.go b/builtin/providers/vsphere/provider_test.go deleted file mode 100644 index 313c4ef34..000000000 --- a/builtin/providers/vsphere/provider_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package vsphere - -import ( - "os" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "vsphere": testAccProvider, - } -} - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } - -} - -func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("VSPHERE_USER"); v == "" { - t.Fatal("VSPHERE_USER must be set for acceptance tests") - } - - if v := os.Getenv("VSPHERE_PASSWORD"); v == "" { - t.Fatal("VSPHERE_PASSWORD must be set for acceptance tests") - } - - if v := os.Getenv("VSPHERE_SERVER"); v == "" { - t.Fatal("VSPHERE_SERVER must be set for acceptance tests") - } -} diff --git a/builtin/providers/vsphere/resource_vsphere_file.go b/builtin/providers/vsphere/resource_vsphere_file.go deleted file 
mode 100644 index c8afe05d9..000000000 --- a/builtin/providers/vsphere/resource_vsphere_file.go +++ /dev/null @@ -1,405 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/vim25/soap" - "golang.org/x/net/context" -) - -type file struct { - sourceDatacenter string - datacenter string - sourceDatastore string - datastore string - sourceFile string - destinationFile string - createDirectories bool - copyFile bool -} - -func resourceVSphereFile() *schema.Resource { - return &schema.Resource{ - Create: resourceVSphereFileCreate, - Read: resourceVSphereFileRead, - Update: resourceVSphereFileUpdate, - Delete: resourceVSphereFileDelete, - - Schema: map[string]*schema.Schema{ - "datacenter": { - Type: schema.TypeString, - Optional: true, - }, - - "source_datacenter": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "datastore": { - Type: schema.TypeString, - Required: true, - }, - - "source_datastore": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "source_file": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "destination_file": { - Type: schema.TypeString, - Required: true, - }, - - "create_directories": { - Type: schema.TypeBool, - Optional: true, - }, - }, - } -} - -func resourceVSphereFileCreate(d *schema.ResourceData, meta interface{}) error { - - log.Printf("[DEBUG] creating file: %#v", d) - client := meta.(*govmomi.Client) - - f := file{} - - if v, ok := d.GetOk("source_datacenter"); ok { - f.sourceDatacenter = v.(string) - f.copyFile = true - } - - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - if v, ok := d.GetOk("source_datastore"); ok { - f.sourceDatastore = v.(string) - f.copyFile = true - } - - if v, ok := d.GetOk("datastore"); ok { - f.datastore = v.(string) - } 
else { - return fmt.Errorf("datastore argument is required") - } - - if v, ok := d.GetOk("source_file"); ok { - f.sourceFile = v.(string) - } else { - return fmt.Errorf("source_file argument is required") - } - - if v, ok := d.GetOk("destination_file"); ok { - f.destinationFile = v.(string) - } else { - return fmt.Errorf("destination_file argument is required") - } - - if v, ok := d.GetOk("create_directories"); ok { - f.createDirectories = v.(bool) - } - - err := createFile(client, &f) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("[%v] %v/%v", f.datastore, f.datacenter, f.destinationFile)) - log.Printf("[INFO] Created file: %s", f.destinationFile) - - return resourceVSphereFileRead(d, meta) -} - -func createFile(client *govmomi.Client, f *file) error { - - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), f.datacenter) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, f.datastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - - if f.copyFile { - // Copying file from withing vSphere - source_dc, err := finder.Datacenter(context.TODO(), f.sourceDatacenter) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - - source_ds, err := getDatastore(finder, f.sourceDatastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - - fm := object.NewFileManager(client.Client) - if f.createDirectories { - directoryPathIndex := strings.LastIndex(f.destinationFile, "/") - path := f.destinationFile[0:directoryPathIndex] - err = fm.MakeDirectory(context.TODO(), ds.Path(path), dc, true) - if err != nil { - return fmt.Errorf("error %s", err) - } - } - task, err := fm.CopyDatastoreFile(context.TODO(), source_ds.Path(f.sourceFile), source_dc, ds.Path(f.destinationFile), dc, true) - - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = 
task.WaitForResult(context.TODO(), nil) - if err != nil { - return fmt.Errorf("error %s", err) - } - - } else { - // Uploading file to vSphere - dsurl, err := ds.URL(context.TODO(), dc, f.destinationFile) - if err != nil { - return fmt.Errorf("error %s", err) - } - - p := soap.DefaultUpload - err = client.Client.UploadFile(f.sourceFile, dsurl, &p) - if err != nil { - return fmt.Errorf("error %s", err) - } - } - - return nil -} - -func resourceVSphereFileRead(d *schema.ResourceData, meta interface{}) error { - - log.Printf("[DEBUG] reading file: %#v", d) - f := file{} - - if v, ok := d.GetOk("source_datacenter"); ok { - f.sourceDatacenter = v.(string) - } - - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - if v, ok := d.GetOk("source_datastore"); ok { - f.sourceDatastore = v.(string) - } - - if v, ok := d.GetOk("datastore"); ok { - f.datastore = v.(string) - } else { - return fmt.Errorf("datastore argument is required") - } - - if v, ok := d.GetOk("source_file"); ok { - f.sourceFile = v.(string) - } else { - return fmt.Errorf("source_file argument is required") - } - - if v, ok := d.GetOk("destination_file"); ok { - f.destinationFile = v.(string) - } else { - return fmt.Errorf("destination_file argument is required") - } - - client := meta.(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), f.datacenter) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, f.datastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = ds.Stat(context.TODO(), f.destinationFile) - if err != nil { - log.Printf("[DEBUG] resourceVSphereFileRead - stat failed on: %v", f.destinationFile) - d.SetId("") - - _, ok := err.(object.DatastoreNoSuchFileError) - if !ok { - return err - } - } - - return nil -} - -func resourceVSphereFileUpdate(d *schema.ResourceData, meta interface{}) error { - - 
log.Printf("[DEBUG] updating file: %#v", d) - - if d.HasChange("destination_file") || d.HasChange("datacenter") || d.HasChange("datastore") { - // File needs to be moved, get old and new destination changes - var oldDataceneter, newDatacenter, oldDatastore, newDatastore, oldDestinationFile, newDestinationFile string - if d.HasChange("datacenter") { - tmpOldDataceneter, tmpNewDatacenter := d.GetChange("datacenter") - oldDataceneter = tmpOldDataceneter.(string) - newDatacenter = tmpNewDatacenter.(string) - } else { - if v, ok := d.GetOk("datacenter"); ok { - oldDataceneter = v.(string) - newDatacenter = oldDataceneter - } - } - if d.HasChange("datastore") { - tmpOldDatastore, tmpNewDatastore := d.GetChange("datastore") - oldDatastore = tmpOldDatastore.(string) - newDatastore = tmpNewDatastore.(string) - } else { - oldDatastore = d.Get("datastore").(string) - newDatastore = oldDatastore - } - if d.HasChange("destination_file") { - tmpOldDestinationFile, tmpNewDestinationFile := d.GetChange("destination_file") - oldDestinationFile = tmpOldDestinationFile.(string) - newDestinationFile = tmpNewDestinationFile.(string) - } else { - oldDestinationFile = d.Get("destination_file").(string) - newDestinationFile = oldDestinationFile - } - - // Get old and new dataceter and datastore - client := meta.(*govmomi.Client) - dcOld, err := getDatacenter(client, oldDataceneter) - if err != nil { - return err - } - dcNew, err := getDatacenter(client, newDatacenter) - if err != nil { - return err - } - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dcOld) - dsOld, err := getDatastore(finder, oldDatastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dcNew) - dsNew, err := getDatastore(finder, newDatastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - - // Move file between old/new dataceter, datastore and path (destination_file) - fm := object.NewFileManager(client.Client) - task, err := 
fm.MoveDatastoreFile(context.TODO(), dsOld.Path(oldDestinationFile), dcOld, dsNew.Path(newDestinationFile), dcNew, true) - if err != nil { - return err - } - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - return err - } - } - - return nil -} - -func resourceVSphereFileDelete(d *schema.ResourceData, meta interface{}) error { - - log.Printf("[DEBUG] deleting file: %#v", d) - f := file{} - - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - if v, ok := d.GetOk("datastore"); ok { - f.datastore = v.(string) - } else { - return fmt.Errorf("datastore argument is required") - } - - if v, ok := d.GetOk("source_file"); ok { - f.sourceFile = v.(string) - } else { - return fmt.Errorf("source_file argument is required") - } - - if v, ok := d.GetOk("destination_file"); ok { - f.destinationFile = v.(string) - } else { - return fmt.Errorf("destination_file argument is required") - } - - client := meta.(*govmomi.Client) - - err := deleteFile(client, &f) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func deleteFile(client *govmomi.Client, f *file) error { - - dc, err := getDatacenter(client, f.datacenter) - if err != nil { - return err - } - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, f.datastore) - if err != nil { - return fmt.Errorf("error %s", err) - } - - fm := object.NewFileManager(client.Client) - task, err := fm.DeleteDatastoreFile(context.TODO(), ds.Path(f.destinationFile), dc) - if err != nil { - return err - } - - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - return err - } - return nil -} - -// getDatastore gets datastore object -func getDatastore(f *find.Finder, ds string) (*object.Datastore, error) { - - if ds != "" { - dso, err := f.Datastore(context.TODO(), ds) - return dso, err - } else { - dso, err := f.DefaultDatastore(context.TODO()) - return dso, err - } -} diff --git 
a/builtin/providers/vsphere/resource_vsphere_file_test.go b/builtin/providers/vsphere/resource_vsphere_file_test.go deleted file mode 100644 index 7e5aa44e7..000000000 --- a/builtin/providers/vsphere/resource_vsphere_file_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package vsphere - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "golang.org/x/net/context" -) - -// Basic file creation (upload to vSphere) -func TestAccVSphereFile_basic(t *testing.T) { - testVmdkFileData := []byte("# Disk DescriptorFile\n") - testVmdkFile := "/tmp/tf_test.vmdk" - err := ioutil.WriteFile(testVmdkFile, testVmdkFileData, 0644) - if err != nil { - t.Errorf("error %s", err) - return - } - - datacenter := os.Getenv("VSPHERE_DATACENTER") - datastore := os.Getenv("VSPHERE_DATASTORE") - testMethod := "basic" - resourceName := "vsphere_file." 
+ testMethod - destinationFile := "tf_file_test.vmdk" - sourceFile := testVmdkFile - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFileDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckVSphereFileConfig, - testMethod, - datacenter, - datastore, - sourceFile, - destinationFile, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists(resourceName, destinationFile, true), - resource.TestCheckResourceAttr(resourceName, "destination_file", destinationFile), - ), - }, - }, - }) - os.Remove(testVmdkFile) -} - -// Basic file copy within vSphere -func TestAccVSphereFile_basicUploadAndCopy(t *testing.T) { - testVmdkFileData := []byte("# Disk DescriptorFile\n") - sourceFile := "/tmp/tf_test.vmdk" - uploadResourceName := "myfileupload" - copyResourceName := "myfilecopy" - sourceDatacenter := os.Getenv("VSPHERE_DATACENTER") - datacenter := sourceDatacenter - sourceDatastore := os.Getenv("VSPHERE_DATASTORE") - datastore := sourceDatastore - destinationFile := "tf_file_test.vmdk" - sourceFileCopy := "${vsphere_file." 
+ uploadResourceName + ".destination_file}" - destinationFileCopy := "tf_file_test_copy.vmdk" - - err := ioutil.WriteFile(sourceFile, testVmdkFileData, 0644) - if err != nil { - t.Errorf("error %s", err) - return - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFileDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckVSphereFileCopyConfig, - uploadResourceName, - datacenter, - datastore, - sourceFile, - destinationFile, - copyResourceName, - datacenter, - datacenter, - datastore, - datastore, - sourceFileCopy, - destinationFileCopy, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), - testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, true), - resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), - resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileCopy), - ), - }, - }, - }) - os.Remove(sourceFile) -} - -// file creation followed by a rename of file (update) -func TestAccVSphereFile_renamePostCreation(t *testing.T) { - testVmdkFileData := []byte("# Disk DescriptorFile\n") - testVmdkFile := "/tmp/tf_test.vmdk" - err := ioutil.WriteFile(testVmdkFile, testVmdkFileData, 0644) - if err != nil { - t.Errorf("error %s", err) - return - } - - datacenter := os.Getenv("VSPHERE_DATACENTER") - datastore := os.Getenv("VSPHERE_DATASTORE") - testMethod := "create_upgrade" - resourceName := "vsphere_file." 
+ testMethod - destinationFile := "tf_test_file.vmdk" - destinationFileMoved := "tf_test_file_moved.vmdk" - sourceFile := testVmdkFile - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFileDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckVSphereFileConfig, - testMethod, - datacenter, - datastore, - sourceFile, - destinationFile, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists(resourceName, destinationFile, true), - testAccCheckVSphereFileExists(resourceName, destinationFileMoved, false), - resource.TestCheckResourceAttr(resourceName, "destination_file", destinationFile), - ), - }, - { - Config: fmt.Sprintf( - testAccCheckVSphereFileConfig, - testMethod, - datacenter, - datastore, - sourceFile, - destinationFileMoved, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists(resourceName, destinationFile, false), - testAccCheckVSphereFileExists(resourceName, destinationFileMoved, true), - resource.TestCheckResourceAttr(resourceName, "destination_file", destinationFileMoved), - ), - }, - }, - }) - os.Remove(testVmdkFile) -} - -// file upload, then copy, finally the copy is renamed (moved) (update) -func TestAccVSphereFile_uploadAndCopyAndUpdate(t *testing.T) { - testVmdkFileData := []byte("# Disk DescriptorFile\n") - sourceFile := "/tmp/tf_test.vmdk" - uploadResourceName := "myfileupload" - copyResourceName := "myfilecopy" - sourceDatacenter := os.Getenv("VSPHERE_DATACENTER") - datacenter := sourceDatacenter - sourceDatastore := os.Getenv("VSPHERE_DATASTORE") - datastore := sourceDatastore - destinationFile := "tf_file_test.vmdk" - sourceFileCopy := "${vsphere_file." 
+ uploadResourceName + ".destination_file}" - destinationFileCopy := "tf_file_test_copy.vmdk" - destinationFileMoved := "tf_test_file_moved.vmdk" - - err := ioutil.WriteFile(sourceFile, testVmdkFileData, 0644) - if err != nil { - t.Errorf("error %s", err) - return - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFileDestroy, - Steps: []resource.TestStep{ - { - Config: fmt.Sprintf( - testAccCheckVSphereFileCopyConfig, - uploadResourceName, - datacenter, - datastore, - sourceFile, - destinationFile, - copyResourceName, - datacenter, - datacenter, - datastore, - datastore, - sourceFileCopy, - destinationFileCopy, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), - testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, true), - resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), - resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileCopy), - ), - }, - { - Config: fmt.Sprintf( - testAccCheckVSphereFileCopyConfig, - uploadResourceName, - datacenter, - datastore, - sourceFile, - destinationFile, - copyResourceName, - datacenter, - datacenter, - datastore, - datastore, - sourceFileCopy, - destinationFileMoved, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), - testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, false), - testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileMoved, true), - resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), - resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileMoved), - ), - }, - }, 
- }) - os.Remove(sourceFile) -} - -func testAccCheckVSphereFileDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "vsphere_file" { - continue - } - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, rs.Primary.Attributes["datastore"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = ds.Stat(context.TODO(), rs.Primary.Attributes["destination_file"]) - if err != nil { - switch e := err.(type) { - case object.DatastoreNoSuchFileError: - fmt.Printf("Expected error received: %s\n", e.Error()) - return nil - default: - return err - } - } else { - return fmt.Errorf("File %s still exists", rs.Primary.Attributes["destination_file"]) - } - } - - return nil -} - -func testAccCheckVSphereFileExists(n string, df string, exists bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, rs.Primary.Attributes["datastore"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = ds.Stat(context.TODO(), df) - if err != nil { - switch e := err.(type) { - case object.DatastoreNoSuchFileError: - if exists { - return fmt.Errorf("File does not exist: %s", e.Error()) - } - fmt.Printf("Expected error received: %s\n", e.Error()) - return nil - 
default: - return err - } - } - return nil - } -} - -const testAccCheckVSphereFileConfig = ` -resource "vsphere_file" "%s" { - datacenter = "%s" - datastore = "%s" - source_file = "%s" - destination_file = "%s" -} -` -const testAccCheckVSphereFileCopyConfig = ` -resource "vsphere_file" "%s" { - datacenter = "%s" - datastore = "%s" - source_file = "%s" - destination_file = "%s" -} -resource "vsphere_file" "%s" { - source_datacenter = "%s" - datacenter = "%s" - source_datastore = "%s" - datastore = "%s" - source_file = "%s" - destination_file = "%s" -} -` diff --git a/builtin/providers/vsphere/resource_vsphere_folder.go b/builtin/providers/vsphere/resource_vsphere_folder.go deleted file mode 100644 index 1a39eb901..000000000 --- a/builtin/providers/vsphere/resource_vsphere_folder.go +++ /dev/null @@ -1,237 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "path" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "golang.org/x/net/context" -) - -type folder struct { - datacenter string - existingPath string - path string -} - -func resourceVSphereFolder() *schema.Resource { - return &schema.Resource{ - Create: resourceVSphereFolderCreate, - Read: resourceVSphereFolderRead, - Delete: resourceVSphereFolderDelete, - - Schema: map[string]*schema.Schema{ - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "existing_path": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceVSphereFolderCreate(d *schema.ResourceData, meta interface{}) error { - - client := meta.(*govmomi.Client) - - f := folder{ - path: strings.TrimRight(d.Get("path").(string), "/"), - } - - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - err := createFolder(client, &f) - if 
err != nil { - return err - } - - d.Set("existing_path", f.existingPath) - d.SetId(fmt.Sprintf("%v/%v", f.datacenter, f.path)) - log.Printf("[INFO] Created folder: %s", f.path) - - return resourceVSphereFolderRead(d, meta) -} - -func createFolder(client *govmomi.Client, f *folder) error { - - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), f.datacenter) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - si := object.NewSearchIndex(client.Client) - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - folder := dcFolders.VmFolder - var workingPath string - - pathParts := strings.Split(f.path, "/") - for _, pathPart := range pathParts { - if len(workingPath) > 0 { - workingPath += "/" - } - workingPath += pathPart - subfolder, err := si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, workingPath)) - - if err != nil { - return fmt.Errorf("error %s", err) - } else if subfolder == nil { - log.Printf("[DEBUG] folder not found; creating: %s", workingPath) - folder, err = folder.CreateFolder(context.TODO(), pathPart) - if err != nil { - return fmt.Errorf("Failed to create folder at %s; %s", workingPath, err) - } - } else { - log.Printf("[DEBUG] folder already exists: %s", workingPath) - f.existingPath = workingPath - folder = subfolder.(*object.Folder) - } - } - return nil -} - -func resourceVSphereFolderRead(d *schema.ResourceData, meta interface{}) error { - - log.Printf("[DEBUG] reading folder: %#v", d) - client := meta.(*govmomi.Client) - - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string), - d.Get("path").(string))) 
- - if err != nil { - return err - } - - if folder == nil { - d.SetId("") - } - - return nil -} - -func resourceVSphereFolderDelete(d *schema.ResourceData, meta interface{}) error { - - f := folder{ - path: strings.TrimRight(d.Get("path").(string), "/"), - existingPath: d.Get("existing_path").(string), - } - - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - client := meta.(*govmomi.Client) - - err := deleteFolder(client, &f) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func deleteFolder(client *govmomi.Client, f *folder) error { - dc, err := getDatacenter(client, f.datacenter) - if err != nil { - return err - } - var folder *object.Folder - currentPath := f.path - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - si := object.NewSearchIndex(client.Client) - - folderRef, err := si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, f.path)) - - if err != nil { - return fmt.Errorf("[ERROR] Could not locate folder %s: %v", f.path, err) - } else { - folder = folderRef.(*object.Folder) - } - - log.Printf("[INFO] Deleting empty sub-folders of existing path: %s", f.existingPath) - for currentPath != f.existingPath { - log.Printf("[INFO] Deleting folder: %s", currentPath) - children, err := folder.Children(context.TODO()) - if err != nil { - return err - } - - if len(children) > 0 { - return fmt.Errorf("Folder %s is non-empty and will not be deleted", currentPath) - } else { - log.Printf("[DEBUG] current folder: %#v", folder) - currentPath = path.Dir(currentPath) - if currentPath == "." 
{ - currentPath = "" - } - log.Printf("[INFO] parent path of %s is calculated as %s", f.path, currentPath) - task, err := folder.Destroy(context.TODO()) - if err != nil { - return err - } - err = task.Wait(context.TODO()) - if err != nil { - return err - } - folderRef, err = si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, currentPath)) - - if err != nil { - return err - } else if folderRef != nil { - folder = folderRef.(*object.Folder) - } - } - } - return nil -} - -// getDatacenter gets datacenter object -func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) { - finder := find.NewFinder(c.Client, true) - if dc != "" { - d, err := finder.Datacenter(context.TODO(), dc) - return d, err - } else { - d, err := finder.DefaultDatacenter(context.TODO()) - return d, err - } -} diff --git a/builtin/providers/vsphere/resource_vsphere_folder_test.go b/builtin/providers/vsphere/resource_vsphere_folder_test.go deleted file mode 100644 index 7a8a164ef..000000000 --- a/builtin/providers/vsphere/resource_vsphere_folder_test.go +++ /dev/null @@ -1,276 +0,0 @@ -package vsphere - -import ( - "fmt" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "golang.org/x/net/context" -) - -// Basic top-level folder creation -func TestAccVSphereFolder_basic(t *testing.T) { - var f folder - datacenter := os.Getenv("VSPHERE_DATACENTER") - testMethod := "basic" - resourceName := "vsphere_folder." 
+ testMethod - path := "tf_test_basic" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFolderDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testAccCheckVSphereFolderConfig, - testMethod, - path, - datacenter, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFolderExists(resourceName, &f), - resource.TestCheckResourceAttr( - resourceName, "path", path), - resource.TestCheckResourceAttr( - resourceName, "existing_path", ""), - ), - }, - }, - }) -} - -func TestAccVSphereFolder_nested(t *testing.T) { - - var f folder - datacenter := os.Getenv("VSPHERE_DATACENTER") - testMethod := "nested" - resourceName := "vsphere_folder." + testMethod - path := "tf_test_nested/tf_test_folder" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFolderDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: fmt.Sprintf( - testAccCheckVSphereFolderConfig, - testMethod, - path, - datacenter, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFolderExists(resourceName, &f), - resource.TestCheckResourceAttr( - resourceName, "path", path), - resource.TestCheckResourceAttr( - resourceName, "existing_path", ""), - ), - }, - }, - }) -} - -func TestAccVSphereFolder_dontDeleteExisting(t *testing.T) { - - var f folder - datacenter := os.Getenv("VSPHERE_DATACENTER") - testMethod := "dontDeleteExisting" - resourceName := "vsphere_folder." 
+ testMethod - existingPath := "tf_test_dontDeleteExisting/tf_existing" - path := existingPath + "/tf_nested/tf_test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - assertVSphereFolderExists(datacenter, existingPath), - removeVSphereFolder(datacenter, existingPath, ""), - ), - Steps: []resource.TestStep{ - resource.TestStep{ - PreConfig: func() { - createVSphereFolder(datacenter, existingPath) - }, - Config: fmt.Sprintf( - testAccCheckVSphereFolderConfig, - testMethod, - path, - datacenter, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckVSphereFolderExistingPathExists(resourceName, &f), - resource.TestCheckResourceAttr( - resourceName, "path", path), - resource.TestCheckResourceAttr( - resourceName, "existing_path", existingPath), - ), - }, - }, - }) -} - -func testAccCheckVSphereFolderDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "vsphere_folder" { - continue - } - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - f, err := object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"]) - if f != nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckVSphereFolderExists(n string, f *folder) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*govmomi.Client) - finder := 
find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"]) - - *f = folder{ - path: rs.Primary.Attributes["path"], - } - - return nil - } -} - -func testAccCheckVSphereFolderExistingPathExists(n string, f *folder) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource %s not found in %#v", n, s.RootModule().Resources) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["existing_path"]) - - *f = folder{ - path: rs.Primary.Attributes["path"], - } - - return nil - } -} - -func assertVSphereFolderExists(datacenter string, folder_name string) resource.TestCheckFunc { - - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name)) - if err != nil { - return fmt.Errorf("Error: %s", err) - } else if folder == nil { - return fmt.Errorf("Folder %s does not exist!", folder_name) - } - - return nil - } -} - -func createVSphereFolder(datacenter string, folder_name string) error 
{ - - client := testAccProvider.Meta().(*govmomi.Client) - - f := folder{path: folder_name, datacenter: datacenter} - - folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name)) - if err != nil { - return fmt.Errorf("error %s", err) - } - - if folder == nil { - createFolder(client, &f) - } else { - return fmt.Errorf("Folder %s already exists", folder_name) - } - - return nil -} - -func removeVSphereFolder(datacenter string, folder_name string, existing_path string) resource.TestCheckFunc { - - f := folder{path: folder_name, datacenter: datacenter, existingPath: existing_path} - - return func(s *terraform.State) error { - - client := testAccProvider.Meta().(*govmomi.Client) - // finder := find.NewFinder(client.Client, true) - - folder, _ := object.NewSearchIndex(client.Client).FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name)) - if folder != nil { - deleteFolder(client, &f) - } - - return nil - } -} - -const testAccCheckVSphereFolderConfig = ` -resource "vsphere_folder" "%s" { - path = "%s" - datacenter = "%s" -} -` diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_disk.go b/builtin/providers/vsphere/resource_vsphere_virtual_disk.go deleted file mode 100644 index a17d43d5c..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_disk.go +++ /dev/null @@ -1,342 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - - "errors" - "github.com/hashicorp/terraform/helper/schema" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/vim25/types" - "golang.org/x/net/context" - "path" -) - -type virtualDisk struct { - size int - vmdkPath string - initType string - adapterType string - datacenter string - datastore string -} - -// Define VirtualDisk args -func resourceVSphereVirtualDisk() *schema.Resource { - return &schema.Resource{ - Create: 
resourceVSphereVirtualDiskCreate, - Read: resourceVSphereVirtualDiskRead, - Delete: resourceVSphereVirtualDiskDelete, - - Schema: map[string]*schema.Schema{ - // Size in GB - "size": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, //TODO Can this be optional (resize)? - }, - - "vmdk_path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, //TODO Can this be optional (move)? - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "eagerZeroedThick", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "thin" && value != "eagerZeroedThick" && value != "lazy" { - errors = append(errors, fmt.Errorf( - "only 'thin', 'eagerZeroedThick', and 'lazy' are supported values for 'type'")) - } - return - }, - }, - - "adapter_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "ide", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "ide" && value != "busLogic" && value != "lsiLogic" { - errors = append(errors, fmt.Errorf( - "only 'ide', 'busLogic', and 'lsiLogic' are supported values for 'adapter_type'")) - } - return - }, - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "datastore": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func resourceVSphereVirtualDiskCreate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[INFO] Creating Virtual Disk") - client := meta.(*govmomi.Client) - - vDisk := virtualDisk{ - size: d.Get("size").(int), - } - - if v, ok := d.GetOk("vmdk_path"); ok { - vDisk.vmdkPath = v.(string) - } - - if v, ok := d.GetOk("type"); ok { - vDisk.initType = v.(string) - } - - if v, ok := d.GetOk("adapter_type"); ok { - vDisk.adapterType = v.(string) - } - - if v, ok 
:= d.GetOk("datacenter"); ok { - vDisk.datacenter = v.(string) - } - - if v, ok := d.GetOk("datastore"); ok { - vDisk.datastore = v.(string) - } - - finder := find.NewFinder(client.Client, true) - - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return fmt.Errorf("Error finding Datacenter: %s: %s", vDisk.datacenter, err) - } - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, vDisk.datastore) - if err != nil { - return fmt.Errorf("Error finding Datastore: %s: %s", vDisk.datastore, err) - } - - err = createHardDisk(client, vDisk.size, ds.Path(vDisk.vmdkPath), vDisk.initType, vDisk.adapterType, vDisk.datacenter) - if err != nil { - return err - } - - d.SetId(ds.Path(vDisk.vmdkPath)) - log.Printf("[DEBUG] Virtual Disk id: %v", ds.Path(vDisk.vmdkPath)) - - return resourceVSphereVirtualDiskRead(d, meta) -} - -func resourceVSphereVirtualDiskRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] Reading virtual disk.") - client := meta.(*govmomi.Client) - - vDisk := virtualDisk{ - size: d.Get("size").(int), - } - - if v, ok := d.GetOk("vmdk_path"); ok { - vDisk.vmdkPath = v.(string) - } - - if v, ok := d.GetOk("type"); ok { - vDisk.initType = v.(string) - } - - if v, ok := d.GetOk("adapter_type"); ok { - vDisk.adapterType = v.(string) - } - - if v, ok := d.GetOk("datacenter"); ok { - vDisk.datacenter = v.(string) - } - - if v, ok := d.GetOk("datastore"); ok { - vDisk.datastore = v.(string) - } - - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - ds, err := finder.Datastore(context.TODO(), d.Get("datastore").(string)) - if err != nil { - return err - } - - ctx := context.TODO() - b, err := ds.Browser(ctx) - if err != nil { - return err - } - - // `Datastore.Stat` does not allow to query `VmDiskFileQuery`. Instead, we - // search the datastore manually. 
- spec := types.HostDatastoreBrowserSearchSpec{ - Query: []types.BaseFileQuery{&types.VmDiskFileQuery{Details: &types.VmDiskFileQueryFlags{ - CapacityKb: true, - DiskType: true, - }}}, - Details: &types.FileQueryFlags{ - FileSize: true, - FileType: true, - Modification: true, - FileOwner: types.NewBool(true), - }, - MatchPattern: []string{path.Base(vDisk.vmdkPath)}, - } - - dsPath := ds.Path(path.Dir(vDisk.vmdkPath)) - task, err := b.SearchDatastore(context.TODO(), dsPath, &spec) - - if err != nil { - log.Printf("[DEBUG] resourceVSphereVirtualDiskRead - could not search datastore for: %v", vDisk.vmdkPath) - return err - } - - info, err := task.WaitForResult(context.TODO(), nil) - if err != nil { - if info == nil || info.Error != nil { - _, ok := info.Error.Fault.(*types.FileNotFound) - if ok { - log.Printf("[DEBUG] resourceVSphereVirtualDiskRead - could not find: %v", vDisk.vmdkPath) - d.SetId("") - return nil - } - } - - log.Printf("[DEBUG] resourceVSphereVirtualDiskRead - could not search datastore for: %v", vDisk.vmdkPath) - return err - } - - res := info.Result.(types.HostDatastoreBrowserSearchResults) - log.Printf("[DEBUG] num results: %d", len(res.File)) - if len(res.File) == 0 { - d.SetId("") - log.Printf("[DEBUG] resourceVSphereVirtualDiskRead - could not find: %v", vDisk.vmdkPath) - return nil - } - - if len(res.File) != 1 { - return errors.New("Datastore search did not return exactly one result") - } - - fileInfo := res.File[0] - log.Printf("[DEBUG] resourceVSphereVirtualDiskRead - fileinfo: %#v", fileInfo) - size := fileInfo.(*types.VmDiskFileInfo).CapacityKb / 1024 / 1024 - - d.SetId(vDisk.vmdkPath) - - d.Set("size", size) - d.Set("vmdk_path", vDisk.vmdkPath) - d.Set("datacenter", d.Get("datacenter")) - d.Set("datastore", d.Get("datastore")) - // Todo collect and write type info - - return nil - -} - -func resourceVSphereVirtualDiskDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*govmomi.Client) - - vDisk := virtualDisk{} - - 
if v, ok := d.GetOk("vmdk_path"); ok { - vDisk.vmdkPath = v.(string) - } - if v, ok := d.GetOk("datastore"); ok { - vDisk.datastore = v.(string) - } - - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, vDisk.datastore) - if err != nil { - return err - } - - diskPath := ds.Path(vDisk.vmdkPath) - - virtualDiskManager := object.NewVirtualDiskManager(client.Client) - - task, err := virtualDiskManager.DeleteVirtualDisk(context.TODO(), diskPath, dc) - if err != nil { - return err - } - - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - log.Printf("[INFO] Failed to delete disk: %v", err) - return err - } - - log.Printf("[INFO] Deleted disk: %v", diskPath) - d.SetId("") - return nil -} - -// createHardDisk creates a new Hard Disk. -func createHardDisk(client *govmomi.Client, size int, diskPath string, diskType string, adapterType string, dc string) error { - var vDiskType string - switch diskType { - case "thin": - vDiskType = "thin" - case "eagerZeroedThick": - vDiskType = "eagerZeroedThick" - case "lazy": - vDiskType = "preallocated" - } - - virtualDiskManager := object.NewVirtualDiskManager(client.Client) - spec := &types.FileBackedVirtualDiskSpec{ - VirtualDiskSpec: types.VirtualDiskSpec{ - AdapterType: adapterType, - DiskType: vDiskType, - }, - CapacityKb: int64(1024 * 1024 * size), - } - datacenter, err := getDatacenter(client, dc) - if err != nil { - return err - } - log.Printf("[DEBUG] Disk spec: %v", spec) - - task, err := virtualDiskManager.CreateVirtualDisk(context.TODO(), diskPath, datacenter, spec) - if err != nil { - return err - } - - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - log.Printf("[INFO] Failed to create disk: %v", err) - return err - } - log.Printf("[INFO] Created disk.") - - return nil -} diff --git 
a/builtin/providers/vsphere/resource_vsphere_virtual_disk_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_disk_test.go deleted file mode 100644 index d3fef92d0..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_disk_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "os" - "testing" - - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "golang.org/x/net/context" -) - -func TestAccVSphereVirtualDisk_basic(t *testing.T) { - var datacenterOpt string - var datastoreOpt string - var initTypeOpt string - var adapterTypeOpt string - - rString := acctest.RandString(5) - - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - datacenterOpt = v - } - if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { - datastoreOpt = v - } - if v := os.Getenv("VSPHERE_INIT_TYPE"); v != "" { - initTypeOpt += fmt.Sprintf(" type = \"%s\"\n", v) - } else { - initTypeOpt += fmt.Sprintf(" type = \"%s\"\n", "thin") - } - if v := os.Getenv("VSPHERE_ADAPTER_TYPE"); v != "" { - adapterTypeOpt += fmt.Sprintf(" adapter_type = \"%s\"\n", v) - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualDiskDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckVSphereVirtuaDiskConfig_basic(rString, initTypeOpt, adapterTypeOpt, datacenterOpt, datastoreOpt), - Check: resource.ComposeTestCheckFunc( - testAccVSphereVirtualDiskExists("vsphere_virtual_disk.foo"), - ), - }, - }, - }) -} - -func testAccVSphereVirtualDiskExists(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := 
testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - finder = finder.SetDatacenter(dc) - - ds, err := finder.Datastore(context.TODO(), rs.Primary.Attributes["datastore"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = ds.Stat(context.TODO(), rs.Primary.Attributes["vmdk_path"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - return nil - } -} - -func testAccCheckVSphereVirtualDiskDestroy(s *terraform.State) error { - log.Printf("[FINDME] test Destroy") - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "vsphere_virtual_disk" { - continue - } - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - finder = finder.SetDatacenter(dc) - - ds, err := finder.Datastore(context.TODO(), rs.Primary.Attributes["datastore"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = ds.Stat(context.TODO(), rs.Primary.Attributes["vmdk_path"]) - if err == nil { - return fmt.Errorf("error %s", err) - } - } - - return nil -} - -func testAccCheckVSphereVirtuaDiskConfig_basic(rName, initTypeOpt, adapterTypeOpt, datacenterOpt, datastoreOpt string) string { - return fmt.Sprintf(` -resource "vsphere_virtual_disk" "foo" { - size = 1 - vmdk_path = "tfTestDisk-%s.vmdk" -%s -%s - datacenter = "%s" - datastore = "%s" -} -`, rName, initTypeOpt, adapterTypeOpt, datacenterOpt, datastoreOpt) -} diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go deleted file mode 100644 index 15e291a38..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ 
/dev/null @@ -1,2147 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "net" - "strconv" - "strings" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/types" - "golang.org/x/net/context" -) - -var DefaultDNSSuffixes = []string{ - "vsphere.local", -} - -var DefaultDNSServers = []string{ - "8.8.8.8", - "8.8.4.4", -} - -var DiskControllerTypes = []string{ - "scsi", - "scsi-lsi-parallel", - "scsi-buslogic", - "scsi-paravirtual", - "scsi-lsi-sas", - "ide", -} - -type networkInterface struct { - deviceName string - label string - ipv4Address string - ipv4PrefixLength int - ipv4Gateway string - ipv6Address string - ipv6PrefixLength int - ipv6Gateway string - adapterType string // TODO: Make "adapter_type" argument - macAddress string -} - -type hardDisk struct { - name string - size int64 - iops int64 - initType string - vmdkPath string - controller string - bootable bool -} - -//Additional options Vsphere can use clones of windows machines -type windowsOptConfig struct { - productKey string - adminPassword string - domainUser string - domain string - domainUserPassword string -} - -type cdrom struct { - datastore string - path string -} - -type memoryAllocation struct { - reservation int64 -} - -type virtualMachine struct { - name string - folder string - datacenter string - cluster string - resourcePool string - datastore string - vcpu int32 - memoryMb int64 - memoryAllocation memoryAllocation - template string - networkInterfaces []networkInterface - hardDisks []hardDisk - cdroms []cdrom - domain string - timeZone string - dnsSuffixes []string - dnsServers []string - hasBootableVmdk bool - linkedClone bool - skipCustomization bool - enableDiskUUID bool - moid string - windowsOptionalConfig windowsOptConfig - customConfigurations 
map[string](types.AnyType) -} - -func (v virtualMachine) Path() string { - return vmPath(v.folder, v.name) -} - -func vmPath(folder string, name string) string { - var path string - if len(folder) > 0 { - path += folder + "/" - } - return path + name -} - -func resourceVSphereVirtualMachine() *schema.Resource { - return &schema.Resource{ - Create: resourceVSphereVirtualMachineCreate, - Read: resourceVSphereVirtualMachineRead, - Update: resourceVSphereVirtualMachineUpdate, - Delete: resourceVSphereVirtualMachineDelete, - - SchemaVersion: 1, - MigrateState: resourceVSphereVirtualMachineMigrateState, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "folder": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "vcpu": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "memory": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - }, - - "memory_reservation": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 0, - ForceNew: true, - }, - - "datacenter": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "cluster": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "resource_pool": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "linked_clone": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - "gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "Please use network_interface.ipv4_gateway", - }, - - "domain": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "vsphere.local", - }, - - "time_zone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "Etc/UTC", - }, - - "dns_suffixes": &schema.Schema{ - Type: 
schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, - }, - - "dns_servers": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, - }, - - "skip_customization": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "enable_disk_uuid": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "uuid": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "moid": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "custom_configuration_parameters": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "windows_opt_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "product_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "admin_password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_user": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "domain_user_password": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - - "network_interface": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "label": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "ip_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "Please use ipv4_address", - }, - - "subnet_mask": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: 
"Please use ipv4_prefix_length", - }, - - "ipv4_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ipv4_prefix_length": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ipv4_gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ipv6_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "ipv6_prefix_length": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "ipv6_gateway": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "adapter_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "mac_address": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - - "disk": &schema.Schema{ - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uuid": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "key": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "template": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "eager_zeroed", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if value != "thin" && value != "eager_zeroed" && value != "lazy" { - errors = append(errors, fmt.Errorf( - "only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'")) - } - return - }, - }, - - "datastore": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "size": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "iops": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - - "vmdk": 
&schema.Schema{ - // TODO: Add ValidateFunc to confirm path exists - Type: schema.TypeString, - Optional: true, - }, - - "bootable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "keep_on_remove": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "controller_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "scsi", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - found := false - for _, t := range DiskControllerTypes { - if t == value { - found = true - } - } - if !found { - errors = append(errors, fmt.Errorf( - "Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", "))) - } - return - }, - }, - }, - }, - }, - - "detach_unknown_disks_on_delete": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "cdrom": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "datastore": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "path": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{}) error { - // flag if changes have to be applied - hasChanges := false - // flag if changes have to be done when powered off - rebootRequired := false - - // make config spec - configSpec := types.VirtualMachineConfigSpec{} - - if d.HasChange("vcpu") { - configSpec.NumCPUs = int32(d.Get("vcpu").(int)) - hasChanges = true - rebootRequired = true - } - - if d.HasChange("memory") { - configSpec.MemoryMB = int64(d.Get("memory").(int)) - hasChanges = true - rebootRequired = true - } - - client := meta.(*govmomi.Client) - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - finder := 
find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) - if err != nil { - return err - } - - if d.HasChange("disk") { - hasChanges = true - oldDisks, newDisks := d.GetChange("disk") - oldDiskSet := oldDisks.(*schema.Set) - newDiskSet := newDisks.(*schema.Set) - - addedDisks := newDiskSet.Difference(oldDiskSet) - removedDisks := oldDiskSet.Difference(newDiskSet) - - // Removed disks - for _, diskRaw := range removedDisks.List() { - if disk, ok := diskRaw.(map[string]interface{}); ok { - devices, err := vm.Device(context.TODO()) - if err != nil { - return fmt.Errorf("[ERROR] Update Remove Disk - Could not get virtual device list: %v", err) - } - virtualDisk := devices.FindByKey(int32(disk["key"].(int))) - - keep := false - if v, ok := disk["keep_on_remove"].(bool); ok { - keep = v - } - - err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) - if err != nil { - return fmt.Errorf("[ERROR] Update Remove Disk - Error removing disk: %v", err) - } - } - } - // Added disks - for _, diskRaw := range addedDisks.List() { - if disk, ok := diskRaw.(map[string]interface{}); ok { - - var datastore *object.Datastore - if disk["datastore"] == "" { - datastore, err = finder.DefaultDatastore(context.TODO()) - if err != nil { - return fmt.Errorf("[ERROR] Update Remove Disk - Error finding datastore: %v", err) - } - } else { - datastore, err = finder.Datastore(context.TODO(), disk["datastore"].(string)) - if err != nil { - log.Printf("[ERROR] Couldn't find datastore %v. 
%s", disk["datastore"].(string), err) - return err - } - } - - var size int64 - if disk["size"] == 0 { - size = 0 - } else { - size = int64(disk["size"].(int)) - } - iops := int64(disk["iops"].(int)) - controller_type := disk["controller_type"].(string) - - var mo mo.VirtualMachine - vm.Properties(context.TODO(), vm.Reference(), []string{"summary", "config"}, &mo) - - var diskPath string - switch { - case disk["vmdk"] != "": - diskPath = disk["vmdk"].(string) - case disk["name"] != "": - snapshotFullDir := mo.Config.Files.SnapshotDirectory - split := strings.Split(snapshotFullDir, " ") - if len(split) != 2 { - return fmt.Errorf("[ERROR] createVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) - } - vmWorkingPath := split[1] - diskPath = vmWorkingPath + disk["name"].(string) - default: - return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given") - } - - var initType string - if disk["type"] != "" { - initType = disk["type"].(string) - } else { - initType = "thin" - } - - log.Printf("[INFO] Attaching disk: %v", diskPath) - err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type) - if err != nil { - log.Printf("[ERROR] Add Hard Disk Failed: %v", err) - return err - } - } - if err != nil { - return err - } - } - } - - // do nothing if there are no changes - if !hasChanges { - return nil - } - - log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) - - if rebootRequired { - log.Printf("[INFO] Shutting down virtual machine: %s", d.Id()) - - task, err := vm.PowerOff(context.TODO()) - if err != nil { - return err - } - - err = task.Wait(context.TODO()) - if err != nil { - return err - } - } - - log.Printf("[INFO] Reconfiguring virtual machine: %s", d.Id()) - - task, err := vm.Reconfigure(context.TODO(), configSpec) - if err != nil { - log.Printf("[ERROR] %s", err) - } - - err = task.Wait(context.TODO()) - if err != nil { - log.Printf("[ERROR] %s", err) - } - - if 
rebootRequired { - task, err = vm.PowerOn(context.TODO()) - if err != nil { - return err - } - - err = task.Wait(context.TODO()) - if err != nil { - log.Printf("[ERROR] %s", err) - } - } - - return resourceVSphereVirtualMachineRead(d, meta) -} - -func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*govmomi.Client) - - vm := virtualMachine{ - name: d.Get("name").(string), - vcpu: int32(d.Get("vcpu").(int)), - memoryMb: int64(d.Get("memory").(int)), - memoryAllocation: memoryAllocation{ - reservation: int64(d.Get("memory_reservation").(int)), - }, - } - - if v, ok := d.GetOk("folder"); ok { - vm.folder = v.(string) - } - - if v, ok := d.GetOk("datacenter"); ok { - vm.datacenter = v.(string) - } - - if v, ok := d.GetOk("cluster"); ok { - vm.cluster = v.(string) - } - - if v, ok := d.GetOk("resource_pool"); ok { - vm.resourcePool = v.(string) - } - - if v, ok := d.GetOk("domain"); ok { - vm.domain = v.(string) - } - - if v, ok := d.GetOk("time_zone"); ok { - vm.timeZone = v.(string) - } - - if v, ok := d.GetOk("linked_clone"); ok { - vm.linkedClone = v.(bool) - } - - if v, ok := d.GetOk("skip_customization"); ok { - vm.skipCustomization = v.(bool) - } - - if v, ok := d.GetOk("enable_disk_uuid"); ok { - vm.enableDiskUUID = v.(bool) - } - - if raw, ok := d.GetOk("dns_suffixes"); ok { - for _, v := range raw.([]interface{}) { - vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string)) - } - } else { - vm.dnsSuffixes = DefaultDNSSuffixes - } - - if raw, ok := d.GetOk("dns_servers"); ok { - for _, v := range raw.([]interface{}) { - vm.dnsServers = append(vm.dnsServers, v.(string)) - } - } else { - vm.dnsServers = DefaultDNSServers - } - - if vL, ok := d.GetOk("custom_configuration_parameters"); ok { - if custom_configs, ok := vL.(map[string]interface{}); ok { - custom := make(map[string]types.AnyType) - for k, v := range custom_configs { - custom[k] = v - } - vm.customConfigurations = custom - log.Printf("[DEBUG] 
custom_configuration_parameters init: %v", vm.customConfigurations) - } - } - - if vL, ok := d.GetOk("network_interface"); ok { - networks := make([]networkInterface, len(vL.([]interface{}))) - for i, v := range vL.([]interface{}) { - network := v.(map[string]interface{}) - networks[i].label = network["label"].(string) - if v, ok := network["ip_address"].(string); ok && v != "" { - networks[i].ipv4Address = v - } - if v, ok := d.GetOk("gateway"); ok { - networks[i].ipv4Gateway = v.(string) - } - if v, ok := network["subnet_mask"].(string); ok && v != "" { - ip := net.ParseIP(v).To4() - if ip != nil { - mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]) - pl, _ := mask.Size() - networks[i].ipv4PrefixLength = pl - } else { - return fmt.Errorf("subnet_mask parameter is invalid.") - } - } - if v, ok := network["ipv4_address"].(string); ok && v != "" { - networks[i].ipv4Address = v - } - if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 { - networks[i].ipv4PrefixLength = v - } - if v, ok := network["ipv4_gateway"].(string); ok && v != "" { - networks[i].ipv4Gateway = v - } - if v, ok := network["ipv6_address"].(string); ok && v != "" { - networks[i].ipv6Address = v - } - if v, ok := network["ipv6_prefix_length"].(int); ok && v != 0 { - networks[i].ipv6PrefixLength = v - } - if v, ok := network["ipv6_gateway"].(string); ok && v != "" { - networks[i].ipv6Gateway = v - } - if v, ok := network["mac_address"].(string); ok && v != "" { - networks[i].macAddress = v - } - } - vm.networkInterfaces = networks - log.Printf("[DEBUG] network_interface init: %v", networks) - } - - if vL, ok := d.GetOk("windows_opt_config"); ok { - var winOpt windowsOptConfig - custom_configs := (vL.([]interface{}))[0].(map[string]interface{}) - if v, ok := custom_configs["admin_password"].(string); ok && v != "" { - winOpt.adminPassword = v - } - if v, ok := custom_configs["domain"].(string); ok && v != "" { - winOpt.domain = v - } - if v, ok := custom_configs["domain_user"].(string); ok && 
v != "" { - winOpt.domainUser = v - } - if v, ok := custom_configs["product_key"].(string); ok && v != "" { - winOpt.productKey = v - } - if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" { - winOpt.domainUserPassword = v - } - vm.windowsOptionalConfig = winOpt - log.Printf("[DEBUG] windows config init: %v", winOpt) - } - - if vL, ok := d.GetOk("disk"); ok { - if diskSet, ok := vL.(*schema.Set); ok { - - disks := []hardDisk{} - for _, value := range diskSet.List() { - disk := value.(map[string]interface{}) - newDisk := hardDisk{} - - if v, ok := disk["template"].(string); ok && v != "" { - if v, ok := disk["name"].(string); ok && v != "" { - return fmt.Errorf("Cannot specify name of a template") - } - vm.template = v - if vm.hasBootableVmdk { - return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") - } - vm.hasBootableVmdk = true - } - - if v, ok := disk["type"].(string); ok && v != "" { - newDisk.initType = v - } - - if v, ok := disk["datastore"].(string); ok && v != "" { - vm.datastore = v - } - - if v, ok := disk["size"].(int); ok && v != 0 { - if v, ok := disk["template"].(string); ok && v != "" { - return fmt.Errorf("Cannot specify size of a template") - } - - if v, ok := disk["name"].(string); ok && v != "" { - newDisk.name = v - } else { - return fmt.Errorf("[ERROR] Disk name must be provided when creating a new disk") - } - - newDisk.size = int64(v) - } - - if v, ok := disk["iops"].(int); ok && v != 0 { - newDisk.iops = int64(v) - } - - if v, ok := disk["controller_type"].(string); ok && v != "" { - newDisk.controller = v - } - - if vVmdk, ok := disk["vmdk"].(string); ok && vVmdk != "" { - if v, ok := disk["template"].(string); ok && v != "" { - return fmt.Errorf("Cannot specify a vmdk for a template") - } - if v, ok := disk["size"].(string); ok && v != "" { - return fmt.Errorf("Cannot specify size of a vmdk") - } - if v, ok := disk["name"].(string); ok && v != "" { - return fmt.Errorf("Cannot specify name of a 
vmdk") - } - if vBootable, ok := disk["bootable"].(bool); ok { - if vBootable && vm.hasBootableVmdk { - return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") - } - newDisk.bootable = vBootable - vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable - } - newDisk.vmdkPath = vVmdk - } - // Preserves order so bootable disk is first - if newDisk.bootable == true || disk["template"] != "" { - disks = append([]hardDisk{newDisk}, disks...) - } else { - disks = append(disks, newDisk) - } - } - vm.hardDisks = disks - log.Printf("[DEBUG] disk init: %v", disks) - } - } - - if vL, ok := d.GetOk("cdrom"); ok { - cdroms := make([]cdrom, len(vL.([]interface{}))) - for i, v := range vL.([]interface{}) { - c := v.(map[string]interface{}) - if v, ok := c["datastore"].(string); ok && v != "" { - cdroms[i].datastore = v - } else { - return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.") - } - if v, ok := c["path"].(string); ok && v != "" { - cdroms[i].path = v - } else { - return fmt.Errorf("Path argument must be specified when attaching a cdrom image.") - } - } - vm.cdroms = cdroms - log.Printf("[DEBUG] cdrom init: %v", cdroms) - } - - err := vm.setupVirtualMachine(client) - if err != nil { - return err - } - - d.SetId(vm.Path()) - log.Printf("[INFO] Created virtual machine: %s", d.Id()) - - return resourceVSphereVirtualMachineRead(d, meta) -} - -func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG] virtual machine resource data: %#v", d) - client := meta.(*govmomi.Client) - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - vm, err := finder.VirtualMachine(context.TODO(), d.Id()) - if err != nil { - d.SetId("") - return nil - } - - err = d.Set("moid", vm.Reference().Value) - if err != nil { - return fmt.Errorf("Invalid moid to set: %#v", 
vm.Reference().Value) - } else { - log.Printf("[DEBUG] Set the moid: %#v", vm.Reference().Value) - } - - state, err := vm.PowerState(context.TODO()) - if err != nil { - return err - } - - if state == types.VirtualMachinePowerStatePoweredOn { - // wait for interfaces to appear - log.Printf("[DEBUG] Waiting for interfaces to appear") - - _, err = vm.WaitForNetIP(context.TODO(), false) - if err != nil { - return err - } - - log.Printf("[DEBUG] Successfully waited for interfaces to appear") - } - - var mvm mo.VirtualMachine - collector := property.DefaultCollector(client.Client) - if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil { - return err - } - - log.Printf("[DEBUG] Datacenter - %#v", dc) - log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Summary.Config) - log.Printf("[DEBUG] mvm.Summary.Config - %#v", mvm.Config) - log.Printf("[DEBUG] mvm.Guest.Net - %#v", mvm.Guest.Net) - - err = d.Set("moid", mvm.Reference().Value) - if err != nil { - return fmt.Errorf("Invalid moid to set: %#v", mvm.Reference().Value) - } else { - log.Printf("[DEBUG] Set the moid: %#v", mvm.Reference().Value) - } - - disks := make([]map[string]interface{}, 0) - templateDisk := make(map[string]interface{}, 1) - for _, device := range mvm.Config.Hardware.Device { - if vd, ok := device.(*types.VirtualDisk); ok { - - virtualDevice := vd.GetVirtualDevice() - - backingInfo := virtualDevice.Backing - var diskFullPath string - var diskUuid string - if v, ok := backingInfo.(*types.VirtualDiskFlatVer2BackingInfo); ok { - diskFullPath = v.FileName - diskUuid = v.Uuid - } else if v, ok := backingInfo.(*types.VirtualDiskSparseVer2BackingInfo); ok { - diskFullPath = v.FileName - diskUuid = v.Uuid - } - log.Printf("[DEBUG] resourceVSphereVirtualMachineRead - Analyzing disk: %v", diskFullPath) - - // Separate datastore and path - diskFullPathSplit := strings.Split(diskFullPath, " ") - if len(diskFullPathSplit) != 2 { - 
return fmt.Errorf("[ERROR] Failed trying to parse disk path: %v", diskFullPath) - } - diskPath := diskFullPathSplit[1] - // Isolate filename - diskNameSplit := strings.Split(diskPath, "/") - diskName := diskNameSplit[len(diskNameSplit)-1] - // Remove possible extension - diskName = strings.Split(diskName, ".")[0] - - if prevDisks, ok := d.GetOk("disk"); ok { - if prevDiskSet, ok := prevDisks.(*schema.Set); ok { - for _, v := range prevDiskSet.List() { - prevDisk := v.(map[string]interface{}) - - // We're guaranteed only one template disk. Passing value directly through since templates should be immutable - if prevDisk["template"] != "" { - if len(templateDisk) == 0 { - templateDisk = prevDisk - disks = append(disks, templateDisk) - break - } - } - - // It is enforced that prevDisk["name"] should only be set in the case - // of creating a new disk for the user. - // size case: name was set by user, compare parsed filename from mo.filename (without path or .vmdk extension) with name - // vmdk case: compare prevDisk["vmdk"] and mo.Filename - if diskName == prevDisk["name"] || diskPath == prevDisk["vmdk"] { - - prevDisk["key"] = virtualDevice.Key - prevDisk["uuid"] = diskUuid - - disks = append(disks, prevDisk) - break - } - } - } - } - log.Printf("[DEBUG] disks: %#v", disks) - } - } - err = d.Set("disk", disks) - if err != nil { - return fmt.Errorf("Invalid disks to set: %#v", disks) - } - - networkInterfaces := make([]map[string]interface{}, 0) - for _, v := range mvm.Guest.Net { - if v.DeviceConfigId >= 0 { - log.Printf("[DEBUG] v.Network - %#v", v.Network) - networkInterface := make(map[string]interface{}) - networkInterface["label"] = v.Network - networkInterface["mac_address"] = v.MacAddress - for _, ip := range v.IpConfig.IpAddress { - p := net.ParseIP(ip.IpAddress) - if p.To4() != nil { - log.Printf("[DEBUG] p.String - %#v", p.String()) - log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) - networkInterface["ipv4_address"] = p.String() - 
networkInterface["ipv4_prefix_length"] = ip.PrefixLength - } else if p.To16() != nil { - log.Printf("[DEBUG] p.String - %#v", p.String()) - log.Printf("[DEBUG] ip.PrefixLength - %#v", ip.PrefixLength) - networkInterface["ipv6_address"] = p.String() - networkInterface["ipv6_prefix_length"] = ip.PrefixLength - } - log.Printf("[DEBUG] networkInterface: %#v", networkInterface) - } - log.Printf("[DEBUG] networkInterface: %#v", networkInterface) - networkInterfaces = append(networkInterfaces, networkInterface) - } - } - if mvm.Guest.IpStack != nil { - for _, v := range mvm.Guest.IpStack { - if v.IpRouteConfig != nil && v.IpRouteConfig.IpRoute != nil { - for _, route := range v.IpRouteConfig.IpRoute { - if route.Gateway.Device != "" { - gatewaySetting := "" - if route.Network == "::" { - gatewaySetting = "ipv6_gateway" - } else if route.Network == "0.0.0.0" { - gatewaySetting = "ipv4_gateway" - } - if gatewaySetting != "" { - deviceID, err := strconv.Atoi(route.Gateway.Device) - if len(networkInterfaces) == 1 { - deviceID = 0 - } - if err != nil { - log.Printf("[WARN] error at processing %s of device id %#v: %#v", gatewaySetting, route.Gateway.Device, err) - } else { - log.Printf("[DEBUG] %s of device id %d: %s", gatewaySetting, deviceID, route.Gateway.IpAddress) - networkInterfaces[deviceID][gatewaySetting] = route.Gateway.IpAddress - } - } - } - } - } - } - } - log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces) - err = d.Set("network_interface", networkInterfaces) - if err != nil { - return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) - } - - if len(networkInterfaces) > 0 { - if _, ok := networkInterfaces[0]["ipv4_address"]; ok { - log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": networkInterfaces[0]["ipv4_address"].(string), - }) - } - } - - var rootDatastore string - for _, v := range mvm.Datastore { - var md mo.Datastore - if err := 
collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil { - return err - } - if md.Parent.Type == "StoragePod" { - var msp mo.StoragePod - if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil { - return err - } - rootDatastore = msp.Name - log.Printf("[DEBUG] %#v", msp.Name) - } else { - rootDatastore = md.Name - log.Printf("[DEBUG] %#v", md.Name) - } - break - } - - d.Set("datacenter", dc) - d.Set("memory", mvm.Summary.Config.MemorySizeMB) - d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation) - d.Set("cpu", mvm.Summary.Config.NumCpu) - d.Set("datastore", rootDatastore) - d.Set("uuid", mvm.Summary.Config.Uuid) - - return nil -} - -func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*govmomi.Client) - dc, err := getDatacenter(client, d.Get("datacenter").(string)) - if err != nil { - return err - } - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string))) - if err != nil { - return err - } - devices, err := vm.Device(context.TODO()) - if err != nil { - log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) - return err - } - - log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) - state, err := vm.PowerState(context.TODO()) - if err != nil { - return err - } - - if state == types.VirtualMachinePowerStatePoweredOn { - task, err := vm.PowerOff(context.TODO()) - if err != nil { - return err - } - - err = task.Wait(context.TODO()) - if err != nil { - return err - } - } - - // Safely eject any disks the user marked as keep_on_remove - var diskSetList []interface{} - if vL, ok := d.GetOk("disk"); ok { - if diskSet, ok := vL.(*schema.Set); ok { - diskSetList = diskSet.List() - for _, value := range diskSetList { - disk := value.(map[string]interface{}) - 
- if v, ok := disk["keep_on_remove"].(bool); ok && v == true { - log.Printf("[DEBUG] not destroying %v", disk["name"]) - virtualDisk := devices.FindByKey(int32(disk["key"].(int))) - err = vm.RemoveDevice(context.TODO(), true, virtualDisk) - if err != nil { - log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) - return err - } - } - } - } - } - - // Safely eject any disks that are not managed by this resource - if v, ok := d.GetOk("detach_unknown_disks_on_delete"); ok && v.(bool) { - var disksToRemove object.VirtualDeviceList - for _, device := range devices { - if devices.TypeName(device) != "VirtualDisk" { - continue - } - vd := device.GetVirtualDevice() - var skip bool - for _, value := range diskSetList { - disk := value.(map[string]interface{}) - if int32(disk["key"].(int)) == vd.Key { - skip = true - break - } - } - if skip { - continue - } - disksToRemove = append(disksToRemove, device) - } - if len(disksToRemove) != 0 { - err = vm.RemoveDevice(context.TODO(), true, disksToRemove...) - if err != nil { - log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) - return err - } - } - } - - task, err := vm.Destroy(context.TODO()) - if err != nil { - return err - } - - err = task.Wait(context.TODO()) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -// addHardDisk adds a new Hard Disk to the VirtualMachine. 
-func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, datastore *object.Datastore, diskPath string, controller_type string) error { - devices, err := vm.Device(context.TODO()) - if err != nil { - return err - } - log.Printf("[DEBUG] vm devices: %#v\n", devices) - - var controller types.BaseVirtualController - switch controller_type { - case "scsi": - controller, err = devices.FindDiskController(controller_type) - case "scsi-lsi-parallel": - controller = devices.PickController(&types.VirtualLsiLogicController{}) - case "scsi-buslogic": - controller = devices.PickController(&types.VirtualBusLogicController{}) - case "scsi-paravirtual": - controller = devices.PickController(&types.ParaVirtualSCSIController{}) - case "scsi-lsi-sas": - controller = devices.PickController(&types.VirtualLsiLogicSASController{}) - case "ide": - controller, err = devices.FindDiskController(controller_type) - default: - return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) - } - - if err != nil || controller == nil { - // Check if max number of scsi controller are already used - diskControllers := getSCSIControllers(devices) - if len(diskControllers) >= 4 { - return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created") - } - - log.Printf("[DEBUG] Couldn't find a %v controller. 
Creating one..", controller_type) - - var c types.BaseVirtualDevice - switch controller_type { - case "scsi": - // Create scsi controller - c, err = devices.CreateSCSIController("scsi") - if err != nil { - return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) - } - case "scsi-lsi-parallel": - // Create scsi controller - c, err = devices.CreateSCSIController("lsilogic") - if err != nil { - return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) - } - case "scsi-buslogic": - // Create scsi controller - c, err = devices.CreateSCSIController("buslogic") - if err != nil { - return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) - } - case "scsi-paravirtual": - // Create scsi controller - c, err = devices.CreateSCSIController("pvscsi") - if err != nil { - return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) - } - case "scsi-lsi-sas": - // Create scsi controller - c, err = devices.CreateSCSIController("lsilogic-sas") - if err != nil { - return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) - } - case "ide": - // Create ide controller - c, err = devices.CreateIDEController() - if err != nil { - return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) - } - default: - return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) - } - - vm.AddDevice(context.TODO(), c) - // Update our devices list - devices, err := vm.Device(context.TODO()) - if err != nil { - return err - } - controller = devices.PickController(c.(types.BaseVirtualController)) - if controller == nil { - log.Printf("[ERROR] Could not find the new %v controller", controller_type) - return fmt.Errorf("Could not find the new %v controller", controller_type) - } - } - - log.Printf("[DEBUG] disk controller: %#v\n", controller) - - // TODO Check if diskPath & datastore exist - // If diskPath is not specified, pass empty string to CreateDisk() - if diskPath == "" { - return fmt.Errorf("[ERROR] 
addHardDisk - No path proided") - } else { - diskPath = datastore.Path(diskPath) - } - log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) - disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) - - if strings.Contains(controller_type, "scsi") { - unitNumber, err := getNextUnitNumber(devices, controller) - if err != nil { - return err - } - *disk.UnitNumber = unitNumber - } - - existing := devices.SelectByBackingInfo(disk.Backing) - log.Printf("[DEBUG] disk: %#v\n", disk) - - if len(existing) == 0 { - disk.CapacityInKB = int64(size * 1024 * 1024) - if iops != 0 { - disk.StorageIOAllocation = &types.StorageIOAllocationInfo{ - Limit: iops, - } - } - backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) - - if diskType == "eager_zeroed" { - // eager zeroed thick virtual disk - backing.ThinProvisioned = types.NewBool(false) - backing.EagerlyScrub = types.NewBool(true) - } else if diskType == "lazy" { - // lazy zeroed thick virtual disk - backing.ThinProvisioned = types.NewBool(false) - backing.EagerlyScrub = types.NewBool(false) - } else if diskType == "thin" { - // thin provisioned virtual disk - backing.ThinProvisioned = types.NewBool(true) - } - - log.Printf("[DEBUG] addHardDisk: %#v\n", disk) - log.Printf("[DEBUG] addHardDisk capacity: %#v\n", disk.CapacityInKB) - - return vm.AddDevice(context.TODO(), disk) - } else { - log.Printf("[DEBUG] addHardDisk: Disk already present.\n") - - return nil - } -} - -func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { - // get virtual scsi controllers of all supported types - var scsiControllers []*types.VirtualController - for _, device := range vmDevices { - devType := vmDevices.Type(device) - switch devType { - case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas": - if c, ok := device.(types.BaseVirtualController); ok { - scsiControllers = append(scsiControllers, c.GetVirtualController()) - } - } - } - return scsiControllers -} - -func 
getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { - key := c.GetVirtualController().Key - - var unitNumbers [16]bool - unitNumbers[7] = true - - for _, device := range devices { - d := device.GetVirtualDevice() - - if d.ControllerKey == key { - if d.UnitNumber != nil { - unitNumbers[*d.UnitNumber] = true - } - } - } - for i, taken := range unitNumbers { - if !taken { - return int32(i), nil - } - } - return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full") -} - -// addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. -func addCdrom(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, datastore, path string) error { - devices, err := vm.Device(context.TODO()) - if err != nil { - return err - } - log.Printf("[DEBUG] vm devices: %#v", devices) - - var controller *types.VirtualIDEController - controller, err = devices.FindIDEController("") - if err != nil { - log.Printf("[DEBUG] Couldn't find a ide controller. 
Creating one..") - - var c types.BaseVirtualDevice - c, err := devices.CreateIDEController() - if err != nil { - return fmt.Errorf("[ERROR] Failed creating IDE controller: %v", err) - } - - if v, ok := c.(*types.VirtualIDEController); ok { - controller = v - } else { - return fmt.Errorf("[ERROR] Controller type could not be asserted") - } - vm.AddDevice(context.TODO(), c) - // Update our devices list - devices, err := vm.Device(context.TODO()) - if err != nil { - return err - } - controller, err = devices.FindIDEController("") - if err != nil { - log.Printf("[ERROR] Could not find the new disk IDE controller: %v", err) - return err - } - } - log.Printf("[DEBUG] ide controller: %#v", controller) - - c, err := devices.CreateCdrom(controller) - if err != nil { - return err - } - - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(datacenter) - ds, err := getDatastore(finder, datastore) - if err != nil { - return err - } - - c = devices.InsertIso(c, ds.Path(path)) - log.Printf("[DEBUG] addCdrom: %#v", c) - - return vm.AddDevice(context.TODO(), c) -} - -// buildNetworkDevice builds VirtualDeviceConfigSpec for Network Device. 
-func buildNetworkDevice(f *find.Finder, label, adapterType string, macAddress string) (*types.VirtualDeviceConfigSpec, error) { - network, err := f.Network(context.TODO(), "*"+label) - if err != nil { - return nil, err - } - - backing, err := network.EthernetCardBackingInfo(context.TODO()) - if err != nil { - return nil, err - } - - var address_type string - if macAddress == "" { - address_type = string(types.VirtualEthernetCardMacTypeGenerated) - } else { - address_type = string(types.VirtualEthernetCardMacTypeManual) - } - - if adapterType == "vmxnet3" { - return &types.VirtualDeviceConfigSpec{ - Operation: types.VirtualDeviceConfigSpecOperationAdd, - Device: &types.VirtualVmxnet3{ - VirtualVmxnet: types.VirtualVmxnet{ - VirtualEthernetCard: types.VirtualEthernetCard{ - VirtualDevice: types.VirtualDevice{ - Key: -1, - Backing: backing, - }, - AddressType: address_type, - MacAddress: macAddress, - }, - }, - }, - }, nil - } else if adapterType == "e1000" { - return &types.VirtualDeviceConfigSpec{ - Operation: types.VirtualDeviceConfigSpecOperationAdd, - Device: &types.VirtualE1000{ - VirtualEthernetCard: types.VirtualEthernetCard{ - VirtualDevice: types.VirtualDevice{ - Key: -1, - Backing: backing, - }, - AddressType: address_type, - MacAddress: macAddress, - }, - }, - }, nil - } else { - return nil, fmt.Errorf("Invalid network adapter type.") - } -} - -// buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine. 
-func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, linkedClone bool, initType string) (types.VirtualMachineRelocateSpec, error) { - var key int32 - var moveType string - if linkedClone { - moveType = "createNewChildDiskBacking" - } else { - moveType = "moveAllDiskBackingsAndDisallowSharing" - } - log.Printf("[DEBUG] relocate type: [%s]", moveType) - - devices, err := vm.Device(context.TODO()) - if err != nil { - return types.VirtualMachineRelocateSpec{}, err - } - for _, d := range devices { - if devices.Type(d) == "disk" { - key = int32(d.GetVirtualDevice().Key) - } - } - - isThin := initType == "thin" - eagerScrub := initType == "eager_zeroed" - rpr := rp.Reference() - dsr := ds.Reference() - return types.VirtualMachineRelocateSpec{ - Datastore: &dsr, - Pool: &rpr, - DiskMoveType: moveType, - Disk: []types.VirtualMachineRelocateSpecDiskLocator{ - { - Datastore: dsr, - DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ - DiskMode: "persistent", - ThinProvisioned: types.NewBool(isThin), - EagerlyScrub: types.NewBool(eagerScrub), - }, - DiskId: key, - }, - }, - }, nil -} - -// getDatastoreObject gets datastore object. -func getDatastoreObject(client *govmomi.Client, f *object.DatacenterFolders, name string) (types.ManagedObjectReference, error) { - s := object.NewSearchIndex(client.Client) - ref, err := s.FindChild(context.TODO(), f.DatastoreFolder, name) - if err != nil { - return types.ManagedObjectReference{}, err - } - if ref == nil { - return types.ManagedObjectReference{}, fmt.Errorf("Datastore '%s' not found.", name) - } - log.Printf("[DEBUG] getDatastoreObject: reference: %#v", ref) - return ref.Reference(), nil -} - -// buildStoragePlacementSpecCreate builds StoragePlacementSpec for create action. 
-func buildStoragePlacementSpecCreate(f *object.DatacenterFolders, rp *object.ResourcePool, storagePod object.StoragePod, configSpec types.VirtualMachineConfigSpec) types.StoragePlacementSpec { - vmfr := f.VmFolder.Reference() - rpr := rp.Reference() - spr := storagePod.Reference() - - sps := types.StoragePlacementSpec{ - Type: "create", - ConfigSpec: &configSpec, - PodSelectionSpec: types.StorageDrsPodSelectionSpec{ - StoragePod: &spr, - }, - Folder: &vmfr, - ResourcePool: &rpr, - } - log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) - return sps -} - -// buildStoragePlacementSpecClone builds StoragePlacementSpec for clone action. -func buildStoragePlacementSpecClone(c *govmomi.Client, f *object.DatacenterFolders, vm *object.VirtualMachine, rp *object.ResourcePool, storagePod object.StoragePod) types.StoragePlacementSpec { - vmr := vm.Reference() - vmfr := f.VmFolder.Reference() - rpr := rp.Reference() - spr := storagePod.Reference() - - var o mo.VirtualMachine - err := vm.Properties(context.TODO(), vmr, []string{"datastore"}, &o) - if err != nil { - return types.StoragePlacementSpec{} - } - ds := object.NewDatastore(c.Client, o.Datastore[0]) - log.Printf("[DEBUG] findDatastore: datastore: %#v\n", ds) - - devices, err := vm.Device(context.TODO()) - if err != nil { - return types.StoragePlacementSpec{} - } - - var key int32 - for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) { - key = int32(d.GetVirtualDevice().Key) - log.Printf("[DEBUG] findDatastore: virtual devices: %#v\n", d.GetVirtualDevice()) - } - - sps := types.StoragePlacementSpec{ - Type: "clone", - Vm: &vmr, - PodSelectionSpec: types.StorageDrsPodSelectionSpec{ - StoragePod: &spr, - }, - CloneSpec: &types.VirtualMachineCloneSpec{ - Location: types.VirtualMachineRelocateSpec{ - Disk: []types.VirtualMachineRelocateSpecDiskLocator{ - { - Datastore: ds.Reference(), - DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{}, - DiskId: key, - }, - }, - Pool: &rpr, - }, - 
PowerOn: false, - Template: false, - }, - CloneName: "dummy", - Folder: &vmfr, - } - return sps -} - -// findDatastore finds Datastore object. -func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.Datastore, error) { - var datastore *object.Datastore - log.Printf("[DEBUG] findDatastore: StoragePlacementSpec: %#v\n", sps) - - srm := object.NewStorageResourceManager(c.Client) - rds, err := srm.RecommendDatastores(context.TODO(), sps) - if err != nil { - return nil, err - } - log.Printf("[DEBUG] findDatastore: recommendDatastores: %#v\n", rds) - - spa := rds.Recommendations[0].Action[0].(*types.StoragePlacementAction) - datastore = object.NewDatastore(c.Client, spa.Destination) - log.Printf("[DEBUG] findDatastore: datastore: %#v", datastore) - - return datastore, nil -} - -// createCdroms is a helper function to attach virtual cdrom devices (and their attached disk images) to a virtual IDE controller. -func createCdroms(client *govmomi.Client, vm *object.VirtualMachine, datacenter *object.Datacenter, cdroms []cdrom) error { - log.Printf("[DEBUG] add cdroms: %v", cdroms) - for _, cd := range cdroms { - log.Printf("[DEBUG] add cdrom (datastore): %v", cd.datastore) - log.Printf("[DEBUG] add cdrom (cd path): %v", cd.path) - err := addCdrom(client, vm, datacenter, cd.datastore, cd.path) - if err != nil { - return err - } - } - - return nil -} - -func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { - dc, err := getDatacenter(c, vm.datacenter) - - if err != nil { - return err - } - finder := find.NewFinder(c.Client, true) - finder = finder.SetDatacenter(dc) - - var template *object.VirtualMachine - var template_mo mo.VirtualMachine - var vm_mo mo.VirtualMachine - if vm.template != "" { - template, err = finder.VirtualMachine(context.TODO(), vm.template) - if err != nil { - return err - } - log.Printf("[DEBUG] template: %#v", template) - - err = template.Properties(context.TODO(), template.Reference(), []string{"parent", 
"config.template", "config.guestId", "resourcePool", "snapshot", "guest.toolsVersionStatus2", "config.guestFullName"}, &template_mo) - if err != nil { - return err - } - } - - var resourcePool *object.ResourcePool - if vm.resourcePool == "" { - if vm.cluster == "" { - resourcePool, err = finder.DefaultResourcePool(context.TODO()) - if err != nil { - return err - } - } else { - resourcePool, err = finder.ResourcePool(context.TODO(), "*"+vm.cluster+"/Resources") - if err != nil { - return err - } - } - } else { - resourcePool, err = finder.ResourcePool(context.TODO(), vm.resourcePool) - if err != nil { - return err - } - } - log.Printf("[DEBUG] resource pool: %#v", resourcePool) - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return err - } - log.Printf("[DEBUG] folder: %#v", vm.folder) - - folder := dcFolders.VmFolder - if len(vm.folder) > 0 { - si := object.NewSearchIndex(c.Client) - folderRef, err := si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder)) - if err != nil { - return fmt.Errorf("Error reading folder %s: %s", vm.folder, err) - } else if folderRef == nil { - return fmt.Errorf("Cannot find folder %s", vm.folder) - } else { - folder = folderRef.(*object.Folder) - } - } - - // make config spec - configSpec := types.VirtualMachineConfigSpec{ - Name: vm.name, - NumCPUs: vm.vcpu, - NumCoresPerSocket: 1, - MemoryMB: vm.memoryMb, - MemoryAllocation: &types.ResourceAllocationInfo{ - Reservation: vm.memoryAllocation.reservation, - }, - Flags: &types.VirtualMachineFlagInfo{ - DiskUuidEnabled: &vm.enableDiskUUID, - }, - } - if vm.template == "" { - configSpec.GuestId = "otherLinux64Guest" - } - log.Printf("[DEBUG] virtual machine config spec: %v", configSpec) - - // make ExtraConfig - log.Printf("[DEBUG] virtual machine Extra Config spec start") - if len(vm.customConfigurations) > 0 { - var ov []types.BaseOptionValue - for k, v := range vm.customConfigurations { - key := k - value := v - o := 
types.OptionValue{ - Key: key, - Value: &value, - } - log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v) - ov = append(ov, &o) - } - configSpec.ExtraConfig = ov - log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig) - } - - var datastore *object.Datastore - if vm.datastore == "" { - datastore, err = finder.DefaultDatastore(context.TODO()) - if err != nil { - return err - } - } else { - datastore, err = finder.Datastore(context.TODO(), vm.datastore) - if err != nil { - // TODO: datastore cluster support in govmomi finder function - d, err := getDatastoreObject(c, dcFolders, vm.datastore) - if err != nil { - return err - } - - if d.Type == "StoragePod" { - sp := object.StoragePod{ - Folder: object.NewFolder(c.Client, d), - } - - var sps types.StoragePlacementSpec - if vm.template != "" { - sps = buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp) - } else { - sps = buildStoragePlacementSpecCreate(dcFolders, resourcePool, sp, configSpec) - } - - datastore, err = findDatastore(c, sps) - if err != nil { - return err - } - } else { - datastore = object.NewDatastore(c.Client, d) - } - } - } - - log.Printf("[DEBUG] datastore: %#v", datastore) - - // network - networkDevices := []types.BaseVirtualDeviceConfigSpec{} - networkConfigs := []types.CustomizationAdapterMapping{} - for _, network := range vm.networkInterfaces { - // network device - var networkDeviceType string - if vm.template == "" { - networkDeviceType = "e1000" - } else { - networkDeviceType = "vmxnet3" - } - nd, err := buildNetworkDevice(finder, network.label, networkDeviceType, network.macAddress) - if err != nil { - return err - } - log.Printf("[DEBUG] network device: %+v", nd.Device) - networkDevices = append(networkDevices, nd) - - if vm.template != "" { - var ipSetting types.CustomizationIPSettings - if network.ipv4Address == "" { - ipSetting.Ip = &types.CustomizationDhcpIpGenerator{} - } else { - if network.ipv4PrefixLength == 0 { - 
return fmt.Errorf("Error: ipv4_prefix_length argument is empty.") - } - m := net.CIDRMask(network.ipv4PrefixLength, 32) - sm := net.IPv4(m[0], m[1], m[2], m[3]) - subnetMask := sm.String() - log.Printf("[DEBUG] ipv4 gateway: %v\n", network.ipv4Gateway) - log.Printf("[DEBUG] ipv4 address: %v\n", network.ipv4Address) - log.Printf("[DEBUG] ipv4 prefix length: %v\n", network.ipv4PrefixLength) - log.Printf("[DEBUG] ipv4 subnet mask: %v\n", subnetMask) - ipSetting.Gateway = []string{ - network.ipv4Gateway, - } - ipSetting.Ip = &types.CustomizationFixedIp{ - IpAddress: network.ipv4Address, - } - ipSetting.SubnetMask = subnetMask - } - - ipv6Spec := &types.CustomizationIPSettingsIpV6AddressSpec{} - if network.ipv6Address == "" { - ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ - &types.CustomizationDhcpIpV6Generator{}, - } - } else { - log.Printf("[DEBUG] ipv6 gateway: %v\n", network.ipv6Gateway) - log.Printf("[DEBUG] ipv6 address: %v\n", network.ipv6Address) - log.Printf("[DEBUG] ipv6 prefix length: %v\n", network.ipv6PrefixLength) - - ipv6Spec.Ip = []types.BaseCustomizationIpV6Generator{ - &types.CustomizationFixedIpV6{ - IpAddress: network.ipv6Address, - SubnetMask: int32(network.ipv6PrefixLength), - }, - } - ipv6Spec.Gateway = []string{network.ipv6Gateway} - } - ipSetting.IpV6Spec = ipv6Spec - - // network config - config := types.CustomizationAdapterMapping{ - Adapter: ipSetting, - } - networkConfigs = append(networkConfigs, config) - } - } - log.Printf("[DEBUG] network devices: %#v", networkDevices) - log.Printf("[DEBUG] network configs: %#v", networkConfigs) - - var task *object.Task - if vm.template == "" { - var mds mo.Datastore - if err = datastore.Properties(context.TODO(), datastore.Reference(), []string{"name"}, &mds); err != nil { - return err - } - log.Printf("[DEBUG] datastore: %#v", mds.Name) - scsi, err := object.SCSIControllerTypes().CreateSCSIController("scsi") - if err != nil { - log.Printf("[ERROR] %s", err) - } - - configSpec.DeviceChange = 
append(configSpec.DeviceChange, &types.VirtualDeviceConfigSpec{ - Operation: types.VirtualDeviceConfigSpecOperationAdd, - Device: scsi, - }) - - configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)} - - task, err = folder.CreateVM(context.TODO(), configSpec, resourcePool, nil) - if err != nil { - log.Printf("[ERROR] %s", err) - } - - err = task.Wait(context.TODO()) - if err != nil { - log.Printf("[ERROR] %s", err) - } - - } else { - - relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.linkedClone, vm.hardDisks[0].initType) - if err != nil { - return err - } - - log.Printf("[DEBUG] relocate spec: %v", relocateSpec) - - // make vm clone spec - cloneSpec := types.VirtualMachineCloneSpec{ - Location: relocateSpec, - Template: false, - Config: &configSpec, - PowerOn: false, - } - if vm.linkedClone { - if template_mo.Snapshot == nil { - return fmt.Errorf("`linkedClone=true`, but image VM has no snapshots") - } - cloneSpec.Snapshot = template_mo.Snapshot.CurrentSnapshot - } - log.Printf("[DEBUG] clone spec: %v", cloneSpec) - - task, err = template.Clone(context.TODO(), folder, vm.name, cloneSpec) - if err != nil { - return err - } - } - - err = task.Wait(context.TODO()) - if err != nil { - log.Printf("[ERROR] %s", err) - } - - newVM, err := finder.VirtualMachine(context.TODO(), vm.Path()) - if err != nil { - return err - } - log.Printf("[DEBUG] new vm: %v", newVM) - - devices, err := newVM.Device(context.TODO()) - if err != nil { - log.Printf("[DEBUG] Template devices can't be found") - return err - } - - for _, dvc := range devices { - // Issue 3559/3560: Delete all ethernet devices to add the correct ones later - if devices.Type(dvc) == "ethernet" { - err := newVM.RemoveDevice(context.TODO(), false, dvc) - if err != nil { - return err - } - } - } - // Add Network devices - for _, dvc := range networkDevices { - err := newVM.AddDevice( - context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device) - if err != 
nil { - return err - } - } - - // Create the cdroms if needed. - if err := createCdroms(c, newVM, dc, vm.cdroms); err != nil { - return err - } - - newVM.Properties(context.TODO(), newVM.Reference(), []string{"summary", "config"}, &vm_mo) - firstDisk := 0 - if vm.template != "" { - firstDisk++ - } - for i := firstDisk; i < len(vm.hardDisks); i++ { - log.Printf("[DEBUG] disk index: %v", i) - - var diskPath string - switch { - case vm.hardDisks[i].vmdkPath != "": - diskPath = vm.hardDisks[i].vmdkPath - case vm.hardDisks[i].name != "": - snapshotFullDir := vm_mo.Config.Files.SnapshotDirectory - split := strings.Split(snapshotFullDir, " ") - if len(split) != 2 { - return fmt.Errorf("[ERROR] setupVirtualMachine - failed to split snapshot directory: %v", snapshotFullDir) - } - vmWorkingPath := split[1] - diskPath = vmWorkingPath + vm.hardDisks[i].name - default: - return fmt.Errorf("[ERROR] setupVirtualMachine - Neither vmdk path nor vmdk name was given: %#v", vm.hardDisks[i]) - } - err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) - if err != nil { - err2 := addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) - if err2 != nil { - return err2 - } - return err - } - } - - if vm.skipCustomization || vm.template == "" { - log.Printf("[DEBUG] VM customization skipped") - } else { - var identity_options types.BaseCustomizationIdentitySettings - if strings.HasPrefix(template_mo.Config.GuestId, "win") { - var timeZone int - if vm.timeZone == "Etc/UTC" { - vm.timeZone = "085" - } - timeZone, err := strconv.Atoi(vm.timeZone) - if err != nil { - return fmt.Errorf("Error converting TimeZone: %s", err) - } - - guiUnattended := types.CustomizationGuiUnattended{ - AutoLogon: false, - AutoLogonCount: 1, - TimeZone: int32(timeZone), - } - - customIdentification := types.CustomizationIdentification{} - - 
userData := types.CustomizationUserData{ - ComputerName: &types.CustomizationFixedName{ - Name: strings.Split(vm.name, ".")[0], - }, - ProductId: vm.windowsOptionalConfig.productKey, - FullName: "terraform", - OrgName: "terraform", - } - - if vm.windowsOptionalConfig.domainUserPassword != "" && vm.windowsOptionalConfig.domainUser != "" && vm.windowsOptionalConfig.domain != "" { - customIdentification.DomainAdminPassword = &types.CustomizationPassword{ - PlainText: true, - Value: vm.windowsOptionalConfig.domainUserPassword, - } - customIdentification.DomainAdmin = vm.windowsOptionalConfig.domainUser - customIdentification.JoinDomain = vm.windowsOptionalConfig.domain - } - - if vm.windowsOptionalConfig.adminPassword != "" { - guiUnattended.Password = &types.CustomizationPassword{ - PlainText: true, - Value: vm.windowsOptionalConfig.adminPassword, - } - } - - identity_options = &types.CustomizationSysprep{ - GuiUnattended: guiUnattended, - Identification: customIdentification, - UserData: userData, - } - } else { - identity_options = &types.CustomizationLinuxPrep{ - HostName: &types.CustomizationFixedName{ - Name: strings.Split(vm.name, ".")[0], - }, - Domain: vm.domain, - TimeZone: vm.timeZone, - HwClockUTC: types.NewBool(true), - } - } - - // create CustomizationSpec - customSpec := types.CustomizationSpec{ - Identity: identity_options, - GlobalIPSettings: types.CustomizationGlobalIPSettings{ - DnsSuffixList: vm.dnsSuffixes, - DnsServerList: vm.dnsServers, - }, - NicSettingMap: networkConfigs, - } - log.Printf("[DEBUG] custom spec: %v", customSpec) - - log.Printf("[DEBUG] VM customization starting") - taskb, err := newVM.Customize(context.TODO(), customSpec) - if err != nil { - return err - } - _, err = taskb.WaitForResult(context.TODO(), nil) - if err != nil { - return err - } - log.Printf("[DEBUG] VM customization finished") - } - - if vm.hasBootableVmdk || vm.template != "" { - t, err := newVM.PowerOn(context.TODO()) - if err != nil { - return err - } - _, err = 
t.WaitForResult(context.TODO(), nil) - if err != nil { - return err - } - err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn) - if err != nil { - return err - } - } - return nil -} diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate.go deleted file mode 100644 index c33a550b7..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate.go +++ /dev/null @@ -1,62 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/terraform" -) - -func resourceVSphereVirtualMachineMigrateState( - v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if is.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Compute Instance State v0; migrating to v1") - is, err := migrateVSphereVirtualMachineStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateVSphereVirtualMachineStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { - if is.Empty() || is.Attributes == nil { - log.Println("[DEBUG] Empty VSphere Virtual Machine State; nothing to migrate.") - return is, nil - } - - log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - if is.Attributes["skip_customization"] == "" { - is.Attributes["skip_customization"] = "false" - } - - if is.Attributes["enable_disk_uuid"] == "" { - is.Attributes["enable_disk_uuid"] = "false" - } - - for k, _ := range is.Attributes { - if strings.HasPrefix(k, "disk.") && strings.HasSuffix(k, ".size") { - diskParts := strings.Split(k, ".") - if len(diskParts) != 3 { - continue - } - s := strings.Join([]string{diskParts[0], diskParts[1], "controller_type"}, ".") - if _, ok := 
is.Attributes[s]; !ok { - is.Attributes[s] = "scsi" - } - } - } - - log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate_test.go deleted file mode 100644 index e2c5a90d3..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_migrate_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package vsphere - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestVSphereVirtualMachineMigrateState(t *testing.T) { - cases := map[string]struct { - StateVersion int - Attributes map[string]string - Expected map[string]string - Meta interface{} - }{ - "skip_customization before 0.6.16": { - StateVersion: 0, - Attributes: map[string]string{}, - Expected: map[string]string{ - "skip_customization": "false", - }, - }, - "enable_disk_uuid before 0.6.16": { - StateVersion: 0, - Attributes: map[string]string{}, - Expected: map[string]string{ - "enable_disk_uuid": "false", - }, - }, - "disk controller_type": { - StateVersion: 0, - Attributes: map[string]string{ - "disk.1234.size": "0", - "disk.5678.size": "0", - "disk.9999.size": "0", - "disk.9999.controller_type": "ide", - }, - Expected: map[string]string{ - "disk.1234.size": "0", - "disk.1234.controller_type": "scsi", - "disk.5678.size": "0", - "disk.5678.controller_type": "scsi", - "disk.9999.size": "0", - "disk.9999.controller_type": "ide", - }, - }, - } - - for tn, tc := range cases { - is := &terraform.InstanceState{ - ID: "i-abc123", - Attributes: tc.Attributes, - } - is, err := resourceVSphereVirtualMachineMigrateState( - tc.StateVersion, is, tc.Meta) - - if err != nil { - t.Fatalf("bad: %s, err: %#v", tn, err) - } - - for k, v := range tc.Expected { - if is.Attributes[k] != v { - t.Fatalf( - "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", - tn, k, v, k, is.Attributes[k], is.Attributes) 
- } - } - } -} - -func TestComputeInstanceMigrateState_empty(t *testing.T) { - var is *terraform.InstanceState - var meta interface{} - - // should handle nil - is, err := resourceVSphereVirtualMachineMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } - if is != nil { - t.Fatalf("expected nil instancestate, got: %#v", is) - } - - // should handle non-nil but empty - is = &terraform.InstanceState{} - is, err = resourceVSphereVirtualMachineMigrateState(0, is, meta) - - if err != nil { - t.Fatalf("err: %#v", err) - } -} diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go deleted file mode 100644 index 8f9ef96ae..000000000 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ /dev/null @@ -1,1565 +0,0 @@ -package vsphere - -import ( - "fmt" - "log" - "os" - "regexp" - "testing" - - "path/filepath" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/types" - "golang.org/x/net/context" -) - -/////// -// Various ENV vars are used to setup these tests. 
Look for `os.Getenv` -/////// - -// Base setup function to check that a template, and nic information is set -// TODO needs some TLC - determine exactly how we want to do this -func testBasicPreCheck(t *testing.T) { - - testAccPreCheck(t) - - if v := os.Getenv("VSPHERE_TEMPLATE"); v == "" { - t.Fatal("env variable VSPHERE_TEMPLATE must be set for acceptance tests") - } - - if v := os.Getenv("VSPHERE_IPV4_GATEWAY"); v == "" { - t.Fatal("env variable VSPHERE_IPV4_GATEWAY must be set for acceptance tests") - } - - if v := os.Getenv("VSPHERE_IPV4_ADDRESS"); v == "" { - t.Fatal("env variable VSPHERE_IPV4_ADDRESS must be set for acceptance tests") - } - - if v := os.Getenv("VSPHERE_NETWORK_LABEL"); v == "" { - t.Fatal("env variable VSPHERE_NETWORK_LABEL must be set for acceptance tests") - } -} - -//// -// Collects optional env vars used in the tests -//// -func setupBaseVars() (string, string) { - var locationOpt string - var datastoreOpt string - - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v) - } - if v := os.Getenv("VSPHERE_CLUSTER"); v != "" { - locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v) - } - if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" { - locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v) - } - if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { - datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v) - } - - return locationOpt, datastoreOpt -} - -//// -// Structs and funcs used with DHCP data template -//// -type TestDHCPBodyData struct { - template string - locationOpt string - datastoreOpt string - label string -} - -func (body TestDHCPBodyData) parseDHCPTemplateConfigWithTemplate(template string) string { - return fmt.Sprintf( - template, - body.locationOpt, - body.label, - body.datastoreOpt, - body.template, - ) - -} - -const testAccCheckVSphereTemplate_dhcp = ` -%s - vcpu = 2 - memory = 1024 - network_interface { - label = "%s" - } - disk { -%s - template = "%s" - } -} -` 
- -// replaces data in the above template -func (body TestDHCPBodyData) parseDHCPTemplateConfig() string { - return fmt.Sprintf( - testAccCheckVSphereTemplate_dhcp, - body.locationOpt, - body.label, - body.datastoreOpt, - body.template, - ) -} - -func (body TestDHCPBodyData) testSprintfDHCPTemplateBodySecondArgDynamic(template string, arg string) string { - return fmt.Sprintf( - template, - body.locationOpt, - arg, - body.label, - body.datastoreOpt, - body.template, - ) -} - -// returns variables that are used in DHCP tests -func setupTemplateFuncDHCPData() TestDHCPBodyData { - - locationOpt, datastoreOpt := setupBaseVars() - data := TestDHCPBodyData{ - template: os.Getenv("VSPHERE_TEMPLATE"), - label: os.Getenv("VSPHERE_NETWORK_LABEL_DHCP"), - locationOpt: locationOpt, - datastoreOpt: datastoreOpt, - } - // log.Printf("[DEBUG] basic vars= %v", data) - return data - -} - -//// -// Structs and funcs used with static ip data templates -//// -type TemplateBasicBodyVars struct { - locationOpt string - label string - ipv4IpAddress string - ipv4Prefix string - ipv4Gateway string - datastoreOpt string - template string -} - -// Takes a base template that has seven "%s" values in it, used by most fixed ip -// tests -func (body TemplateBasicBodyVars) testSprintfTemplateBody(template string) string { - - return fmt.Sprintf( - template, - body.locationOpt, - body.label, - body.ipv4IpAddress, - body.ipv4Prefix, - body.ipv4Gateway, - body.datastoreOpt, - body.template, - ) -} - -// setups variables used by fixed ip tests -func setupTemplateBasicBodyVars() TemplateBasicBodyVars { - - locationOpt, datastoreOpt := setupBaseVars() - prefix := os.Getenv("VSPHERE_IPV4_PREFIX") - if prefix == "" { - prefix = "24" - } - data := TemplateBasicBodyVars{ - template: os.Getenv("VSPHERE_TEMPLATE"), - ipv4Gateway: os.Getenv("VSPHERE_IPV4_GATEWAY"), - label: os.Getenv("VSPHERE_NETWORK_LABEL"), - ipv4IpAddress: os.Getenv("VSPHERE_IPV4_ADDRESS"), - ipv4Prefix: prefix, - locationOpt: locationOpt, 
- datastoreOpt: datastoreOpt, - } - // log.Printf("[DEBUG] basic vars= %v", data) - return data -} - -//// -// Basic data to create series of testing functions -//// -type TestFuncData struct { - vm virtualMachine - label string - vmName string - vmResource string - numDisks string - numCPU string - mem string -} - -// returns TestCheckFunc's that are used in many of our tests -// mem defaults to 1024 -// cpu defaults to 2 -// disks defatuls to 1 -// vmResource defaults to "terraform-test" -// vmName defaults to "vsphere_virtual_machine.foo -func (test TestFuncData) testCheckFuncBasic() ( - resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc, - resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc) { - //log.Printf("[DEBUG] data= %v", test) - mem := test.mem - if mem == "" { - mem = "1024" - } - cpu := test.numCPU - if cpu == "" { - cpu = "2" - } - disks := test.numDisks - if disks == "" { - disks = "1" - } - res := test.vmResource - if res == "" { - res = "terraform-test" - } - vmName := test.vmName - if vmName == "" { - vmName = "vsphere_virtual_machine.foo" - } - return testAccCheckVSphereVirtualMachineExists(vmName, &test.vm), - resource.TestCheckResourceAttr(vmName, "name", res), - resource.TestCheckResourceAttr(vmName, "vcpu", cpu), - resource.TestMatchResourceAttr(vmName, "uuid", regexp.MustCompile("[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")), - resource.TestCheckResourceAttr(vmName, "memory", mem), - resource.TestCheckResourceAttr(vmName, "disk.#", disks), - resource.TestCheckResourceAttr(vmName, "network_interface.#", "1"), - resource.TestCheckResourceAttr(vmName, "network_interface.0.label", test.label) -} - -const testAccCheckVSphereVirtualMachineConfig_really_basic = ` -resource "vsphere_virtual_machine" "foo" { - name = "terraform-test" -` + testAccTemplateBasicBodyWithEnd - -// WARNING this is one of the base templates. 
You change this and you will -// be impacting multiple tests -const testAccTemplateBasicBody = ` -%s - vcpu = 2 - memory = 1024 - network_interface { - label = "%s" - ipv4_address = "%s" - ipv4_prefix_length = %s - ipv4_gateway = "%s" - } - disk { -%s - template = "%s" - iops = 500 - } -` -const testAccTemplateBasicBodyWithEnd = testAccTemplateBasicBody + ` -}` - -func TestAccVSphereVirtualMachine_basic(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_really_basic) - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_really_basic) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testBasicPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: basic_vars.label}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_debug = ` -provider "vsphere" { - client_debug = true -} - -` + testAccCheckVSphereVirtualMachineConfig_really_basic - -func TestAccVSphereVirtualMachine_client_debug(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_debug) - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_debug) - log.Printf("[DEBUG] template config= %s", config) - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label}.testCheckFuncBasic() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testBasicPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, 
- Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - testAccCheckDebugExists(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity = ` -resource "vsphere_virtual_machine" "scsiCapacity" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "one" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "two" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "three" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "four" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "five" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "six" - } - disk { - size = 1 - controller_type = "scsi-paravirtual" - name = "seven" - } -} -` - -func TestAccVSphereVirtualMachine_diskSCSICapacity(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity) - - vmName := "vsphere_virtual_machine.scsiCapacity" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "8"}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, 
test_num_disk, test_num_of_nic, test_nic_label, - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_initTypeEager = ` -resource "vsphere_virtual_machine" "thickEagerZero" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - controller_type = "scsi" - name = "one" - } - disk { - size = 1 - controller_type = "ide" - type = "eager_zeroed" - name = "two" - } -} -` - -func TestAccVSphereVirtualMachine_diskInitTypeEager(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_initTypeEager) - - vmName := "vsphere_virtual_machine.thickEagerZero" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "3"}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_initTypeEager) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - // FIXME dynmically calculate the hashes - resource.TestCheckResourceAttr(vmName, "disk.294918912.type", "eager_zeroed"), - resource.TestCheckResourceAttr(vmName, "disk.294918912.controller_type", "ide"), - resource.TestCheckResourceAttr(vmName, "disk.1380467090.controller_type", "scsi"), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_initTypeLazy = ` -resource "vsphere_virtual_machine" "lazy" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - 
controller_type = "scsi" - name = "one" - } - disk { - size = 1 - controller_type = "ide" - type = "lazy" - name = "two" - } -} -` - -func TestAccVSphereVirtualMachine_diskInitTypeLazy(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_initTypeLazy) - - vmName := "vsphere_virtual_machine.lazy" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "3"}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_initTypeLazy) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - // FIXME dynmically calculate the hashes - resource.TestCheckResourceAttr(vmName, "disk.692719290.type", "lazy"), - resource.TestCheckResourceAttr(vmName, "disk.692719290.controller_type", "ide"), - resource.TestCheckResourceAttr(vmName, "disk.531766495.controller_type", "scsi"), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_dhcp = ` -resource "vsphere_virtual_machine" "bar" { - name = "terraform-test" -` - -func TestAccVSphereVirtualMachine_dhcp(t *testing.T) { - var vm virtualMachine - data := setupTemplateFuncDHCPData() - config := testAccCheckVSphereVirtualMachineConfig_dhcp + data.parseDHCPTemplateConfigWithTemplate(testAccCheckVSphereTemplate_dhcp) - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_dhcp+testAccCheckVSphereTemplate_dhcp) - log.Printf("[DEBUG] config= %s", 
config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.bar"}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_custom_configs = ` -resource "vsphere_virtual_machine" "car" { - name = "terraform-test-custom" - custom_configuration_parameters { - "foo" = "bar" - "car" = "ferrari" - "num" = 42 - } - enable_disk_uuid = true -` - -func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) { - - var vm virtualMachine - data := setupTemplateFuncDHCPData() - config := testAccCheckVSphereVirtualMachineConfig_custom_configs + data.parseDHCPTemplateConfigWithTemplate(testAccCheckVSphereTemplate_dhcp) - vmName := "vsphere_virtual_machine.car" - res := "terraform-test-custom" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, vmResource: res}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_custom_configs+testAccCheckVSphereTemplate_dhcp) - log.Printf("[DEBUG] config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - testAccCheckVSphereVirtualMachineExistsHasCustomConfig(vmName, &vm), - resource.TestCheckResourceAttr(vmName, "custom_configuration_parameters.foo", "bar"), - resource.TestCheckResourceAttr(vmName, 
"custom_configuration_parameters.car", "ferrari"), - resource.TestCheckResourceAttr(vmName, "custom_configuration_parameters.num", "42"), - resource.TestCheckResourceAttr(vmName, "enable_disk_uuid", "true"), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_createInFolder = ` -resource "vsphere_virtual_machine" "folder" { - name = "terraform-test-folder" - folder = "%s" -` - -func TestAccVSphereVirtualMachine_createInExistingFolder(t *testing.T) { - var vm virtualMachine - datacenter := os.Getenv("VSPHERE_DATACENTER") - - folder := "tf_test_cpureateInExistingFolder" - - data := setupTemplateFuncDHCPData() - config := fmt.Sprintf(testAccCheckVSphereVirtualMachineConfig_createInFolder, - folder, - ) + data.parseDHCPTemplateConfig() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_createInFolder) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testAccCheckVSphereVirtualMachineDestroy, - removeVSphereFolder(datacenter, folder, ""), - ), - Steps: []resource.TestStep{ - resource.TestStep{ - PreConfig: func() { createVSphereFolder(datacenter, folder) }, - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.folder", vmResource: "terraform-test-folder"}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_createWithFolder = ` -resource "vsphere_folder" "with_folder" { - path = "%s" -%s -} -resource "vsphere_virtual_machine" "with_folder" { - name = "terraform-test-with-folder" - folder = "${vsphere_folder.with_folder.path}" -` - -func TestAccVSphereVirtualMachine_createWithFolder(t *testing.T) { - var vm virtualMachine - var folderLocationOpt string - var f folder - - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - folderLocationOpt = 
fmt.Sprintf(" datacenter = \"%s\"\n", v) - } - - folder := "tf_test_cpureateWithFolder" - - data := setupTemplateFuncDHCPData() - vmName := "vsphere_virtual_machine.with_folder" - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, vmResource: "terraform-test-with-folder"}.testCheckFuncBasic() - - config := fmt.Sprintf(testAccCheckVSphereVirtualMachineConfig_createWithFolder, - folder, - folderLocationOpt, - ) + data.parseDHCPTemplateConfig() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_createWithFolder+testAccCheckVSphereTemplate_dhcp) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testAccCheckVSphereVirtualMachineDestroy, - testAccCheckVSphereFolderDestroy, - ), - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - testAccCheckVSphereFolderExists(vmName, &f), - resource.TestCheckResourceAttr(vmName, "folder", folder), - ), - }, - }, - }) -} - -const testAccCheckVsphereVirtualMachineConfig_cdrom = ` -resource "vsphere_virtual_machine" "with_cdrom" { - name = "terraform-test-with-cdrom" - cdrom { - datastore = "%s" - path = "%s" - } -` - -func TestAccVSphereVirtualMachine_createWithCdrom(t *testing.T) { - var vm virtualMachine - - // FIXME check that these exist - cdromDatastore := os.Getenv("VSPHERE_CDROM_DATASTORE") - cdromPath := os.Getenv("VSPHERE_CDROM_PATH") - vmName := "vsphere_virtual_machine.with_cdrom" - - data := setupTemplateFuncDHCPData() - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, 
vmName: vmName, vmResource: "terraform-test-with-cdrom"}.testCheckFuncBasic() - - config := fmt.Sprintf( - testAccCheckVsphereVirtualMachineConfig_cdrom, - cdromDatastore, - cdromPath, - ) + data.parseDHCPTemplateConfig() - - log.Printf("[DEBUG] template= %s", testAccCheckVsphereVirtualMachineConfig_cdrom) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - //resource.TestCheckResourceAttr( - // "vsphere_virtual_machine.with_cdrom", "disk.4088143748.template", template), - resource.TestCheckResourceAttr(vmName, "cdrom.#", "1"), - resource.TestCheckResourceAttr(vmName, "cdrom.0.datastore", cdromDatastore), - resource.TestCheckResourceAttr(vmName, "cdrom.0.path", cdromPath), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_withExistingVmdk = ` -resource "vsphere_virtual_machine" "with_existing_vmdk" { - name = "terraform-test-with-existing-vmdk" -%s - vcpu = 2 - memory = 1024 - network_interface { - label = "%s" - } - disk { -%s - vmdk = "%s" - bootable = true - } - disk { - size = 1 - iops = 500 - name = "one" - } -} -` - -func TestAccVSphereVirtualMachine_createWithExistingVmdk(t *testing.T) { - var vm virtualMachine - vmdk_path := os.Getenv("VSPHERE_VMDK_PATH") - - data := setupTemplateFuncDHCPData() - config := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_withExistingVmdk, - data.locationOpt, - data.label, - data.datastoreOpt, - vmdk_path, - ) - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_withExistingVmdk) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.with_existing_vmdk", - vmResource: "terraform-test-with-existing-vmdk", numDisks: "2"}.testCheckFuncBasic(), - //resource.TestCheckResourceAttr( - // "vsphere_virtual_machine.with_existing_vmdk", "disk.2393891804.vmdk", vmdk_path), - //resource.TestCheckResourceAttr( - // "vsphere_virtual_machine.with_existing_vmdk", "disk.2393891804.bootable", "true"), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_updateMemory = ` -resource "vsphere_virtual_machine" "bar" { - name = "terraform-test" -%s - vcpu = 2 - memory = %s - network_interface { - label = "%s" - } - disk { -%s - template = "%s" - } -} -` - -func TestAccVSphereVirtualMachine_updateMemory(t *testing.T) { - var vm virtualMachine - data := setupTemplateFuncDHCPData() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_updateMemory) - - config := data.testSprintfDHCPTemplateBodySecondArgDynamic(testAccCheckVSphereVirtualMachineConfig_updateMemory, "1024") - log.Printf("[DEBUG] template config= %s", config) - - configUpdate := data.testSprintfDHCPTemplateBodySecondArgDynamic(testAccCheckVSphereVirtualMachineConfig_updateMemory, "2048") - log.Printf("[DEBUG] template configUpdate= %s", configUpdate) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.bar"}.testCheckFuncBasic(), - ), - }, - resource.TestStep{ - Config: configUpdate, - Check: resource.ComposeTestCheckFunc( - 
TestFuncData{vm: vm, label: data.label, mem: "2048", vmName: "vsphere_virtual_machine.bar"}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_updateVcpu = ` -resource "vsphere_virtual_machine" "bar" { - name = "terraform-test" -%s - vcpu = %s - memory = 1024 - network_interface { - label = "%s" - } - disk { -%s - template = "%s" - } -} -` - -func TestAccVSphereVirtualMachine_updateVcpu(t *testing.T) { - var vm virtualMachine - data := setupTemplateFuncDHCPData() - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_updateVcpu) - - config := data.testSprintfDHCPTemplateBodySecondArgDynamic(testAccCheckVSphereVirtualMachineConfig_updateVcpu, "2") - log.Printf("[DEBUG] template config= %s", config) - - configUpdate := data.testSprintfDHCPTemplateBodySecondArgDynamic(testAccCheckVSphereVirtualMachineConfig_updateVcpu, "4") - log.Printf("[DEBUG] template configUpdate= %s", configUpdate) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.bar"}.testCheckFuncBasic(), - ), - }, - resource.TestStep{ - Config: configUpdate, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.bar", numCPU: "4"}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_ipv6 = ` -resource "vsphere_virtual_machine" "ipv6" { - name = "terraform-test-ipv6" -%s - vcpu = 2 - memory = 1024 - network_interface { - label = "%s" - %s - ipv6_address = "%s" - ipv6_prefix_length = 64 - ipv6_gateway = "%s" - } - disk { -%s - template = "%s" - iops = 500 - } - disk { - size = 1 - iops = 500 - name = "one" - } -} -` - -func 
TestAccVSphereVirtualMachine_ipv4Andipv6(t *testing.T) { - var vm virtualMachine - data := setupTemplateBasicBodyVars() - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_ipv6) - - vmName := "vsphere_virtual_machine.ipv6" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "2", vmResource: "terraform-test-ipv6"}.testCheckFuncBasic() - - // FIXME test for this or warn?? - ipv6Address := os.Getenv("VSPHERE_IPV6_ADDRESS") - ipv6Gateway := os.Getenv("VSPHERE_IPV6_GATEWAY") - - ipv4Settings := fmt.Sprintf(` - ipv4_address = "%s" - ipv4_prefix_length = %s - ipv4_gateway = "%s" - `, data.ipv4IpAddress, data.ipv4Prefix, data.ipv4Gateway) - - config := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_ipv6, - data.locationOpt, - data.label, - ipv4Settings, - ipv6Address, - ipv6Gateway, - data.datastoreOpt, - data.template, - ) - - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv4_address", data.ipv4IpAddress), - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv4_gateway", data.ipv4Gateway), - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_address", ipv6Address), - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_gateway", ipv6Gateway), - ), - }, - }, - }) -} - -func TestAccVSphereVirtualMachine_ipv6Only(t *testing.T) { - var vm virtualMachine - data := setupTemplateBasicBodyVars() - log.Printf("[DEBUG] template= %s", 
testAccCheckVSphereVirtualMachineConfig_ipv6) - - vmName := "vsphere_virtual_machine.ipv6" - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "2", vmResource: "terraform-test-ipv6"}.testCheckFuncBasic() - - // Checks for this will be handled when this code is merged with https://github.com/hashicorp/terraform/pull/7575. - ipv6Address := os.Getenv("VSPHERE_IPV6_ADDRESS") - ipv6Gateway := os.Getenv("VSPHERE_IPV6_GATEWAY") - - config := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_ipv6, - data.locationOpt, - data.label, - "", - ipv6Address, - ipv6Gateway, - data.datastoreOpt, - data.template, - ) - - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_address", ipv6Address), - resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_gateway", ipv6Gateway), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_updateAddDisks = ` -resource "vsphere_virtual_machine" "foo" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - name = "one" -%s - } - disk { - size = 1 - iops = 500 - name = "two" -%s - } - disk { - size = 1 - iops = 500 - name = "three" -%s - } -} -` -const testAccCheckVSphereVirtualMachineConfig_basic = ` -resource "vsphere_virtual_machine" "foo" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - name = "one" - } -} -` - -func TestAccVSphereVirtualMachine_updateDisks(t 
*testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config_basic := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_basic) - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_basic) - log.Printf("[DEBUG] template config= %s", config_basic) - - config_add := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_updateAddDisks, - basic_vars.locationOpt, - basic_vars.label, - basic_vars.ipv4IpAddress, - basic_vars.ipv4Prefix, - basic_vars.ipv4Gateway, - basic_vars.datastoreOpt, - basic_vars.template, - basic_vars.datastoreOpt, - basic_vars.datastoreOpt, - basic_vars.datastoreOpt, - ) - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_basic) - log.Printf("[DEBUG] template config= %s", config_add) - - config_del := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_really_basic) - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_really_basic) - log.Printf("[DEBUG] template config= %s", config_del) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config_basic, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: basic_vars.label, numDisks: "2"}.testCheckFuncBasic(), - ), - }, - resource.TestStep{ - Config: config_add, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: basic_vars.label, numDisks: "4"}.testCheckFuncBasic(), - ), - }, - resource.TestStep{ - Config: config_del, - Check: resource.ComposeTestCheckFunc( - TestFuncData{vm: vm, label: basic_vars.label, numDisks: "1"}.testCheckFuncBasic(), - ), - }, - }, - }) -} - -const testAccCheckVSphereVirtualMachineConfig_mac_address = ` -resource "vsphere_virtual_machine" "mac_address" { - name = "terraform-mac-address" -%s - vcpu = 2 - memory 
= 1024 - network_interface { - label = "%s" - mac_address = "%s" - } - disk { -%s - template = "%s" - } -} -` - -// VSPHERE_NETWORK_MAC_ADDRESS needs to be set to run TestAccVSphereVirtualMachine_mac_address -// use a basic NIC MAC address like 6:5c:89:2b:a0:64 -func testMacPreCheck(t *testing.T) { - - testBasicPreCheck(t) - - // TODO should start do parse values to ensure they are correct - // for instance - // func ParseMAC(s string) (hw HardwareAddr, err error) - if v := os.Getenv("VSPHERE_NETWORK_MAC_ADDRESS"); v == "" { - t.Fatal("env variable VSPHERE_NETWORK_MAC_ADDRESS must be set for this acceptance test") - } -} - -// test new mac address feature -func TestAccVSphereVirtualMachine_mac_address(t *testing.T) { - var vm virtualMachine - data := setupTemplateFuncDHCPData() - vmName := "vsphere_virtual_machine.mac_address" - - macAddress := os.Getenv("VSPHERE_NETWORK_MAC_ADDRESS") - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_mac_address) - config := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_mac_address, - data.locationOpt, - data.label, - macAddress, - data.datastoreOpt, - data.template, - ) - log.Printf("[DEBUG] template config= %s", config) - - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "1", vmResource: "terraform-mac-address"}.testCheckFuncBasic() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testMacPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - resource.TestCheckResourceAttr(vmName, "network_interface.0.mac_address", macAddress), - ), - }, - }, - }) -} - -func 
testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "vsphere_virtual_machine" { - continue - } - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - folder := dcFolders.VmFolder - if len(rs.Primary.Attributes["folder"]) > 0 { - si := object.NewSearchIndex(client.Client) - folderRef, err := si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", rs.Primary.Attributes["datacenter"], rs.Primary.Attributes["folder"])) - if err != nil { - return err - } else if folderRef != nil { - folder = folderRef.(*object.Folder) - } - } - - v, err := object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"]) - - if v != nil { - return fmt.Errorf("Record still exists") - } - } - - return nil -} - -func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - 
finder = finder.SetDatacenter(dc) - instance, err := finder.VirtualMachine(context.TODO(), rs.Primary.Attributes["name"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - var mvm mo.VirtualMachine - - collector := property.DefaultCollector(client.Client) - - if err := collector.RetrieveOne(context.TODO(), instance.Reference(), []string{"config.extraConfig"}, &mvm); err != nil { - return fmt.Errorf("error %s", err) - } - - var configMap = make(map[string]types.AnyType) - if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 { - for _, v := range mvm.Config.ExtraConfig { - value := v.GetOptionValue() - configMap[value.Key] = value.Value - } - } else { - return fmt.Errorf("error no ExtraConfig") - } - - if configMap["foo"] == nil { - return fmt.Errorf("error no ExtraConfig for 'foo'") - } - - if configMap["foo"] != "bar" { - return fmt.Errorf("error ExtraConfig 'foo' != bar") - } - - if configMap["car"] == nil { - return fmt.Errorf("error no ExtraConfig for 'car'") - } - - if configMap["car"] != "ferrari" { - return fmt.Errorf("error ExtraConfig 'car' != ferrari") - } - - if configMap["num"] == nil { - return fmt.Errorf("error no ExtraConfig for 'num'") - } - - // todo this should be an int, getting back a string - if configMap["num"] != "42" { - return fmt.Errorf("error ExtraConfig 'num' != 42") - } - *vm = virtualMachine{ - name: rs.Primary.ID, - } - - return nil - } -} - -func testAccCheckDebugExists() resource.TestCheckFunc { - return func(s *terraform.State) error { - if _, err := os.Stat(filepath.Join(os.Getenv("HOME"), ".govmomi")); os.IsNotExist(err) { - return fmt.Errorf("Debug logs not found") - } - - return nil - } - -} -func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - if n == "" { - return fmt.Errorf("No vm name passed in") - } - if vm == nil { - return fmt.Errorf("No vm obj passed in") - } - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"]) - if err != nil { - return fmt.Errorf("error %s", err) - } - - dcFolders, err := dc.Folders(context.TODO()) - if err != nil { - return fmt.Errorf("error %s", err) - } - - folder := dcFolders.VmFolder - if len(rs.Primary.Attributes["folder"]) > 0 { - si := object.NewSearchIndex(client.Client) - folderRef, err := si.FindByInventoryPath( - context.TODO(), fmt.Sprintf("%v/vm/%v", rs.Primary.Attributes["datacenter"], rs.Primary.Attributes["folder"])) - if err != nil { - return err - } else if folderRef != nil { - folder = folderRef.(*object.Folder) - } - } - - _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"]) - - *vm = virtualMachine{ - name: rs.Primary.ID, - } - - return nil - } -} - -const testAccCheckVSphereVirtualMachineConfig_keepOnRemove = ` -resource "vsphere_virtual_machine" "keep_disk" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - controller_type = "scsi" - name = "one" - keep_on_remove = true - } -} -` - -func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_keepOnRemove) - var datastore string - if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { - datastore = v - } - var datacenter string - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - datacenter = v - } - - vmName := "vsphere_virtual_machine.keep_disk" - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: 
basic_vars.label, vmName: vmName, numDisks: "2"}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_keepOnRemove) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - ), - }, - resource.TestStep{ - Config: " ", - Check: checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk", true, true), - }, - }, - }) -} - -const testAccVSphereVirtualMachine_DetachUnknownDisks = ` -resource "vsphere_virtual_machine" "detach_unkown_disks" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - detach_unknown_disks_on_delete = true - disk { - size = 1 - iops = 500 - controller_type = "scsi" - name = "one" - keep_on_remove = true - } - disk { - size = 2 - iops = 500 - controller_type = "scsi" - name = "two" - keep_on_remove = false - } - disk { - size = 3 - iops = 500 - controller_type = "scsi" - name = "three" - keep_on_remove = true - } -} -` - -func TestAccVSphereVirtualMachine_DetachUnknownDisks(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccVSphereVirtualMachine_DetachUnknownDisks) - var datastore string - if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { - datastore = v - } - var datacenter string - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - datacenter = v - } - - vmName := "vsphere_virtual_machine.detach_unkown_disks" - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "4"}.testCheckFuncBasic() - - 
log.Printf("[DEBUG] template= %s", testAccVSphereVirtualMachine_DetachUnknownDisks) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - ), - }, - resource.TestStep{ - PreConfig: func() { - createAndAttachDisk(t, "terraform-test", 1, datastore, "terraform-test/tf_custom_disk.vmdk", "lazy", "scsi", datacenter) - }, - Config: " ", - Check: resource.ComposeTestCheckFunc( - checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk", true, false), - checkForDisk(datacenter, datastore, "terraform-test", "two.vmdk", false, false), - checkForDisk(datacenter, datastore, "terraform-test", "three.vmdk", true, false), - checkForDisk(datacenter, datastore, "terraform-test", "tf_custom_disk.vmdk", true, true), - ), - }, - }, - }) -} - -func createAndAttachDisk(t *testing.T, vmName string, size int, datastore string, diskPath string, diskType string, adapterType string, datacenter string) { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := finder.Datacenter(context.TODO(), datacenter) - if err != nil { - log.Printf("[ERROR] finding Datacenter %s: %v", datacenter, err) - t.Fail() - return - } - finder = finder.SetDatacenter(dc) - ds, err := getDatastore(finder, datastore) - if err != nil { - log.Printf("[ERROR] getDatastore %s: %v", datastore, err) - t.Fail() - return - } - vm, err := finder.VirtualMachine(context.TODO(), vmName) - if err != nil { - log.Printf("[ERROR] finding VM %s: %v", vmName, err) - t.Fail() - return - } - err = addHardDisk(vm, int64(size), int64(0), diskType, ds, diskPath, adapterType) - if err != nil { - 
log.Printf("[ERROR] addHardDisk: %v", err) - t.Fail() - return - } -} - -func vmCleanup(dc *object.Datacenter, ds *object.Datastore, vmName string) error { - client := testAccProvider.Meta().(*govmomi.Client) - fileManager := object.NewFileManager(client.Client) - task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc) - if err != nil { - log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err) - return err - } - - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err) - return err - } - return nil -} - -func checkForDisk(datacenter string, datastore string, vmName string, path string, exists bool, cleanup bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := getDatacenter(client, datacenter) - if err != nil { - return err - } - finder.SetDatacenter(dc) - - ds, err := finder.Datastore(context.TODO(), datastore) - if err != nil { - log.Printf("[ERROR] checkForDisk - Couldn't find Datastore '%v': %v", datastore, err) - return err - } - - diskPath := vmName + "/" + path - - _, err = ds.Stat(context.TODO(), diskPath) - if err != nil && exists { - log.Printf("[ERROR] checkForDisk - Couldn't stat file '%v': %v", diskPath, err) - return err - } else if err == nil && !exists { - errorMessage := fmt.Sprintf("checkForDisk - disk %s still exists", diskPath) - err = vmCleanup(dc, ds, vmName) - if err != nil { - return fmt.Errorf("[ERROR] %s, cleanup also failed: %v", errorMessage, err) - } - return fmt.Errorf("[ERROR] %s", errorMessage) - } - - if !cleanup || !exists { - return nil - } - - err = vmCleanup(dc, ds, vmName) - if err != nil { - return fmt.Errorf("[ERROR] cleanup failed: %v", err) - } - - return nil - } -}